Linux Audio

Check our new training course

Embedded Linux training

Mar 10-20, 2025, special US time zones
Register
Loading...
v3.1
  1/*
  2 * drivers/misc/spear13xx_pcie_gadget.c
  3 *
  4 * Copyright (C) 2010 ST Microelectronics
  5 * Pratyush Anand<pratyush.anand@st.com>
  6 *
  7 * This file is licensed under the terms of the GNU General Public
  8 * License version 2. This program is licensed "as is" without any
  9 * warranty of any kind, whether express or implied.
 10 */
 11
 
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pci_regs.h>
#include <linux/configfs.h>
#include <mach/pcie.h>
#include <mach/misc_regs.h>
 25
 26#define IN0_MEM_SIZE	(200 * 1024 * 1024 - 1)
 27/* In current implementation address translation is done using IN0 only.
 28 * So IN1 start address and IN0 end address has been kept same
 29*/
 30#define IN1_MEM_SIZE	(0 * 1024 * 1024 - 1)
 31#define IN_IO_SIZE	(20 * 1024 * 1024 - 1)
 32#define IN_CFG0_SIZE	(12 * 1024 * 1024 - 1)
 33#define IN_CFG1_SIZE	(12 * 1024 * 1024 - 1)
 34#define IN_MSG_SIZE	(12 * 1024 * 1024 - 1)
 35/* Keep default BAR size as 4K*/
 36/* AORAM would be mapped by default*/
 37#define INBOUND_ADDR_MASK	(SPEAR13XX_SYSRAM1_SIZE - 1)
 38
 39#define INT_TYPE_NO_INT	0
 40#define INT_TYPE_INTX	1
 41#define INT_TYPE_MSI	2
/* Per-controller state for one PCIe gadget instance. */
struct spear_pcie_gadget_config {
	void __iomem *base;		/* bus address of DBI space (res1->start stored as a cookie in probe) */
	void __iomem *va_app_base;	/* mapped application (glue) registers */
	void __iomem *va_dbi_base;	/* mapped DBI (own config space) registers */
	char int_type[10];		/* last interrupt type stored via configfs: "INTA" or "MSI" */
	ulong requested_msi;		/* MSI count requested through configfs (clamped to 32) */
	ulong configured_msi;		/* MSI count the hardware actually granted */
	ulong bar0_size;		/* current BAR0 aperture size in bytes */
	ulong bar0_rw_offset;		/* word-aligned offset used by bar0_data accesses */
	void __iomem *va_bar0_address;	/* mapping of the memory BAR0 currently targets */
};
 53
/* configfs subsystem wrapper: one target allocated per probed controller. */
struct pcie_gadget_target {
	struct configfs_subsystem subsys;	/* must be first: to_target() container_of's on it */
	struct spear_pcie_gadget_config config;
};
 58
/* configfs attribute carrying typed show/store callbacks for the gadget config. */
struct pcie_gadget_target_attr {
	struct configfs_attribute	attr;
	/* format state into buf; returns byte count (sysfs semantics) */
	ssize_t		(*show)(struct spear_pcie_gadget_config *config,
						char *buf);
	/* parse buf and apply; returns count consumed or -errno */
	ssize_t		(*store)(struct spear_pcie_gadget_config *config,
						 const char *buf,
						 size_t count);
};
 67
 68static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
 69{
 70	/* Enable DBI access */
 71	writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID),
 72			&app_reg->slv_armisc);
 73	writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID),
 74			&app_reg->slv_awmisc);
 75
 76}
 77
 78static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
 79{
 80	/* disable DBI access */
 81	writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
 82			&app_reg->slv_armisc);
 83	writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
 84			&app_reg->slv_awmisc);
 85
 86}
 87
 88static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
 89		int where, int size, u32 *val)
 90{
 91	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
 92	ulong va_address;
 93
 94	/* Enable DBI access */
 95	enable_dbi_access(app_reg);
 96
 97	va_address = (ulong)config->va_dbi_base + (where & ~0x3);
 98
 99	*val = readl(va_address);
100
101	if (size == 1)
102		*val = (*val >> (8 * (where & 3))) & 0xff;
103	else if (size == 2)
104		*val = (*val >> (8 * (where & 3))) & 0xffff;
105
106	/* Disable DBI access */
107	disable_dbi_access(app_reg);
108}
109
110static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
111		int where, int size, u32 val)
112{
113	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
114	ulong va_address;
115
116	/* Enable DBI access */
117	enable_dbi_access(app_reg);
118
119	va_address = (ulong)config->va_dbi_base + (where & ~0x3);
120
121	if (size == 4)
122		writel(val, va_address);
123	else if (size == 2)
124		writew(val, va_address + (where & 2));
125	else if (size == 1)
126		writeb(val, va_address + (where & 3));
127
128	/* Disable DBI access */
129	disable_dbi_access(app_reg);
130}
131
132#define PCI_FIND_CAP_TTL	48
133
/*
 * Walk our own config-space capability list looking for @cap.
 * @pos: offset of the "next capability" pointer to start from.
 * @ttl: hop budget, decremented per iteration to bound the walk against
 *       malformed (looping) capability lists.
 * Returns the capability's config-space offset, or 0 if not found.
 */
static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
		u32 pos, int cap, int *ttl)
{
	u32 id;

	while ((*ttl)--) {
		spear_dbi_read_reg(config, pos, 1, &pos);
		if (pos < 0x40)
			/* pointers below the standard header end are invalid */
			break;
		pos &= ~3;	/* capability structures are dword aligned */
		spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			/* 0xff: no device / no data returned */
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}
153
154static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
155			u32 pos, int cap)
156{
157	int ttl = PCI_FIND_CAP_TTL;
158
159	return pci_find_own_next_cap_ttl(config, pos, cap, &ttl);
160}
161
162static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
163				u8 hdr_type)
164{
165	u32 status;
166
167	spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
168	if (!(status & PCI_STATUS_CAP_LIST))
169		return 0;
170
171	switch (hdr_type) {
172	case PCI_HEADER_TYPE_NORMAL:
173	case PCI_HEADER_TYPE_BRIDGE:
174		return PCI_CAPABILITY_LIST;
175	case PCI_HEADER_TYPE_CARDBUS:
176		return PCI_CB_CAPABILITY_LIST;
177	default:
178		return 0;
179	}
180
181	return 0;
182}
183
184/*
185 * Tell if a device supports a given PCI capability.
186 * Returns the address of the requested capability structure within the
187 * device's PCI configuration space or 0 in case the device does not
188 * support it. Possible values for @cap:
189 *
190 * %PCI_CAP_ID_PM	Power Management
191 * %PCI_CAP_ID_AGP	Accelerated Graphics Port
192 * %PCI_CAP_ID_VPD	Vital Product Data
193 * %PCI_CAP_ID_SLOTID	Slot Identification
194 * %PCI_CAP_ID_MSI	Message Signalled Interrupts
195 * %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
196 * %PCI_CAP_ID_PCIX	PCI-X
197 * %PCI_CAP_ID_EXP	PCI Express
198 */
199static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
200		int cap)
201{
202	u32 pos;
203	u32 hdr_type;
204
205	spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);
206
207	pos = pci_find_own_cap_start(config, hdr_type);
208	if (pos)
209		pos = pci_find_own_next_cap(config, pos, cap);
210
211	return pos;
212}
213
214static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
215{
216	return 0;
217}
218
219/*
220 * configfs interfaces show/store functions
221 */
222static ssize_t pcie_gadget_show_link(
223		struct spear_pcie_gadget_config *config,
224		char *buf)
225{
226	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
 
 
 
 
 
 
 
227
228	if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
229		return sprintf(buf, "UP");
230	else
231		return sprintf(buf, "DOWN");
232}
233
234static ssize_t pcie_gadget_store_link(
235		struct spear_pcie_gadget_config *config,
236		const char *buf, size_t count)
237{
238	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
239
240	if (sysfs_streq(buf, "UP"))
241		writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
242			&app_reg->app_ctrl_0);
243	else if (sysfs_streq(buf, "DOWN"))
244		writel(readl(&app_reg->app_ctrl_0)
245				& ~(1 << APP_LTSSM_ENABLE_ID),
246				&app_reg->app_ctrl_0);
247	else
248		return -EINVAL;
249	return count;
250}
251
/* Echo back the interrupt type last stored via the int_type attribute. */
static ssize_t pcie_gadget_show_int_type(
		struct spear_pcie_gadget_config *config,
		char *buf)
{
	return sprintf(buf, "%s", config->int_type);
}
258
259static ssize_t pcie_gadget_store_int_type(
260		struct spear_pcie_gadget_config *config,
261		const char *buf, size_t count)
262{
 
263	u32 cap, vec, flags;
264	ulong vector;
265
266	if (sysfs_streq(buf, "INTA"))
267		spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
268
269	else if (sysfs_streq(buf, "MSI")) {
270		vector = config->requested_msi;
271		vec = 0;
272		while (vector > 1) {
273			vector /= 2;
274			vec++;
275		}
276		spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
277		cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
278		spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
279		flags &= ~PCI_MSI_FLAGS_QMASK;
280		flags |= vec << 1;
281		spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
282	} else
283		return -EINVAL;
284
285	strcpy(config->int_type, buf);
286
287	return count;
288}
289
290static ssize_t pcie_gadget_show_no_of_msi(
291		struct spear_pcie_gadget_config *config,
292		char *buf)
293{
294	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
 
295	u32 cap, vec, flags;
296	ulong vector;
297
298	if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
299			!= (1 << CFG_MSI_EN_ID))
300		vector = 0;
301	else {
302		cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
303		spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
304		flags &= ~PCI_MSI_FLAGS_QSIZE;
305		vec = flags >> 4;
306		vector = 1;
307		while (vec--)
308			vector *= 2;
309	}
310	config->configured_msi = vector;
311
312	return sprintf(buf, "%lu", vector);
313}
314
315static ssize_t pcie_gadget_store_no_of_msi(
316		struct spear_pcie_gadget_config *config,
317		const char *buf, size_t count)
318{
319	if (strict_strtoul(buf, 0, &config->requested_msi))
320		return -EINVAL;
 
 
 
 
321	if (config->requested_msi > 32)
322		config->requested_msi = 32;
323
324	return count;
325}
326
327static ssize_t pcie_gadget_store_inta(
328		struct spear_pcie_gadget_config *config,
329		const char *buf, size_t count)
330{
331	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
332	ulong en;
 
333
334	if (strict_strtoul(buf, 0, &en))
335		return -EINVAL;
 
336
337	if (en)
338		writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
339				&app_reg->app_ctrl_0);
340	else
341		writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
342				&app_reg->app_ctrl_0);
343
344	return count;
345}
346
/*
 * Fire one MSI toward the host: program function number, traffic class
 * and vector into VEN_MSI_1, then pulse REQ_EN.  The vector must be
 * below the count the host enabled (see show_no_of_msi).
 */
static ssize_t pcie_gadget_store_send_msi(
		struct spear_pcie_gadget_config *config,
		const char *buf, size_t count)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong vector;
	u32 ven_msi;

	if (strict_strtoul(buf, 0, &vector))
		return -EINVAL;

	/* no_of_msi must have been read (and non-zero) first */
	if (!config->configured_msi)
		return -EINVAL;

	if (vector >= config->configured_msi)
		return -EINVAL;

	/* function number and traffic class are fixed at 0 */
	ven_msi = readl(&app_reg->ven_msi_1);
	ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
	ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
	ven_msi &= ~VEN_MSI_TC_MASK;
	ven_msi |= 0 << VEN_MSI_TC_ID;
	ven_msi &= ~VEN_MSI_VECTOR_MASK;
	ven_msi |= vector << VEN_MSI_VECTOR_ID;

	/* generating interrupt for msi vector */
	ven_msi |= VEN_MSI_REQ_EN;
	writel(ven_msi, &app_reg->ven_msi_1);
	/* NOTE(review): 1us hold time presumably satisfies the core's
	 * request-pulse requirement -- confirm against the PCIe core TRM */
	udelay(1);
	ven_msi &= ~VEN_MSI_REQ_EN;
	writel(ven_msi, &app_reg->ven_msi_1);

	return count;
}
381
382static ssize_t pcie_gadget_show_vendor_id(
383		struct spear_pcie_gadget_config *config,
384		char *buf)
385{
386	u32 id;
387
388	spear_dbi_read_reg(config, PCI_VENDOR_ID, 2, &id);
389
390	return sprintf(buf, "%x", id);
391}
392
393static ssize_t pcie_gadget_store_vendor_id(
394		struct spear_pcie_gadget_config *config,
395		const char *buf, size_t count)
396{
397	ulong id;
 
398
399	if (strict_strtoul(buf, 0, &id))
400		return -EINVAL;
 
401
402	spear_dbi_write_reg(config, PCI_VENDOR_ID, 2, id);
403
404	return count;
405}
406
407static ssize_t pcie_gadget_show_device_id(
408		struct spear_pcie_gadget_config *config,
409		char *buf)
410{
411	u32 id;
412
413	spear_dbi_read_reg(config, PCI_DEVICE_ID, 2, &id);
414
415	return sprintf(buf, "%x", id);
416}
417
418static ssize_t pcie_gadget_store_device_id(
419		struct spear_pcie_gadget_config *config,
420		const char *buf, size_t count)
421{
422	ulong id;
 
423
424	if (strict_strtoul(buf, 0, &id))
425		return -EINVAL;
 
426
427	spear_dbi_write_reg(config, PCI_DEVICE_ID, 2, id);
428
429	return count;
430}
431
/* Show the currently configured BAR0 aperture size (hex). */
static ssize_t pcie_gadget_show_bar0_size(
		struct spear_pcie_gadget_config *config,
		char *buf)
{
	return sprintf(buf, "%lx", config->bar0_size);
}
438
439static ssize_t pcie_gadget_store_bar0_size(
440		struct spear_pcie_gadget_config *config,
441		const char *buf, size_t count)
442{
 
443	ulong size;
444	u32 pos, pos1;
445	u32 no_of_bit = 0;
 
 
 
 
 
446
447	if (strict_strtoul(buf, 0, &size))
448		return -EINVAL;
449	/* min bar size is 256 */
450	if (size <= 0x100)
451		size = 0x100;
452	/* max bar size is 1MB*/
453	else if (size >= 0x100000)
454		size = 0x100000;
455	else {
456		pos = 0;
457		pos1 = 0;
458		while (pos < 21) {
459			pos = find_next_bit((ulong *)&size, 21, pos);
460			if (pos != 21)
461				pos1 = pos + 1;
462			pos++;
463			no_of_bit++;
464		}
465		if (no_of_bit == 2)
466			pos1--;
467
468		size = 1 << pos1;
469	}
470	config->bar0_size = size;
471	spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);
472
473	return count;
474}
475
476static ssize_t pcie_gadget_show_bar0_address(
477		struct spear_pcie_gadget_config *config,
478		char *buf)
479{
480	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
481
482	u32 address = readl(&app_reg->pim0_mem_addr_start);
483
484	return sprintf(buf, "%x", address);
485}
486
/*
 * Point BAR0 at a new system memory address: align it down to the BAR
 * size, remap it locally (so bar0_data works) and program PIM0.
 */
static ssize_t pcie_gadget_store_bar0_address(
		struct spear_pcie_gadget_config *config,
		const char *buf, size_t count)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong address;

	if (strict_strtoul(buf, 0, &address))
		return -EINVAL;

	/* align down; bar0_size is a power of two (see store_bar0_size) */
	address &= ~(config->bar0_size - 1);
	if (config->va_bar0_address)
		iounmap(config->va_bar0_address);
	/* NOTE(review): on ioremap failure va_bar0_address stays NULL and
	 * PIM0 keeps its old target; bar0_data then returns -ENOMEM */
	config->va_bar0_address = ioremap(address, config->bar0_size);
	if (!config->va_bar0_address)
		return -ENOMEM;

	writel(address, &app_reg->pim0_mem_addr_start);

	return count;
}
508
/* Show the offset used by bar0_data accesses (hex). */
static ssize_t pcie_gadget_show_bar0_rw_offset(
		struct spear_pcie_gadget_config *config,
		char *buf)
{
	return sprintf(buf, "%lx", config->bar0_rw_offset);
}
515
516static ssize_t pcie_gadget_store_bar0_rw_offset(
517		struct spear_pcie_gadget_config *config,
518		const char *buf, size_t count)
519{
520	ulong offset;
 
521
522	if (strict_strtoul(buf, 0, &offset))
523		return -EINVAL;
 
524
525	if (offset % 4)
526		return -EINVAL;
527
528	config->bar0_rw_offset = offset;
529
530	return count;
531}
532
/* Read one 32-bit word from BAR0 memory at bar0_rw_offset. */
static ssize_t pcie_gadget_show_bar0_data(
		struct spear_pcie_gadget_config *config,
		char *buf)
{
	ulong data;

	/* BAR0 must have been mapped via bar0_address first */
	if (!config->va_bar0_address)
		return -ENOMEM;

	data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);

	return sprintf(buf, "%lx", data);
}
546
/* Write one 32-bit word to BAR0 memory at bar0_rw_offset. */
static ssize_t pcie_gadget_store_bar0_data(
		struct spear_pcie_gadget_config *config,
		const char *buf, size_t count)
{
	ulong data;

	if (strict_strtoul(buf, 0, &data))
		return -EINVAL;

	/* BAR0 must have been mapped via bar0_address first */
	if (!config->va_bar0_address)
		return -ENOMEM;

	writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);

	return count;
}
563
564/*
565 * Attribute definitions.
566 */
567
/* Read-only attribute: show handler only. */
#define PCIE_GADGET_TARGET_ATTR_RO(_name)				\
static struct pcie_gadget_target_attr pcie_gadget_target_##_name =	\
	__CONFIGFS_ATTR(_name, S_IRUGO, pcie_gadget_show_##_name, NULL)

/* Write-only attribute: store handler only. */
#define PCIE_GADGET_TARGET_ATTR_WO(_name)				\
static struct pcie_gadget_target_attr pcie_gadget_target_##_name =	\
	__CONFIGFS_ATTR(_name, S_IWUSR, NULL, pcie_gadget_store_##_name)

/* Read-write attribute: both handlers. */
#define PCIE_GADGET_TARGET_ATTR_RW(_name)				\
static struct pcie_gadget_target_attr pcie_gadget_target_##_name =	\
	__CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, pcie_gadget_show_##_name, \
			pcie_gadget_store_##_name)
/* Instantiate one pcie_gadget_target_<name> per configfs file. */
PCIE_GADGET_TARGET_ATTR_RW(link);
PCIE_GADGET_TARGET_ATTR_RW(int_type);
PCIE_GADGET_TARGET_ATTR_RW(no_of_msi);
PCIE_GADGET_TARGET_ATTR_WO(inta);
PCIE_GADGET_TARGET_ATTR_WO(send_msi);
PCIE_GADGET_TARGET_ATTR_RW(vendor_id);
PCIE_GADGET_TARGET_ATTR_RW(device_id);
PCIE_GADGET_TARGET_ATTR_RW(bar0_size);
PCIE_GADGET_TARGET_ATTR_RW(bar0_address);
PCIE_GADGET_TARGET_ATTR_RW(bar0_rw_offset);
PCIE_GADGET_TARGET_ATTR_RW(bar0_data);
591
/* All attributes exposed under the pcie_gadget configfs directory. */
static struct configfs_attribute *pcie_gadget_target_attrs[] = {
	&pcie_gadget_target_link.attr,
	&pcie_gadget_target_int_type.attr,
	&pcie_gadget_target_no_of_msi.attr,
	&pcie_gadget_target_inta.attr,
	&pcie_gadget_target_send_msi.attr,
	&pcie_gadget_target_vendor_id.attr,
	&pcie_gadget_target_device_id.attr,
	&pcie_gadget_target_bar0_size.attr,
	&pcie_gadget_target_bar0_address.attr,
	&pcie_gadget_target_bar0_rw_offset.attr,
	&pcie_gadget_target_bar0_data.attr,
	NULL,	/* sentinel */
};
606
607static struct pcie_gadget_target *to_target(struct config_item *item)
608{
609	return item ?
610		container_of(to_configfs_subsystem(to_config_group(item)),
611				struct pcie_gadget_target, subsys) : NULL;
612}
613
614/*
615 * Item operations and type for pcie_gadget_target.
616 */
617
618static ssize_t pcie_gadget_target_attr_show(struct config_item *item,
619					   struct configfs_attribute *attr,
620					   char *buf)
621{
622	ssize_t ret = -EINVAL;
623	struct pcie_gadget_target *target = to_target(item);
624	struct pcie_gadget_target_attr *t_attr =
625		container_of(attr, struct pcie_gadget_target_attr, attr);
626
627	if (t_attr->show)
628		ret = t_attr->show(&target->config, buf);
629	return ret;
630}
631
632static ssize_t pcie_gadget_target_attr_store(struct config_item *item,
633					struct configfs_attribute *attr,
634					const char *buf,
635					size_t count)
636{
637	ssize_t ret = -EINVAL;
638	struct pcie_gadget_target *target = to_target(item);
639	struct pcie_gadget_target_attr *t_attr =
640		container_of(attr, struct pcie_gadget_target_attr, attr);
641
642	if (t_attr->store)
643		ret = t_attr->store(&target->config, buf, count);
644	return ret;
645}
646
/* configfs item ops: route show/store through the dispatchers above. */
static struct configfs_item_operations pcie_gadget_target_item_ops = {
	.show_attribute		= pcie_gadget_target_attr_show,
	.store_attribute	= pcie_gadget_target_attr_store,
};

/* Item type for the gadget's configfs group. */
static struct config_item_type pcie_gadget_target_type = {
	.ct_attrs		= pcie_gadget_target_attrs,
	.ct_item_ops		= &pcie_gadget_target_item_ops,
	.ct_owner		= THIS_MODULE,
};
657
/*
 * Program the PCIe core for endpoint operation: carve the outbound AXI
 * window into MEM0/MEM1/IO/CFG0/CFG1/MSG regions, map SYSRAM1 behind
 * BAR0 for inbound traffic, enable register translation and default the
 * interrupt type to INTA.
 *
 * NOTE(review): the in*_addr_* fields are read back by dereferencing the
 * __iomem struct directly instead of via readl(); presumably works on
 * this platform, but readl() would be the portable accessor -- confirm.
 */
static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;

	/*setup registers for outbound translation */

	writel(config->base, &app_reg->in0_mem_addr_start);
	writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
			&app_reg->in0_mem_addr_limit);
	/* IN1 is unused: it starts right after IN0 with zero effective size */
	writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
	writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
			&app_reg->in1_mem_addr_limit);
	writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
	writel(app_reg->in_io_addr_start + IN_IO_SIZE,
			&app_reg->in_io_addr_limit);
	writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
	writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
			&app_reg->in_cfg0_addr_limit);
	writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
	writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
			&app_reg->in_cfg1_addr_limit);
	writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
	writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
			&app_reg->in_msg_addr_limit);

	/* outbound translation sources mirror the window start addresses */
	writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
	writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
	writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);

	/*setup registers for inbound translation */

	/* Keep AORAM mapped at BAR0 as default */
	config->bar0_size = INBOUND_ADDR_MASK + 1;
	spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
	/* 0xC: memory BAR, prefetchable, 32-bit (low nibble of BAR0) */
	spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
	/* NOTE(review): ioremap result is not checked here; bar0_data
	 * handlers do check for NULL before use */
	config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
			config->bar0_size);

	writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
	writel(0, &app_reg->pim1_mem_addr_start);
	writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);

	/* NOTE(review): pim_io_addr_start is written twice; the second
	 * write was presumably meant for another register -- confirm */
	writel(0x0, &app_reg->pim_io_addr_start);
	writel(0x0, &app_reg->pim_io_addr_start);
	writel(0x0, &app_reg->pim_rom_addr_start);

	writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
			| ((u32)1 << REG_TRANSLATION_ENABLE),
			&app_reg->app_ctrl_0);
	/* disable all rx interrupts */
	writel(0, &app_reg->int_mask);

	/* Select INTA as default*/
	spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
}
713
714static int __devinit spear_pcie_gadget_probe(struct platform_device *pdev)
715{
716	struct resource *res0, *res1;
717	unsigned int status = 0;
718	int irq;
719	struct clk *clk;
720	static struct pcie_gadget_target *target;
721	struct spear_pcie_gadget_config *config;
722	struct config_item		*cg_item;
723	struct configfs_subsystem *subsys;
724
725	/* get resource for application registers*/
726
727	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
728	if (!res0) {
729		dev_err(&pdev->dev, "no resource defined\n");
730		return -EBUSY;
731	}
732	if (!request_mem_region(res0->start, resource_size(res0),
733				pdev->name)) {
734		dev_err(&pdev->dev, "pcie gadget region already	claimed\n");
735		return -EBUSY;
736	}
737	/* get resource for dbi registers*/
738
739	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
740	if (!res1) {
741		dev_err(&pdev->dev, "no resource defined\n");
742		goto err_rel_res0;
743	}
744	if (!request_mem_region(res1->start, resource_size(res1),
745				pdev->name)) {
746		dev_err(&pdev->dev, "pcie gadget region already	claimed\n");
747		goto err_rel_res0;
748	}
749
750	target = kzalloc(sizeof(*target), GFP_KERNEL);
751	if (!target) {
752		dev_err(&pdev->dev, "out of memory\n");
753		status = -ENOMEM;
754		goto err_rel_res;
755	}
756
757	cg_item = &target->subsys.su_group.cg_item;
758	sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
759	cg_item->ci_type	= &pcie_gadget_target_type;
760	config = &target->config;
761	config->va_app_base = (void __iomem *)ioremap(res0->start,
762			resource_size(res0));
763	if (!config->va_app_base) {
 
 
764		dev_err(&pdev->dev, "ioremap fail\n");
765		status = -ENOMEM;
766		goto err_kzalloc;
767	}
768
 
 
769	config->base = (void __iomem *)res1->start;
770
771	config->va_dbi_base = (void __iomem *)ioremap(res1->start,
772			resource_size(res1));
773	if (!config->va_dbi_base) {
774		dev_err(&pdev->dev, "ioremap fail\n");
775		status = -ENOMEM;
776		goto err_iounmap_app;
777	}
778
779	dev_set_drvdata(&pdev->dev, target);
780
781	irq = platform_get_irq(pdev, 0);
782	if (irq < 0) {
783		dev_err(&pdev->dev, "no update irq?\n");
784		status = irq;
785		goto err_iounmap;
786	}
787
788	status = request_irq(irq, spear_pcie_gadget_irq, 0, pdev->name, NULL);
 
789	if (status) {
790		dev_err(&pdev->dev,
791			"pcie gadget interrupt IRQ%d already claimed\n", irq);
792		goto err_iounmap;
793	}
794
795	/* Register configfs hooks */
796	subsys = &target->subsys;
797	config_group_init(&subsys->su_group);
798	mutex_init(&subsys->su_mutex);
799	status = configfs_register_subsystem(subsys);
800	if (status)
801		goto err_irq;
802
803	/*
804	 * init basic pcie application registers
805	 * do not enable clock if it is PCIE0.Ideally , all controller should
806	 * have been independent from others with respect to clock. But PCIE1
807	 * and 2 depends on PCIE0.So PCIE0 clk is provided during board init.
808	 */
809	if (pdev->id == 1) {
810		/*
811		 * Ideally CFG Clock should have been also enabled here. But
812		 * it is done currently during board init routne
813		 */
814		clk = clk_get_sys("pcie1", NULL);
815		if (IS_ERR(clk)) {
816			pr_err("%s:couldn't get clk for pcie1\n", __func__);
817			goto err_irq;
818		}
819		if (clk_enable(clk)) {
 
820			pr_err("%s:couldn't enable clk for pcie1\n", __func__);
821			goto err_irq;
822		}
823	} else if (pdev->id == 2) {
824		/*
825		 * Ideally CFG Clock should have been also enabled here. But
826		 * it is done currently during board init routne
827		 */
828		clk = clk_get_sys("pcie2", NULL);
829		if (IS_ERR(clk)) {
830			pr_err("%s:couldn't get clk for pcie2\n", __func__);
831			goto err_irq;
832		}
833		if (clk_enable(clk)) {
 
834			pr_err("%s:couldn't enable clk for pcie2\n", __func__);
835			goto err_irq;
836		}
837	}
838	spear13xx_pcie_device_init(config);
839
840	return 0;
841err_irq:
842	free_irq(irq, NULL);
843err_iounmap:
844	iounmap(config->va_dbi_base);
845err_iounmap_app:
846	iounmap(config->va_app_base);
847err_kzalloc:
848	kfree(target);
849err_rel_res:
850	release_mem_region(res1->start, resource_size(res1));
851err_rel_res0:
852	release_mem_region(res0->start, resource_size(res0));
853	return status;
854}
855
/*
 * Remove: tear down everything probe set up.
 * NOTE(review): configfs_unregister_subsystem() runs after the register
 * regions are released; unwinding in strict reverse-probe order would be
 * safer -- confirm no configfs access can race this window.
 */
static int __devexit spear_pcie_gadget_remove(struct platform_device *pdev)
{
	struct resource *res0, *res1;
	static struct pcie_gadget_target *target;
	struct spear_pcie_gadget_config *config;
	int irq;

	/* re-fetch the resources probe claimed so they can be released */
	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	irq = platform_get_irq(pdev, 0);
	target = dev_get_drvdata(&pdev->dev);
	config = &target->config;

	free_irq(irq, NULL);
	iounmap(config->va_dbi_base);
	iounmap(config->va_app_base);
	release_mem_region(res1->start, resource_size(res1));
	release_mem_region(res0->start, resource_size(res0));
	configfs_unregister_subsystem(&target->subsys);
	kfree(target);

	return 0;
}
879
/* Nothing to quiesce at shutdown; stub keeps the driver ops complete. */
static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
{
}
883
/* Platform driver glue; binds to devices named "pcie-gadget-spear". */
static struct platform_driver spear_pcie_gadget_driver = {
	.probe = spear_pcie_gadget_probe,
	.remove = spear_pcie_gadget_remove,
	.shutdown = spear_pcie_gadget_shutdown,
	.driver = {
		.name = "pcie-gadget-spear",
		.bus = &platform_bus_type
	},
};
893
/* Module entry: register the platform driver.
 * (init/exit pair could be collapsed with module_platform_driver().) */
static int __init spear_pcie_gadget_init(void)
{
	return platform_driver_register(&spear_pcie_gadget_driver);
}
module_init(spear_pcie_gadget_init);
899
/* Module exit: unregister the platform driver. */
static void __exit spear_pcie_gadget_exit(void)
{
	platform_driver_unregister(&spear_pcie_gadget_driver);
}
module_exit(spear_pcie_gadget_exit);
905
906MODULE_ALIAS("pcie-gadget-spear");
907MODULE_AUTHOR("Pratyush Anand");
908MODULE_LICENSE("GPL");
v4.6
  1/*
  2 * drivers/misc/spear13xx_pcie_gadget.c
  3 *
  4 * Copyright (C) 2010 ST Microelectronics
  5 * Pratyush Anand<pratyush.anand@gmail.com>
  6 *
  7 * This file is licensed under the terms of the GNU General Public
  8 * License version 2. This program is licensed "as is" without any
  9 * warranty of any kind, whether express or implied.
 10 */
 11
 12#include <linux/device.h>
 13#include <linux/clk.h>
 14#include <linux/slab.h>
 15#include <linux/delay.h>
 16#include <linux/io.h>
 17#include <linux/interrupt.h>
 18#include <linux/irq.h>
 19#include <linux/kernel.h>
 20#include <linux/module.h>
 21#include <linux/platform_device.h>
 22#include <linux/pci_regs.h>
 23#include <linux/configfs.h>
 24#include <mach/pcie.h>
 25#include <mach/misc_regs.h>
 26
 27#define IN0_MEM_SIZE	(200 * 1024 * 1024 - 1)
 28/* In current implementation address translation is done using IN0 only.
 29 * So IN1 start address and IN0 end address has been kept same
 30*/
 31#define IN1_MEM_SIZE	(0 * 1024 * 1024 - 1)
 32#define IN_IO_SIZE	(20 * 1024 * 1024 - 1)
 33#define IN_CFG0_SIZE	(12 * 1024 * 1024 - 1)
 34#define IN_CFG1_SIZE	(12 * 1024 * 1024 - 1)
 35#define IN_MSG_SIZE	(12 * 1024 * 1024 - 1)
 36/* Keep default BAR size as 4K*/
 37/* AORAM would be mapped by default*/
 38#define INBOUND_ADDR_MASK	(SPEAR13XX_SYSRAM1_SIZE - 1)
 39
 40#define INT_TYPE_NO_INT	0
 41#define INT_TYPE_INTX	1
 42#define INT_TYPE_MSI	2
 43struct spear_pcie_gadget_config {
 44	void __iomem *base;
 45	void __iomem *va_app_base;
 46	void __iomem *va_dbi_base;
 47	char int_type[10];
 48	ulong requested_msi;
 49	ulong configured_msi;
 50	ulong bar0_size;
 51	ulong bar0_rw_offset;
 52	void __iomem *va_bar0_address;
 53};
 54
 55struct pcie_gadget_target {
 56	struct configfs_subsystem subsys;
 57	struct spear_pcie_gadget_config config;
 58};
 59
 60struct pcie_gadget_target_attr {
 61	struct configfs_attribute	attr;
 62	ssize_t		(*show)(struct spear_pcie_gadget_config *config,
 63						char *buf);
 64	ssize_t		(*store)(struct spear_pcie_gadget_config *config,
 65						 const char *buf,
 66						 size_t count);
 67};
 68
 69static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
 70{
 71	/* Enable DBI access */
 72	writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID),
 73			&app_reg->slv_armisc);
 74	writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID),
 75			&app_reg->slv_awmisc);
 76
 77}
 78
 79static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
 80{
 81	/* disable DBI access */
 82	writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
 83			&app_reg->slv_armisc);
 84	writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
 85			&app_reg->slv_awmisc);
 86
 87}
 88
 89static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
 90		int where, int size, u32 *val)
 91{
 92	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
 93	ulong va_address;
 94
 95	/* Enable DBI access */
 96	enable_dbi_access(app_reg);
 97
 98	va_address = (ulong)config->va_dbi_base + (where & ~0x3);
 99
100	*val = readl(va_address);
101
102	if (size == 1)
103		*val = (*val >> (8 * (where & 3))) & 0xff;
104	else if (size == 2)
105		*val = (*val >> (8 * (where & 3))) & 0xffff;
106
107	/* Disable DBI access */
108	disable_dbi_access(app_reg);
109}
110
111static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
112		int where, int size, u32 val)
113{
114	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
115	ulong va_address;
116
117	/* Enable DBI access */
118	enable_dbi_access(app_reg);
119
120	va_address = (ulong)config->va_dbi_base + (where & ~0x3);
121
122	if (size == 4)
123		writel(val, va_address);
124	else if (size == 2)
125		writew(val, va_address + (where & 2));
126	else if (size == 1)
127		writeb(val, va_address + (where & 3));
128
129	/* Disable DBI access */
130	disable_dbi_access(app_reg);
131}
132
133#define PCI_FIND_CAP_TTL	48
134
135static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
136		u32 pos, int cap, int *ttl)
137{
138	u32 id;
139
140	while ((*ttl)--) {
141		spear_dbi_read_reg(config, pos, 1, &pos);
142		if (pos < 0x40)
143			break;
144		pos &= ~3;
145		spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
146		if (id == 0xff)
147			break;
148		if (id == cap)
149			return pos;
150		pos += PCI_CAP_LIST_NEXT;
151	}
152	return 0;
153}
154
155static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
156			u32 pos, int cap)
157{
158	int ttl = PCI_FIND_CAP_TTL;
159
160	return pci_find_own_next_cap_ttl(config, pos, cap, &ttl);
161}
162
163static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
164				u8 hdr_type)
165{
166	u32 status;
167
168	spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
169	if (!(status & PCI_STATUS_CAP_LIST))
170		return 0;
171
172	switch (hdr_type) {
173	case PCI_HEADER_TYPE_NORMAL:
174	case PCI_HEADER_TYPE_BRIDGE:
175		return PCI_CAPABILITY_LIST;
176	case PCI_HEADER_TYPE_CARDBUS:
177		return PCI_CB_CAPABILITY_LIST;
178	default:
179		return 0;
180	}
181
182	return 0;
183}
184
185/*
186 * Tell if a device supports a given PCI capability.
187 * Returns the address of the requested capability structure within the
188 * device's PCI configuration space or 0 in case the device does not
189 * support it. Possible values for @cap:
190 *
191 * %PCI_CAP_ID_PM	Power Management
192 * %PCI_CAP_ID_AGP	Accelerated Graphics Port
193 * %PCI_CAP_ID_VPD	Vital Product Data
194 * %PCI_CAP_ID_SLOTID	Slot Identification
195 * %PCI_CAP_ID_MSI	Message Signalled Interrupts
196 * %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
197 * %PCI_CAP_ID_PCIX	PCI-X
198 * %PCI_CAP_ID_EXP	PCI Express
199 */
200static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
201		int cap)
202{
203	u32 pos;
204	u32 hdr_type;
205
206	spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);
207
208	pos = pci_find_own_cap_start(config, hdr_type);
209	if (pos)
210		pos = pci_find_own_next_cap(config, pos, cap);
211
212	return pos;
213}
214
215static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
216{
217	return 0;
218}
219
220/*
221 * configfs interfaces show/store functions
222 */
223
224static struct pcie_gadget_target *to_target(struct config_item *item)
 
225{
226	return item ?
227		container_of(to_configfs_subsystem(to_config_group(item)),
228				struct pcie_gadget_target, subsys) : NULL;
229}
230
231static ssize_t pcie_gadget_link_show(struct config_item *item, char *buf)
232{
233	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
234
235	if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
236		return sprintf(buf, "UP");
237	else
238		return sprintf(buf, "DOWN");
239}
240
241static ssize_t pcie_gadget_link_store(struct config_item *item,
 
242		const char *buf, size_t count)
243{
244	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
245
246	if (sysfs_streq(buf, "UP"))
247		writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
248			&app_reg->app_ctrl_0);
249	else if (sysfs_streq(buf, "DOWN"))
250		writel(readl(&app_reg->app_ctrl_0)
251				& ~(1 << APP_LTSSM_ENABLE_ID),
252				&app_reg->app_ctrl_0);
253	else
254		return -EINVAL;
255	return count;
256}
257
258static ssize_t pcie_gadget_int_type_show(struct config_item *item, char *buf)
 
 
259{
260	return sprintf(buf, "%s", to_target(item)->int_type);
261}
262
263static ssize_t pcie_gadget_int_type_store(struct config_item *item,
 
264		const char *buf, size_t count)
265{
266	struct spear_pcie_gadget_config *config = to_target(item)
267	u32 cap, vec, flags;
268	ulong vector;
269
270	if (sysfs_streq(buf, "INTA"))
271		spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
272
273	else if (sysfs_streq(buf, "MSI")) {
274		vector = config->requested_msi;
275		vec = 0;
276		while (vector > 1) {
277			vector /= 2;
278			vec++;
279		}
280		spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
281		cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
282		spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
283		flags &= ~PCI_MSI_FLAGS_QMASK;
284		flags |= vec << 1;
285		spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
286	} else
287		return -EINVAL;
288
289	strcpy(config->int_type, buf);
290
291	return count;
292}
293
294static ssize_t pcie_gadget_no_of_msi_show(struct config_item *item, char *buf)
 
 
295{
296	struct spear_pcie_gadget_config *config = to_target(item)
297	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
298	u32 cap, vec, flags;
299	ulong vector;
300
301	if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
302			!= (1 << CFG_MSI_EN_ID))
303		vector = 0;
304	else {
305		cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
306		spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
307		flags &= ~PCI_MSI_FLAGS_QSIZE;
308		vec = flags >> 4;
309		vector = 1;
310		while (vec--)
311			vector *= 2;
312	}
313	config->configured_msi = vector;
314
315	return sprintf(buf, "%lu", vector);
316}
317
318static ssize_t pcie_gadget_no_of_msi_store(struct config_item *item,
 
319		const char *buf, size_t count)
320{
321	int ret;
322
323	ret = kstrtoul(buf, 0, &to_target(item)->requested_msi);
324	if (ret)
325		return ret;
326
327	if (config->requested_msi > 32)
328		config->requested_msi = 32;
329
330	return count;
331}
332
333static ssize_t pcie_gadget_inta_store(struct config_item *item,
 
334		const char *buf, size_t count)
335{
336	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
337	ulong en;
338	int ret;
339
340	ret = kstrtoul(buf, 0, &en);
341	if (ret)
342		return ret;
343
344	if (en)
345		writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
346				&app_reg->app_ctrl_0);
347	else
348		writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
349				&app_reg->app_ctrl_0);
350
351	return count;
352}
353
354static ssize_t pcie_gadget_send_msi_store(struct config_item *item,
 
355		const char *buf, size_t count)
356{
357	struct spear_pcie_gadget_config *config = to_target(item)
358	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
359	ulong vector;
360	u32 ven_msi;
361	int ret;
362
363	ret = kstrtoul(buf, 0, &vector);
364	if (ret)
365		return ret;
366
367	if (!config->configured_msi)
368		return -EINVAL;
369
370	if (vector >= config->configured_msi)
371		return -EINVAL;
372
373	ven_msi = readl(&app_reg->ven_msi_1);
374	ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
375	ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
376	ven_msi &= ~VEN_MSI_TC_MASK;
377	ven_msi |= 0 << VEN_MSI_TC_ID;
378	ven_msi &= ~VEN_MSI_VECTOR_MASK;
379	ven_msi |= vector << VEN_MSI_VECTOR_ID;
380
381	/* generating interrupt for msi vector */
382	ven_msi |= VEN_MSI_REQ_EN;
383	writel(ven_msi, &app_reg->ven_msi_1);
384	udelay(1);
385	ven_msi &= ~VEN_MSI_REQ_EN;
386	writel(ven_msi, &app_reg->ven_msi_1);
387
388	return count;
389}
390
391static ssize_t pcie_gadget_vendor_id_show(struct config_item *item, char *buf)
 
 
392{
393	u32 id;
394
395	spear_dbi_read_reg(to_target(item), PCI_VENDOR_ID, 2, &id);
396
397	return sprintf(buf, "%x", id);
398}
399
400static ssize_t pcie_gadget_vendor_id_store(struct config_item *item,
 
401		const char *buf, size_t count)
402{
403	ulong id;
404	int ret;
405
406	ret = kstrtoul(buf, 0, &id);
407	if (ret)
408		return ret;
409
410	spear_dbi_write_reg(to_target(item), PCI_VENDOR_ID, 2, id);
411
412	return count;
413}
414
415static ssize_t pcie_gadget_device_id_show(struct config_item *item, char *buf)
 
 
416{
417	u32 id;
418
419	spear_dbi_read_reg(to_target(item), PCI_DEVICE_ID, 2, &id);
420
421	return sprintf(buf, "%x", id);
422}
423
424static ssize_t pcie_gadget_device_id_store(struct config_item *item,
 
425		const char *buf, size_t count)
426{
427	ulong id;
428	int ret;
429
430	ret = kstrtoul(buf, 0, &id);
431	if (ret)
432		return ret;
433
434	spear_dbi_write_reg(to_target(item), PCI_DEVICE_ID, 2, id);
435
436	return count;
437}
438
439static ssize_t pcie_gadget_bar0_size_show(struct config_item *item, char *buf)
 
 
440{
441	return sprintf(buf, "%lx", to_target(item)->bar0_size);
442}
443
444static ssize_t pcie_gadget_bar0_size_store(struct config_item *item,
 
445		const char *buf, size_t count)
446{
447	struct spear_pcie_gadget_config *config = to_target(item)
448	ulong size;
449	u32 pos, pos1;
450	u32 no_of_bit = 0;
451	int ret;
452
453	ret = kstrtoul(buf, 0, &size);
454	if (ret)
455		return ret;
456
 
 
457	/* min bar size is 256 */
458	if (size <= 0x100)
459		size = 0x100;
460	/* max bar size is 1MB*/
461	else if (size >= 0x100000)
462		size = 0x100000;
463	else {
464		pos = 0;
465		pos1 = 0;
466		while (pos < 21) {
467			pos = find_next_bit((ulong *)&size, 21, pos);
468			if (pos != 21)
469				pos1 = pos + 1;
470			pos++;
471			no_of_bit++;
472		}
473		if (no_of_bit == 2)
474			pos1--;
475
476		size = 1 << pos1;
477	}
478	config->bar0_size = size;
479	spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);
480
481	return count;
482}
483
484static ssize_t pcie_gadget_bar0_address_show(struct config_item *item,
 
485		char *buf)
486{
487	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
488
489	u32 address = readl(&app_reg->pim0_mem_addr_start);
490
491	return sprintf(buf, "%x", address);
492}
493
494static ssize_t pcie_gadget_bar0_address_store(struct config_item *item,
 
495		const char *buf, size_t count)
496{
497	struct spear_pcie_gadget_config *config = to_target(item)
498	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
499	ulong address;
500	int ret;
501
502	ret = kstrtoul(buf, 0, &address);
503	if (ret)
504		return ret;
505
506	address &= ~(config->bar0_size - 1);
507	if (config->va_bar0_address)
508		iounmap(config->va_bar0_address);
509	config->va_bar0_address = ioremap(address, config->bar0_size);
510	if (!config->va_bar0_address)
511		return -ENOMEM;
512
513	writel(address, &app_reg->pim0_mem_addr_start);
514
515	return count;
516}
517
518static ssize_t pcie_gadget_bar0_rw_offset_show(struct config_item *item,
 
519		char *buf)
520{
521	return sprintf(buf, "%lx", to_target(item)->bar0_rw_offset);
522}
523
524static ssize_t pcie_gadget_bar0_rw_offset_store(struct config_item *item,
 
525		const char *buf, size_t count)
526{
527	ulong offset;
528	int ret;
529
530	ret = kstrtoul(buf, 0, &offset);
531	if (ret)
532		return ret;
533
534	if (offset % 4)
535		return -EINVAL;
536
537	to_target(item)->bar0_rw_offset = offset;
538
539	return count;
540}
541
542static ssize_t pcie_gadget_bar0_data_show(struct config_item *item, char *buf)
 
 
543{
544	struct spear_pcie_gadget_config *config = to_target(item)
545	ulong data;
546
547	if (!config->va_bar0_address)
548		return -ENOMEM;
549
550	data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);
551
552	return sprintf(buf, "%lx", data);
553}
554
555static ssize_t pcie_gadget_bar0_data_store(struct config_item *item,
 
556		const char *buf, size_t count)
557{
558	struct spear_pcie_gadget_config *config = to_target(item)
559	ulong data;
560	int ret;
561
562	ret = kstrtoul(buf, 0, &data);
563	if (ret)
564		return ret;
565
566	if (!config->va_bar0_address)
567		return -ENOMEM;
568
569	writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);
570
571	return count;
572}
573
/*
 * configfs attribute plumbing: CONFIGFS_ATTR(pcie_gadget_, foo) binds the
 * pcie_gadget_foo_show/_store handlers above into pcie_gadget_attr_foo;
 * CONFIGFS_ATTR_WO creates write-only attributes (inta, send_msi have no
 * show handler).
 */
CONFIGFS_ATTR(pcie_gadget_, link);
CONFIGFS_ATTR(pcie_gadget_, int_type);
CONFIGFS_ATTR(pcie_gadget_, no_of_msi);
CONFIGFS_ATTR_WO(pcie_gadget_, inta);
CONFIGFS_ATTR_WO(pcie_gadget_, send_msi);
CONFIGFS_ATTR(pcie_gadget_, vendor_id);
CONFIGFS_ATTR(pcie_gadget_, device_id);
CONFIGFS_ATTR(pcie_gadget_, bar0_size);
CONFIGFS_ATTR(pcie_gadget_, bar0_address);
CONFIGFS_ATTR(pcie_gadget_, bar0_rw_offset);
CONFIGFS_ATTR(pcie_gadget_, bar0_data);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
585
/* Attribute table exposed in the gadget's configfs directory. */
static struct configfs_attribute *pcie_gadget_target_attrs[] = {
	&pcie_gadget_attr_link,
	&pcie_gadget_attr_int_type,
	&pcie_gadget_attr_no_of_msi,
	&pcie_gadget_attr_inta,
	&pcie_gadget_attr_send_msi,
	&pcie_gadget_attr_vendor_id,
	&pcie_gadget_attr_device_id,
	&pcie_gadget_attr_bar0_size,
	&pcie_gadget_attr_bar0_address,
	&pcie_gadget_attr_bar0_rw_offset,
	&pcie_gadget_attr_bar0_data,
	NULL,	/* sentinel */
};
600
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* configfs item type binding the attribute table to the subsystem group. */
static struct config_item_type pcie_gadget_target_type = {
	.ct_attrs		= pcie_gadget_target_attrs,
	.ct_owner		= THIS_MODULE,
};
605
/*
 * Program the controller's default configuration: outbound and inbound
 * address translation windows, endpoint mode, and legacy INTA as the
 * default interrupt. Called once from probe after the register windows
 * are mapped. The register write order is deliberate; do not reorder.
 */
static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;

	/*setup registers for outbound translation */

	/*
	 * NOTE(review): the source operands below read the __iomem register
	 * struct through plain loads (no readl()), relying on reading back
	 * the value just written — presumably safe on this SoC's register
	 * bus, but verify.
	 */
	writel(config->base, &app_reg->in0_mem_addr_start);
	writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
			&app_reg->in0_mem_addr_limit);
	writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
	writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
			&app_reg->in1_mem_addr_limit);
	writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
	writel(app_reg->in_io_addr_start + IN_IO_SIZE,
			&app_reg->in_io_addr_limit);
	writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
	writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
			&app_reg->in_cfg0_addr_limit);
	writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
	writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
			&app_reg->in_cfg1_addr_limit);
	writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
	writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
			&app_reg->in_msg_addr_limit);

	writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
	writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
	writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);

	/*setup registers for inbound translation */

	/* Keep AORAM mapped at BAR0 as default */
	config->bar0_size = INBOUND_ADDR_MASK + 1;
	spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
	spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
	config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
			config->bar0_size);

	writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
	writel(0, &app_reg->pim1_mem_addr_start);
	writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);

	/* NOTE(review): pim_io_addr_start is written twice below — the second
	 * write may have been meant for a different register; confirm. */
	writel(0x0, &app_reg->pim_io_addr_start);
	writel(0x0, &app_reg->pim_io_addr_start);
	writel(0x0, &app_reg->pim_rom_addr_start);

	writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
			| ((u32)1 << REG_TRANSLATION_ENABLE),
			&app_reg->app_ctrl_0);
	/* disable all rx interrupts */
	writel(0, &app_reg->int_mask);

	/* Select INTA as default*/
	spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
}
661
662static int spear_pcie_gadget_probe(struct platform_device *pdev)
663{
664	struct resource *res0, *res1;
665	unsigned int status = 0;
666	int irq;
667	struct clk *clk;
668	static struct pcie_gadget_target *target;
669	struct spear_pcie_gadget_config *config;
670	struct config_item		*cg_item;
671	struct configfs_subsystem *subsys;
672
673	target = devm_kzalloc(&pdev->dev, sizeof(*target), GFP_KERNEL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
674	if (!target) {
675		dev_err(&pdev->dev, "out of memory\n");
676		return -ENOMEM;
 
677	}
678
679	cg_item = &target->subsys.su_group.cg_item;
680	sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
681	cg_item->ci_type	= &pcie_gadget_target_type;
682	config = &target->config;
683
684	/* get resource for application registers*/
685	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
686	config->va_app_base = devm_ioremap_resource(&pdev->dev, res0);
687	if (IS_ERR(config->va_app_base)) {
688		dev_err(&pdev->dev, "ioremap fail\n");
689		return PTR_ERR(config->va_app_base);
 
690	}
691
692	/* get resource for dbi registers*/
693	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
694	config->base = (void __iomem *)res1->start;
695
696	config->va_dbi_base = devm_ioremap_resource(&pdev->dev, res1);
697	if (IS_ERR(config->va_dbi_base)) {
 
698		dev_err(&pdev->dev, "ioremap fail\n");
699		return PTR_ERR(config->va_dbi_base);
 
700	}
701
702	platform_set_drvdata(pdev, target);
703
704	irq = platform_get_irq(pdev, 0);
705	if (irq < 0) {
706		dev_err(&pdev->dev, "no update irq?\n");
707		return irq;
 
708	}
709
710	status = devm_request_irq(&pdev->dev, irq, spear_pcie_gadget_irq,
711				  0, pdev->name, NULL);
712	if (status) {
713		dev_err(&pdev->dev,
714			"pcie gadget interrupt IRQ%d already claimed\n", irq);
715		return status;
716	}
717
718	/* Register configfs hooks */
719	subsys = &target->subsys;
720	config_group_init(&subsys->su_group);
721	mutex_init(&subsys->su_mutex);
722	status = configfs_register_subsystem(subsys);
723	if (status)
724		return status;
725
726	/*
727	 * init basic pcie application registers
728	 * do not enable clock if it is PCIE0.Ideally , all controller should
729	 * have been independent from others with respect to clock. But PCIE1
730	 * and 2 depends on PCIE0.So PCIE0 clk is provided during board init.
731	 */
732	if (pdev->id == 1) {
733		/*
734		 * Ideally CFG Clock should have been also enabled here. But
735		 * it is done currently during board init routne
736		 */
737		clk = clk_get_sys("pcie1", NULL);
738		if (IS_ERR(clk)) {
739			pr_err("%s:couldn't get clk for pcie1\n", __func__);
740			return PTR_ERR(clk);
741		}
742		status = clk_enable(clk);
743		if (status) {
744			pr_err("%s:couldn't enable clk for pcie1\n", __func__);
745			return status;
746		}
747	} else if (pdev->id == 2) {
748		/*
749		 * Ideally CFG Clock should have been also enabled here. But
750		 * it is done currently during board init routne
751		 */
752		clk = clk_get_sys("pcie2", NULL);
753		if (IS_ERR(clk)) {
754			pr_err("%s:couldn't get clk for pcie2\n", __func__);
755			return PTR_ERR(clk);
756		}
757		status = clk_enable(clk);
758		if (status) {
759			pr_err("%s:couldn't enable clk for pcie2\n", __func__);
760			return status;
761		}
762	}
763	spear13xx_pcie_device_init(config);
764
765	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
766}
767
768static int spear_pcie_gadget_remove(struct platform_device *pdev)
769{
 
770	static struct pcie_gadget_target *target;
 
 
771
772	target = platform_get_drvdata(pdev);
 
 
 
 
773
 
 
 
 
 
774	configfs_unregister_subsystem(&target->subsys);
 
775
776	return 0;
777}
778
/* Intentionally empty: no hardware quiescing is performed on shutdown. */
static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
{
}
782
/* Platform driver glue; binds to devices named "pcie-gadget-spear". */
static struct platform_driver spear_pcie_gadget_driver = {
	.probe = spear_pcie_gadget_probe,
	.remove = spear_pcie_gadget_remove,
	.shutdown = spear_pcie_gadget_shutdown,
	.driver = {
		.name = "pcie-gadget-spear",
		/* NOTE(review): the platform core normally sets .bus itself;
		 * this explicit assignment looks redundant — confirm. */
		.bus = &platform_bus_type
	},
};
792
793module_platform_driver(spear_pcie_gadget_driver);
 
 
 
 
 
 
 
 
 
 
794
795MODULE_ALIAS("platform:pcie-gadget-spear");
796MODULE_AUTHOR("Pratyush Anand");
797MODULE_LICENSE("GPL");