Linux Audio

v6.2
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
  4 */
  5
  6#include <linux/bug.h>
  7#include <linux/completion.h>
  8#include <linux/crc-itu-t.h>
  9#include <linux/device.h>
 10#include <linux/errno.h>
 11#include <linux/firewire.h>
 12#include <linux/firewire-constants.h>
 13#include <linux/jiffies.h>
 14#include <linux/kernel.h>
 15#include <linux/kref.h>
 16#include <linux/list.h>
 17#include <linux/module.h>
 18#include <linux/mutex.h>
 19#include <linux/spinlock.h>
 20#include <linux/workqueue.h>
 21
 22#include <linux/atomic.h>
 23#include <asm/byteorder.h>
 24
 25#include "core.h"
 26
 27#define define_fw_printk_level(func, kern_level)		\
 28void func(const struct fw_card *card, const char *fmt, ...)	\
 29{								\
 30	struct va_format vaf;					\
 31	va_list args;						\
 32								\
 33	va_start(args, fmt);					\
 34	vaf.fmt = fmt;						\
 35	vaf.va = &args;						\
 36	printk(kern_level KBUILD_MODNAME " %s: %pV",		\
 37	       dev_name(card->device), &vaf);			\
 38	va_end(args);						\
 39}
 40define_fw_printk_level(fw_err, KERN_ERR);
 41define_fw_printk_level(fw_notice, KERN_NOTICE);
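
A brief usage sketch for the two helpers generated above (the fw_notice() call is copied from bm_work() below; fw_err() takes the same arguments but logs at KERN_ERR):

	fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
		  new_root_id, gap_count);
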
 42
 43int fw_compute_block_crc(__be32 *block)
 44{
 45	int length;
 46	u16 crc;
 47
 48	length = (be32_to_cpu(block[0]) >> 16) & 0xff;
 49	crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
 50	*block |= cpu_to_be32(crc);
 51
 52	return length;
 53}
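
A sketch of the block layout this helper expects: bits 23..16 of the header quadlet hold the number of quadlets covered by the CRC, and the computed CRC is ORed into the low 16 bits, which the caller therefore leaves zero. For a block with four payload quadlets:

	__be32 block[5] = { cpu_to_be32(4 << 16) };	/* header: CRC length 4, CRC field 0 */

	block[1] = cpu_to_be32(0x31333934);		/* arbitrary example payload; block[2..4] stay 0 */
	fw_compute_block_crc(block);			/* returns 4, ORs the CRC into block[0] */
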
 54
 55static DEFINE_MUTEX(card_mutex);
 56static LIST_HEAD(card_list);
 57
 58static LIST_HEAD(descriptor_list);
 59static int descriptor_count;
 60
 61static __be32 tmp_config_rom[256];
 62/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
 63static size_t config_rom_length = 1 + 4 + 1 + 1;
 64
 65#define BIB_CRC(v)		((v) <<  0)
 66#define BIB_CRC_LENGTH(v)	((v) << 16)
 67#define BIB_INFO_LENGTH(v)	((v) << 24)
 68#define BIB_BUS_NAME		0x31333934 /* "1394" */
 69#define BIB_LINK_SPEED(v)	((v) <<  0)
 70#define BIB_GENERATION(v)	((v) <<  4)
 71#define BIB_MAX_ROM(v)		((v) <<  8)
 72#define BIB_MAX_RECEIVE(v)	((v) << 12)
 73#define BIB_CYC_CLK_ACC(v)	((v) << 16)
 74#define BIB_PMC			((1) << 27)
 75#define BIB_BMC			((1) << 28)
 76#define BIB_ISC			((1) << 29)
 77#define BIB_CMC			((1) << 30)
 78#define BIB_IRMC		((1) << 31)
 79#define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
 80
 81/*
 82 * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
 83 * but we have to make it longer because there are many devices whose firmware
 84 * is just too slow for that.
 85 */
 86#define DEFAULT_SPLIT_TIMEOUT	(2 * 8000)
 87
 88#define CANON_OUI		0x000085
 89
 90static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
 91{
 92	struct fw_descriptor *desc;
 93	int i, j, k, length;
 94
 95	/*
 96	 * Initialize contents of config rom buffer.  On the OHCI
 97	 * controller, block reads to the config rom access the host
 98	 * memory, but quadlet reads access the hardware bus info block
 99	 * registers.  That's just crack, but it means we should make
100	 * sure the contents of the bus info block in host memory match
101	 * the version stored in the OHCI registers.
102	 */
103
104	config_rom[0] = cpu_to_be32(
105		BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
106	config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
107	config_rom[2] = cpu_to_be32(
108		BIB_LINK_SPEED(card->link_speed) |
109		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
110		BIB_MAX_ROM(2) |
111		BIB_MAX_RECEIVE(card->max_receive) |
112		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
113	config_rom[3] = cpu_to_be32(card->guid >> 32);
114	config_rom[4] = cpu_to_be32(card->guid);
115
116	/* Generate root directory. */
117	config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
118	i = 7;
119	j = 7 + descriptor_count;
120
121	/* Generate root directory entries for descriptors. */
122	list_for_each_entry (desc, &descriptor_list, link) {
123		if (desc->immediate > 0)
124			config_rom[i++] = cpu_to_be32(desc->immediate);
125		config_rom[i] = cpu_to_be32(desc->key | (j - i));
126		i++;
127		j += desc->length;
128	}
129
130	/* Update root directory length. */
131	config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);
132
133	/* End of root directory, now copy in descriptors. */
134	list_for_each_entry (desc, &descriptor_list, link) {
135		for (k = 0; k < desc->length; k++)
136			config_rom[i + k] = cpu_to_be32(desc->data[k]);
137		i += desc->length;
138	}
139
140	/* Calculate CRCs for all blocks in the config rom.  This
141	 * assumes that CRC length and info length are identical for
142	 * the bus info block, which is always the case for this
143	 * implementation. */
144	for (i = 0; i < j; i += length + 1)
145		length = fw_compute_block_crc(config_rom + i);
146
147	WARN_ON(j != config_rom_length);
148}
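
For reference, a sketch of the image built above (quadlet indices), assuming no descriptors have been registered, so the total matches the initial config_rom_length of 7 quadlets:

	config_rom[0]    ROM header: info_length 4, crc_length 4, CRC over quadlets 1..4
	config_rom[1]    bus name "1394"
	config_rom[2]    bus options: link speed, generation, max_rom, max_receive, BMC/ISC/CMC/IRMC
	config_rom[3-4]  GUID (high and low quadlets)
	config_rom[5]    root directory header: length 1, CRC over quadlet 6
	config_rom[6]    NODE_CAPABILITIES entry
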
149
150static void update_config_roms(void)
151{
152	struct fw_card *card;
153
154	list_for_each_entry (card, &card_list, link) {
155		generate_config_rom(card, tmp_config_rom);
156		card->driver->set_config_rom(card, tmp_config_rom,
157					     config_rom_length);
158	}
159}
160
161static size_t required_space(struct fw_descriptor *desc)
162{
163	/* descriptor + entry into root dir + optional immediate entry */
164	return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
165}
166
167int fw_core_add_descriptor(struct fw_descriptor *desc)
168{
169	size_t i;
170	int ret;
171
172	/*
173	 * Check descriptor is valid; the length of all blocks in the
174	 * descriptor has to add up to exactly the length of the
175	 * block.
176	 */
177	i = 0;
178	while (i < desc->length)
179		i += (desc->data[i] >> 16) + 1;
180
181	if (i != desc->length)
182		return -EINVAL;
183
184	mutex_lock(&card_mutex);
185
186	if (config_rom_length + required_space(desc) > 256) {
187		ret = -EBUSY;
188	} else {
189		list_add_tail(&desc->link, &descriptor_list);
190		config_rom_length += required_space(desc);
191		descriptor_count++;
192		if (desc->immediate > 0)
193			descriptor_count++;
194		update_config_roms();
195		ret = 0;
196	}
197
198	mutex_unlock(&card_mutex);
199
200	return ret;
201}
202EXPORT_SYMBOL(fw_core_add_descriptor);
203
204void fw_core_remove_descriptor(struct fw_descriptor *desc)
205{
206	mutex_lock(&card_mutex);
207
208	list_del(&desc->link);
209	config_rom_length -= required_space(desc);
210	descriptor_count--;
211	if (desc->immediate > 0)
212		descriptor_count--;
213	update_config_roms();
214
215	mutex_unlock(&card_mutex);
216}
217EXPORT_SYMBOL(fw_core_remove_descriptor);
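
A hypothetical caller of the add/remove pair above, modeled on the unit directories that units such as firewire-net register. The key and payload values are placeholders; only the struct fields and the length convention (the upper 16 bits of the first quadlet of each block carry the count of following quadlets, as checked in fw_core_add_descriptor()) are taken from this file and <linux/firewire.h>:

	static const u32 example_unit_data[] = {
		0x00030000,	/* block header: 3 quadlets follow, CRC filled in by the core */
		0x12abcdef,	/* placeholder specifier ID entry */
		0x13000001,	/* placeholder version entry */
		0x17000001,	/* placeholder model entry */
	};
	static struct fw_descriptor example_unit_directory = {
		.length	= ARRAY_SIZE(example_unit_data),
		.key	= (CSR_DIRECTORY | CSR_UNIT) << 24,
		.data	= example_unit_data,
	};

	err = fw_core_add_descriptor(&example_unit_directory);	/* typically on module load */
	if (err < 0)
		return err;
	/* ... and fw_core_remove_descriptor(&example_unit_directory) on unload */
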
218
219static int reset_bus(struct fw_card *card, bool short_reset)
220{
221	int reg = short_reset ? 5 : 1;
222	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
223
224	return card->driver->update_phy_reg(card, reg, 0, bit);
225}
226
227void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
228{
229	/* We don't try hard to sort out requests of long vs. short resets. */
230	card->br_short = short_reset;
231
232	/* Use an arbitrary short delay to combine multiple reset requests. */
233	fw_card_get(card);
234	if (!queue_delayed_work(fw_workqueue, &card->br_work,
235				delayed ? DIV_ROUND_UP(HZ, 100) : 0))
236		fw_card_put(card);
237}
238EXPORT_SYMBOL(fw_schedule_bus_reset);
239
240static void br_work(struct work_struct *work)
241{
242	struct fw_card *card = container_of(work, struct fw_card, br_work.work);
243
244	/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
245	if (card->reset_jiffies != 0 &&
246	    time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
247		if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
248			fw_card_put(card);
249		return;
250	}
251
252	fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
253			   FW_PHY_CONFIG_CURRENT_GAP_COUNT);
254	reset_bus(card, card->br_short);
255	fw_card_put(card);
256}
257
258static void allocate_broadcast_channel(struct fw_card *card, int generation)
259{
260	int channel, bandwidth = 0;
261
262	if (!card->broadcast_channel_allocated) {
263		fw_iso_resource_manage(card, generation, 1ULL << 31,
264				       &channel, &bandwidth, true);
265		if (channel != 31) {
266			fw_notice(card, "failed to allocate broadcast channel\n");
267			return;
268		}
269		card->broadcast_channel_allocated = true;
270	}
271
272	device_for_each_child(card->device, (void *)(long)generation,
273			      fw_device_set_broadcast_channel);
274}
275
276static const char gap_count_table[] = {
277	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
278};
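
For example, root_node->max_hops == 3 yields gap_count_table[3] == 8; hop counts beyond the table, or any bus with 1394b beta repeaters present, fall back to a gap count of 63, as selected in bm_work() below.
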
279
280void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
281{
282	fw_card_get(card);
283	if (!schedule_delayed_work(&card->bm_work, delay))
284		fw_card_put(card);
285}
286
287static void bm_work(struct work_struct *work)
288{
289	struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
290	struct fw_device *root_device, *irm_device;
291	struct fw_node *root_node;
292	int root_id, new_root_id, irm_id, bm_id, local_id;
293	int gap_count, generation, grace, rcode;
294	bool do_reset = false;
295	bool root_device_is_running;
296	bool root_device_is_cmc;
297	bool irm_is_1394_1995_only;
298	bool keep_this_irm;
299	__be32 transaction_data[2];
300
301	spin_lock_irq(&card->lock);
302
303	if (card->local_node == NULL) {
304		spin_unlock_irq(&card->lock);
305		goto out_put_card;
306	}
307
308	generation = card->generation;
309
310	root_node = card->root_node;
311	fw_node_get(root_node);
312	root_device = root_node->data;
313	root_device_is_running = root_device &&
314			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
315	root_device_is_cmc = root_device && root_device->cmc;
316
317	irm_device = card->irm_node->data;
318	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
319			(irm_device->config_rom[2] & 0x000000f0) == 0;
320
321	/* Canon MV5i works unreliably if it is not root node. */
322	keep_this_irm = irm_device && irm_device->config_rom &&
323			irm_device->config_rom[3] >> 8 == CANON_OUI;
324
325	root_id  = root_node->node_id;
326	irm_id   = card->irm_node->node_id;
327	local_id = card->local_node->node_id;
328
329	grace = time_after64(get_jiffies_64(),
330			     card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
331
332	if ((is_next_generation(generation, card->bm_generation) &&
333	     !card->bm_abdicate) ||
334	    (card->bm_generation != generation && grace)) {
335		/*
336		 * This first step is to figure out who is IRM and
337		 * then try to become bus manager.  If the IRM is not
338		 * well defined (e.g. does not have an active link
 339		 * layer or does not respond to our lock request), we
340		 * will have to do a little vigilante bus management.
341		 * In that case, we do a goto into the gap count logic
342		 * so that when we do the reset, we still optimize the
343		 * gap count.  That could well save a reset in the
344		 * next generation.
345		 */
346
347		if (!card->irm_node->link_on) {
348			new_root_id = local_id;
349			fw_notice(card, "%s, making local node (%02x) root\n",
350				  "IRM has link off", new_root_id);
351			goto pick_me;
352		}
353
354		if (irm_is_1394_1995_only && !keep_this_irm) {
355			new_root_id = local_id;
356			fw_notice(card, "%s, making local node (%02x) root\n",
357				  "IRM is not 1394a compliant", new_root_id);
358			goto pick_me;
359		}
360
361		transaction_data[0] = cpu_to_be32(0x3f);
362		transaction_data[1] = cpu_to_be32(local_id);
363
364		spin_unlock_irq(&card->lock);
365
366		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
367				irm_id, generation, SCODE_100,
368				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
369				transaction_data, 8);
370
371		if (rcode == RCODE_GENERATION)
372			/* Another bus reset, BM work has been rescheduled. */
373			goto out;
374
375		bm_id = be32_to_cpu(transaction_data[0]);
376
377		spin_lock_irq(&card->lock);
378		if (rcode == RCODE_COMPLETE && generation == card->generation)
379			card->bm_node_id =
380			    bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
381		spin_unlock_irq(&card->lock);
382
383		if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
384			/* Somebody else is BM.  Only act as IRM. */
385			if (local_id == irm_id)
386				allocate_broadcast_channel(card, generation);
387
388			goto out;
389		}
390
391		if (rcode == RCODE_SEND_ERROR) {
392			/*
393			 * We have been unable to send the lock request due to
394			 * some local problem.  Let's try again later and hope
395			 * that the problem has gone away by then.
396			 */
397			fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
398			goto out;
399		}
400
401		spin_lock_irq(&card->lock);
402
403		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
404			/*
405			 * The lock request failed, maybe the IRM
406			 * isn't really IRM capable after all. Let's
407			 * do a bus reset and pick the local node as
408			 * root, and thus, IRM.
409			 */
410			new_root_id = local_id;
411			fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
412				  fw_rcode_string(rcode), new_root_id);
413			goto pick_me;
414		}
415	} else if (card->bm_generation != generation) {
416		/*
417		 * We weren't BM in the last generation, and the last
418		 * bus reset is less than 125ms ago.  Reschedule this job.
419		 */
420		spin_unlock_irq(&card->lock);
421		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
422		goto out;
423	}
424
425	/*
426	 * We're bus manager for this generation, so next step is to
427	 * make sure we have an active cycle master and do gap count
428	 * optimization.
429	 */
430	card->bm_generation = generation;
431
432	if (root_device == NULL) {
433		/*
434		 * Either link_on is false, or we failed to read the
435		 * config rom.  In either case, pick another root.
436		 */
437		new_root_id = local_id;
438	} else if (!root_device_is_running) {
439		/*
440		 * If we haven't probed this device yet, bail out now
441		 * and let's try again once that's done.
442		 */
443		spin_unlock_irq(&card->lock);
444		goto out;
445	} else if (root_device_is_cmc) {
446		/*
447		 * We will send out a force root packet for this
448		 * node as part of the gap count optimization.
449		 */
450		new_root_id = root_id;
451	} else {
452		/*
453		 * Current root has an active link layer and we
454		 * successfully read the config rom, but it's not
455		 * cycle master capable.
456		 */
457		new_root_id = local_id;
458	}
459
460 pick_me:
461	/*
462	 * Pick a gap count from 1394a table E-1.  The table doesn't cover
463	 * the typically much larger 1394b beta repeater delays though.
464	 */
465	if (!card->beta_repeaters_present &&
466	    root_node->max_hops < ARRAY_SIZE(gap_count_table))
467		gap_count = gap_count_table[root_node->max_hops];
468	else
469		gap_count = 63;
470
471	/*
472	 * Finally, figure out if we should do a reset or not.  If we have
473	 * done less than 5 resets with the same physical topology and we
474	 * have either a new root or a new gap count setting, let's do it.
475	 */
476
477	if (card->bm_retries++ < 5 &&
478	    (card->gap_count != gap_count || new_root_id != root_id))
479		do_reset = true;
480
481	spin_unlock_irq(&card->lock);
482
483	if (do_reset) {
484		fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
485			  new_root_id, gap_count);
486		fw_send_phy_config(card, new_root_id, generation, gap_count);
487		reset_bus(card, true);
488		/* Will allocate broadcast channel after the reset. */
489		goto out;
490	}
491
492	if (root_device_is_cmc) {
493		/*
494		 * Make sure that the cycle master sends cycle start packets.
495		 */
496		transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
497		rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
498				root_id, generation, SCODE_100,
499				CSR_REGISTER_BASE + CSR_STATE_SET,
500				transaction_data, 4);
501		if (rcode == RCODE_GENERATION)
502			goto out;
503	}
504
505	if (local_id == irm_id)
506		allocate_broadcast_channel(card, generation);
507
508 out:
509	fw_node_put(root_node);
510 out_put_card:
511	fw_card_put(card);
512}
513
514void fw_card_initialize(struct fw_card *card,
515			const struct fw_card_driver *driver,
516			struct device *device)
517{
518	static atomic_t index = ATOMIC_INIT(-1);
519
520	card->index = atomic_inc_return(&index);
521	card->driver = driver;
522	card->device = device;
523	card->current_tlabel = 0;
524	card->tlabel_mask = 0;
525	card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
526	card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
527	card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
528	card->split_timeout_jiffies =
529			DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
530	card->color = 0;
531	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
532
533	kref_init(&card->kref);
534	init_completion(&card->done);
535	INIT_LIST_HEAD(&card->transaction_list);
536	INIT_LIST_HEAD(&card->phy_receiver_list);
537	spin_lock_init(&card->lock);
538
539	card->local_node = NULL;
540
541	INIT_DELAYED_WORK(&card->br_work, br_work);
542	INIT_DELAYED_WORK(&card->bm_work, bm_work);
543}
544EXPORT_SYMBOL(fw_card_initialize);
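
A worked example of the split-timeout fields initialized above, with DEFAULT_SPLIT_TIMEOUT = 16000 isochronous cycles and 8000 cycles per second:

	split_timeout_hi      = 16000 / 8000         = 2	/* seconds part */
	split_timeout_lo      = (16000 % 8000) << 19 = 0	/* fractional part */
	split_timeout_cycles  = 16000
	split_timeout_jiffies = DIV_ROUND_UP(16000 * HZ, 8000) = 2 * HZ
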
545
546int fw_card_add(struct fw_card *card,
547		u32 max_receive, u32 link_speed, u64 guid)
548{
549	int ret;
550
551	card->max_receive = max_receive;
552	card->link_speed = link_speed;
553	card->guid = guid;
554
555	mutex_lock(&card_mutex);
556
557	generate_config_rom(card, tmp_config_rom);
558	ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
559	if (ret == 0)
560		list_add_tail(&card->link, &card_list);
561
562	mutex_unlock(&card_mutex);
563
564	return ret;
565}
566EXPORT_SYMBOL(fw_card_add);
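
A condensed sketch of how a controller driver such as firewire-ohci brings a card up with the two calls above; the ohci-specific names here are illustrative only:

	struct fw_card *card = &ohci->card;		/* hypothetical controller private data */

	fw_card_initialize(card, &ohci_card_driver, &pci_dev->dev);

	ret = fw_card_add(card, max_receive, link_speed, guid);
	if (ret < 0)
		return ret;
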
567
568/*
569 * The next few functions implement a dummy driver that is used once a card
570 * driver shuts down an fw_card.  This allows the driver to cleanly unload,
571 * as all IO to the card will be handled (and failed) by the dummy driver
572 * instead of calling into the module.  Only functions for iso context
573 * shutdown still need to be provided by the card driver.
574 *
575 * .read/write_csr() should never be called anymore after the dummy driver
576 * was bound since they are only used within request handler context.
577 * .set_config_rom() is never called since the card is taken out of card_list
578 * before switching to the dummy driver.
579 */
580
581static int dummy_read_phy_reg(struct fw_card *card, int address)
582{
583	return -ENODEV;
584}
585
586static int dummy_update_phy_reg(struct fw_card *card, int address,
587				int clear_bits, int set_bits)
588{
589	return -ENODEV;
590}
591
592static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
593{
594	packet->callback(packet, card, RCODE_CANCELLED);
595}
596
597static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
598{
599	packet->callback(packet, card, RCODE_CANCELLED);
600}
601
602static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
603{
604	return -ENOENT;
605}
606
607static int dummy_enable_phys_dma(struct fw_card *card,
608				 int node_id, int generation)
609{
610	return -ENODEV;
611}
612
613static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
614				int type, int channel, size_t header_size)
615{
616	return ERR_PTR(-ENODEV);
617}
618
619static u32 dummy_read_csr(struct fw_card *card, int csr_offset)
620{
621	return 0;
622}
623
624static void dummy_write_csr(struct fw_card *card, int csr_offset, u32 value)
625{
626}
627
628static int dummy_start_iso(struct fw_iso_context *ctx,
629			   s32 cycle, u32 sync, u32 tags)
630{
631	return -ENODEV;
632}
633
634static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
635{
636	return -ENODEV;
637}
638
639static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
640			   struct fw_iso_buffer *buffer, unsigned long payload)
641{
642	return -ENODEV;
643}
644
645static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
646{
647}
648
649static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
650{
651	return -ENODEV;
652}
653
654static const struct fw_card_driver dummy_driver_template = {
655	.read_phy_reg		= dummy_read_phy_reg,
656	.update_phy_reg		= dummy_update_phy_reg,
657	.send_request		= dummy_send_request,
658	.send_response		= dummy_send_response,
659	.cancel_packet		= dummy_cancel_packet,
660	.enable_phys_dma	= dummy_enable_phys_dma,
661	.read_csr		= dummy_read_csr,
662	.write_csr		= dummy_write_csr,
663	.allocate_iso_context	= dummy_allocate_iso_context,
664	.start_iso		= dummy_start_iso,
665	.set_iso_channels	= dummy_set_iso_channels,
666	.queue_iso		= dummy_queue_iso,
667	.flush_queue_iso	= dummy_flush_queue_iso,
668	.flush_iso_completions	= dummy_flush_iso_completions,
669};
670
671void fw_card_release(struct kref *kref)
672{
673	struct fw_card *card = container_of(kref, struct fw_card, kref);
674
675	complete(&card->done);
676}
677EXPORT_SYMBOL_GPL(fw_card_release);
678
679void fw_core_remove_card(struct fw_card *card)
680{
681	struct fw_card_driver dummy_driver = dummy_driver_template;
682	unsigned long flags;
683
684	card->driver->update_phy_reg(card, 4,
685				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
686	fw_schedule_bus_reset(card, false, true);
687
688	mutex_lock(&card_mutex);
689	list_del_init(&card->link);
690	mutex_unlock(&card_mutex);
691
692	/* Switch off most of the card driver interface. */
693	dummy_driver.free_iso_context	= card->driver->free_iso_context;
694	dummy_driver.stop_iso		= card->driver->stop_iso;
695	card->driver = &dummy_driver;
696
697	spin_lock_irqsave(&card->lock, flags);
698	fw_destroy_nodes(card);
699	spin_unlock_irqrestore(&card->lock, flags);
700
701	/* Wait for all users, especially device workqueue jobs, to finish. */
702	fw_card_put(card);
703	wait_for_completion(&card->done);
704
705	WARN_ON(!list_empty(&card->transaction_list));
706}
707EXPORT_SYMBOL(fw_core_remove_card);
708
709/**
710 * fw_card_read_cycle_time: read from Isochronous Cycle Timer Register of 1394 OHCI in MMIO region
711 *			    for controller card.
712 * @card: The instance of card for 1394 OHCI controller.
713 * @cycle_time: The pointer to store the value of cycle time for the read operation.
714 *
715 * Read value from Isochronous Cycle Timer Register of 1394 OHCI in MMIO region for the given
716 * controller card. This function accesses the region without any lock primitives or IRQ mask.
717 * When returning successfully, the content of the @cycle_time argument has a value aligned to host endianness,
718 * formatted by the CYCLE_TIME CSR register of the IEEE 1394 standard.
719 *
720 * Context: Any context.
721 * Return:
722 * * 0 - Read successfully.
723 * * -ENODEV - The controller is unavailable due to being removed or unbound.
724 */
725int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time)
726{
727	if (card->driver->read_csr == dummy_read_csr)
728		return -ENODEV;
729
730	// It's possible to switch to dummy driver between the above and the below. This is the best
731	// effort to return -ENODEV.
732	*cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
733	return 0;
734}
735EXPORT_SYMBOL_GPL(fw_card_read_cycle_time);
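
A usage sketch; the field split follows the CYCLE_TIME format referenced in the comment above (7-bit seconds count, 13-bit cycle count, 12-bit cycle offset):

	u32 cycle_time;

	if (fw_card_read_cycle_time(card, &cycle_time) == 0) {
		u32 sec    = cycle_time >> 25;			/* bits 31..25 */
		u32 cycles = (cycle_time >> 12) & 0x1fff;	/* bits 24..12 */
		u32 offset = cycle_time & 0xfff;		/* bits 11..0  */
	}
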
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
  4 */
  5
  6#include <linux/bug.h>
  7#include <linux/completion.h>
  8#include <linux/crc-itu-t.h>
  9#include <linux/device.h>
 10#include <linux/errno.h>
 11#include <linux/firewire.h>
 12#include <linux/firewire-constants.h>
 13#include <linux/jiffies.h>
 14#include <linux/kernel.h>
 15#include <linux/kref.h>
 16#include <linux/list.h>
 17#include <linux/module.h>
 18#include <linux/mutex.h>
 19#include <linux/spinlock.h>
 20#include <linux/workqueue.h>
 21
 22#include <linux/atomic.h>
 23#include <asm/byteorder.h>
 24
 25#include "core.h"
 26#include <trace/events/firewire.h>
 27
 28#define define_fw_printk_level(func, kern_level)		\
 29void func(const struct fw_card *card, const char *fmt, ...)	\
 30{								\
 31	struct va_format vaf;					\
 32	va_list args;						\
 33								\
 34	va_start(args, fmt);					\
 35	vaf.fmt = fmt;						\
 36	vaf.va = &args;						\
 37	printk(kern_level KBUILD_MODNAME " %s: %pV",		\
 38	       dev_name(card->device), &vaf);			\
 39	va_end(args);						\
 40}
 41define_fw_printk_level(fw_err, KERN_ERR);
 42define_fw_printk_level(fw_notice, KERN_NOTICE);
 43
 44int fw_compute_block_crc(__be32 *block)
 45{
 46	int length;
 47	u16 crc;
 48
 49	length = (be32_to_cpu(block[0]) >> 16) & 0xff;
 50	crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
 51	*block |= cpu_to_be32(crc);
 52
 53	return length;
 54}
 55
 56static DEFINE_MUTEX(card_mutex);
 57static LIST_HEAD(card_list);
 58
 59static LIST_HEAD(descriptor_list);
 60static int descriptor_count;
 61
 62static __be32 tmp_config_rom[256];
 63/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
 64static size_t config_rom_length = 1 + 4 + 1 + 1;
 65
 66#define BIB_CRC(v)		((v) <<  0)
 67#define BIB_CRC_LENGTH(v)	((v) << 16)
 68#define BIB_INFO_LENGTH(v)	((v) << 24)
 69#define BIB_BUS_NAME		0x31333934 /* "1394" */
 70#define BIB_LINK_SPEED(v)	((v) <<  0)
 71#define BIB_GENERATION(v)	((v) <<  4)
 72#define BIB_MAX_ROM(v)		((v) <<  8)
 73#define BIB_MAX_RECEIVE(v)	((v) << 12)
 74#define BIB_CYC_CLK_ACC(v)	((v) << 16)
 75#define BIB_PMC			((1) << 27)
 76#define BIB_BMC			((1) << 28)
 77#define BIB_ISC			((1) << 29)
 78#define BIB_CMC			((1) << 30)
 79#define BIB_IRMC		((1) << 31)
 80#define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
 81
 82/*
 83 * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
 84 * but we have to make it longer because there are many devices whose firmware
 85 * is just too slow for that.
 86 */
 87#define DEFAULT_SPLIT_TIMEOUT	(2 * 8000)
 88
 89#define CANON_OUI		0x000085
 90
 91static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
 92{
 93	struct fw_descriptor *desc;
 94	int i, j, k, length;
 95
 96	/*
 97	 * Initialize contents of config rom buffer.  On the OHCI
 98	 * controller, block reads to the config rom access the host
 99	 * memory, but quadlet reads access the hardware bus info block
100	 * registers.  That's just crack, but it means we should make
101	 * sure the contents of the bus info block in host memory match
102	 * the version stored in the OHCI registers.
103	 */
104
105	config_rom[0] = cpu_to_be32(
106		BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
107	config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
108	config_rom[2] = cpu_to_be32(
109		BIB_LINK_SPEED(card->link_speed) |
110		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
111		BIB_MAX_ROM(2) |
112		BIB_MAX_RECEIVE(card->max_receive) |
113		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
114	config_rom[3] = cpu_to_be32(card->guid >> 32);
115	config_rom[4] = cpu_to_be32(card->guid);
116
117	/* Generate root directory. */
118	config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
119	i = 7;
120	j = 7 + descriptor_count;
121
122	/* Generate root directory entries for descriptors. */
123	list_for_each_entry (desc, &descriptor_list, link) {
124		if (desc->immediate > 0)
125			config_rom[i++] = cpu_to_be32(desc->immediate);
126		config_rom[i] = cpu_to_be32(desc->key | (j - i));
127		i++;
128		j += desc->length;
129	}
130
131	/* Update root directory length. */
132	config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);
133
134	/* End of root directory, now copy in descriptors. */
135	list_for_each_entry (desc, &descriptor_list, link) {
136		for (k = 0; k < desc->length; k++)
137			config_rom[i + k] = cpu_to_be32(desc->data[k]);
138		i += desc->length;
139	}
140
141	/* Calculate CRCs for all blocks in the config rom.  This
142	 * assumes that CRC length and info length are identical for
143	 * the bus info block, which is always the case for this
144	 * implementation. */
145	for (i = 0; i < j; i += length + 1)
146		length = fw_compute_block_crc(config_rom + i);
147
148	WARN_ON(j != config_rom_length);
149}
150
151static void update_config_roms(void)
152{
153	struct fw_card *card;
154
155	list_for_each_entry (card, &card_list, link) {
156		generate_config_rom(card, tmp_config_rom);
157		card->driver->set_config_rom(card, tmp_config_rom,
158					     config_rom_length);
159	}
160}
161
162static size_t required_space(struct fw_descriptor *desc)
163{
164	/* descriptor + entry into root dir + optional immediate entry */
165	return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
166}
167
168int fw_core_add_descriptor(struct fw_descriptor *desc)
169{
170	size_t i;
171
172	/*
173	 * Check descriptor is valid; the length of all blocks in the
174	 * descriptor has to add up to exactly the length of the
175	 * block.
176	 */
177	i = 0;
178	while (i < desc->length)
179		i += (desc->data[i] >> 16) + 1;
180
181	if (i != desc->length)
182		return -EINVAL;
183
184	guard(mutex)(&card_mutex);
185
186	if (config_rom_length + required_space(desc) > 256)
187		return -EBUSY;
188
189	list_add_tail(&desc->link, &descriptor_list);
190	config_rom_length += required_space(desc);
191	descriptor_count++;
192	if (desc->immediate > 0)
193		descriptor_count++;
194	update_config_roms();
195
196	return 0;
197}
198EXPORT_SYMBOL(fw_core_add_descriptor);
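
Compared with the v6.2 version above, locking in this file now uses the scope-based helpers from <linux/cleanup.h>: guard(mutex)(&card_mutex) acquires the mutex and releases it automatically when the enclosing scope is left, so the early return -EBUSY path above needs no explicit unlock. A minimal sketch of the pattern (the condition is a placeholder):

	int example(void)
	{
		guard(mutex)(&card_mutex);	/* unlocked automatically on every return path */

		if (placeholder_condition)
			return -EBUSY;
		return 0;
	}
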
199
200void fw_core_remove_descriptor(struct fw_descriptor *desc)
201{
202	guard(mutex)(&card_mutex);
203
204	list_del(&desc->link);
205	config_rom_length -= required_space(desc);
206	descriptor_count--;
207	if (desc->immediate > 0)
208		descriptor_count--;
209	update_config_roms();
210}
211EXPORT_SYMBOL(fw_core_remove_descriptor);
212
213static int reset_bus(struct fw_card *card, bool short_reset)
214{
215	int reg = short_reset ? 5 : 1;
216	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
217
218	trace_bus_reset_initiate(card->index, card->generation, short_reset);
219
220	return card->driver->update_phy_reg(card, reg, 0, bit);
221}
222
223void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
224{
225	trace_bus_reset_schedule(card->index, card->generation, short_reset);
226
227	/* We don't try hard to sort out requests of long vs. short resets. */
228	card->br_short = short_reset;
229
230	/* Use an arbitrary short delay to combine multiple reset requests. */
231	fw_card_get(card);
232	if (!queue_delayed_work(fw_workqueue, &card->br_work,
233				delayed ? DIV_ROUND_UP(HZ, 100) : 0))
234		fw_card_put(card);
235}
236EXPORT_SYMBOL(fw_schedule_bus_reset);
237
238static void br_work(struct work_struct *work)
239{
240	struct fw_card *card = container_of(work, struct fw_card, br_work.work);
241
242	/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
243	if (card->reset_jiffies != 0 &&
244	    time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
245		trace_bus_reset_postpone(card->index, card->generation, card->br_short);
246
247		if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
248			fw_card_put(card);
249		return;
250	}
251
252	fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
253			   FW_PHY_CONFIG_CURRENT_GAP_COUNT);
254	reset_bus(card, card->br_short);
255	fw_card_put(card);
256}
257
258static void allocate_broadcast_channel(struct fw_card *card, int generation)
259{
260	int channel, bandwidth = 0;
261
262	if (!card->broadcast_channel_allocated) {
263		fw_iso_resource_manage(card, generation, 1ULL << 31,
264				       &channel, &bandwidth, true);
265		if (channel != 31) {
266			fw_notice(card, "failed to allocate broadcast channel\n");
267			return;
268		}
269		card->broadcast_channel_allocated = true;
270	}
271
272	device_for_each_child(card->device, (void *)(long)generation,
273			      fw_device_set_broadcast_channel);
274}
275
276static const char gap_count_table[] = {
277	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
278};
279
280void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
281{
282	fw_card_get(card);
283	if (!schedule_delayed_work(&card->bm_work, delay))
284		fw_card_put(card);
285}
286
287static void bm_work(struct work_struct *work)
288{
289	struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
290	struct fw_device *root_device, *irm_device;
291	struct fw_node *root_node;
292	int root_id, new_root_id, irm_id, bm_id, local_id;
293	int gap_count, generation, grace, rcode;
294	bool do_reset = false;
295	bool root_device_is_running;
296	bool root_device_is_cmc;
297	bool irm_is_1394_1995_only;
298	bool keep_this_irm;
299	__be32 transaction_data[2];
300
301	spin_lock_irq(&card->lock);
302
303	if (card->local_node == NULL) {
304		spin_unlock_irq(&card->lock);
305		goto out_put_card;
306	}
307
308	generation = card->generation;
309
310	root_node = card->root_node;
311	fw_node_get(root_node);
312	root_device = root_node->data;
313	root_device_is_running = root_device &&
314			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
315	root_device_is_cmc = root_device && root_device->cmc;
316
317	irm_device = card->irm_node->data;
318	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
319			(irm_device->config_rom[2] & 0x000000f0) == 0;
320
321	/* Canon MV5i works unreliably if it is not root node. */
322	keep_this_irm = irm_device && irm_device->config_rom &&
323			irm_device->config_rom[3] >> 8 == CANON_OUI;
324
325	root_id  = root_node->node_id;
326	irm_id   = card->irm_node->node_id;
327	local_id = card->local_node->node_id;
328
329	grace = time_after64(get_jiffies_64(),
330			     card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
331
332	if ((is_next_generation(generation, card->bm_generation) &&
333	     !card->bm_abdicate) ||
334	    (card->bm_generation != generation && grace)) {
335		/*
336		 * This first step is to figure out who is IRM and
337		 * then try to become bus manager.  If the IRM is not
338		 * well defined (e.g. does not have an active link
 339		 * layer or does not respond to our lock request), we
340		 * will have to do a little vigilante bus management.
341		 * In that case, we do a goto into the gap count logic
342		 * so that when we do the reset, we still optimize the
343		 * gap count.  That could well save a reset in the
344		 * next generation.
345		 */
346
347		if (!card->irm_node->link_on) {
348			new_root_id = local_id;
349			fw_notice(card, "%s, making local node (%02x) root\n",
350				  "IRM has link off", new_root_id);
351			goto pick_me;
352		}
353
354		if (irm_is_1394_1995_only && !keep_this_irm) {
355			new_root_id = local_id;
356			fw_notice(card, "%s, making local node (%02x) root\n",
357				  "IRM is not 1394a compliant", new_root_id);
358			goto pick_me;
359		}
360
361		transaction_data[0] = cpu_to_be32(0x3f);
362		transaction_data[1] = cpu_to_be32(local_id);
363
364		spin_unlock_irq(&card->lock);
365
366		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
367				irm_id, generation, SCODE_100,
368				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
369				transaction_data, 8);
370
371		if (rcode == RCODE_GENERATION)
372			/* Another bus reset, BM work has been rescheduled. */
373			goto out;
374
375		bm_id = be32_to_cpu(transaction_data[0]);
376
377		scoped_guard(spinlock_irq, &card->lock) {
378			if (rcode == RCODE_COMPLETE && generation == card->generation)
379				card->bm_node_id =
380				    bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
381		}
382
383		if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
384			/* Somebody else is BM.  Only act as IRM. */
385			if (local_id == irm_id)
386				allocate_broadcast_channel(card, generation);
387
388			goto out;
389		}
390
391		if (rcode == RCODE_SEND_ERROR) {
392			/*
393			 * We have been unable to send the lock request due to
394			 * some local problem.  Let's try again later and hope
395			 * that the problem has gone away by then.
396			 */
397			fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
398			goto out;
399		}
400
401		spin_lock_irq(&card->lock);
402
403		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
404			/*
405			 * The lock request failed, maybe the IRM
406			 * isn't really IRM capable after all. Let's
407			 * do a bus reset and pick the local node as
408			 * root, and thus, IRM.
409			 */
410			new_root_id = local_id;
411			fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
412				  fw_rcode_string(rcode), new_root_id);
413			goto pick_me;
414		}
415	} else if (card->bm_generation != generation) {
416		/*
417		 * We weren't BM in the last generation, and the last
418		 * bus reset is less than 125ms ago.  Reschedule this job.
419		 */
420		spin_unlock_irq(&card->lock);
421		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
422		goto out;
423	}
424
425	/*
426	 * We're bus manager for this generation, so next step is to
427	 * make sure we have an active cycle master and do gap count
428	 * optimization.
429	 */
430	card->bm_generation = generation;
431
432	if (card->gap_count == 0) {
433		/*
434		 * If self IDs have inconsistent gap counts, do a
435		 * bus reset ASAP. The config rom read might never
436		 * complete, so don't wait for it. However, still
437		 * send a PHY configuration packet prior to the
438		 * bus reset. The PHY configuration packet might
439		 * fail, but 1394-2008 8.4.5.2 explicitly permits
440		 * it in this case, so it should be safe to try.
441		 */
442		new_root_id = local_id;
443		/*
444		 * We must always send a bus reset if the gap count
445		 * is inconsistent, so bypass the 5-reset limit.
446		 */
447		card->bm_retries = 0;
448	} else if (root_device == NULL) {
449		/*
450		 * Either link_on is false, or we failed to read the
451		 * config rom.  In either case, pick another root.
452		 */
453		new_root_id = local_id;
454	} else if (!root_device_is_running) {
455		/*
456		 * If we haven't probed this device yet, bail out now
457		 * and let's try again once that's done.
458		 */
459		spin_unlock_irq(&card->lock);
460		goto out;
461	} else if (root_device_is_cmc) {
462		/*
463		 * We will send out a force root packet for this
464		 * node as part of the gap count optimization.
465		 */
466		new_root_id = root_id;
467	} else {
468		/*
469		 * Current root has an active link layer and we
470		 * successfully read the config rom, but it's not
471		 * cycle master capable.
472		 */
473		new_root_id = local_id;
474	}
475
476 pick_me:
477	/*
478	 * Pick a gap count from 1394a table E-1.  The table doesn't cover
479	 * the typically much larger 1394b beta repeater delays though.
480	 */
481	if (!card->beta_repeaters_present &&
482	    root_node->max_hops < ARRAY_SIZE(gap_count_table))
483		gap_count = gap_count_table[root_node->max_hops];
484	else
485		gap_count = 63;
486
487	/*
488	 * Finally, figure out if we should do a reset or not.  If we have
489	 * done less than 5 resets with the same physical topology and we
490	 * have either a new root or a new gap count setting, let's do it.
491	 */
492
493	if (card->bm_retries++ < 5 &&
494	    (card->gap_count != gap_count || new_root_id != root_id))
495		do_reset = true;
496
497	spin_unlock_irq(&card->lock);
498
499	if (do_reset) {
500		fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
501			  new_root_id, gap_count);
502		fw_send_phy_config(card, new_root_id, generation, gap_count);
503		/*
504		 * Where possible, use a short bus reset to minimize
505		 * disruption to isochronous transfers. But in the event
506		 * of a gap count inconsistency, use a long bus reset.
507		 *
508		 * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
509		 * may set different gap counts after a bus reset. On a mixed
510		 * 1394/1394a bus, a short bus reset can get doubled. Some
511		 * nodes may treat the double reset as one bus reset and others
512		 * may treat it as two, causing a gap count inconsistency
513		 * again. Using a long bus reset prevents this.
514		 */
515		reset_bus(card, card->gap_count != 0);
516		/* Will allocate broadcast channel after the reset. */
517		goto out;
518	}
519
520	if (root_device_is_cmc) {
521		/*
522		 * Make sure that the cycle master sends cycle start packets.
523		 */
524		transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
525		rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
526				root_id, generation, SCODE_100,
527				CSR_REGISTER_BASE + CSR_STATE_SET,
528				transaction_data, 4);
529		if (rcode == RCODE_GENERATION)
530			goto out;
531	}
532
533	if (local_id == irm_id)
534		allocate_broadcast_channel(card, generation);
535
536 out:
537	fw_node_put(root_node);
538 out_put_card:
539	fw_card_put(card);
540}
541
542void fw_card_initialize(struct fw_card *card,
543			const struct fw_card_driver *driver,
544			struct device *device)
545{
546	static atomic_t index = ATOMIC_INIT(-1);
547
548	card->index = atomic_inc_return(&index);
549	card->driver = driver;
550	card->device = device;
551	card->current_tlabel = 0;
552	card->tlabel_mask = 0;
553	card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
554	card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
555	card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
556	card->split_timeout_jiffies =
557			DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
558	card->color = 0;
559	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
560
561	kref_init(&card->kref);
562	init_completion(&card->done);
563	INIT_LIST_HEAD(&card->transaction_list);
564	INIT_LIST_HEAD(&card->phy_receiver_list);
565	spin_lock_init(&card->lock);
566
567	card->local_node = NULL;
568
569	INIT_DELAYED_WORK(&card->br_work, br_work);
570	INIT_DELAYED_WORK(&card->bm_work, bm_work);
571}
572EXPORT_SYMBOL(fw_card_initialize);
573
574int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
575		unsigned int supported_isoc_contexts)
576{
577	struct workqueue_struct *isoc_wq;
578	int ret;
579
580	// This workqueue should be:
581	//  * != WQ_BH			Sleepable.
 582	//  * == WQ_UNBOUND		Any core can process data for an isoc context. The
 583	//				implementation of a unit protocol could consume the core
 584	//				for longer somehow.
585	//  * != WQ_MEM_RECLAIM		Not used for any backend of block device.
 586	//  * == WQ_FREEZABLE		Isochronous communication is at regular intervals in real
 587	//				time, thus should be drained if possible at the freeze phase.
588	//  * == WQ_HIGHPRI		High priority to process semi-realtime timestamped data.
589	//  * == WQ_SYSFS		Parameters are available via sysfs.
590	//  * max_active == n_it + n_ir	A hardIRQ could notify events for multiple isochronous
591	//				contexts if they are scheduled to the same cycle.
592	isoc_wq = alloc_workqueue("firewire-isoc-card%u",
593				  WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
594				  supported_isoc_contexts, card->index);
595	if (!isoc_wq)
596		return -ENOMEM;
597
598	card->max_receive = max_receive;
599	card->link_speed = link_speed;
600	card->guid = guid;
601
602	guard(mutex)(&card_mutex);
603
604	generate_config_rom(card, tmp_config_rom);
605	ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
606	if (ret < 0) {
607		destroy_workqueue(isoc_wq);
608		return ret;
609	}
610
611	card->isoc_wq = isoc_wq;
612	list_add_tail(&card->link, &card_list);
613
614	return 0;
615}
616EXPORT_SYMBOL(fw_card_add);
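
Relative to v6.2, fw_card_add() now also takes supported_isoc_contexts, which becomes max_active of the per-card isochronous workqueue allocated above; a controller driver passes the total number of isochronous transmit and receive contexts it exposes (names here are illustrative):

	unsigned int n_contexts = n_ir_contexts + n_it_contexts;	/* hypothetical per-controller counts */

	ret = fw_card_add(card, max_receive, link_speed, guid, n_contexts);
	if (ret < 0)
		return ret;
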
617
618/*
619 * The next few functions implement a dummy driver that is used once a card
620 * driver shuts down an fw_card.  This allows the driver to cleanly unload,
621 * as all IO to the card will be handled (and failed) by the dummy driver
622 * instead of calling into the module.  Only functions for iso context
623 * shutdown still need to be provided by the card driver.
624 *
625 * .read/write_csr() should never be called anymore after the dummy driver
626 * was bound since they are only used within request handler context.
627 * .set_config_rom() is never called since the card is taken out of card_list
628 * before switching to the dummy driver.
629 */
630
631static int dummy_read_phy_reg(struct fw_card *card, int address)
632{
633	return -ENODEV;
634}
635
636static int dummy_update_phy_reg(struct fw_card *card, int address,
637				int clear_bits, int set_bits)
638{
639	return -ENODEV;
640}
641
642static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
643{
644	packet->callback(packet, card, RCODE_CANCELLED);
645}
646
647static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
648{
649	packet->callback(packet, card, RCODE_CANCELLED);
650}
651
652static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
653{
654	return -ENOENT;
655}
656
657static int dummy_enable_phys_dma(struct fw_card *card,
658				 int node_id, int generation)
659{
660	return -ENODEV;
661}
662
663static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
664				int type, int channel, size_t header_size)
665{
666	return ERR_PTR(-ENODEV);
667}
668
669static u32 dummy_read_csr(struct fw_card *card, int csr_offset)
670{
671	return 0;
672}
673
674static void dummy_write_csr(struct fw_card *card, int csr_offset, u32 value)
675{
676}
677
678static int dummy_start_iso(struct fw_iso_context *ctx,
679			   s32 cycle, u32 sync, u32 tags)
680{
681	return -ENODEV;
682}
683
684static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
685{
686	return -ENODEV;
687}
688
689static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
690			   struct fw_iso_buffer *buffer, unsigned long payload)
691{
692	return -ENODEV;
693}
694
695static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
696{
697}
698
699static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
700{
701	return -ENODEV;
702}
703
704static const struct fw_card_driver dummy_driver_template = {
705	.read_phy_reg		= dummy_read_phy_reg,
706	.update_phy_reg		= dummy_update_phy_reg,
707	.send_request		= dummy_send_request,
708	.send_response		= dummy_send_response,
709	.cancel_packet		= dummy_cancel_packet,
710	.enable_phys_dma	= dummy_enable_phys_dma,
711	.read_csr		= dummy_read_csr,
712	.write_csr		= dummy_write_csr,
713	.allocate_iso_context	= dummy_allocate_iso_context,
714	.start_iso		= dummy_start_iso,
715	.set_iso_channels	= dummy_set_iso_channels,
716	.queue_iso		= dummy_queue_iso,
717	.flush_queue_iso	= dummy_flush_queue_iso,
718	.flush_iso_completions	= dummy_flush_iso_completions,
719};
720
721void fw_card_release(struct kref *kref)
722{
723	struct fw_card *card = container_of(kref, struct fw_card, kref);
724
725	complete(&card->done);
726}
727EXPORT_SYMBOL_GPL(fw_card_release);
728
729void fw_core_remove_card(struct fw_card *card)
730{
731	struct fw_card_driver dummy_driver = dummy_driver_template;
732
733	might_sleep();
734
735	card->driver->update_phy_reg(card, 4,
736				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
737	fw_schedule_bus_reset(card, false, true);
738
739	scoped_guard(mutex, &card_mutex)
740		list_del_init(&card->link);
741
742	/* Switch off most of the card driver interface. */
743	dummy_driver.free_iso_context	= card->driver->free_iso_context;
744	dummy_driver.stop_iso		= card->driver->stop_iso;
745	card->driver = &dummy_driver;
746	drain_workqueue(card->isoc_wq);
747
748	scoped_guard(spinlock_irqsave, &card->lock)
749		fw_destroy_nodes(card);
750
751	/* Wait for all users, especially device workqueue jobs, to finish. */
752	fw_card_put(card);
753	wait_for_completion(&card->done);
754
755	destroy_workqueue(card->isoc_wq);
756
757	WARN_ON(!list_empty(&card->transaction_list));
758}
759EXPORT_SYMBOL(fw_core_remove_card);
760
761/**
762 * fw_card_read_cycle_time: read from Isochronous Cycle Timer Register of 1394 OHCI in MMIO region
763 *			    for controller card.
764 * @card: The instance of card for 1394 OHCI controller.
765 * @cycle_time: The pointer to store the value of cycle time for the read operation.
766 *
767 * Read value from Isochronous Cycle Timer Register of 1394 OHCI in MMIO region for the given
768 * controller card. This function accesses the region without any lock primitives or IRQ mask.
769 * When returning successfully, the content of the @cycle_time argument has a value aligned to host endianness,
770 * formatted by the CYCLE_TIME CSR register of the IEEE 1394 standard.
771 *
772 * Context: Any context.
773 * Return:
774 * * 0 - Read successfully.
775 * * -ENODEV - The controller is unavailable due to being removed or unbound.
776 */
777int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time)
778{
779	if (card->driver->read_csr == dummy_read_csr)
780		return -ENODEV;
781
782	// It's possible to switch to dummy driver between the above and the below. This is the best
783	// effort to return -ENODEV.
784	*cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
785	return 0;
786}
787EXPORT_SYMBOL_GPL(fw_card_read_cycle_time);