// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>
#include <asm/page.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"

#include <trace/events/firewire.h>

static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk);

#define CREATE_TRACE_POINTS
#include <trace/events/firewire_ohci.h>

#define ohci_notice(ohci, f, args...)	dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...)	dev_err(ohci->card.device, f, ##args)

#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

#define DESCRIPTOR_CMD			(0xf << 12)

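/*
 * A hardware DMA descriptor as defined by OHCI 1394: 16 bytes, 16-byte
 * aligned.  req_count, control, data_address and branch_address are set up
 * by the driver; res_count and transfer_status (xferStatus) are written
 * back by the controller.
 */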
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __aligned(16);

#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

#define AR_BUFFER_SIZE	(32*1024)
#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD	4096
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
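/*
 * The AR buffers form a ring of AR_BUFFERS pages.  A single packet (up to
 * 16 bytes of header, MAX_ASYNC_PAYLOAD bytes of payload, and a 4-byte
 * status trailer quadlet) can span up to AR_WRAPAROUND_PAGES pages; see
 * ar_context_init(), which maps the first pages a second time behind the
 * ring so that such a wrapped packet is contiguous in the kernel mapping.
 */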

struct ar_context {
	struct fw_ohci *ohci;
	struct page *pages[AR_BUFFERS];
	void *buffer;
	struct descriptor *descriptors;
	dma_addr_t descriptors_bus;
	void *pointer;
	unsigned int last_buffer_index;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;
	u32 current_bus;
	bool running;
	bool flushing;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor block in the DMA program. It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;
	int prev_z;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;
	size_t header_length;
	unsigned long flushing_completions;
	u32 mc_buffer_bus;
	u16 mc_completed;
	u16 last_timestamp;
	u8 sync;
	u8 tags;
};

#define CONFIG_ROM_SIZE		(CSR_CONFIG_ROM_END - CSR_CONFIG_ROM)

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	unsigned quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool bus_time_running;
	bool is_root;
	bool csr_state_setclear_abdicate;
	int n_ir;
	int n_it;
	/*
	 * Spinlock for accessing fw_ohci data. Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	void *misc_buffer;
	dma_addr_t misc_buffer_bus;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_support;
	u32 it_context_mask;	/* unoccupied IT contexts */
	struct iso_context *it_context_list;
	u64 ir_context_channels;	/* unoccupied channels */
	u32 ir_context_support;
	u32 ir_context_mask;	/* unoccupied IR contexts */
	struct iso_context *ir_context_list;
	u64 mc_channels;	/* channels in use by the multichannel IR context */
	bool mc_allocated;

	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	__le32 *self_id;
	dma_addr_t self_id_bus;
	struct work_struct bus_reset_work;

	u32 self_id_buffer[512];
};

static struct workqueue_struct *selfid_workqueue;

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd
#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394	0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26	0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2	0x8025
#define PCI_DEVICE_ID_VIA_VT630X	0x3044
#define PCI_REV_ID_VIA_VT6306		0x46
#define PCI_DEVICE_ID_VIA_VT6315	0x3403

#define QUIRK_CYCLE_TIMER		0x1
#define QUIRK_RESET_PACKET		0x2
#define QUIRK_BE_HEADERS		0x4
#define QUIRK_NO_1394A			0x8
#define QUIRK_NO_MSI			0x10
#define QUIRK_TI_SLLZ059		0x20
#define QUIRK_IR_WAKE			0x40

// On PCI Express Root Complexes in any type of AMD Ryzen machine, VIA VT6306/6307/6308 behind an
// Asmedia ASM1083/1085 PCIe-to-PCI bridge has the inconvenient property that read accesses to the
// 'Isochronous Cycle Timer' register (at offset 0xf0 in PCI I/O space) often cause an unexpected
// system reboot. The exact mechanism is unclear, since reads of other registers (e.g. the
// 'Node ID' register) are safe; it is probably triggered by the detection of some type of PCIe
// error.
#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ	0x80000000

#if IS_ENABLED(CONFIG_X86)

static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
{
	return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
}

#define PCI_DEVICE_ID_ASMEDIA_ASM108X	0x1080

static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
{
	const struct pci_dev *pcie_to_pci_bridge;

	// Detect any type of AMD Ryzen machine.
	if (!static_cpu_has(X86_FEATURE_ZEN))
		return false;

	// Detect VIA VT6306/6307/6308.
	if (pdev->vendor != PCI_VENDOR_ID_VIA)
		return false;
	if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
		return false;

	// Detect Asmedia ASM1083/1085.
	pcie_to_pci_bridge = pdev->bus->self;
	if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
		return false;
	if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
		return false;

	return true;
}

#else
#define has_reboot_by_cycle_timer_read_quirk(ohci) false
#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false
#endif

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
		QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};

/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianness = "	__stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
	", disable MSI = "		__stringify(QUIRK_NO_MSI)
	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
	", IR wake unreliable = "	__stringify(QUIRK_IR_WAKE)
	")");

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging, deprecated in v6.11 kernel or later. (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", or a combination, or all = -1)");

static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");

static void log_irqs(struct fw_ohci *ohci, u32 evt)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS)))
		return;

	ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
		    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
		    evt & OHCI1394_RQPkt		? " AR_req"		: "",
		    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
		    evt & OHCI1394_reqTxComplete	? " AT_req"		: "",
		    evt & OHCI1394_respTxComplete	? " AT_resp"		: "",
		    evt & OHCI1394_isochRx		? " IR"			: "",
		    evt & OHCI1394_isochTx		? " IT"			: "",
		    evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
		    evt & OHCI1394_cycleTooLong		? " cycleTooLong"	: "",
		    evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
		    evt & OHCI1394_cycleInconsistent	? " cycleInconsistent"	: "",
		    evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
		    evt & OHCI1394_unrecoverableError	? " unrecoverableError"	: "",
		    evt & OHCI1394_busReset		? " busReset"		: "",
		    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
			    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
			    OHCI1394_respTxComplete | OHCI1394_isochRx |
			    OHCI1394_isochTx | OHCI1394_postedWriteErr |
			    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
			    OHCI1394_cycleInconsistent |
			    OHCI1394_regAccessFail | OHCI1394_busReset)
							? " ?"			: "");
}

static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
{
	static const char *const speed[] = {
		[0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
	};
	static const char *const power[] = {
		[0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
		[4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
	};
	static const char port[] = {
		[PHY_PACKET_SELF_ID_PORT_STATUS_NONE] = '.',
		[PHY_PACKET_SELF_ID_PORT_STATUS_NCONN] = '-',
		[PHY_PACKET_SELF_ID_PORT_STATUS_PARENT] = 'p',
		[PHY_PACKET_SELF_ID_PORT_STATUS_CHILD] = 'c',
	};
	struct self_id_sequence_enumerator enumerator = {
		.cursor = ohci->self_id_buffer,
		.quadlet_count = self_id_count,
	};

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
		    self_id_count, generation, ohci->node_id);

	while (enumerator.quadlet_count > 0) {
		unsigned int quadlet_count;
		unsigned int port_index;
		const u32 *s;
		int i;

		s = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
		if (IS_ERR(s))
			break;

		ohci_notice(ohci,
		    "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
		    *s,
		    phy_packet_self_id_get_phy_id(*s),
		    port[self_id_sequence_get_port_status(s, quadlet_count, 0)],
		    port[self_id_sequence_get_port_status(s, quadlet_count, 1)],
		    port[self_id_sequence_get_port_status(s, quadlet_count, 2)],
		    speed[*s >> 14 & 3], *s >> 16 & 63,
		    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
		    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");

		port_index = 3;
		for (i = 1; i < quadlet_count; ++i) {
			ohci_notice(ohci,
			    "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    s[i],
			    phy_packet_self_id_get_phy_id(s[i]),
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 1)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 2)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 3)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 4)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 5)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 6)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 7)]
			);

			port_index += 8;
		}
	}
}

static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};

static void log_ar_at_event(struct fw_ohci *ohci,
			    char dir, int speed, u32 *header, int evt)
{
	static const char *const tcodes[] = {
		[TCODE_WRITE_QUADLET_REQUEST]	= "QW req",
		[TCODE_WRITE_BLOCK_REQUEST]	= "BW req",
		[TCODE_WRITE_RESPONSE]		= "W resp",
		[0x3]				= "-reserved-",
		[TCODE_READ_QUADLET_REQUEST]	= "QR req",
		[TCODE_READ_BLOCK_REQUEST]	= "BR req",
		[TCODE_READ_QUADLET_RESPONSE]	= "QR resp",
		[TCODE_READ_BLOCK_RESPONSE]	= "BR resp",
		[TCODE_CYCLE_START]		= "cycle start",
		[TCODE_LOCK_REQUEST]		= "Lk req",
		[TCODE_STREAM_DATA]		= "async stream packet",
		[TCODE_LOCK_RESPONSE]		= "Lk resp",
		[0xc]				= "-reserved-",
		[0xd]				= "-reserved-",
		[TCODE_LINK_INTERNAL]		= "link internal",
		[0xf]				= "-reserved-",
	};
	int tcode = async_header_get_tcode(header);
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
			    dir, (header[2] >> 16) & 0xff);
		return;
	}

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
	case TCODE_CYCLE_START:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		snprintf(specific, sizeof(specific), " %x,%x",
			 async_header_get_data_length(header),
			 async_header_get_extended_tcode(header));
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case TCODE_STREAM_DATA:
		ohci_notice(ohci, "A%c %s, %s\n",
			    dir, evts[evt], tcodes[tcode]);
		break;
	case TCODE_LINK_INTERNAL:
		ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
			    dir, evts[evt], header[1], header[2]);
		break;
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		ohci_notice(ohci,
			    "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n",
			    dir, speed, async_header_get_tlabel(header),
			    async_header_get_source(header), async_header_get_destination(header),
			    evts[evt], tcodes[tcode], async_header_get_offset(header), specific);
		break;
	default:
		ohci_notice(ohci,
			    "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
			    dir, speed, async_header_get_tlabel(header),
			    async_header_get_source(header), async_header_get_destination(header),
			    evts[evt], tcodes[tcode], specific);
	}
}

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

/*
 * Beware!  read_phy_reg(), write_phy_reg(), update_phy_reg(), and
 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
 * directly.  Exceptions are intrinsically serialized contexts like pci_probe.
 */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting.  Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to read phy reg %d\n", addr);
	dump_stack();

	return -EBUSY;
}

static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
	dump_stack();

	return -EBUSY;
}

static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);
	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}

static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);

	guard(mutex)(&ohci->phy_reg_mutex);

	return read_phy_reg(ohci, addr);
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);

	guard(mutex)(&ohci->phy_reg_mutex);

	return update_phy_reg(ohci, addr, clear_bits, set_bits);
}

static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
	return page_private(ctx->pages[i]);
}

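/*
 * Hand the buffer page at @index back to the controller:  reset its
 * descriptor (res_count full, Z=0 in its branch address), then set Z=1 in
 * the previous descriptor's branch address so that the controller may
 * branch into it, and wake up the context.
 */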
static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	d->branch_address &= cpu_to_le32(~0xf);
	d->res_count = cpu_to_le16(PAGE_SIZE);
	d->transfer_status = 0;

	wmb(); /* finish init of new descriptors before branch_address update */
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

static void ar_context_release(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	unsigned int i;

	if (!ctx->buffer)
		return;

	vunmap(ctx->buffer);

	for (i = 0; i < AR_BUFFERS; i++) {
		if (ctx->pages[i])
			dma_free_pages(dev, PAGE_SIZE, ctx->pages[i],
				       ar_buffer_bus(ctx, i), DMA_FROM_DEVICE);
	}
}

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
	struct fw_ohci *ohci = ctx->ohci;

	if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
		reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
		flush_writes(ohci);

		ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
	}
	/* FIXME: restart? */
}

static inline unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
	return ar_next_buffer_index(ctx->last_buffer_index);
}

/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i, next_i, last = ctx->last_buffer_index;
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
	res_count = READ_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
		next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at this
		 * descriptor.
		 */
		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
			/*
			 * The exception is when the DMA data for one packet is
			 * split over three buffers; in this case, the middle
			 * buffer's descriptor might never be updated by the
			 * controller and still look empty, and we have to peek
			 * at the third one.
			 */
			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
				next_i = ar_next_buffer_index(next_i);
				rmb();
				next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
				if (next_res_count != cpu_to_le16(PAGE_SIZE))
					goto next_buffer_is_active;
			}

			break;
		}

next_buffer_is_active:
		i = next_i;
		res_count = next_res_count;
	}

	rmb(); /* read res_count before the DMA data */

	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	if (*buffer_offset > PAGE_SIZE) {
		*buffer_offset = 0;
		ar_context_abort(ctx, "corrupted descriptor");
	}

	return i;
}

static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
				    unsigned int end_buffer_index,
				    unsigned int end_buffer_offset)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer_index) {
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					PAGE_SIZE, DMA_FROM_DEVICE);
		i = ar_next_buffer_index(i);
	}
	if (end_buffer_offset > 0)
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					end_buffer_offset, DMA_FROM_DEVICE);
}

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk)
{
	return has_be_header_quirk ? (__force __u32)value : le32_to_cpu(value);
}

static bool has_be_header_quirk(const struct fw_ohci *ohci)
{
	return !!(ohci->quirks & QUIRK_BE_HEADERS);
}
#else
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk __maybe_unused)
{
	return le32_to_cpu(value);
}

static bool has_be_header_quirk(const struct fw_ohci *ohci)
{
	return false;
}
#endif

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0], has_be_header_quirk(ohci));
	p.header[1] = cond_le32_to_cpu(buffer[1], has_be_header_quirk(ohci));
	p.header[2] = cond_le32_to_cpu(buffer[2], has_be_header_quirk(ohci));

	tcode = async_header_get_tcode(p.header);
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
		p.header_length = 16;
		p.payload_length = async_header_get_data_length(p.header);
		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
			ar_context_abort(ctx, "invalid packet length");
			return NULL;
		}
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_LINK_INTERNAL:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		ar_context_abort(ctx, "invalid tcode");
		return NULL;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
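	/*
	 * Each AR packet is followed by one status quadlet written by the
	 * controller:  xferStatus in the upper 16 bits (event code in bits
	 * 20-16, speed in bits 23-21) and the timestamp in the lower 16 bits.
	 */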
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length], has_be_header_quirk(ohci));
	evt = (status >> 16) & 0x1f;

	p.ack = evt - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event(ohci, 'R', p.speed, p.header, evt);

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status && tcode == TCODE_LINK_INTERNAL)
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_work).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}

static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
	void *next;

	while (p < end) {
		next = handle_ar_packet(ctx, p);
		if (!next)
			return p;
		p = next;
	}

	return p;
}

static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer) {
		dma_sync_single_for_device(ctx->ohci->card.device,
					   ar_buffer_bus(ctx, i),
					   PAGE_SIZE, DMA_FROM_DEVICE);
		ar_context_link_page(ctx, i);
		i = ar_next_buffer_index(i);
	}
}

static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	unsigned int end_buffer_index, end_buffer_offset;
	void *p, *end;

	p = ctx->pointer;
	if (!p)
		return;

	end_buffer_index = ar_search_last_active_buffer(ctx,
							&end_buffer_offset);
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		/*
		 * The filled part of the overall buffer wraps around; handle
		 * all packets up to the buffer end here.  If the last packet
		 * wraps around, its tail will be visible after the buffer end
		 * because the buffer start pages are mapped there again.
		 */
		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
		p = handle_ar_packets(ctx, p, buffer_end);
		if (p < buffer_end)
			goto error;
		/* adjust p to point back into the actual buffer */
		p -= AR_BUFFERS * PAGE_SIZE;
	}

	p = handle_ar_packets(ctx, p, end);
	if (p != end) {
		if (p > end)
			ar_context_abort(ctx, "inconsistent descriptor");
		goto error;
	}

	ctx->pointer = p;
	ar_recycle_buffers(ctx, end_buffer_index);

	return;

error:
	ctx->pointer = NULL;
}

static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
			   unsigned int descriptors_offset, u32 regs)
{
	struct device *dev = ohci->card.device;
	unsigned int i;
	dma_addr_t dma_addr;
	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
	struct descriptor *d;

	ctx->regs = regs;
	ctx->ohci = ohci;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	for (i = 0; i < AR_BUFFERS; i++) {
		ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
						DMA_FROM_DEVICE, GFP_KERNEL);
		if (!ctx->pages[i])
			goto out_of_memory;
		set_page_private(ctx->pages[i], dma_addr);
		dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
					   DMA_FROM_DEVICE);
	}

	for (i = 0; i < AR_BUFFERS; i++)
		pages[i] = ctx->pages[i];
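	/*
	 * Map the first AR_WRAPAROUND_PAGES pages a second time behind the
	 * ring, so that a packet wrapping around the buffer end is contiguous
	 * in the virtual mapping (see ar_context_tasklet()).
	 */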
	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
		pages[AR_BUFFERS + i] = ctx->pages[i];
	ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
	if (!ctx->buffer)
		goto out_of_memory;

	ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;

	for (i = 0; i < AR_BUFFERS; i++) {
		d = &ctx->descriptors[i];
		d->req_count      = cpu_to_le16(PAGE_SIZE);
		d->control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						DESCRIPTOR_STATUS |
						DESCRIPTOR_BRANCH_ALWAYS);
		d->data_address   = cpu_to_le32(ar_buffer_bus(ctx, i));
		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
			ar_next_buffer_index(i) * sizeof(struct descriptor));
	}

	return 0;

out_of_memory:
	ar_context_release(ctx);

	return -ENOMEM;
}

static void ar_context_run(struct ar_context *ctx)
{
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++)
		ar_context_link_page(ctx, i);

	ctx->pointer = ctx->buffer;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
}

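/*
 * Return the descriptor within the block d[0..z-1] that carries the branch
 * address:  normally the last one, except for a two-descriptor block whose
 * first descriptor already branches unconditionally.
 */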
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	__le16 branch;

	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return d;
	else
		return d + z - 1;
}

static void context_retire_descriptors(struct context *ctx)
{
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			  struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;
		ctx->current_bus = address;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
		    address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					  struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			// If we've advanced to the next buffer, move the previous buffer to the
			// free list.
			old_desc->used = 0;
			guard(spinlock_irqsave)(&ctx->ohci->lock);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
		}
		ctx->last = last;
	}
}

static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;

	context_retire_descriptors(ctx);
}

static void ohci_isoc_context_work(struct work_struct *work)
{
	struct fw_iso_context *base = container_of(work, struct fw_iso_context, work);
	struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);

	context_retire_descriptors(&isoc_ctx->context);
}

/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t bus_addr;
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	/*
	 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
	 * for descriptors, even 0x10-byte ones.  This can cause page faults when
	 * an IOMMU is in use and the oversized read crosses a page boundary.
	 * Work around this by always leaving at least 0x10 bytes of padding.
	 */
	desc->buffer_size = PAGE_SIZE - offset - 0x10;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}

static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
				      struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;
	ctx->prev_z = 1;

	return 0;
}

static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) {
		dmam_free_coherent(card->device, PAGE_SIZE, desc,
				   desc->buffer_bus - ((void *)&desc->buffer - (void *)desc));
	}
}

/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				  struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}

static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	ctx->running = true;
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;
	struct descriptor *d_branch;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb(); /* finish init of new descriptors before branch_address update */

	d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
	d_branch->branch_address = cpu_to_le32(d_bus | z);

	/*
	 * VT6306 incorrectly checks only the single descriptor at the
	 * CommandPtr when the wake bit is written, so if it's a
	 * multi-descriptor block starting with an INPUT_MORE, put a copy of
	 * the branch address in the first descriptor.
	 *
	 * We do not do this for transmit contexts, since it is not clear how
	 * this interacts with skip addresses.
	 */
	if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
	    d_branch != ctx->prev &&
	    (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
	     cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
		ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	}

	ctx->prev = d;
	ctx->prev_z = z;
}

static void context_stop(struct context *ctx)
{
	struct fw_ohci *ohci = ctx->ohci;
	u32 reg;
	int i;

	reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	ctx->running = false;

	for (i = 0; i < 1000; i++) {
		reg = reg_read(ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		if (i)
			udelay(10);
	}
	ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
}

struct driver_data {
	u8 inline_data[8];
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct context *ctx,
				   struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, payload_bus;
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;

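	/*
	 * An AT packet uses a block of four descriptors:  d[0] is the
	 * OUTPUT_*-immediate descriptor whose res_count field carries the
	 * timestamp, d[1] holds the immediate header quadlets (up to 16
	 * bytes), d[2] optionally describes the payload, and d[3] serves as
	 * scratch space for struct driver_data, including the inline copy of
	 * short payloads.
	 */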
	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	tcode = async_header_get_tcode(packet->header);
	header = (__le32 *) &d[1];
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		ohci1394_at_data_set_src_bus_id(header, false);
		ohci1394_at_data_set_speed(header, packet->speed);
		ohci1394_at_data_set_tlabel(header, async_header_get_tlabel(packet->header));
		ohci1394_at_data_set_retry(header, async_header_get_retry(packet->header));
		ohci1394_at_data_set_tcode(header, tcode);

		ohci1394_at_data_set_destination_id(header,
						    async_header_get_destination(packet->header));

		if (ctx == &ctx->ohci->at_response_ctx) {
			ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header));
		} else {
			ohci1394_at_data_set_destination_offset(header,
							async_header_get_offset(packet->header));
		}

		if (tcode_is_block_packet(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;
	case TCODE_LINK_INTERNAL:
		ohci1394_at_data_set_speed(header, packet->speed);
		ohci1394_at_data_set_tcode(header, TCODE_LINK_INTERNAL);

		header[1] = cpu_to_le32(packet->header[1]);
		header[2] = cpu_to_le32(packet->header[2]);
		d[0].req_count = cpu_to_le16(12);

		if (is_ping_packet(&packet->header[1]))
			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
		break;

	case TCODE_STREAM_DATA:
		ohci1394_it_data_set_speed(header, packet->speed);
		ohci1394_it_data_set_tag(header, isoc_header_get_tag(packet->header[0]));
		ohci1394_it_data_set_channel(header, isoc_header_get_channel(packet->header[0]));
		ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
		ohci1394_it_data_set_sync(header, isoc_header_get_sy(packet->header[0]));

		ohci1394_it_data_set_data_length(header, isoc_header_get_data_length(packet->header[0]));

		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		/* BUG(); */
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		if (packet->payload_length > sizeof(driver_data->inline_data)) {
			payload_bus = dma_map_single(ohci->card.device,
						     packet->payload,
						     packet->payload_length,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(ohci->card.device, payload_bus)) {
				packet->ack = RCODE_SEND_ERROR;
				return -1;
			}
			packet->payload_bus	= payload_bus;
			packet->payload_mapped	= true;
		} else {
			memcpy(driver_data->inline_data, packet->payload,
			       packet->payload_length);
			payload_bus = d_bus + 3 * sizeof(*d);
		}

		d[2].req_count    = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/* FIXME: Document how the locking works. */
	if (ohci->generation != packet->generation) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	if (ctx->running)
		reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	else
		context_run(ctx, 0);

	return 0;
}

static void at_context_flush(struct context *ctx)
{
	tasklet_disable(&ctx->tasklet);

	ctx->flushing = true;
	context_tasklet((unsigned long)ctx);
	ctx->flushing = false;

	tasklet_enable(&ctx->tasklet);
}

static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0 && !context->flushing)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A packet that was flushed should give the same error as one
		 * sent with a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		if (context->flushing)
			packet->ack = RCODE_GENERATION;
		else {
			/*
			 * Using a valid (current) generation count, but the
			 * node is not on the bus or not sending acks.
			 */
			packet->ack = RCODE_NO_ACK;
		}
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	case OHCI1394_evt_no_status:
		if (context->flushing) {
			packet->ack = RCODE_GENERATION;
			break;
		}
		fallthrough;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

static u32 get_cycle_time(struct fw_ohci *ohci);

static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = async_header_get_tcode(packet->header);
	if (tcode_is_block_packet(tcode))
		length = async_header_get_data_length(packet->header);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!tcode_is_read_request(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	// Timestamping on behalf of the hardware.
	response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel, try;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = async_header_get_tcode(packet->header);
	length = async_header_get_data_length(packet->header);
	payload = packet->payload;
	ext_tcode = async_header_get_extended_tcode(packet->header);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

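	// The controller sets bit 31 of CSRControl once the compare-swap has completed.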
	for (try = 0; try < 20; try++)
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
			lock_old = cpu_to_be32(reg_read(ohci,
							OHCI1394_CSRData));
			fw_fill_response(&response, packet->header,
					 RCODE_COMPLETE,
					 &lock_old, sizeof(lock_old));
			goto out;
		}

	ohci_err(ohci, "swap not done (CSR lock timeout)\n");
	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

 out:
	// Timestamping on behalf of the hardware.
	response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset, csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset = async_header_get_offset(packet->header);
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (async_header_get_destination(packet->header) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);

		// Timestamping on behalf of the hardware.
		packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));

		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (ret < 0) {
		// Timestamping on behalf of the hardware.
		packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));

		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void detect_dead_context(struct fw_ohci *ohci,
				const char *name, unsigned int regs)
{
	u32 ctl;

	ctl = reg_read(ohci, CONTROL_SET(regs));
	if (ctl & CONTEXT_DEAD)
		ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
			 name, evts[ctl & 0x1f]);
}

static void handle_dead_contexts(struct fw_ohci *ohci)
{
	unsigned int i;
	char name[8];

	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
	for (i = 0; i < 32; ++i) {
		if (!(ohci->it_context_support & (1 << i)))
			continue;
		sprintf(name, "IT%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
	}
	for (i = 0; i < 32; ++i) {
		if (!(ohci->ir_context_support & (1 << i)))
			continue;
		sprintf(name, "IR%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
	}
	/* TODO: maybe try to flush and restart the dead contexts */
}

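/*
 * Convert a CYCLE_TIME value (cycleSeconds in bits 31-25, cycleCount in bits
 * 24-12, cycleOffset in bits 11-0) into ticks of the 24.576 MHz cycle timer:
 * one isochronous cycle is 3072 ticks, one second is 8000 cycles.
 */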
1803static u32 cycle_timer_ticks(u32 cycle_timer)
1804{
1805 u32 ticks;
1806
1807 ticks = cycle_timer & 0xfff;
1808 ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
1809 ticks += (3072 * 8000) * (cycle_timer >> 25);
1810
1811 return ticks;
1812}
1813
/*
 * Some controllers exhibit one or more of the following bugs when updating the
 * iso cycle timer register:
 *  - When the lowest six bits are wrapping around to zero, a read that happens
 *    at the same time will return garbage in the lowest ten bits.
 *  - When the cycleOffset field wraps around to zero, the cycleCount field is
 *    not incremented for about 60 ns.
 *  - Occasionally, the entire register reads zero.
 *
 * To catch these, we read the register three times and ensure that the
 * difference between each two consecutive reads is approximately the same,
 * i.e. neither difference is more than twice the other. Furthermore, any
 * negative difference indicates an error. (A PCI read should take at least
 * 20 ticks of the 24.576 MHz timer to execute, so we have enough precision
 * to compute the ratio of the differences.)
 */
static u32 get_cycle_time(struct fw_ohci *ohci)
{
	u32 c0, c1, c2;
	u32 t0, t1, t2;
	s32 diff01, diff12;
	int i;

	if (has_reboot_by_cycle_timer_read_quirk(ohci))
		return 0;

	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
		i = 0;
		c1 = c2;
		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		do {
			c0 = c1;
			c1 = c2;
			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
			t0 = cycle_timer_ticks(c0);
			t1 = cycle_timer_ticks(c1);
			t2 = cycle_timer_ticks(c2);
			diff01 = t1 - t0;
			diff12 = t2 - t1;
		} while ((diff01 <= 0 || diff12 <= 0 ||
			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
			 && i++ < 20);
	}

	return c2;
}

/*
 * This function has to be called at least every 64 seconds. The bus_time
 * field stores not only the upper 25 bits of the BUS_TIME register but also
 * the most significant bit of the cycle timer in bit 6 so that we can detect
 * changes in this bit.
 */
static u32 update_bus_time(struct fw_ohci *ohci)
{
	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;

	if (unlikely(!ohci->bus_time_running)) {
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
		ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
				 (cycle_time_seconds & 0x40);
		ohci->bus_time_running = true;
	}

	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
		ohci->bus_time += 0x40;

	return ohci->bus_time | cycle_time_seconds;
}

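/*
 * Read the status of one PHY port: PHY register 7 selects the port (and
 * extended register page 0), after which register 8 reflects the connection
 * state of that port. Map it to the port status codes used in self-ID
 * packets.
 */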
static int get_status_for_port(struct fw_ohci *ohci, int port_index,
			       enum phy_packet_self_id_port_status *status)
{
	int reg;

	scoped_guard(mutex, &ohci->phy_reg_mutex) {
		reg = write_phy_reg(ohci, 7, port_index);
		if (reg < 0)
			return reg;

		reg = read_phy_reg(ohci, 8);
		if (reg < 0)
			return reg;
	}

	switch (reg & 0x0f) {
	case 0x06:
		// is child node (connected to parent node)
		*status = PHY_PACKET_SELF_ID_PORT_STATUS_PARENT;
		break;
	case 0x0e:
		// is parent node (connected to child node)
		*status = PHY_PACKET_SELF_ID_PORT_STATUS_CHILD;
		break;
	default:
		// not connected
		*status = PHY_PACKET_SELF_ID_PORT_STATUS_NCONN;
		break;
	}

	return 0;
}

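/*
 * Find the insertion position that keeps the self-ID buffer sorted by PHY
 * ID. Returns -1 if an entry with the same PHY ID is already present.
 */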
static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
			   int self_id_count)
{
	unsigned int left_phy_id = phy_packet_self_id_get_phy_id(self_id);
	int i;

	for (i = 0; i < self_id_count; i++) {
		u32 entry = ohci->self_id_buffer[i];
		unsigned int right_phy_id = phy_packet_self_id_get_phy_id(entry);

		if (left_phy_id == right_phy_id)
			return -1;
		if (left_phy_id < right_phy_id)
			return i;
	}
	return i;
}

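/*
 * Check whether the local PHY initiated the most recent bus reset, by
 * reading the "initiated reset" flag from paged PHY register 12 (page 7,
 * selected via register 7); the PMODE bit in register 8 apparently has to
 * be set first for the flag to be accessible.
 */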
static int detect_initiated_reset(struct fw_ohci *ohci, bool *is_initiated_reset)
{
	int reg;

	guard(mutex)(&ohci->phy_reg_mutex);

	// Select page 7
	reg = write_phy_reg(ohci, 7, 0xe0);
	if (reg < 0)
		return reg;

	reg = read_phy_reg(ohci, 8);
	if (reg < 0)
		return reg;

	// set PMODE bit
	reg |= 0x40;
	reg = write_phy_reg(ohci, 8, reg);
	if (reg < 0)
		return reg;

	// read register 12
	reg = read_phy_reg(ohci, 12);
	if (reg < 0)
		return reg;

	// bit 3 indicates "initiated reset"
	*is_initiated_reset = !!(reg & 0x08);

	return 0;
}

/*
 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
 * Construct the selfID from phy register contents.
 */
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
	int reg, i, pos, err;
	bool is_initiated_reset;
	u32 self_id = 0;

	// link active 1, speed 3, bridge 0, contender 1, more packets 0.
	phy_packet_set_packet_identifier(&self_id, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID);
	phy_packet_self_id_zero_set_link_active(&self_id, true);
	phy_packet_self_id_zero_set_scode(&self_id, SCODE_800);
	phy_packet_self_id_zero_set_contender(&self_id, true);

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return -EBUSY;
	}
	phy_packet_self_id_set_phy_id(&self_id, reg & 0x3f);

	reg = ohci_read_phy_reg(&ohci->card, 4);
	if (reg < 0)
		return reg;
	phy_packet_self_id_zero_set_power_class(&self_id, reg & 0x07);

	reg = ohci_read_phy_reg(&ohci->card, 1);
	if (reg < 0)
		return reg;
	phy_packet_self_id_zero_set_gap_count(&self_id, reg & 0x3f);

	for (i = 0; i < 3; i++) {
		enum phy_packet_self_id_port_status status;

		err = get_status_for_port(ohci, i, &status);
		if (err < 0)
			return err;

		self_id_sequence_set_port_status(&self_id, 1, i, status);
	}

	err = detect_initiated_reset(ohci, &is_initiated_reset);
	if (err < 0)
		return err;
	phy_packet_self_id_zero_set_initiated_reset(&self_id, is_initiated_reset);

	pos = get_self_id_pos(ohci, self_id, self_id_count);
	if (pos >= 0) {
		memmove(&(ohci->self_id_buffer[pos+1]),
			&(ohci->self_id_buffer[pos]),
			(self_id_count - pos) * sizeof(*ohci->self_id_buffer));
		ohci->self_id_buffer[pos] = self_id;
		self_id_count++;
	}
	return self_id_count;
}

static void bus_reset_work(struct work_struct *work)
{
	struct fw_ohci *ohci =
		container_of(work, struct fw_ohci, bus_reset_work);
	int self_id_count, generation, new_generation, i, j;
	u32 reg, quadlet;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;
	bool is_new_root;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		ohci_notice(ohci, "misconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	is_new_root = (reg & OHCI1394_NodeID_root) != 0;
	if (!(ohci->is_root && is_new_root))
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	ohci->is_root = is_new_root;

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (ohci1394_self_id_count_is_error(reg)) {
		ohci_notice(ohci, "self ID receive error\n");
		return;
	}
	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer. Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = ohci1394_self_id_count_get_size(reg) >> 1;

	if (self_id_count > 252) {
		ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
		return;
	}

	quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci));
	generation = ohci1394_self_id_receive_q0_get_generation(quadlet);
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		u32 id = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci));
		u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1], has_be_header_quirk(ohci));

		if (id != ~id2) {
			/*
			 * If the invalid data looks like a cycle start packet,
			 * it's likely to be the result of the cycle master
			 * having a wrong gap count. In this case, the self IDs
			 * so far are valid and should be processed so that the
			 * bus manager can then correct the gap count.
			 */
			if (id == 0xffff008f) {
				ohci_notice(ohci, "ignoring spurious self IDs\n");
				self_id_count = j;
				break;
			}

			ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
				    j, self_id_count, id, id2);
			return;
		}
		ohci->self_id_buffer[j] = id;
	}

	if (ohci->quirks & QUIRK_TI_SLLZ059) {
		self_id_count = find_and_insert_self_id(ohci, self_id_count);
		if (self_id_count < 0) {
			ohci_notice(ohci,
				    "could not construct local self ID\n");
			return;
		}
	}

	if (self_id_count == 0) {
		ohci_notice(ohci, "no self IDs\n");
		return;
	}
	rmb();

	/*
	 * Check the consistency of the self IDs we just read. The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer. If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data. The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out. If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	new_generation = ohci1394_self_id_count_get_generation(reg);
	if (new_generation != generation) {
		ohci_notice(ohci, "new bus reset, discarding self IDs\n");
		return;
	}

	// FIXME: Document how the locking works.
	scoped_guard(spinlock_irq, &ohci->lock) {
		ohci->generation = -1; // prevent AT packet queueing
		context_stop(&ohci->at_request_ctx);
		context_stop(&ohci->at_response_ctx);
	}

	/*
	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
	 * packets in the AT queues and software needs to drain them.
	 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
	 */
	at_context_flush(&ohci->at_request_ctx);
	at_context_flush(&ohci->at_response_ctx);

	scoped_guard(spinlock_irq, &ohci->lock) {
		ohci->generation = generation;
		reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);

		if (ohci->quirks & QUIRK_RESET_PACKET)
			ohci->request_generation = generation;

		// This next bit is unrelated to the AT context stuff but we have to do it under the
		// spinlock also. If a new config rom was set up before this reset, the old one is
		// now no longer in use and we can free it. Update the config rom pointers to point
		// to the current config rom and clear the next_config_rom pointer so a new update
		// can take place.
		if (ohci->next_config_rom != NULL) {
			if (ohci->next_config_rom != ohci->config_rom) {
				free_rom = ohci->config_rom;
				free_rom_bus = ohci->config_rom_bus;
			}
			ohci->config_rom = ohci->next_config_rom;
			ohci->config_rom_bus = ohci->next_config_rom_bus;
			ohci->next_config_rom = NULL;

			// Restore config_rom image and manually update config_rom registers.
			// Writing the header quadlet will indicate that the config rom is ready,
			// so we do that last.
			reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(ohci->config_rom[2]));
			ohci->config_rom[0] = ohci->next_header;
			reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(ohci->next_header));
		}

		if (param_remote_dma) {
			reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
			reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
		}
	}

	if (free_rom)
		dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);

	log_selfids(ohci, generation, self_id_count);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer,
				 ohci->csr_state_setclear_abdicate);
	ohci->csr_state_setclear_abdicate = false;
}

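/*
 * Top half for all OHCI interrupts: acknowledge the events (except for those
 * that must stay pending per the spec), then dispatch to the AR/AT tasklets,
 * the isochronous context work items, and the bus reset work.
 */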
static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event || !~event)
		return IRQ_NONE;

	if (unlikely(param_debug > 0)) {
		dev_notice_ratelimited(ohci->card.device,
			"The debug parameter is superseded by tracepoint events, and deprecated.");
	}

	/*
	 * busReset and postedWriteErr events must not be cleared yet
	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
	 */
	reg_write(ohci, OHCI1394_IntEventClear,
		  event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
	trace_irqs(ohci->card.index, event);
	log_irqs(ohci, event);
	// The busReset interrupt is masked here and unmasked again in
	// bus_reset_work(), which is scheduled by the selfIDComplete event.
	if (event & OHCI1394_busReset)
		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

	if (event & OHCI1394_selfIDComplete) {
		if (trace_self_id_complete_enabled()) {
			u32 reg = reg_read(ohci, OHCI1394_SelfIDCount);

			trace_self_id_complete(ohci->card.index, reg, ohci->self_id,
					       has_be_header_quirk(ohci));
		}
		queue_work(selfid_workqueue, &ohci->bus_reset_work);
	}

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	if (event & OHCI1394_isochRx) {
		iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

		while (iso_event) {
			i = ffs(iso_event) - 1;
			fw_iso_context_schedule_flush_completions(&ohci->ir_context_list[i].base);
			iso_event &= ~(1 << i);
		}
	}

	if (event & OHCI1394_isochTx) {
		iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

		while (iso_event) {
			i = ffs(iso_event) - 1;
			fw_iso_context_schedule_flush_completions(&ohci->it_context_list[i].base);
			iso_event &= ~(1 << i);
		}
	}

	if (unlikely(event & OHCI1394_regAccessFail))
		ohci_err(ohci, "register access failure\n");

	if (unlikely(event & OHCI1394_postedWriteErr)) {
		reg_read(ohci, OHCI1394_PostedWriteAddressHi);
		reg_read(ohci, OHCI1394_PostedWriteAddressLo);
		reg_write(ohci, OHCI1394_IntEventClear,
			  OHCI1394_postedWriteErr);
		dev_err_ratelimited(ohci->card.device, "PCI posted write error\n");
	}

	if (unlikely(event & OHCI1394_cycleTooLong)) {
		dev_notice_ratelimited(ohci->card.device, "isochronous cycle too long\n");
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	}

	if (unlikely(event & OHCI1394_cycleInconsistent)) {
		/*
		 * We need to clear this event bit in order to make
		 * cycleMatch isochronous I/O work. In theory we should
		 * stop active cycleMatch iso contexts now and restart
		 * them at least two cycles later. (FIXME?)
		 */
		dev_notice_ratelimited(ohci->card.device, "isochronous cycle inconsistent\n");
	}

	if (unlikely(event & OHCI1394_unrecoverableError))
		handle_dead_contexts(ohci);

	if (event & OHCI1394_cycle64Seconds) {
		guard(spinlock)(&ohci->lock);
		update_bus_time(ohci);
	} else
		flush_writes(ohci);

	return IRQ_HANDLED;
}

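/*
 * Issue a soft reset and poll, roughly up to half a second, until the
 * controller clears the softReset bit again. A register reading as all
 * ones means the card has been ejected.
 */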
static int software_reset(struct fw_ohci *ohci)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
	for (i = 0; i < 500; i++) {
		val = reg_read(ohci, OHCI1394_HCControlSet);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_HCControl_softReset))
			return 0;

		msleep(1);
	}

	return -EBUSY;
}

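/* Copy 'length' quadlets into dest and zero-pad up to CONFIG_ROM_SIZE. */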
static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
{
	size_t size = length * 4;

	memcpy(dest, src, size);
	if (size < CONFIG_ROM_SIZE)
		memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}

static int configure_1394a_enhancements(struct fw_ohci *ohci)
{
	bool enable_1394a;
	int ret, clear, set, offset;

	/* Check if the driver should configure link and PHY. */
	if (!(reg_read(ohci, OHCI1394_HCControlSet) &
	      OHCI1394_HCControl_programPhyEnable))
		return 0;

	/* Paranoia: check whether the PHY supports 1394a, too. */
	enable_1394a = false;
	ret = read_phy_reg(ohci, 2);
	if (ret < 0)
		return ret;
	if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
		ret = read_paged_phy_reg(ohci, 1, 8);
		if (ret < 0)
			return ret;
		if (ret >= 1)
			enable_1394a = true;
	}

	if (ohci->quirks & QUIRK_NO_1394A)
		enable_1394a = false;

	/* Configure PHY and link consistently. */
	if (enable_1394a) {
		clear = 0;
		set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
	} else {
		clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
		set = 0;
	}
	ret = update_phy_reg(ohci, 5, clear, set);
	if (ret < 0)
		return ret;

	if (enable_1394a)
		offset = OHCI1394_HCControlSet;
	else
		offset = OHCI1394_HCControlClear;
	reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);

	/* Clean up: configuration has been taken care of. */
	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_programPhyEnable);

	return 0;
}

static int probe_tsb41ba3d(struct fw_ohci *ohci)
{
	/* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
	static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
	int reg, i;

	reg = read_phy_reg(ohci, 2);
	if (reg < 0)
		return reg;
	if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
		return 0;

	for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
		reg = read_paged_phy_reg(ohci, 1, i + 10);
		if (reg < 0)
			return reg;
		if (reg != id[i])
			return 0;
	}
	return 1;
}

static int ohci_enable(struct fw_card *card,
		       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 lps, version, irqs;
	int i, ret;

	ret = software_reset(ohci);
	if (ret < 0) {
		ohci_err(ohci, "failed to reset ohci card\n");
		return ret;
	}

	/*
	 * Now enable LPS, which we need in order to start accessing
	 * most of the registers. In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine. Wait 50 ms to make sure we have
	 * full link enabled. However, with some cards (well, at least
	 * a JMicron PCIe card), we have to try again sometimes.
	 *
	 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
	 * cannot actually use the phy at that time. These need a pause
	 * of tens of milliseconds between the LPS write and the first
	 * phy access, too.
	 */

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);

	for (lps = 0, i = 0; !lps && i < 3; i++) {
		msleep(50);
		lps = reg_read(ohci, OHCI1394_HCControlSet) &
		      OHCI1394_HCControl_LPS;
	}

	if (!lps) {
		ohci_err(ohci, "failed to set Link Power Status\n");
		return -EIO;
	}

	if (ohci->quirks & QUIRK_TI_SLLZ059) {
		ret = probe_tsb41ba3d(ohci);
		if (ret < 0)
			return ret;
		if (ret)
			ohci_notice(ohci, "local TSB41BA3D phy\n");
		else
			ohci->quirks &= ~QUIRK_TI_SLLZ059;
	}

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
		  (200 << 16));

	ohci->bus_time_running = false;

	for (i = 0; i < 32; i++)
		if (ohci->ir_context_support & (1 << i))
			reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
				  IR_CONTEXT_MULTI_CHANNEL_MODE);

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	if (version >= OHCI_VERSION_1_1) {
		reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
			  0xfffffffe);
		card->broadcast_channel_auto_allocated = true;
	}

	/* Get implemented bits of the priority arbitration request counter. */
	reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
	ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
	reg_write(ohci, OHCI1394_FairnessControl, 0);
	card->priority_budget_implemented = ohci->pri_req_max != 0;

	reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);

	ret = configure_1394a_enhancements(ohci);
	if (ret < 0)
		return ret;

	/* Activate link_on bit and contender bit in our self ID packets. */
	ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
	if (ret < 0)
		return ret;

	/*
	 * When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active. We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately. We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled. This means we have to use the same
	 * workaround here, setting the bus header to 0 and then write
	 * the right values in bus_reset_work().
	 */

	if (config_rom) {
		ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
							    &ohci->next_config_rom_bus, GFP_KERNEL);
		if (ohci->next_config_rom == NULL)
			return -ENOMEM;

		copy_config_rom(ohci->next_config_rom, config_rom, length);
	} else {
		/*
		 * In the suspend case, config_rom is NULL, which
		 * means that we just reuse the old config rom.
		 */
		ohci->next_config_rom = ohci->config_rom;
		ohci->next_config_rom_bus = ohci->config_rom_bus;
	}

	ohci->next_header = ohci->next_config_rom[0];
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  be32_to_cpu(ohci->next_config_rom[2]));
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		OHCI1394_RQPkt | OHCI1394_RSPkt |
		OHCI1394_isochTx | OHCI1394_isochRx |
		OHCI1394_postedWriteErr |
		OHCI1394_selfIDComplete |
		OHCI1394_regAccessFail |
		OHCI1394_cycleInconsistent |
		OHCI1394_unrecoverableError |
		OHCI1394_cycleTooLong |
		OHCI1394_masterIntEnable |
		OHCI1394_busReset;
	reg_write(ohci, OHCI1394_IntMaskSet, irqs);

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);

	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_rcvPhyPkt);

	ar_context_run(&ohci->ar_request_ctx);
	ar_context_run(&ohci->ar_response_ctx);

	flush_writes(ohci);

	/* We are ready to go, reset bus to finish initialization. */
	fw_schedule_bus_reset(&ohci->card, false, true);

	return 0;
}

static int ohci_set_config_rom(struct fw_card *card,
			       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use. See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place. When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register. All automatically and atomically.
	 *
	 * Now, there's a twist to this story. The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values into these registers
	 * during the atomic update, even on little endian
	 * architectures. The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet. In bus_reset_work() we then set
	 * up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_work).
	 */

	next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
					      &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	scoped_guard(spinlock_irq, &ohci->lock) {
		// If there is not an already pending config_rom update, push our new allocation
		// into the ohci->next_config_rom and then mark the local variable as null so that
		// we won't deallocate the new buffer.
		//
		// OTOH, if there is a pending config_rom update, just use that buffer with the new
		// config_rom data, and let this routine free the unused DMA allocation.
		if (ohci->next_config_rom == NULL) {
			ohci->next_config_rom = next_config_rom;
			ohci->next_config_rom_bus = next_config_rom_bus;
			next_config_rom = NULL;
		}

		copy_config_rom(ohci->next_config_rom, config_rom, length);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
	}

	/* If we didn't use the DMA allocation, delete it. */
	if (next_config_rom != NULL) {
		dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom,
				   next_config_rom_bus);
	}

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect. We clean up the old config rom memory and DMA
	 * mappings in bus_reset_work(), since the OHCI controller
	 * could need to access it before the bus reset takes effect.
	 */

	fw_schedule_bus_reset(&ohci->card, true, true);

	return 0;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int ret = -ENOENT;

	tasklet_disable_in_atomic(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;

	// Timestamping on behalf of the hardware.
	packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));

	packet->callback(packet, &ohci->card, packet->ack);
	ret = 0;
 out:
	tasklet_enable(&ctx->tasklet);

	return ret;
}

static int ohci_enable_phys_dma(struct fw_card *card,
				int node_id, int generation)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int n, ret = 0;

	if (param_remote_dma)
		return 0;

	/*
	 * FIXME: Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
	 */

	guard(spinlock_irqsave)(&ohci->lock);

	if (ohci->generation != generation)
		return -ESTALE;

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */

	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);

	return ret;
}

static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 value;

	switch (csr_offset) {
	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
		if (ohci->is_root &&
		    (reg_read(ohci, OHCI1394_LinkControlSet) &
		     OHCI1394_LinkControl_cycleMaster))
			value = CSR_STATE_BIT_CMSTR;
		else
			value = 0;
		if (ohci->csr_state_setclear_abdicate)
			value |= CSR_STATE_BIT_ABDICATE;

		return value;

	case CSR_NODE_IDS:
		return reg_read(ohci, OHCI1394_NodeID) << 16;

	case CSR_CYCLE_TIME:
		return get_cycle_time(ohci);

	case CSR_BUS_TIME:
	{
		// We might be called just after the cycle timer has wrapped around but just before
		// the cycle64Seconds handler, so we better check here, too, if the bus time needs
		// to be updated.

		guard(spinlock_irqsave)(&ohci->lock);
		return update_bus_time(ohci);
	}
	case CSR_BUSY_TIMEOUT:
		value = reg_read(ohci, OHCI1394_ATRetries);
		return (value >> 4) & 0x0ffff00f;

	case CSR_PRIORITY_BUDGET:
		return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
			(ohci->pri_req_max << 8);

	default:
		WARN_ON(1);
		return 0;
	}
}

static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
	struct fw_ohci *ohci = fw_ohci(card);

	switch (csr_offset) {
	case CSR_STATE_CLEAR:
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_cycleMaster);
			flush_writes(ohci);
		}
		if (value & CSR_STATE_BIT_ABDICATE)
			ohci->csr_state_setclear_abdicate = false;
		break;

	case CSR_STATE_SET:
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
			reg_write(ohci, OHCI1394_LinkControlSet,
				  OHCI1394_LinkControl_cycleMaster);
			flush_writes(ohci);
		}
		if (value & CSR_STATE_BIT_ABDICATE)
			ohci->csr_state_setclear_abdicate = true;
		break;

	case CSR_NODE_IDS:
		reg_write(ohci, OHCI1394_NodeID, value >> 16);
		flush_writes(ohci);
		break;

	case CSR_CYCLE_TIME:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
		reg_write(ohci, OHCI1394_IntEventSet,
			  OHCI1394_cycleInconsistent);
		flush_writes(ohci);
		break;

	case CSR_BUS_TIME:
	{
		guard(spinlock_irqsave)(&ohci->lock);
		ohci->bus_time = (update_bus_time(ohci) & 0x40) | (value & ~0x7f);
		break;
	}
	case CSR_BUSY_TIMEOUT:
		value = (value & 0xf) | ((value & 0xf) << 4) |
			((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
		reg_write(ohci, OHCI1394_ATRetries, value);
		flush_writes(ohci);
		break;

	case CSR_PRIORITY_BUDGET:
		reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
		flush_writes(ohci);
		break;

	default:
		WARN_ON(1);
		break;
	}
}

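/*
 * Hand the isochronous headers accumulated so far to the context's
 * single-channel completion callback and reset the accumulation length.
 */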
static void flush_iso_completions(struct iso_context *ctx, enum fw_iso_context_completions_cause cause)
{
	trace_isoc_inbound_single_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
					      ctx->header_length);
	trace_isoc_outbound_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
					ctx->header_length);

	ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
			      ctx->header_length, ctx->header,
			      ctx->base.callback_data);
	ctx->header_length = 0;
}

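/*
 * Append the DMA'd header quadlets of one received packet to the context's
 * header page, flushing completions first if the page would overflow.
 */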
static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
{
	u32 *ctx_hdr;

	if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
		if (ctx->base.drop_overflow_headers)
			return;
		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
	}

	ctx_hdr = ctx->header + ctx->header_length;
	ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);

	/*
	 * The two iso header quadlets are byteswapped to little
	 * endian by the controller, but we want to present them
	 * as big endian for consistency with the bus endianness.
	 */
	if (ctx->base.header_size > 0)
		ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */
	if (ctx->base.header_size > 4)
		ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
	if (ctx->base.header_size > 8)
		memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
	ctx->header_length += ctx->base.header_size;
}

static int handle_ir_packet_per_buffer(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	u32 buffer_dma;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
		d++;
		buffer_dma = le32_to_cpu(d->data_address);
		dma_sync_single_range_for_cpu(context->ohci->card.device,
					      buffer_dma & PAGE_MASK,
					      buffer_dma & ~PAGE_MASK,
					      le16_to_cpu(d->req_count),
					      DMA_FROM_DEVICE);
	}

	copy_iso_headers(ctx, (u32 *) (last + 1));

	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);

	return 1;
}

/* d == last because each descriptor block is only a single descriptor. */
static int handle_ir_buffer_fill(struct context *context,
				 struct descriptor *d,
				 struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	unsigned int req_count, res_count, completed;
	u32 buffer_dma;

	req_count = le16_to_cpu(last->req_count);
	res_count = le16_to_cpu(READ_ONCE(last->res_count));
	completed = req_count - res_count;
	buffer_dma = le32_to_cpu(last->data_address);

	if (completed > 0) {
		ctx->mc_buffer_bus = buffer_dma;
		ctx->mc_completed = completed;
	}

	if (res_count != 0)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	dma_sync_single_range_for_cpu(context->ohci->card.device,
				      buffer_dma & PAGE_MASK,
				      buffer_dma & ~PAGE_MASK,
				      completed, DMA_FROM_DEVICE);

	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
		trace_isoc_inbound_multiple_completions(&ctx->base, completed,
							FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);

		ctx->base.callback.mc(&ctx->base,
				      buffer_dma + completed,
				      ctx->base.callback_data);
		ctx->mc_completed = 0;
	}

	return 1;
}

static void flush_ir_buffer_fill(struct iso_context *ctx)
{
	dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
				      ctx->mc_buffer_bus & PAGE_MASK,
				      ctx->mc_buffer_bus & ~PAGE_MASK,
				      ctx->mc_completed, DMA_FROM_DEVICE);

	trace_isoc_inbound_multiple_completions(&ctx->base, ctx->mc_completed,
						FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);

	ctx->base.callback.mc(&ctx->base,
			      ctx->mc_buffer_bus + ctx->mc_completed,
			      ctx->base.callback_data);
	ctx->mc_completed = 0;
}

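/*
 * Sync the payload buffers of a transmitted IT packet back to the CPU.
 * Descriptors whose data lives in the context program's own coherent page
 * (the immediate header and an optional driver-supplied header) are skipped.
 */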
static inline void sync_it_packet_for_cpu(struct context *context,
					  struct descriptor *pd)
{
	__le16 control;
	u32 buffer_dma;

	/* only packets beginning with OUTPUT_MORE* have data buffers */
	if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return;

	/* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
	pd += 2;

	/*
	 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
	 * data buffer is in the context program's coherent page and must not
	 * be synced.
	 */
	if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
	    (context->current_bus & PAGE_MASK)) {
		if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
			return;
		pd++;
	}

	do {
		buffer_dma = le32_to_cpu(pd->data_address);
		dma_sync_single_range_for_cpu(context->ohci->card.device,
					      buffer_dma & PAGE_MASK,
					      buffer_dma & ~PAGE_MASK,
					      le16_to_cpu(pd->req_count),
					      DMA_TO_DEVICE);
		control = pd->control;
		pd++;
	} while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
}

static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	__be32 *ctx_hdr;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	sync_it_packet_for_cpu(context, d);

	if (ctx->header_length + 4 > PAGE_SIZE) {
		if (ctx->base.drop_overflow_headers)
			return 1;
		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
	}

	ctx_hdr = ctx->header + ctx->header_length;
	ctx->last_timestamp = le16_to_cpu(last->res_count);
	/* Present this value as big-endian to match the receive code */
	*ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
			       le16_to_cpu(pd->res_count));
	ctx->header_length += 4;

	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);

	return 1;
}

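/*
 * Program the IR multi-channel mask registers so that exactly the given 64
 * channels are received: first clear the bits outside the mask, then set
 * the requested ones.
 */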
static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
{
	u32 hi = channels >> 32, lo = channels;

	reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
	reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
	ohci->mc_channels = channels;
}

static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx;
	descriptor_callback_t callback;
	u64 *channels;
	u32 *mask, regs;
	int index, ret = -EBUSY;

	scoped_guard(spinlock_irq, &ohci->lock) {
		switch (type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			mask = &ohci->it_context_mask;
			callback = handle_it_packet;
			index = ffs(*mask) - 1;
			if (index >= 0) {
				*mask &= ~(1 << index);
				regs = OHCI1394_IsoXmitContextBase(index);
				ctx = &ohci->it_context_list[index];
			}
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			channels = &ohci->ir_context_channels;
			mask = &ohci->ir_context_mask;
			callback = handle_ir_packet_per_buffer;
			index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
			if (index >= 0) {
				*channels &= ~(1ULL << channel);
				*mask &= ~(1 << index);
				regs = OHCI1394_IsoRcvContextBase(index);
				ctx = &ohci->ir_context_list[index];
			}
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			mask = &ohci->ir_context_mask;
			callback = handle_ir_buffer_fill;
			index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
			if (index >= 0) {
				ohci->mc_allocated = true;
				*mask &= ~(1 << index);
				regs = OHCI1394_IsoRcvContextBase(index);
				ctx = &ohci->ir_context_list[index];
			}
			break;

		default:
			index = -1;
			ret = -ENOSYS;
		}

		if (index < 0)
			return ERR_PTR(ret);
	}

	memset(ctx, 0, sizeof(*ctx));
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = context_init(&ctx->context, ohci, regs, callback);
	if (ret < 0)
		goto out_with_header;
	fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work);

	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
		set_multichannel_mask(ohci, 0);
		ctx->mc_completed = 0;
	}

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	scoped_guard(spinlock_irq, &ohci->lock) {
		switch (type) {
		case FW_ISO_CONTEXT_RECEIVE:
			*channels |= 1ULL << channel;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			ohci->mc_allocated = false;
			break;
		}
		*mask |= 1 << index;
	}

	return ERR_PTR(ret);
}

static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
	int index;

	/* the controller cannot start without any queued packets */
	if (ctx->context.last->branch_address == 0)
		return -ENODATA;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		control |= IR_CONTEXT_BUFFER_FILL | IR_CONTEXT_MULTI_CHANNEL_MODE;
		fallthrough;
	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);

		ctx->sync = sync;
		ctx->tags = tags;

		break;
	}

	return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
		break;

	case FW_ISO_CONTEXT_RECEIVE:
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
		break;
	}
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	guard(spinlock_irqsave)(&ohci->lock);

	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= 1ULL << base->channel;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= ohci->mc_channels;
		ohci->mc_channels = 0;
		ohci->mc_allocated = false;
		break;
	}
}

static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
{
	struct fw_ohci *ohci = fw_ohci(base->card);

	switch (base->type) {
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
	{
		guard(spinlock_irqsave)(&ohci->lock);

		// Don't allow multichannel to grab other contexts' channels.
		if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
			*channels = ohci->ir_context_channels;
			return -EBUSY;
		} else {
			set_multichannel_mask(ohci, *channels);
			return 0;
		}
	}
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_PM
static void ohci_resume_iso_dma(struct fw_ohci *ohci)
{
	int i;
	struct iso_context *ctx;

	for (i = 0 ; i < ohci->n_ir ; i++) {
		ctx = &ohci->ir_context_list[i];
		if (ctx->context.running)
			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
	}

	for (i = 0 ; i < ohci->n_it ; i++) {
		ctx = &ohci->it_context_list[i];
		if (ctx->context.running)
			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
	}
}
#endif

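/*
 * Build the descriptor block for one IT packet: an immediate descriptor
 * carrying the isochronous packet header (unless this cycle is skipped), an
 * optional descriptor for a driver-supplied header, and one descriptor per
 * payload page, with OUTPUT_LAST, status and branch flags on the last one.
 */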
static int queue_iso_transmit(struct iso_context *ctx,
			      struct fw_iso_packet *packet,
			      struct fw_iso_buffer *buffer,
			      unsigned long payload)
{
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page index past the end of the payload. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);
		/*
		 * Link the skip address to this descriptor itself. This causes
		 * a context to skip a cycle whenever lost cycles or FIFO
		 * overruns occur, without dropping the data. The application
		 * should then decide whether this is an error condition or not.
		 * FIXME: Make the context's cycle-lost behaviour configurable?
		 */
		d[0].branch_address = cpu_to_le32(d_bus | z);

		header = (__le32 *) &d[1];

		ohci1394_it_data_set_speed(header, ctx->base.speed);
		ohci1394_it_data_set_tag(header, p->tag);
		ohci1394_it_data_set_channel(header, ctx->base.channel);
		ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
		ohci1394_it_data_set_sync(header, p->sy);

		ohci1394_it_data_set_data_length(header, p->header_length + p->payload_length);
	}

	if (p->header_length > 0) {
		d[2].req_count = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page = payload_index >> PAGE_SHIFT;
		offset = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
						 page_bus, offset, length,
						 DMA_TO_DEVICE);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}

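/*
 * Queue reception of several packets in packet-per-buffer mode: each packet
 * gets an INPUT_MORE descriptor for the header/trailer strip plus one
 * descriptor per payload page, the last one marked INPUT_LAST.
 */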
static int queue_iso_packet_per_buffer(struct iso_context *ctx,
				       struct fw_iso_packet *packet,
				       struct fw_iso_buffer *buffer,
				       unsigned long payload)
{
	struct device *device = ctx->context.ohci->card.device;
	struct descriptor *d, *pd;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = packet->header_length / ctx->base.header_size;
	header_size = max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	payload_per_buffer = packet->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
					 DESCRIPTOR_INPUT_MORE);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count = cpu_to_le16(header_size);
		d->res_count = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		pd = d;
		for (j = 1; j < z; j++) {
			pd++;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			dma_sync_single_range_for_device(device, page_bus,
							 offset, length,
							 DMA_FROM_DEVICE);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}

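/*
 * Queue one chunk of the buffer for multichannel (buffer-fill) reception:
 * one INPUT_MORE descriptor per page, each branching unconditionally so
 * that the controller fills the buffer continuously.
 */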
static int queue_iso_buffer_fill(struct iso_context *ctx,
				 struct fw_iso_packet *packet,
				 struct fw_iso_buffer *buffer,
				 unsigned long payload)
{
	struct descriptor *d;
	dma_addr_t d_bus, page_bus;
	int page, offset, rest, z, i, length;

	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest = packet->payload_length;

	/* We need one descriptor for each page in the buffer. */
	z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);

	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
		return -EFAULT;

	for (i = 0; i < z; i++) {
		d = context_get_descriptors(&ctx->context, 1, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					 DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		if (packet->interrupt && i == z - 1)
			d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;
		d->req_count = cpu_to_le16(length);
		d->res_count = d->req_count;
		d->transfer_status = 0;

		page_bus = page_private(buffer->pages[page]);
		d->data_address = cpu_to_le32(page_bus + offset);

		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
						 page_bus, offset, length,
						 DMA_FROM_DEVICE);

		rest -= length;
		offset = 0;
		page++;

		context_append(&ctx->context, d, 1, 0);
	}

	return 0;
}

3565static int ohci_queue_iso(struct fw_iso_context *base,
3566 struct fw_iso_packet *packet,
3567 struct fw_iso_buffer *buffer,
3568 unsigned long payload)
3569{
3570 struct iso_context *ctx = container_of(base, struct iso_context, base);
3571
3572 guard(spinlock_irqsave)(&ctx->context.ohci->lock);
3573
3574 switch (base->type) {
3575 case FW_ISO_CONTEXT_TRANSMIT:
3576 return queue_iso_transmit(ctx, packet, buffer, payload);
3577 case FW_ISO_CONTEXT_RECEIVE:
3578 return queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
3579 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3580 return queue_iso_buffer_fill(ctx, packet, buffer, payload);
3581 default:
3582 return -ENOSYS;
3583 }
3584}
3585
3586static void ohci_flush_queue_iso(struct fw_iso_context *base)
3587{
3588 struct context *ctx =
3589 &container_of(base, struct iso_context, base)->context;
3590
3591 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
3592}
3593
static int ohci_flush_iso_completions(struct fw_iso_context *base)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int ret = 0;

	if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
		ohci_isoc_context_work(&base->work);

		switch (base->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
		case FW_ISO_CONTEXT_RECEIVE:
			if (ctx->header_length != 0)
				flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
			break;
		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (ctx->mc_completed != 0)
				flush_ir_buffer_fill(ctx);
			break;
		default:
			ret = -ENOSYS;
		}

		clear_bit_unlock(0, &ctx->flushing_completions);
		smp_mb__after_atomic();
	}

	return ret;
}

static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.read_phy_reg		= ohci_read_phy_reg,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.read_csr		= ohci_read_csr,
	.write_csr		= ohci_write_csr,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.set_iso_channels	= ohci_set_iso_channels,
	.queue_iso		= ohci_queue_iso,
	.flush_queue_iso	= ohci_flush_queue_iso,
	.flush_iso_completions	= ohci_flush_iso_completions,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

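/*
 * On Apple PowerMacs, FireWire cable power and the link hardware are
 * switched via platform feature calls; they are toggled around probe,
 * removal, suspend, and resume.
 */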
#ifdef CONFIG_PPC_PMAC
static void pmac_ohci_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void pmac_ohci_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */

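/*
 * Devres release callback: runs after all other device-managed
 * allocations from pci_probe() have been freed; what is left to undo is
 * the PowerMac power-up and the AR context buffers.
 */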
static void release_ohci(struct device *dev, void *data)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct fw_ohci *ohci = pci_get_drvdata(pdev);

	pmac_ohci_off(pdev);

	ar_context_release(&ohci->ar_response_ctx);
	ar_context_release(&ohci->ar_request_ctx);

	dev_notice(dev, "removed fw-ohci device\n");
}

static int pci_probe(struct pci_dev *dev,
		     const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int i, flags, irq, err;
	size_t size;

	if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
		dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
		return -ENOSYS;
	}

	ohci = devres_alloc(release_ohci, sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL)
		return -ENOMEM;
	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
	pci_set_drvdata(dev, ohci);
	pmac_ohci_on(dev);
	devres_add(&dev->dev, ohci);

	err = pcim_enable_device(dev);
	if (err) {
		dev_err(&dev->dev, "failed to enable OHCI hardware\n");
		return err;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	spin_lock_init(&ohci->lock);
	mutex_init(&ohci->phy_reg_mutex);

	INIT_WORK(&ohci->bus_reset_work, bus_reset_work);

	if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
		ohci_err(ohci, "invalid MMIO resource\n");
		return -ENXIO;
	}

	ohci->registers = pcim_iomap_region(dev, 0, ohci_driver_name);
	if (IS_ERR(ohci->registers)) {
		ohci_err(ohci, "failed to request and map MMIO resource\n");
		return -ENXIO;
	}

	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
		if ((ohci_quirks[i].vendor == dev->vendor) &&
		    (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].device == dev->device) &&
		    (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].revision >= dev->revision)) {
			ohci->quirks = ohci_quirks[i].flags;
			break;
		}
	if (param_quirks)
		ohci->quirks = param_quirks;

	if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
		ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;

	/*
	 * Because dma_alloc_coherent() allocates at least one page,
	 * we save space by using a common buffer for the AR request/
	 * response descriptors and the self IDs buffer.
	 */
	BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
	BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
	ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus,
						GFP_KERNEL);
	if (!ohci->misc_buffer)
		return -ENOMEM;

	err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
			      OHCI1394_AsReqRcvContextControlSet);
	if (err < 0)
		return err;

	err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
			      OHCI1394_AsRspRcvContextControlSet);
	if (err < 0)
		return err;

	err = context_init(&ohci->at_request_ctx, ohci,
			   OHCI1394_AsReqTrContextControlSet, handle_at_packet);
	if (err < 0)
		return err;

	err = context_init(&ohci->at_response_ctx, ohci,
			   OHCI1394_AsRspTrContextControlSet, handle_at_packet);
	if (err < 0)
		return err;

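	/*
	 * Probe the implemented isochronous contexts by setting every mask
	 * bit and reading back which bits stick.
	 */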
	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	ohci->ir_context_mask = ohci->ir_context_support;
	ohci->n_ir = hweight32(ohci->ir_context_mask);
	size = sizeof(struct iso_context) * ohci->n_ir;
	ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
	if (!ohci->ir_context_list)
		return -ENOMEM;

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	/* JMicron JMB38x often shows 0 at first read, just ignore it */
	if (!ohci->it_context_support) {
		ohci_notice(ohci, "overriding IsoXmitIntMask\n");
		ohci->it_context_support = 0xf;
	}
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	ohci->it_context_mask = ohci->it_context_support;
	ohci->n_it = hweight32(ohci->it_context_mask);
	size = sizeof(struct iso_context) * ohci->n_it;
	ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
	if (!ohci->it_context_list)
		return -ENOMEM;

	ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2;
	ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

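	/*
	 * Prefer a single MSI vector when the chip supports it; otherwise
	 * fall back to a shared INTx line.
	 */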
	flags = PCI_IRQ_INTX;
	if (!(ohci->quirks & QUIRK_NO_MSI))
		flags |= PCI_IRQ_MSI;
	err = pci_alloc_irq_vectors(dev, 1, 1, flags);
	if (err < 0)
		return err;
	irq = pci_irq_vector(dev, 0);
	if (irq < 0) {
		err = irq;
		goto fail_msi;
	}

	err = request_threaded_irq(irq, irq_handler, NULL,
				   pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name,
				   ohci);
	if (err < 0) {
		ohci_err(ohci, "failed to allocate interrupt %d\n", irq);
		goto fail_msi;
	}

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid, ohci->n_it + ohci->n_ir);
	if (err)
		goto fail_irq;

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	ohci_notice(ohci,
		    "added OHCI v%x.%x device as card %d, "
		    "%d IR + %d IT contexts, quirks 0x%x%s\n",
		    version >> 16, version & 0xff, ohci->card.index,
		    ohci->n_ir, ohci->n_it, ohci->quirks,
		    reg_read(ohci, OHCI1394_PhyUpperBound) ?
			", physUB" : "");

	return 0;

 fail_irq:
	free_irq(irq, ohci);
 fail_msi:
	pci_free_irq_vectors(dev);

	return err;
}

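/*
 * Teardown mirrors the probe order: mask interrupts while LPS is still up,
 * stop the bus-reset worker, detach the card from the core, reset the
 * controller, and finally release the IRQ resources.
 */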
static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int irq;

	/*
	 * If the removal is happening from the suspend state, LPS won't be
	 * enabled and host registers (e.g., IntMaskClear) won't be accessible.
	 */
	if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
		reg_write(ohci, OHCI1394_IntMaskClear, ~0);
		flush_writes(ohci);
	}
	cancel_work_sync(&ohci->bus_reset_work);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);

	irq = pci_irq_vector(dev, 0);
	if (irq >= 0)
		free_irq(irq, ohci);
	pci_free_irq_vectors(dev);

	dev_notice(&dev->dev, "removing fw-ohci device\n");
}

#ifdef CONFIG_PM
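/*
 * Legacy PCI power-management callbacks; the controller is fully
 * re-initialized on resume via ohci_enable().
 */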
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	err = pci_save_state(dev);
	if (err) {
		ohci_err(ohci, "pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
	pmac_ohci_off(dev);

	return 0;
}

static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	pmac_ohci_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		ohci_err(ohci, "pci_enable_device failed\n");
		return err;
	}

	/* Some systems don't set up the GUID register on resume from RAM */
	if (!reg_read(ohci, OHCI1394_GUIDLo) &&
	    !reg_read(ohci, OHCI1394_GUIDHi)) {
		reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
		reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
	}

	err = ohci_enable(&ohci->card, NULL, 0);
	if (err)
		return err;

	ohci_resume_iso_dma(ohci);

	return 0;
}
#endif

static const struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};

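/*
 * Self-ID processing runs on a dedicated workqueue with WQ_MEM_RECLAIM so
 * that bus-reset handling can make progress under memory pressure.
 */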
static int __init fw_ohci_init(void)
{
	selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
	if (!selfid_workqueue)
		return -ENOMEM;

	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
	destroy_workqueue(selfid_workqueue);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
MODULE_ALIAS("ohci1394");
1/*
2 * Driver for OHCI 1394 controllers
3 *
4 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/bitops.h>
22#include <linux/bug.h>
23#include <linux/compiler.h>
24#include <linux/delay.h>
25#include <linux/device.h>
26#include <linux/dma-mapping.h>
27#include <linux/firewire.h>
28#include <linux/firewire-constants.h>
29#include <linux/init.h>
30#include <linux/interrupt.h>
31#include <linux/io.h>
32#include <linux/kernel.h>
33#include <linux/list.h>
34#include <linux/mm.h>
35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/mutex.h>
38#include <linux/pci.h>
39#include <linux/pci_ids.h>
40#include <linux/slab.h>
41#include <linux/spinlock.h>
42#include <linux/string.h>
43#include <linux/time.h>
44#include <linux/vmalloc.h>
45#include <linux/workqueue.h>
46
47#include <asm/byteorder.h>
48#include <asm/page.h>
49
50#ifdef CONFIG_PPC_PMAC
51#include <asm/pmac_feature.h>
52#endif
53
54#include "core.h"
55#include "ohci.h"
56
57#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
58#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
59#define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
60
61#define DESCRIPTOR_OUTPUT_MORE 0
62#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
63#define DESCRIPTOR_INPUT_MORE (2 << 12)
64#define DESCRIPTOR_INPUT_LAST (3 << 12)
65#define DESCRIPTOR_STATUS (1 << 11)
66#define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
67#define DESCRIPTOR_PING (1 << 7)
68#define DESCRIPTOR_YY (1 << 6)
69#define DESCRIPTOR_NO_IRQ (0 << 4)
70#define DESCRIPTOR_IRQ_ERROR (1 << 4)
71#define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
72#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
73#define DESCRIPTOR_WAIT (3 << 0)
74
75#define DESCRIPTOR_CMD (0xf << 12)
76
77struct descriptor {
78 __le16 req_count;
79 __le16 control;
80 __le32 data_address;
81 __le32 branch_address;
82 __le16 res_count;
83 __le16 transfer_status;
84} __attribute__((aligned(16)));
85
86#define CONTROL_SET(regs) (regs)
87#define CONTROL_CLEAR(regs) ((regs) + 4)
88#define COMMAND_PTR(regs) ((regs) + 12)
89#define CONTEXT_MATCH(regs) ((regs) + 16)
90
91#define AR_BUFFER_SIZE (32*1024)
92#define AR_BUFFERS_MIN DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
93/* we need at least two pages for proper list management */
94#define AR_BUFFERS (AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
95
96#define MAX_ASYNC_PAYLOAD 4096
97#define MAX_AR_PACKET_SIZE (16 + MAX_ASYNC_PAYLOAD + 4)
98#define AR_WRAPAROUND_PAGES DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
99
100struct ar_context {
101 struct fw_ohci *ohci;
102 struct page *pages[AR_BUFFERS];
103 void *buffer;
104 struct descriptor *descriptors;
105 dma_addr_t descriptors_bus;
106 void *pointer;
107 unsigned int last_buffer_index;
108 u32 regs;
109 struct tasklet_struct tasklet;
110};
111
112struct context;
113
114typedef int (*descriptor_callback_t)(struct context *ctx,
115 struct descriptor *d,
116 struct descriptor *last);
117
118/*
119 * A buffer that contains a block of DMA-able coherent memory used for
120 * storing a portion of a DMA descriptor program.
121 */
122struct descriptor_buffer {
123 struct list_head list;
124 dma_addr_t buffer_bus;
125 size_t buffer_size;
126 size_t used;
127 struct descriptor buffer[0];
128};
129
130struct context {
131 struct fw_ohci *ohci;
132 u32 regs;
133 int total_allocation;
134 u32 current_bus;
135 bool running;
136 bool flushing;
137
138 /*
139 * List of page-sized buffers for storing DMA descriptors.
140 * Head of list contains buffers in use and tail of list contains
141 * free buffers.
142 */
143 struct list_head buffer_list;
144
145 /*
146 * Pointer to a buffer inside buffer_list that contains the tail
147 * end of the current DMA program.
148 */
149 struct descriptor_buffer *buffer_tail;
150
151 /*
152 * The descriptor containing the branch address of the first
153 * descriptor that has not yet been filled by the device.
154 */
155 struct descriptor *last;
156
157 /*
158 * The last descriptor block in the DMA program. It contains the branch
159 * address that must be updated upon appending a new descriptor.
160 */
161 struct descriptor *prev;
162 int prev_z;
163
164 descriptor_callback_t callback;
165
166 struct tasklet_struct tasklet;
167};
168
169#define IT_HEADER_SY(v) ((v) << 0)
170#define IT_HEADER_TCODE(v) ((v) << 4)
171#define IT_HEADER_CHANNEL(v) ((v) << 8)
172#define IT_HEADER_TAG(v) ((v) << 14)
173#define IT_HEADER_SPEED(v) ((v) << 16)
174#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
175
176struct iso_context {
177 struct fw_iso_context base;
178 struct context context;
179 void *header;
180 size_t header_length;
181 unsigned long flushing_completions;
182 u32 mc_buffer_bus;
183 u16 mc_completed;
184 u16 last_timestamp;
185 u8 sync;
186 u8 tags;
187};
188
189#define CONFIG_ROM_SIZE 1024
190
191struct fw_ohci {
192 struct fw_card card;
193
194 __iomem char *registers;
195 int node_id;
196 int generation;
197 int request_generation; /* for timestamping incoming requests */
198 unsigned quirks;
199 unsigned int pri_req_max;
200 u32 bus_time;
201 bool bus_time_running;
202 bool is_root;
203 bool csr_state_setclear_abdicate;
204 int n_ir;
205 int n_it;
206 /*
207 * Spinlock for accessing fw_ohci data. Never call out of
208 * this driver with this lock held.
209 */
210 spinlock_t lock;
211
212 struct mutex phy_reg_mutex;
213
214 void *misc_buffer;
215 dma_addr_t misc_buffer_bus;
216
217 struct ar_context ar_request_ctx;
218 struct ar_context ar_response_ctx;
219 struct context at_request_ctx;
220 struct context at_response_ctx;
221
222 u32 it_context_support;
223 u32 it_context_mask; /* unoccupied IT contexts */
224 struct iso_context *it_context_list;
225 u64 ir_context_channels; /* unoccupied channels */
226 u32 ir_context_support;
227 u32 ir_context_mask; /* unoccupied IR contexts */
228 struct iso_context *ir_context_list;
229 u64 mc_channels; /* channels in use by the multichannel IR context */
230 bool mc_allocated;
231
232 __be32 *config_rom;
233 dma_addr_t config_rom_bus;
234 __be32 *next_config_rom;
235 dma_addr_t next_config_rom_bus;
236 __be32 next_header;
237
238 __le32 *self_id;
239 dma_addr_t self_id_bus;
240 struct work_struct bus_reset_work;
241
242 u32 self_id_buffer[512];
243};
244
245static struct workqueue_struct *selfid_workqueue;
246
247static inline struct fw_ohci *fw_ohci(struct fw_card *card)
248{
249 return container_of(card, struct fw_ohci, card);
250}
251
252#define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
253#define IR_CONTEXT_BUFFER_FILL 0x80000000
254#define IR_CONTEXT_ISOCH_HEADER 0x40000000
255#define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000
256#define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000
257#define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000
258
259#define CONTEXT_RUN 0x8000
260#define CONTEXT_WAKE 0x1000
261#define CONTEXT_DEAD 0x0800
262#define CONTEXT_ACTIVE 0x0400
263
264#define OHCI1394_MAX_AT_REQ_RETRIES 0xf
265#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
266#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
267
268#define OHCI1394_REGISTER_SIZE 0x800
269#define OHCI1394_PCI_HCI_Control 0x40
270#define SELF_ID_BUF_SIZE 0x800
271#define OHCI_TCODE_PHY_PACKET 0x0e
272#define OHCI_VERSION_1_1 0x010010
273
274static char ohci_driver_name[] = KBUILD_MODNAME;
275
276#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
277#define PCI_DEVICE_ID_AGERE_FW643 0x5901
278#define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001
279#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
280#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
281#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
282#define PCI_DEVICE_ID_TI_TSB82AA2 0x8025
283#define PCI_DEVICE_ID_VIA_VT630X 0x3044
284#define PCI_REV_ID_VIA_VT6306 0x46
285#define PCI_DEVICE_ID_VIA_VT6315 0x3403
286
287#define QUIRK_CYCLE_TIMER 0x1
288#define QUIRK_RESET_PACKET 0x2
289#define QUIRK_BE_HEADERS 0x4
290#define QUIRK_NO_1394A 0x8
291#define QUIRK_NO_MSI 0x10
292#define QUIRK_TI_SLLZ059 0x20
293#define QUIRK_IR_WAKE 0x40
294
295/* In case of multiple matches in ohci_quirks[], only the first one is used. */
296static const struct {
297 unsigned short vendor, device, revision, flags;
298} ohci_quirks[] = {
299 {PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
300 QUIRK_CYCLE_TIMER},
301
302 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
303 QUIRK_BE_HEADERS},
304
305 {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
306 QUIRK_NO_MSI},
307
308 {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
309 QUIRK_RESET_PACKET},
310
311 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
312 QUIRK_NO_MSI},
313
314 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
315 QUIRK_CYCLE_TIMER},
316
317 {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
318 QUIRK_NO_MSI},
319
320 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
321 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
322
323 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
324 QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
325
326 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
327 QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
328
329 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
330 QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
331
332 {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
333 QUIRK_RESET_PACKET},
334
335 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
336 QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
337
338 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
339 QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
340
341 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
342 QUIRK_NO_MSI},
343
344 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
345 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
346};
347
348/* This overrides anything that was found in ohci_quirks[]. */
349static int param_quirks;
350module_param_named(quirks, param_quirks, int, 0644);
351MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
352 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
353 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
354 ", AR/selfID endianness = " __stringify(QUIRK_BE_HEADERS)
355 ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
356 ", disable MSI = " __stringify(QUIRK_NO_MSI)
357 ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
358 ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
359 ")");
360
361#define OHCI_PARAM_DEBUG_AT_AR 1
362#define OHCI_PARAM_DEBUG_SELFIDS 2
363#define OHCI_PARAM_DEBUG_IRQS 4
364#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
365
366static int param_debug;
367module_param_named(debug, param_debug, int, 0644);
368MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
369 ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
370 ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
371 ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
372 ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
373 ", or a combination, or all = -1)");
374
375static bool param_remote_dma;
376module_param_named(remote_dma, param_remote_dma, bool, 0444);
377MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
378
379static void log_irqs(struct fw_ohci *ohci, u32 evt)
380{
381 if (likely(!(param_debug &
382 (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
383 return;
384
385 if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
386 !(evt & OHCI1394_busReset))
387 return;
388
389 ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
390 evt & OHCI1394_selfIDComplete ? " selfID" : "",
391 evt & OHCI1394_RQPkt ? " AR_req" : "",
392 evt & OHCI1394_RSPkt ? " AR_resp" : "",
393 evt & OHCI1394_reqTxComplete ? " AT_req" : "",
394 evt & OHCI1394_respTxComplete ? " AT_resp" : "",
395 evt & OHCI1394_isochRx ? " IR" : "",
396 evt & OHCI1394_isochTx ? " IT" : "",
397 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
398 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
399 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
400 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
401 evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
402 evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
403 evt & OHCI1394_busReset ? " busReset" : "",
404 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
405 OHCI1394_RSPkt | OHCI1394_reqTxComplete |
406 OHCI1394_respTxComplete | OHCI1394_isochRx |
407 OHCI1394_isochTx | OHCI1394_postedWriteErr |
408 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
409 OHCI1394_cycleInconsistent |
410 OHCI1394_regAccessFail | OHCI1394_busReset)
411 ? " ?" : "");
412}
413
414static const char *speed[] = {
415 [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
416};
417static const char *power[] = {
418 [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
419 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
420};
421static const char port[] = { '.', '-', 'p', 'c', };
422
423static char _p(u32 *s, int shift)
424{
425 return port[*s >> shift & 3];
426}
427
428static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
429{
430 u32 *s;
431
432 if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
433 return;
434
435 ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
436 self_id_count, generation, ohci->node_id);
437
438 for (s = ohci->self_id_buffer; self_id_count--; ++s)
439 if ((*s & 1 << 23) == 0)
440 ohci_notice(ohci,
441 "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
442 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
443 speed[*s >> 14 & 3], *s >> 16 & 63,
444 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
445 *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
446 else
447 ohci_notice(ohci,
448 "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
449 *s, *s >> 24 & 63,
450 _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
451 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
452}
453
454static const char *evts[] = {
455 [0x00] = "evt_no_status", [0x01] = "-reserved-",
456 [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
457 [0x04] = "evt_underrun", [0x05] = "evt_overrun",
458 [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
459 [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
460 [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
461 [0x0c] = "-reserved-", [0x0d] = "-reserved-",
462 [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
463 [0x10] = "-reserved-", [0x11] = "ack_complete",
464 [0x12] = "ack_pending ", [0x13] = "-reserved-",
465 [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
466 [0x16] = "ack_busy_B", [0x17] = "-reserved-",
467 [0x18] = "-reserved-", [0x19] = "-reserved-",
468 [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
469 [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
470 [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
471 [0x20] = "pending/cancelled",
472};
473static const char *tcodes[] = {
474 [0x0] = "QW req", [0x1] = "BW req",
475 [0x2] = "W resp", [0x3] = "-reserved-",
476 [0x4] = "QR req", [0x5] = "BR req",
477 [0x6] = "QR resp", [0x7] = "BR resp",
478 [0x8] = "cycle start", [0x9] = "Lk req",
479 [0xa] = "async stream packet", [0xb] = "Lk resp",
480 [0xc] = "-reserved-", [0xd] = "-reserved-",
481 [0xe] = "link internal", [0xf] = "-reserved-",
482};
483
484static void log_ar_at_event(struct fw_ohci *ohci,
485 char dir, int speed, u32 *header, int evt)
486{
487 int tcode = header[0] >> 4 & 0xf;
488 char specific[12];
489
490 if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
491 return;
492
493 if (unlikely(evt >= ARRAY_SIZE(evts)))
494 evt = 0x1f;
495
496 if (evt == OHCI1394_evt_bus_reset) {
497 ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
498 dir, (header[2] >> 16) & 0xff);
499 return;
500 }
501
502 switch (tcode) {
503 case 0x0: case 0x6: case 0x8:
504 snprintf(specific, sizeof(specific), " = %08x",
505 be32_to_cpu((__force __be32)header[3]));
506 break;
507 case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
508 snprintf(specific, sizeof(specific), " %x,%x",
509 header[3] >> 16, header[3] & 0xffff);
510 break;
511 default:
512 specific[0] = '\0';
513 }
514
515 switch (tcode) {
516 case 0xa:
517 ohci_notice(ohci, "A%c %s, %s\n",
518 dir, evts[evt], tcodes[tcode]);
519 break;
520 case 0xe:
521 ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
522 dir, evts[evt], header[1], header[2]);
523 break;
524 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
525 ohci_notice(ohci,
526 "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n",
527 dir, speed, header[0] >> 10 & 0x3f,
528 header[1] >> 16, header[0] >> 16, evts[evt],
529 tcodes[tcode], header[1] & 0xffff, header[2], specific);
530 break;
531 default:
532 ohci_notice(ohci,
533 "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
534 dir, speed, header[0] >> 10 & 0x3f,
535 header[1] >> 16, header[0] >> 16, evts[evt],
536 tcodes[tcode], specific);
537 }
538}
539
540static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
541{
542 writel(data, ohci->registers + offset);
543}
544
545static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
546{
547 return readl(ohci->registers + offset);
548}
549
550static inline void flush_writes(const struct fw_ohci *ohci)
551{
552 /* Do a dummy read to flush writes. */
553 reg_read(ohci, OHCI1394_Version);
554}
555
556/*
557 * Beware! read_phy_reg(), write_phy_reg(), update_phy_reg(), and
558 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
559 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
560 * directly. Exceptions are intrinsically serialized contexts like pci_probe.
561 */
562static int read_phy_reg(struct fw_ohci *ohci, int addr)
563{
564 u32 val;
565 int i;
566
567 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
568 for (i = 0; i < 3 + 100; i++) {
569 val = reg_read(ohci, OHCI1394_PhyControl);
570 if (!~val)
571 return -ENODEV; /* Card was ejected. */
572
573 if (val & OHCI1394_PhyControl_ReadDone)
574 return OHCI1394_PhyControl_ReadData(val);
575
576 /*
577 * Try a few times without waiting. Sleeping is necessary
578 * only when the link/PHY interface is busy.
579 */
580 if (i >= 3)
581 msleep(1);
582 }
583 ohci_err(ohci, "failed to read phy reg %d\n", addr);
584 dump_stack();
585
586 return -EBUSY;
587}
588
589static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
590{
591 int i;
592
593 reg_write(ohci, OHCI1394_PhyControl,
594 OHCI1394_PhyControl_Write(addr, val));
595 for (i = 0; i < 3 + 100; i++) {
596 val = reg_read(ohci, OHCI1394_PhyControl);
597 if (!~val)
598 return -ENODEV; /* Card was ejected. */
599
600 if (!(val & OHCI1394_PhyControl_WritePending))
601 return 0;
602
603 if (i >= 3)
604 msleep(1);
605 }
606 ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
607 dump_stack();
608
609 return -EBUSY;
610}
611
612static int update_phy_reg(struct fw_ohci *ohci, int addr,
613 int clear_bits, int set_bits)
614{
615 int ret = read_phy_reg(ohci, addr);
616 if (ret < 0)
617 return ret;
618
619 /*
620 * The interrupt status bits are cleared by writing a one bit.
621 * Avoid clearing them unless explicitly requested in set_bits.
622 */
623 if (addr == 5)
624 clear_bits |= PHY_INT_STATUS_BITS;
625
626 return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
627}
628
629static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
630{
631 int ret;
632
633 ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
634 if (ret < 0)
635 return ret;
636
637 return read_phy_reg(ohci, addr);
638}
639
640static int ohci_read_phy_reg(struct fw_card *card, int addr)
641{
642 struct fw_ohci *ohci = fw_ohci(card);
643 int ret;
644
645 mutex_lock(&ohci->phy_reg_mutex);
646 ret = read_phy_reg(ohci, addr);
647 mutex_unlock(&ohci->phy_reg_mutex);
648
649 return ret;
650}
651
652static int ohci_update_phy_reg(struct fw_card *card, int addr,
653 int clear_bits, int set_bits)
654{
655 struct fw_ohci *ohci = fw_ohci(card);
656 int ret;
657
658 mutex_lock(&ohci->phy_reg_mutex);
659 ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
660 mutex_unlock(&ohci->phy_reg_mutex);
661
662 return ret;
663}
664
665static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
666{
667 return page_private(ctx->pages[i]);
668}
669
670static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
671{
672 struct descriptor *d;
673
674 d = &ctx->descriptors[index];
675 d->branch_address &= cpu_to_le32(~0xf);
676 d->res_count = cpu_to_le16(PAGE_SIZE);
677 d->transfer_status = 0;
678
679 wmb(); /* finish init of new descriptors before branch_address update */
680 d = &ctx->descriptors[ctx->last_buffer_index];
681 d->branch_address |= cpu_to_le32(1);
682
683 ctx->last_buffer_index = index;
684
685 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
686}
687
688static void ar_context_release(struct ar_context *ctx)
689{
690 unsigned int i;
691
692 vunmap(ctx->buffer);
693
694 for (i = 0; i < AR_BUFFERS; i++)
695 if (ctx->pages[i]) {
696 dma_unmap_page(ctx->ohci->card.device,
697 ar_buffer_bus(ctx, i),
698 PAGE_SIZE, DMA_FROM_DEVICE);
699 __free_page(ctx->pages[i]);
700 }
701}
702
703static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
704{
705 struct fw_ohci *ohci = ctx->ohci;
706
707 if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
708 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
709 flush_writes(ohci);
710
711 ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
712 }
713 /* FIXME: restart? */
714}
715
716static inline unsigned int ar_next_buffer_index(unsigned int index)
717{
718 return (index + 1) % AR_BUFFERS;
719}
720
721static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
722{
723 return ar_next_buffer_index(ctx->last_buffer_index);
724}
725
726/*
727 * We search for the buffer that contains the last AR packet DMA data written
728 * by the controller.
729 */
730static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
731 unsigned int *buffer_offset)
732{
733 unsigned int i, next_i, last = ctx->last_buffer_index;
734 __le16 res_count, next_res_count;
735
736 i = ar_first_buffer_index(ctx);
737 res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
738
739 /* A buffer that is not yet completely filled must be the last one. */
740 while (i != last && res_count == 0) {
741
742 /* Peek at the next descriptor. */
743 next_i = ar_next_buffer_index(i);
744 rmb(); /* read descriptors in order */
745 next_res_count = ACCESS_ONCE(
746 ctx->descriptors[next_i].res_count);
747 /*
748 * If the next descriptor is still empty, we must stop at this
749 * descriptor.
750 */
751 if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
752 /*
753 * The exception is when the DMA data for one packet is
754 * split over three buffers; in this case, the middle
755 * buffer's descriptor might be never updated by the
756 * controller and look still empty, and we have to peek
757 * at the third one.
758 */
759 if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
760 next_i = ar_next_buffer_index(next_i);
761 rmb();
762 next_res_count = ACCESS_ONCE(
763 ctx->descriptors[next_i].res_count);
764 if (next_res_count != cpu_to_le16(PAGE_SIZE))
765 goto next_buffer_is_active;
766 }
767
768 break;
769 }
770
771next_buffer_is_active:
772 i = next_i;
773 res_count = next_res_count;
774 }
775
776 rmb(); /* read res_count before the DMA data */
777
778 *buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
779 if (*buffer_offset > PAGE_SIZE) {
780 *buffer_offset = 0;
781 ar_context_abort(ctx, "corrupted descriptor");
782 }
783
784 return i;
785}
786
787static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
788 unsigned int end_buffer_index,
789 unsigned int end_buffer_offset)
790{
791 unsigned int i;
792
793 i = ar_first_buffer_index(ctx);
794 while (i != end_buffer_index) {
795 dma_sync_single_for_cpu(ctx->ohci->card.device,
796 ar_buffer_bus(ctx, i),
797 PAGE_SIZE, DMA_FROM_DEVICE);
798 i = ar_next_buffer_index(i);
799 }
800 if (end_buffer_offset > 0)
801 dma_sync_single_for_cpu(ctx->ohci->card.device,
802 ar_buffer_bus(ctx, i),
803 end_buffer_offset, DMA_FROM_DEVICE);
804}
805
806#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
807#define cond_le32_to_cpu(v) \
808 (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
809#else
810#define cond_le32_to_cpu(v) le32_to_cpu(v)
811#endif
812
813static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
814{
815 struct fw_ohci *ohci = ctx->ohci;
816 struct fw_packet p;
817 u32 status, length, tcode;
818 int evt;
819
820 p.header[0] = cond_le32_to_cpu(buffer[0]);
821 p.header[1] = cond_le32_to_cpu(buffer[1]);
822 p.header[2] = cond_le32_to_cpu(buffer[2]);
823
824 tcode = (p.header[0] >> 4) & 0x0f;
825 switch (tcode) {
826 case TCODE_WRITE_QUADLET_REQUEST:
827 case TCODE_READ_QUADLET_RESPONSE:
828 p.header[3] = (__force __u32) buffer[3];
829 p.header_length = 16;
830 p.payload_length = 0;
831 break;
832
833 case TCODE_READ_BLOCK_REQUEST :
834 p.header[3] = cond_le32_to_cpu(buffer[3]);
835 p.header_length = 16;
836 p.payload_length = 0;
837 break;
838
839 case TCODE_WRITE_BLOCK_REQUEST:
840 case TCODE_READ_BLOCK_RESPONSE:
841 case TCODE_LOCK_REQUEST:
842 case TCODE_LOCK_RESPONSE:
843 p.header[3] = cond_le32_to_cpu(buffer[3]);
844 p.header_length = 16;
845 p.payload_length = p.header[3] >> 16;
846 if (p.payload_length > MAX_ASYNC_PAYLOAD) {
847 ar_context_abort(ctx, "invalid packet length");
848 return NULL;
849 }
850 break;
851
852 case TCODE_WRITE_RESPONSE:
853 case TCODE_READ_QUADLET_REQUEST:
854 case OHCI_TCODE_PHY_PACKET:
855 p.header_length = 12;
856 p.payload_length = 0;
857 break;
858
859 default:
860 ar_context_abort(ctx, "invalid tcode");
861 return NULL;
862 }
863
864 p.payload = (void *) buffer + p.header_length;
865
866 /* FIXME: What to do about evt_* errors? */
867 length = (p.header_length + p.payload_length + 3) / 4;
868 status = cond_le32_to_cpu(buffer[length]);
869 evt = (status >> 16) & 0x1f;
870
871 p.ack = evt - 16;
872 p.speed = (status >> 21) & 0x7;
873 p.timestamp = status & 0xffff;
874 p.generation = ohci->request_generation;
875
876 log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
877
878 /*
879 * Several controllers, notably from NEC and VIA, forget to
880 * write ack_complete status at PHY packet reception.
881 */
882 if (evt == OHCI1394_evt_no_status &&
883 (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
884 p.ack = ACK_COMPLETE;
885
886 /*
887 * The OHCI bus reset handler synthesizes a PHY packet with
888 * the new generation number when a bus reset happens (see
889 * section 8.4.2.3). This helps us determine when a request
890 * was received and make sure we send the response in the same
891 * generation. We only need this for requests; for responses
892 * we use the unique tlabel for finding the matching
893 * request.
894 *
895 * Alas some chips sometimes emit bus reset packets with a
896 * wrong generation. We set the correct generation for these
897 * at a slightly incorrect time (in bus_reset_work).
898 */
899 if (evt == OHCI1394_evt_bus_reset) {
900 if (!(ohci->quirks & QUIRK_RESET_PACKET))
901 ohci->request_generation = (p.header[2] >> 16) & 0xff;
902 } else if (ctx == &ohci->ar_request_ctx) {
903 fw_core_handle_request(&ohci->card, &p);
904 } else {
905 fw_core_handle_response(&ohci->card, &p);
906 }
907
908 return buffer + length + 1;
909}
910
911static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
912{
913 void *next;
914
915 while (p < end) {
916 next = handle_ar_packet(ctx, p);
917 if (!next)
918 return p;
919 p = next;
920 }
921
922 return p;
923}
924
925static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
926{
927 unsigned int i;
928
929 i = ar_first_buffer_index(ctx);
930 while (i != end_buffer) {
931 dma_sync_single_for_device(ctx->ohci->card.device,
932 ar_buffer_bus(ctx, i),
933 PAGE_SIZE, DMA_FROM_DEVICE);
934 ar_context_link_page(ctx, i);
935 i = ar_next_buffer_index(i);
936 }
937}
938
939static void ar_context_tasklet(unsigned long data)
940{
941 struct ar_context *ctx = (struct ar_context *)data;
942 unsigned int end_buffer_index, end_buffer_offset;
943 void *p, *end;
944
945 p = ctx->pointer;
946 if (!p)
947 return;
948
949 end_buffer_index = ar_search_last_active_buffer(ctx,
950 &end_buffer_offset);
951 ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
952 end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
953
954 if (end_buffer_index < ar_first_buffer_index(ctx)) {
955 /*
956 * The filled part of the overall buffer wraps around; handle
957 * all packets up to the buffer end here. If the last packet
958 * wraps around, its tail will be visible after the buffer end
959 * because the buffer start pages are mapped there again.
960 */
961 void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
962 p = handle_ar_packets(ctx, p, buffer_end);
963 if (p < buffer_end)
964 goto error;
965 /* adjust p to point back into the actual buffer */
966 p -= AR_BUFFERS * PAGE_SIZE;
967 }
968
969 p = handle_ar_packets(ctx, p, end);
970 if (p != end) {
971 if (p > end)
972 ar_context_abort(ctx, "inconsistent descriptor");
973 goto error;
974 }
975
976 ctx->pointer = p;
977 ar_recycle_buffers(ctx, end_buffer_index);
978
979 return;
980
981error:
982 ctx->pointer = NULL;
983}
984
985static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
986 unsigned int descriptors_offset, u32 regs)
987{
988 unsigned int i;
989 dma_addr_t dma_addr;
990 struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
991 struct descriptor *d;
992
993 ctx->regs = regs;
994 ctx->ohci = ohci;
995 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
996
997 for (i = 0; i < AR_BUFFERS; i++) {
998 ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
999 if (!ctx->pages[i])
1000 goto out_of_memory;
1001 dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
1002 0, PAGE_SIZE, DMA_FROM_DEVICE);
1003 if (dma_mapping_error(ohci->card.device, dma_addr)) {
1004 __free_page(ctx->pages[i]);
1005 ctx->pages[i] = NULL;
1006 goto out_of_memory;
1007 }
1008 set_page_private(ctx->pages[i], dma_addr);
1009 }
1010
1011 for (i = 0; i < AR_BUFFERS; i++)
1012 pages[i] = ctx->pages[i];
1013 for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
1014 pages[AR_BUFFERS + i] = ctx->pages[i];
1015 ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
1016 if (!ctx->buffer)
1017 goto out_of_memory;
1018
1019 ctx->descriptors = ohci->misc_buffer + descriptors_offset;
1020 ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
1021
1022 for (i = 0; i < AR_BUFFERS; i++) {
1023 d = &ctx->descriptors[i];
1024 d->req_count = cpu_to_le16(PAGE_SIZE);
1025 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
1026 DESCRIPTOR_STATUS |
1027 DESCRIPTOR_BRANCH_ALWAYS);
1028 d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i));
1029 d->branch_address = cpu_to_le32(ctx->descriptors_bus +
1030 ar_next_buffer_index(i) * sizeof(struct descriptor));
1031 }
1032
1033 return 0;
1034
1035out_of_memory:
1036 ar_context_release(ctx);
1037
1038 return -ENOMEM;
1039}
1040
1041static void ar_context_run(struct ar_context *ctx)
1042{
1043 unsigned int i;
1044
1045 for (i = 0; i < AR_BUFFERS; i++)
1046 ar_context_link_page(ctx, i);
1047
1048 ctx->pointer = ctx->buffer;
1049
1050 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
1051 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
1052}
1053
1054static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
1055{
1056 __le16 branch;
1057
1058 branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);
1059
1060 /* figure out which descriptor the branch address goes in */
1061 if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
1062 return d;
1063 else
1064 return d + z - 1;
1065}
1066
1067static void context_tasklet(unsigned long data)
1068{
1069 struct context *ctx = (struct context *) data;
1070 struct descriptor *d, *last;
1071 u32 address;
1072 int z;
1073 struct descriptor_buffer *desc;
1074
1075 desc = list_entry(ctx->buffer_list.next,
1076 struct descriptor_buffer, list);
1077 last = ctx->last;
1078 while (last->branch_address != 0) {
1079 struct descriptor_buffer *old_desc = desc;
1080 address = le32_to_cpu(last->branch_address);
1081 z = address & 0xf;
1082 address &= ~0xf;
1083 ctx->current_bus = address;
1084
1085 /* If the branch address points to a buffer outside of the
1086 * current buffer, advance to the next buffer. */
1087 if (address < desc->buffer_bus ||
1088 address >= desc->buffer_bus + desc->used)
1089 desc = list_entry(desc->list.next,
1090 struct descriptor_buffer, list);
1091 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
1092 last = find_branch_descriptor(d, z);
1093
1094 if (!ctx->callback(ctx, d, last))
1095 break;
1096
1097 if (old_desc != desc) {
1098 /* If we've advanced to the next buffer, move the
1099 * previous buffer to the free list. */
1100 unsigned long flags;
1101 old_desc->used = 0;
1102 spin_lock_irqsave(&ctx->ohci->lock, flags);
1103 list_move_tail(&old_desc->list, &ctx->buffer_list);
1104 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1105 }
1106 ctx->last = last;
1107 }
1108}
1109
1110/*
1111 * Allocate a new buffer and add it to the list of free buffers for this
1112 * context. Must be called with ohci->lock held.
1113 */
1114static int context_add_buffer(struct context *ctx)
1115{
1116 struct descriptor_buffer *desc;
1117 dma_addr_t uninitialized_var(bus_addr);
1118 int offset;
1119
1120 /*
1121 * 16MB of descriptors should be far more than enough for any DMA
1122 * program. This will catch run-away userspace or DoS attacks.
1123 */
1124 if (ctx->total_allocation >= 16*1024*1024)
1125 return -ENOMEM;
1126
1127 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
1128 &bus_addr, GFP_ATOMIC);
1129 if (!desc)
1130 return -ENOMEM;
1131
1132 offset = (void *)&desc->buffer - (void *)desc;
1133 desc->buffer_size = PAGE_SIZE - offset;
1134 desc->buffer_bus = bus_addr + offset;
1135 desc->used = 0;
1136
1137 list_add_tail(&desc->list, &ctx->buffer_list);
1138 ctx->total_allocation += PAGE_SIZE;
1139
1140 return 0;
1141}
1142
1143static int context_init(struct context *ctx, struct fw_ohci *ohci,
1144 u32 regs, descriptor_callback_t callback)
1145{
1146 ctx->ohci = ohci;
1147 ctx->regs = regs;
1148 ctx->total_allocation = 0;
1149
1150 INIT_LIST_HEAD(&ctx->buffer_list);
1151 if (context_add_buffer(ctx) < 0)
1152 return -ENOMEM;
1153
1154 ctx->buffer_tail = list_entry(ctx->buffer_list.next,
1155 struct descriptor_buffer, list);
1156
1157 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
1158 ctx->callback = callback;
1159
1160 /*
1161 * We put a dummy descriptor in the buffer that has a NULL
1162 * branch address and looks like it's been sent. That way we
1163 * have a descriptor to append DMA programs to.
1164 */
1165 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
1166 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
1167 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
1168 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
1169 ctx->last = ctx->buffer_tail->buffer;
1170 ctx->prev = ctx->buffer_tail->buffer;
1171 ctx->prev_z = 1;
1172
1173 return 0;
1174}
1175
1176static void context_release(struct context *ctx)
1177{
1178 struct fw_card *card = &ctx->ohci->card;
1179 struct descriptor_buffer *desc, *tmp;
1180
1181 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
1182 dma_free_coherent(card->device, PAGE_SIZE, desc,
1183 desc->buffer_bus -
1184 ((void *)&desc->buffer - (void *)desc));
1185}
1186
1187/* Must be called with ohci->lock held */
1188static struct descriptor *context_get_descriptors(struct context *ctx,
1189 int z, dma_addr_t *d_bus)
1190{
1191 struct descriptor *d = NULL;
1192 struct descriptor_buffer *desc = ctx->buffer_tail;
1193
1194 if (z * sizeof(*d) > desc->buffer_size)
1195 return NULL;
1196
1197 if (z * sizeof(*d) > desc->buffer_size - desc->used) {
1198 /* No room for the descriptor in this buffer, so advance to the
1199 * next one. */
1200
1201 if (desc->list.next == &ctx->buffer_list) {
1202 /* If there is no free buffer next in the list,
1203 * allocate one. */
1204 if (context_add_buffer(ctx) < 0)
1205 return NULL;
1206 }
1207 desc = list_entry(desc->list.next,
1208 struct descriptor_buffer, list);
1209 ctx->buffer_tail = desc;
1210 }
1211
1212 d = desc->buffer + desc->used / sizeof(*d);
1213 memset(d, 0, z * sizeof(*d));
1214 *d_bus = desc->buffer_bus + desc->used;
1215
1216 return d;
1217}
1218
1219static void context_run(struct context *ctx, u32 extra)
1220{
1221 struct fw_ohci *ohci = ctx->ohci;
1222
1223 reg_write(ohci, COMMAND_PTR(ctx->regs),
1224 le32_to_cpu(ctx->last->branch_address));
1225 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
1226 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
1227 ctx->running = true;
1228 flush_writes(ohci);
1229}
1230
1231static void context_append(struct context *ctx,
1232 struct descriptor *d, int z, int extra)
1233{
1234 dma_addr_t d_bus;
1235 struct descriptor_buffer *desc = ctx->buffer_tail;
1236 struct descriptor *d_branch;
1237
1238 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
1239
1240 desc->used += (z + extra) * sizeof(*d);
1241
1242 wmb(); /* finish init of new descriptors before branch_address update */
1243
1244 d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
1245 d_branch->branch_address = cpu_to_le32(d_bus | z);
1246
1247 /*
1248 * VT6306 incorrectly checks only the single descriptor at the
1249 * CommandPtr when the wake bit is written, so if it's a
1250 * multi-descriptor block starting with an INPUT_MORE, put a copy of
1251 * the branch address in the first descriptor.
1252 *
1253 * Not doing this for transmit contexts since not sure how it interacts
1254 * with skip addresses.
1255 */
1256 if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
1257 d_branch != ctx->prev &&
1258 (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
1259 cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
1260 ctx->prev->branch_address = cpu_to_le32(d_bus | z);
1261 }
1262
1263 ctx->prev = d;
1264 ctx->prev_z = z;
1265}
1266
1267static void context_stop(struct context *ctx)
1268{
1269 struct fw_ohci *ohci = ctx->ohci;
1270 u32 reg;
1271 int i;
1272
1273 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
1274 ctx->running = false;
1275
1276 for (i = 0; i < 1000; i++) {
1277 reg = reg_read(ohci, CONTROL_SET(ctx->regs));
1278 if ((reg & CONTEXT_ACTIVE) == 0)
1279 return;
1280
1281 if (i)
1282 udelay(10);
1283 }
1284 ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
1285}
1286
1287struct driver_data {
1288 u8 inline_data[8];
1289 struct fw_packet *packet;
1290};
1291
1292/*
1293 * This function apppends a packet to the DMA queue for transmission.
1294 * Must always be called with the ochi->lock held to ensure proper
1295 * generation handling and locking around packet queue manipulation.
1296 */
1297static int at_context_queue_packet(struct context *ctx,
1298 struct fw_packet *packet)
1299{
1300 struct fw_ohci *ohci = ctx->ohci;
1301 dma_addr_t d_bus, uninitialized_var(payload_bus);
1302 struct driver_data *driver_data;
1303 struct descriptor *d, *last;
1304 __le32 *header;
1305 int z, tcode;
1306
1307 d = context_get_descriptors(ctx, 4, &d_bus);
1308 if (d == NULL) {
1309 packet->ack = RCODE_SEND_ERROR;
1310 return -1;
1311 }
1312
1313 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
1314 d[0].res_count = cpu_to_le16(packet->timestamp);
1315
1316 /*
1317 * The DMA format for asynchronous link packets is different
1318 * from the IEEE1394 layout, so shift the fields around
1319 * accordingly.
1320 */
1321
1322 tcode = (packet->header[0] >> 4) & 0x0f;
1323 header = (__le32 *) &d[1];
1324 switch (tcode) {
1325 case TCODE_WRITE_QUADLET_REQUEST:
1326 case TCODE_WRITE_BLOCK_REQUEST:
1327 case TCODE_WRITE_RESPONSE:
1328 case TCODE_READ_QUADLET_REQUEST:
1329 case TCODE_READ_BLOCK_REQUEST:
1330 case TCODE_READ_QUADLET_RESPONSE:
1331 case TCODE_READ_BLOCK_RESPONSE:
1332 case TCODE_LOCK_REQUEST:
1333 case TCODE_LOCK_RESPONSE:
1334 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
1335 (packet->speed << 16));
1336 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
1337 (packet->header[0] & 0xffff0000));
1338 header[2] = cpu_to_le32(packet->header[2]);
1339
1340 if (TCODE_IS_BLOCK_PACKET(tcode))
1341 header[3] = cpu_to_le32(packet->header[3]);
1342 else
1343 header[3] = (__force __le32) packet->header[3];
1344
1345 d[0].req_count = cpu_to_le16(packet->header_length);
1346 break;
1347
1348 case TCODE_LINK_INTERNAL:
1349 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
1350 (packet->speed << 16));
1351 header[1] = cpu_to_le32(packet->header[1]);
1352 header[2] = cpu_to_le32(packet->header[2]);
1353 d[0].req_count = cpu_to_le16(12);
1354
1355 if (is_ping_packet(&packet->header[1]))
1356 d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
1357 break;
1358
1359 case TCODE_STREAM_DATA:
1360 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
1361 (packet->speed << 16));
1362 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
1363 d[0].req_count = cpu_to_le16(8);
1364 break;
1365
1366 default:
1367 /* BUG(); */
1368 packet->ack = RCODE_SEND_ERROR;
1369 return -1;
1370 }
1371
1372 BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
1373 driver_data = (struct driver_data *) &d[3];
1374 driver_data->packet = packet;
1375 packet->driver_data = driver_data;
1376
1377 if (packet->payload_length > 0) {
1378 if (packet->payload_length > sizeof(driver_data->inline_data)) {
1379 payload_bus = dma_map_single(ohci->card.device,
1380 packet->payload,
1381 packet->payload_length,
1382 DMA_TO_DEVICE);
1383 if (dma_mapping_error(ohci->card.device, payload_bus)) {
1384 packet->ack = RCODE_SEND_ERROR;
1385 return -1;
1386 }
1387 packet->payload_bus = payload_bus;
1388 packet->payload_mapped = true;
1389 } else {
1390 memcpy(driver_data->inline_data, packet->payload,
1391 packet->payload_length);
1392 payload_bus = d_bus + 3 * sizeof(*d);
1393 }
1394
1395 d[2].req_count = cpu_to_le16(packet->payload_length);
1396 d[2].data_address = cpu_to_le32(payload_bus);
1397 last = &d[2];
1398 z = 3;
1399 } else {
1400 last = &d[0];
1401 z = 2;
1402 }
1403
1404 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
1405 DESCRIPTOR_IRQ_ALWAYS |
1406 DESCRIPTOR_BRANCH_ALWAYS);
1407
1408 /* FIXME: Document how the locking works. */
1409 if (ohci->generation != packet->generation) {
1410 if (packet->payload_mapped)
1411 dma_unmap_single(ohci->card.device, payload_bus,
1412 packet->payload_length, DMA_TO_DEVICE);
1413 packet->ack = RCODE_GENERATION;
1414 return -1;
1415 }
1416
1417 context_append(ctx, d, z, 4 - z);
1418
1419 if (ctx->running)
1420 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
1421 else
1422 context_run(ctx, 0);
1423
1424 return 0;
1425}
1426
1427static void at_context_flush(struct context *ctx)
1428{
1429 tasklet_disable(&ctx->tasklet);
1430
1431 ctx->flushing = true;
1432 context_tasklet((unsigned long)ctx);
1433 ctx->flushing = false;
1434
1435 tasklet_enable(&ctx->tasklet);
1436}
1437
1438static int handle_at_packet(struct context *context,
1439 struct descriptor *d,
1440 struct descriptor *last)
1441{
1442 struct driver_data *driver_data;
1443 struct fw_packet *packet;
1444 struct fw_ohci *ohci = context->ohci;
1445 int evt;
1446
1447 if (last->transfer_status == 0 && !context->flushing)
1448 /* This descriptor isn't done yet, stop iteration. */
1449 return 0;
1450
1451 driver_data = (struct driver_data *) &d[3];
1452 packet = driver_data->packet;
1453 if (packet == NULL)
1454 /* This packet was cancelled, just continue. */
1455 return 1;
1456
1457 if (packet->payload_mapped)
1458 dma_unmap_single(ohci->card.device, packet->payload_bus,
1459 packet->payload_length, DMA_TO_DEVICE);
1460
1461 evt = le16_to_cpu(last->transfer_status) & 0x1f;
1462 packet->timestamp = le16_to_cpu(last->res_count);
1463
1464 log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
1465
1466 switch (evt) {
1467 case OHCI1394_evt_timeout:
1468 /* Async response transmit timed out. */
1469 packet->ack = RCODE_CANCELLED;
1470 break;
1471
1472 case OHCI1394_evt_flushed:
1473 /*
1474 * The packet was flushed should give same error as
1475 * when we try to use a stale generation count.
1476 */
1477 packet->ack = RCODE_GENERATION;
1478 break;
1479
1480 case OHCI1394_evt_missing_ack:
1481 if (context->flushing)
1482 packet->ack = RCODE_GENERATION;
1483 else {
1484 /*
1485 * Using a valid (current) generation count, but the
1486 * node is not on the bus or not sending acks.
1487 */
1488 packet->ack = RCODE_NO_ACK;
1489 }
1490 break;
1491
1492 case ACK_COMPLETE + 0x10:
1493 case ACK_PENDING + 0x10:
1494 case ACK_BUSY_X + 0x10:
1495 case ACK_BUSY_A + 0x10:
1496 case ACK_BUSY_B + 0x10:
1497 case ACK_DATA_ERROR + 0x10:
1498 case ACK_TYPE_ERROR + 0x10:
1499 packet->ack = evt - 0x10;
1500 break;
1501
1502 case OHCI1394_evt_no_status:
1503 if (context->flushing) {
1504 packet->ack = RCODE_GENERATION;
1505 break;
1506 }
1507 /* fall through */
1508
1509 default:
1510 packet->ack = RCODE_SEND_ERROR;
1511 break;
1512 }
1513
1514 packet->callback(packet, &ohci->card, packet->ack);
1515
1516 return 1;
1517}
1518
1519#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
1520#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
1521#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
1522#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
1523#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
1524
1525static void handle_local_rom(struct fw_ohci *ohci,
1526 struct fw_packet *packet, u32 csr)
1527{
1528 struct fw_packet response;
1529 int tcode, length, i;
1530
1531 tcode = HEADER_GET_TCODE(packet->header[0]);
1532 if (TCODE_IS_BLOCK_PACKET(tcode))
1533 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1534 else
1535 length = 4;
1536
1537 i = csr - CSR_CONFIG_ROM;
1538 if (i + length > CONFIG_ROM_SIZE) {
1539 fw_fill_response(&response, packet->header,
1540 RCODE_ADDRESS_ERROR, NULL, 0);
1541 } else if (!TCODE_IS_READ_REQUEST(tcode)) {
1542 fw_fill_response(&response, packet->header,
1543 RCODE_TYPE_ERROR, NULL, 0);
1544 } else {
1545 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
1546 (void *) ohci->config_rom + i, length);
1547 }
1548
1549 fw_core_handle_response(&ohci->card, &response);
1550}
1551
1552static void handle_local_lock(struct fw_ohci *ohci,
1553 struct fw_packet *packet, u32 csr)
1554{
1555 struct fw_packet response;
1556 int tcode, length, ext_tcode, sel, try;
1557 __be32 *payload, lock_old;
1558 u32 lock_arg, lock_data;
1559
1560 tcode = HEADER_GET_TCODE(packet->header[0]);
1561 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1562 payload = packet->payload;
1563 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
1564
1565 if (tcode == TCODE_LOCK_REQUEST &&
1566 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
1567 lock_arg = be32_to_cpu(payload[0]);
1568 lock_data = be32_to_cpu(payload[1]);
1569 } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
1570 lock_arg = 0;
1571 lock_data = 0;
1572 } else {
1573 fw_fill_response(&response, packet->header,
1574 RCODE_TYPE_ERROR, NULL, 0);
1575 goto out;
1576 }
1577
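/*
 * BUS_MANAGER_ID, BANDWIDTH_AVAILABLE and CHANNELS_AVAILABLE_HI/_LO are
 * consecutive quadlet registers, so this yields the select values 0..3
 * expected by CSRControl.
 */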
1578 sel = (csr - CSR_BUS_MANAGER_ID) / 4;
1579 reg_write(ohci, OHCI1394_CSRData, lock_data);
1580 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
1581 reg_write(ohci, OHCI1394_CSRControl, sel);
1582
1583 for (try = 0; try < 20; try++)
1584 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
1585 lock_old = cpu_to_be32(reg_read(ohci,
1586 OHCI1394_CSRData));
1587 fw_fill_response(&response, packet->header,
1588 RCODE_COMPLETE,
1589 &lock_old, sizeof(lock_old));
1590 goto out;
1591 }
1592
1593 ohci_err(ohci, "swap not done (CSR lock timeout)\n");
1594 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
1595
1596 out:
1597 fw_core_handle_response(&ohci->card, &response);
1598}
1599
1600static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1601{
1602 u64 offset, csr;
1603
1604 if (ctx == &ctx->ohci->at_request_ctx) {
1605 packet->ack = ACK_PENDING;
1606 packet->callback(packet, &ctx->ohci->card, packet->ack);
1607 }
1608
1609 offset =
1610 ((unsigned long long)
1611 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
1612 packet->header[2];
1613 csr = offset - CSR_REGISTER_BASE;
1614
1615 /* Handle config rom reads. */
1616 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
1617 handle_local_rom(ctx->ohci, packet, csr);
1618 else switch (csr) {
1619 case CSR_BUS_MANAGER_ID:
1620 case CSR_BANDWIDTH_AVAILABLE:
1621 case CSR_CHANNELS_AVAILABLE_HI:
1622 case CSR_CHANNELS_AVAILABLE_LO:
1623 handle_local_lock(ctx->ohci, packet, csr);
1624 break;
1625 default:
1626 if (ctx == &ctx->ohci->at_request_ctx)
1627 fw_core_handle_request(&ctx->ohci->card, packet);
1628 else
1629 fw_core_handle_response(&ctx->ohci->card, packet);
1630 break;
1631 }
1632
1633 if (ctx == &ctx->ohci->at_response_ctx) {
1634 packet->ack = ACK_COMPLETE;
1635 packet->callback(packet, &ctx->ohci->card, packet->ack);
1636 }
1637}
1638
1639static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
1640{
1641 unsigned long flags;
1642 int ret;
1643
1644 spin_lock_irqsave(&ctx->ohci->lock, flags);
1645
1646 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
1647 ctx->ohci->generation == packet->generation) {
1648 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1649 handle_local_request(ctx, packet);
1650 return;
1651 }
1652
1653 ret = at_context_queue_packet(ctx, packet);
1654 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1655
1656 if (ret < 0)
1657 packet->callback(packet, &ctx->ohci->card, packet->ack);
1658
1659}
1660
1661static void detect_dead_context(struct fw_ohci *ohci,
1662 const char *name, unsigned int regs)
1663{
1664 u32 ctl;
1665
1666 ctl = reg_read(ohci, CONTROL_SET(regs));
1667 if (ctl & CONTEXT_DEAD)
1668 ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
1669 name, evts[ctl & 0x1f]);
1670}
1671
1672static void handle_dead_contexts(struct fw_ohci *ohci)
1673{
1674 unsigned int i;
1675 char name[8];
1676
1677 detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
1678 detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
1679 detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
1680 detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
1681 for (i = 0; i < 32; ++i) {
1682 if (!(ohci->it_context_support & (1 << i)))
1683 continue;
1684 sprintf(name, "IT%u", i);
1685 detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
1686 }
1687 for (i = 0; i < 32; ++i) {
1688 if (!(ohci->ir_context_support & (1 << i)))
1689 continue;
1690 sprintf(name, "IR%u", i);
1691 detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
1692 }
1693 /* TODO: maybe try to flush and restart the dead contexts */
1694}
1695
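/*
 * The isochronous cycle timer packs three fields: cycleSeconds in bits
 * 31-25, cycleCount (0..7999) in bits 24-12, cycleOffset (0..3071) in
 * bits 11-0. Convert it to a flat count of 24.576 MHz ticks; a cycle is
 * 3072 ticks and a second is 8000 cycles, so e.g. cycleCount 3 with
 * cycleOffset 1 gives 3 * 3072 + 1 = 9217 ticks.
 */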
1696static u32 cycle_timer_ticks(u32 cycle_timer)
1697{
1698 u32 ticks;
1699
1700 ticks = cycle_timer & 0xfff;
1701 ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
1702 ticks += (3072 * 8000) * (cycle_timer >> 25);
1703
1704 return ticks;
1705}
1706
1707/*
1708 * Some controllers exhibit one or more of the following bugs when updating the
1709 * iso cycle timer register:
1710 * - When the lowest six bits are wrapping around to zero, a read that happens
1711 * at the same time will return garbage in the lowest ten bits.
1712 * - When the cycleOffset field wraps around to zero, the cycleCount field is
1713 * not incremented for about 60 ns.
1714 * - Occasionally, the entire register reads zero.
1715 *
1716 * To catch these, we read the register three times and ensure that the
1717 * two consecutive differences are approximately equal, i.e. neither is
1718 * more than twice the other. Furthermore, any negative difference indicates an
1719 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
1720 * execute, so we have enough precision to compute the ratio of the differences.)
1721 */
1722static u32 get_cycle_time(struct fw_ohci *ohci)
1723{
1724 u32 c0, c1, c2;
1725 u32 t0, t1, t2;
1726 s32 diff01, diff12;
1727 int i;
1728
1729 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1730
1731 if (ohci->quirks & QUIRK_CYCLE_TIMER) {
1732 i = 0;
1733 c1 = c2;
1734 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1735 do {
1736 c0 = c1;
1737 c1 = c2;
1738 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1739 t0 = cycle_timer_ticks(c0);
1740 t1 = cycle_timer_ticks(c1);
1741 t2 = cycle_timer_ticks(c2);
1742 diff01 = t1 - t0;
1743 diff12 = t2 - t1;
1744 } while ((diff01 <= 0 || diff12 <= 0 ||
1745 diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
1746 && i++ < 20);
1747 }
1748
1749 return c2;
1750}
1751
1752/*
1753 * This function has to be called at least every 64 seconds. The bus_time
1754 * field stores not only the upper 25 bits of the BUS_TIME register but also
1755 * the most significant bit of the cycle timer in bit 6 so that we can detect
1756 * changes in this bit.
1757 */
1758static u32 update_bus_time(struct fw_ohci *ohci)
1759{
1760 u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
1761
1762 if (unlikely(!ohci->bus_time_running)) {
1763 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
1764 ohci->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) |
1765 (cycle_time_seconds & 0x40);
1766 ohci->bus_time_running = true;
1767 }
1768
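/*
 * Bit 6 of bus_time caches the MSB of cycle_time_seconds; when they
 * differ, the 7-bit seconds field wrapped around since the last call
 * and another 64 seconds (0x40) have to be added to the bus time.
 */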
1769 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
1770 ohci->bus_time += 0x40;
1771
1772 return ohci->bus_time | cycle_time_seconds;
1773}
1774
1775static int get_status_for_port(struct fw_ohci *ohci, int port_index)
1776{
1777 int reg;
1778
1779 mutex_lock(&ohci->phy_reg_mutex);
1780 reg = write_phy_reg(ohci, 7, port_index);
1781 if (reg >= 0)
1782 reg = read_phy_reg(ohci, 8);
1783 mutex_unlock(&ohci->phy_reg_mutex);
1784 if (reg < 0)
1785 return reg;
1786
1787 switch (reg & 0x0f) {
1788 case 0x06:
1789 return 2; /* is child node (connected to parent node) */
1790 case 0x0e:
1791 return 3; /* is parent node (connected to child node) */
1792 }
1793 return 1; /* not connected */
1794}
1795
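/*
 * The self ID buffer is kept sorted by phy ID (the top byte of each
 * entry). Return the position at which the given self ID has to be
 * inserted, or -1 if an entry with the same phy ID already exists.
 */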
1796static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
1797 int self_id_count)
1798{
1799 int i;
1800 u32 entry;
1801
1802 for (i = 0; i < self_id_count; i++) {
1803 entry = ohci->self_id_buffer[i];
1804 if ((self_id & 0xff000000) == (entry & 0xff000000))
1805 return -1;
1806 if ((self_id & 0xff000000) < (entry & 0xff000000))
1807 return i;
1808 }
1809 return i;
1810}
1811
1812static int initiated_reset(struct fw_ohci *ohci)
1813{
1814 int reg;
1815 int ret = 0;
1816
1817 mutex_lock(&ohci->phy_reg_mutex);
1818 reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
1819 if (reg >= 0) {
1820 reg = read_phy_reg(ohci, 8);
1821 reg |= 0x40;
1822 reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
1823 if (reg >= 0) {
1824 reg = read_phy_reg(ohci, 12); /* read register 12 */
1825 if (reg >= 0) {
1826 if ((reg & 0x08) == 0x08) {
1827 /* bit 3 indicates "initiated reset" */
1828 ret = 0x2;
1829 }
1830 }
1831 }
1832 }
1833 mutex_unlock(&ohci->phy_reg_mutex);
1834 return ret;
1835}
1836
1837/*
1838 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
1839 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
1840 * Construct the selfID from phy register contents.
1841 */
1842static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
1843{
1844 int reg, i, pos, status;
1845 /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
1846 u32 self_id = 0x8040c800;
1847
1848 reg = reg_read(ohci, OHCI1394_NodeID);
1849 if (!(reg & OHCI1394_NodeID_idValid)) {
1850 ohci_notice(ohci,
1851 "node ID not valid, new bus reset in progress\n");
1852 return -EBUSY;
1853 }
1854 self_id |= ((reg & 0x3f) << 24); /* phy ID */
1855
1856 reg = ohci_read_phy_reg(&ohci->card, 4);
1857 if (reg < 0)
1858 return reg;
1859 self_id |= ((reg & 0x07) << 8); /* power class */
1860
1861 reg = ohci_read_phy_reg(&ohci->card, 1);
1862 if (reg < 0)
1863 return reg;
1864 self_id |= ((reg & 0x3f) << 16); /* gap count */
1865
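/*
 * Each of the three ports contributes a 2-bit connection status:
 * port 0 in bits 7-6, port 1 in bits 5-4, port 2 in bits 3-2 of the
 * self ID quadlet.
 */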
1866 for (i = 0; i < 3; i++) {
1867 status = get_status_for_port(ohci, i);
1868 if (status < 0)
1869 return status;
1870 self_id |= ((status & 0x3) << (6 - (i * 2)));
1871 }
1872
1873 self_id |= initiated_reset(ohci);
1874
1875 pos = get_self_id_pos(ohci, self_id, self_id_count);
1876 if (pos >= 0) {
1877 memmove(&(ohci->self_id_buffer[pos+1]),
1878 &(ohci->self_id_buffer[pos]),
1879 (self_id_count - pos) * sizeof(*ohci->self_id_buffer));
1880 ohci->self_id_buffer[pos] = self_id;
1881 self_id_count++;
1882 }
1883 return self_id_count;
1884}
1885
1886static void bus_reset_work(struct work_struct *work)
1887{
1888 struct fw_ohci *ohci =
1889 container_of(work, struct fw_ohci, bus_reset_work);
1890 int self_id_count, generation, new_generation, i, j;
1891 u32 reg;
1892 void *free_rom = NULL;
1893 dma_addr_t free_rom_bus = 0;
1894 bool is_new_root;
1895
1896 reg = reg_read(ohci, OHCI1394_NodeID);
1897 if (!(reg & OHCI1394_NodeID_idValid)) {
1898 ohci_notice(ohci,
1899 "node ID not valid, new bus reset in progress\n");
1900 return;
1901 }
1902 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
1903 ohci_notice(ohci, "misconfigured bus\n");
1904 return;
1905 }
1906 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1907 OHCI1394_NodeID_nodeNumber);
1908
1909 is_new_root = (reg & OHCI1394_NodeID_root) != 0;
1910 if (!(ohci->is_root && is_new_root))
1911 reg_write(ohci, OHCI1394_LinkControlSet,
1912 OHCI1394_LinkControl_cycleMaster);
1913 ohci->is_root = is_new_root;
1914
1915 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1916 if (reg & OHCI1394_SelfIDCount_selfIDError) {
1917 ohci_notice(ohci, "self ID receive error\n");
1918 return;
1919 }
1920 /*
1921 * The count in the SelfIDCount register is the number of
1922 * bytes in the self ID receive buffer. Since we also receive
1923 * the inverted quadlets and a header quadlet, we shift one
1924 * bit extra to get the actual number of self IDs.
1925 */
1926 self_id_count = (reg >> 3) & 0xff;
1927
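/* 63 nodes with up to 4 self ID packets each gives at most 252. */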
1928 if (self_id_count > 252) {
1929 ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
1930 return;
1931 }
1932
1933 generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
1934 rmb();
1935
1936 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1937 u32 id = cond_le32_to_cpu(ohci->self_id[i]);
1938 u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
1939
1940 if (id != ~id2) {
1941 /*
1942 * If the invalid data looks like a cycle start packet,
1943 * it's likely to be the result of the cycle master
1944 * having a wrong gap count. In this case, the self IDs
1945 * so far are valid and should be processed so that the
1946 * bus manager can then correct the gap count.
1947 */
1948 if (id == 0xffff008f) {
1949 ohci_notice(ohci, "ignoring spurious self IDs\n");
1950 self_id_count = j;
1951 break;
1952 }
1953
1954 ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
1955 j, self_id_count, id, id2);
1956 return;
1957 }
1958 ohci->self_id_buffer[j] = id;
1959 }
1960
1961 if (ohci->quirks & QUIRK_TI_SLLZ059) {
1962 self_id_count = find_and_insert_self_id(ohci, self_id_count);
1963 if (self_id_count < 0) {
1964 ohci_notice(ohci,
1965 "could not construct local self ID\n");
1966 return;
1967 }
1968 }
1969
1970 if (self_id_count == 0) {
1971 ohci_notice(ohci, "no self IDs\n");
1972 return;
1973 }
1974 rmb();
1975
1976 /*
1977 * Check the consistency of the self IDs we just read. The
1978 * problem we face is that a new bus reset can start while we
1979 * read out the self IDs from the DMA buffer. If this happens,
1980 * the DMA buffer will be overwritten with new self IDs and we
1981 * will read out inconsistent data. The OHCI specification
1982 * (section 11.2) recommends a technique similar to
1983 * linux/seqlock.h, where we remember the generation of the
1984 * self IDs in the buffer before reading them out and compare
1985 * it to the current generation after reading them out. If
1986 * the two generations match we know we have a consistent set
1987 * of self IDs.
1988 */
1989
1990 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
1991 if (new_generation != generation) {
1992 ohci_notice(ohci, "new bus reset, discarding self IDs\n");
1993 return;
1994 }
1995
1996 /* FIXME: Document how the locking works. */
1997 spin_lock_irq(&ohci->lock);
1998
1999 ohci->generation = -1; /* prevent AT packet queueing */
2000 context_stop(&ohci->at_request_ctx);
2001 context_stop(&ohci->at_response_ctx);
2002
2003 spin_unlock_irq(&ohci->lock);
2004
2005 /*
2006 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
2007 * packets in the AT queues and software needs to drain them.
2008 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
2009 */
2010 at_context_flush(&ohci->at_request_ctx);
2011 at_context_flush(&ohci->at_response_ctx);
2012
2013 spin_lock_irq(&ohci->lock);
2014
2015 ohci->generation = generation;
2016 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2017
2018 if (ohci->quirks & QUIRK_RESET_PACKET)
2019 ohci->request_generation = generation;
2020
2021 /*
2022 * This next bit is unrelated to the AT context stuff but we
2023 * have to do it under the spinlock also. If a new config rom
2024 * was set up before this reset, the old one is now no longer
2025 * in use and we can free it. Update the config rom pointers
2026 * to point to the current config rom and clear the
2027 * next_config_rom pointer so a new update can take place.
2028 */
2029
2030 if (ohci->next_config_rom != NULL) {
2031 if (ohci->next_config_rom != ohci->config_rom) {
2032 free_rom = ohci->config_rom;
2033 free_rom_bus = ohci->config_rom_bus;
2034 }
2035 ohci->config_rom = ohci->next_config_rom;
2036 ohci->config_rom_bus = ohci->next_config_rom_bus;
2037 ohci->next_config_rom = NULL;
2038
2039 /*
2040 * Restore config_rom image and manually update
2041 * config_rom registers. Writing the header quadlet
2042 * will indicate that the config rom is ready, so we
2043 * do that last.
2044 */
2045 reg_write(ohci, OHCI1394_BusOptions,
2046 be32_to_cpu(ohci->config_rom[2]));
2047 ohci->config_rom[0] = ohci->next_header;
2048 reg_write(ohci, OHCI1394_ConfigROMhdr,
2049 be32_to_cpu(ohci->next_header));
2050 }
2051
2052 if (param_remote_dma) {
2053 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
2054 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
2055 }
2056
2057 spin_unlock_irq(&ohci->lock);
2058
2059 if (free_rom)
2060 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2061 free_rom, free_rom_bus);
2062
2063 log_selfids(ohci, generation, self_id_count);
2064
2065 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
2066 self_id_count, ohci->self_id_buffer,
2067 ohci->csr_state_setclear_abdicate);
2068 ohci->csr_state_setclear_abdicate = false;
2069}
2070
2071static irqreturn_t irq_handler(int irq, void *data)
2072{
2073 struct fw_ohci *ohci = data;
2074 u32 event, iso_event;
2075 int i;
2076
2077 event = reg_read(ohci, OHCI1394_IntEventClear);
2078
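/*
 * 0 means the interrupt was not ours; all ones means the card is no
 * longer accessible (reads from unplugged PCI devices return ~0).
 */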
2079 if (!event || !~event)
2080 return IRQ_NONE;
2081
2082 /*
2083 * busReset and postedWriteErr must not be cleared yet
2084 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
2085 */
2086 reg_write(ohci, OHCI1394_IntEventClear,
2087 event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
2088 log_irqs(ohci, event);
2089
2090 if (event & OHCI1394_selfIDComplete)
2091 queue_work(selfid_workqueue, &ohci->bus_reset_work);
2092
2093 if (event & OHCI1394_RQPkt)
2094 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
2095
2096 if (event & OHCI1394_RSPkt)
2097 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
2098
2099 if (event & OHCI1394_reqTxComplete)
2100 tasklet_schedule(&ohci->at_request_ctx.tasklet);
2101
2102 if (event & OHCI1394_respTxComplete)
2103 tasklet_schedule(&ohci->at_response_ctx.tasklet);
2104
2105 if (event & OHCI1394_isochRx) {
2106 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
2107 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
2108
2109 while (iso_event) {
2110 i = ffs(iso_event) - 1;
2111 tasklet_schedule(
2112 &ohci->ir_context_list[i].context.tasklet);
2113 iso_event &= ~(1 << i);
2114 }
2115 }
2116
2117 if (event & OHCI1394_isochTx) {
2118 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
2119 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
2120
2121 while (iso_event) {
2122 i = ffs(iso_event) - 1;
2123 tasklet_schedule(
2124 &ohci->it_context_list[i].context.tasklet);
2125 iso_event &= ~(1 << i);
2126 }
2127 }
2128
2129 if (unlikely(event & OHCI1394_regAccessFail))
2130 ohci_err(ohci, "register access failure\n");
2131
2132 if (unlikely(event & OHCI1394_postedWriteErr)) {
2133 reg_read(ohci, OHCI1394_PostedWriteAddressHi);
2134 reg_read(ohci, OHCI1394_PostedWriteAddressLo);
2135 reg_write(ohci, OHCI1394_IntEventClear,
2136 OHCI1394_postedWriteErr);
2137 if (printk_ratelimit())
2138 ohci_err(ohci, "PCI posted write error\n");
2139 }
2140
2141 if (unlikely(event & OHCI1394_cycleTooLong)) {
2142 if (printk_ratelimit())
2143 ohci_notice(ohci, "isochronous cycle too long\n");
2144 reg_write(ohci, OHCI1394_LinkControlSet,
2145 OHCI1394_LinkControl_cycleMaster);
2146 }
2147
2148 if (unlikely(event & OHCI1394_cycleInconsistent)) {
2149 /*
2150 * We need to clear this event bit in order to make
2151 * cycleMatch isochronous I/O work. In theory we should
2152 * stop active cycleMatch iso contexts now and restart
2153 * them at least two cycles later. (FIXME?)
2154 */
2155 if (printk_ratelimit())
2156 ohci_notice(ohci, "isochronous cycle inconsistent\n");
2157 }
2158
2159 if (unlikely(event & OHCI1394_unrecoverableError))
2160 handle_dead_contexts(ohci);
2161
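/*
 * update_bus_time() ends up reading the cycle timer register, which
 * also flushes the posted writes above; otherwise, flush explicitly.
 */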
2162 if (event & OHCI1394_cycle64Seconds) {
2163 spin_lock(&ohci->lock);
2164 update_bus_time(ohci);
2165 spin_unlock(&ohci->lock);
2166 } else
2167 flush_writes(ohci);
2168
2169 return IRQ_HANDLED;
2170}
2171
2172static int software_reset(struct fw_ohci *ohci)
2173{
2174 u32 val;
2175 int i;
2176
2177 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
2178 for (i = 0; i < 500; i++) {
2179 val = reg_read(ohci, OHCI1394_HCControlSet);
2180 if (!~val)
2181 return -ENODEV; /* Card was ejected. */
2182
2183 if (!(val & OHCI1394_HCControl_softReset))
2184 return 0;
2185
2186 msleep(1);
2187 }
2188
2189 return -EBUSY;
2190}
2191
2192static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
2193{
2194 size_t size = length * 4;
2195
2196 memcpy(dest, src, size);
2197 if (size < CONFIG_ROM_SIZE)
2198 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
2199}
2200
2201static int configure_1394a_enhancements(struct fw_ohci *ohci)
2202{
2203 bool enable_1394a;
2204 int ret, clear, set, offset;
2205
2206 /* Check if the driver should configure link and PHY. */
2207 if (!(reg_read(ohci, OHCI1394_HCControlSet) &
2208 OHCI1394_HCControl_programPhyEnable))
2209 return 0;
2210
2211 /* Paranoia: check whether the PHY supports 1394a, too. */
2212 enable_1394a = false;
2213 ret = read_phy_reg(ohci, 2);
2214 if (ret < 0)
2215 return ret;
2216 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
2217 ret = read_paged_phy_reg(ohci, 1, 8);
2218 if (ret < 0)
2219 return ret;
2220 if (ret >= 1)
2221 enable_1394a = true;
2222 }
2223
2224 if (ohci->quirks & QUIRK_NO_1394A)
2225 enable_1394a = false;
2226
2227 /* Configure PHY and link consistently. */
2228 if (enable_1394a) {
2229 clear = 0;
2230 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2231 } else {
2232 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2233 set = 0;
2234 }
2235 ret = update_phy_reg(ohci, 5, clear, set);
2236 if (ret < 0)
2237 return ret;
2238
2239 if (enable_1394a)
2240 offset = OHCI1394_HCControlSet;
2241 else
2242 offset = OHCI1394_HCControlClear;
2243 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
2244
2245 /* Clean up: configuration has been taken care of. */
2246 reg_write(ohci, OHCI1394_HCControlClear,
2247 OHCI1394_HCControl_programPhyEnable);
2248
2249 return 0;
2250}
2251
2252static int probe_tsb41ba3d(struct fw_ohci *ohci)
2253{
2254 /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
2255 static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
2256 int reg, i;
2257
2258 reg = read_phy_reg(ohci, 2);
2259 if (reg < 0)
2260 return reg;
2261 if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
2262 return 0;
2263
2264 for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
2265 reg = read_paged_phy_reg(ohci, 1, i + 10);
2266 if (reg < 0)
2267 return reg;
2268 if (reg != id[i])
2269 return 0;
2270 }
2271 return 1;
2272}
2273
2274static int ohci_enable(struct fw_card *card,
2275 const __be32 *config_rom, size_t length)
2276{
2277 struct fw_ohci *ohci = fw_ohci(card);
2278 u32 lps, version, irqs;
2279 int i, ret;
2280
2281 ret = software_reset(ohci);
2282 if (ret < 0) {
2283 ohci_err(ohci, "failed to reset ohci card\n");
2284 return ret;
2285 }
2286
2287 /*
2288 * Now enable LPS, which we need in order to start accessing
2289 * most of the registers. In fact, on some cards (ALI M5251),
2290 * accessing registers in the SClk domain without LPS enabled
2291 * will lock up the machine. Wait 50msec to make sure we have
2292 * full link enabled. However, with some cards (well, at least
2293 * a JMicron PCIe card), we have to try again sometimes.
2294 *
2295 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
2296 * cannot actually use the phy at that time. These also need a pause
2297 * of tens of milliseconds between the LPS write and the first phy access.
2298 */
2299
2300 reg_write(ohci, OHCI1394_HCControlSet,
2301 OHCI1394_HCControl_LPS |
2302 OHCI1394_HCControl_postedWriteEnable);
2303 flush_writes(ohci);
2304
2305 for (lps = 0, i = 0; !lps && i < 3; i++) {
2306 msleep(50);
2307 lps = reg_read(ohci, OHCI1394_HCControlSet) &
2308 OHCI1394_HCControl_LPS;
2309 }
2310
2311 if (!lps) {
2312 ohci_err(ohci, "failed to set Link Power Status\n");
2313 return -EIO;
2314 }
2315
2316 if (ohci->quirks & QUIRK_TI_SLLZ059) {
2317 ret = probe_tsb41ba3d(ohci);
2318 if (ret < 0)
2319 return ret;
2320 if (ret)
2321 ohci_notice(ohci, "local TSB41BA3D phy\n");
2322 else
2323 ohci->quirks &= ~QUIRK_TI_SLLZ059;
2324 }
2325
2326 reg_write(ohci, OHCI1394_HCControlClear,
2327 OHCI1394_HCControl_noByteSwapData);
2328
2329 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
2330 reg_write(ohci, OHCI1394_LinkControlSet,
2331 OHCI1394_LinkControl_cycleTimerEnable |
2332 OHCI1394_LinkControl_cycleMaster);
2333
2334 reg_write(ohci, OHCI1394_ATRetries,
2335 OHCI1394_MAX_AT_REQ_RETRIES |
2336 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
2337 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
2338 (200 << 16));
2339
2340 ohci->bus_time_running = false;
2341
2342 for (i = 0; i < 32; i++)
2343 if (ohci->ir_context_support & (1 << i))
2344 reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
2345 IR_CONTEXT_MULTI_CHANNEL_MODE);
2346
2347 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2348 if (version >= OHCI_VERSION_1_1) {
2349 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
2350 0xfffffffe);
2351 card->broadcast_channel_auto_allocated = true;
2352 }
2353
2354 /* Get implemented bits of the priority arbitration request counter. */
2355 reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
2356 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
2357 reg_write(ohci, OHCI1394_FairnessControl, 0);
2358 card->priority_budget_implemented = ohci->pri_req_max != 0;
2359
2360 reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
2361 reg_write(ohci, OHCI1394_IntEventClear, ~0);
2362 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
2363
2364 ret = configure_1394a_enhancements(ohci);
2365 if (ret < 0)
2366 return ret;
2367
2368 /* Activate link_on bit and contender bit in our self ID packets. */
2369 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
2370 if (ret < 0)
2371 return ret;
2372
2373 /*
2374 * When the link is not yet enabled, the atomic config rom
2375 * update mechanism described below in ohci_set_config_rom()
2376 * is not active. We have to update ConfigRomHeader and
2377 * BusOptions manually, and the write to ConfigROMmap takes
2378 * effect immediately. We tie this to the enabling of the
2379 * link, so we have a valid config rom before enabling - the
2380 * OHCI requires that ConfigROMhdr and BusOptions have valid
2381 * values before enabling.
2382 *
2383 * However, when the ConfigROMmap is written, some controllers
2384 * always read back quadlets 0 and 2 from the config rom to
2385 * the ConfigRomHeader and BusOptions registers on bus reset.
2386 * They shouldn't do that in this initial case where the link
2387 * isn't enabled. This means we have to use the same
2388 * workaround here, setting the bus header to 0 and then writing
2389 * the right values in bus_reset_work().
2390 */
2391
2392 if (config_rom) {
2393 ohci->next_config_rom =
2394 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2395 &ohci->next_config_rom_bus,
2396 GFP_KERNEL);
2397 if (ohci->next_config_rom == NULL)
2398 return -ENOMEM;
2399
2400 copy_config_rom(ohci->next_config_rom, config_rom, length);
2401 } else {
2402 /*
2403 * In the suspend case, config_rom is NULL, which
2404 * means that we just reuse the old config rom.
2405 */
2406 ohci->next_config_rom = ohci->config_rom;
2407 ohci->next_config_rom_bus = ohci->config_rom_bus;
2408 }
2409
2410 ohci->next_header = ohci->next_config_rom[0];
2411 ohci->next_config_rom[0] = 0;
2412 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
2413 reg_write(ohci, OHCI1394_BusOptions,
2414 be32_to_cpu(ohci->next_config_rom[2]));
2415 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2416
2417 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
2418
2419 irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
2420 OHCI1394_RQPkt | OHCI1394_RSPkt |
2421 OHCI1394_isochTx | OHCI1394_isochRx |
2422 OHCI1394_postedWriteErr |
2423 OHCI1394_selfIDComplete |
2424 OHCI1394_regAccessFail |
2425 OHCI1394_cycleInconsistent |
2426 OHCI1394_unrecoverableError |
2427 OHCI1394_cycleTooLong |
2428 OHCI1394_masterIntEnable;
2429 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
2430 irqs |= OHCI1394_busReset;
2431 reg_write(ohci, OHCI1394_IntMaskSet, irqs);
2432
2433 reg_write(ohci, OHCI1394_HCControlSet,
2434 OHCI1394_HCControl_linkEnable |
2435 OHCI1394_HCControl_BIBimageValid);
2436
2437 reg_write(ohci, OHCI1394_LinkControlSet,
2438 OHCI1394_LinkControl_rcvSelfID |
2439 OHCI1394_LinkControl_rcvPhyPkt);
2440
2441 ar_context_run(&ohci->ar_request_ctx);
2442 ar_context_run(&ohci->ar_response_ctx);
2443
2444 flush_writes(ohci);
2445
2446 /* We are ready to go, reset bus to finish initialization. */
2447 fw_schedule_bus_reset(&ohci->card, false, true);
2448
2449 return 0;
2450}
2451
2452static int ohci_set_config_rom(struct fw_card *card,
2453 const __be32 *config_rom, size_t length)
2454{
2455 struct fw_ohci *ohci;
2456 __be32 *next_config_rom;
2457 dma_addr_t uninitialized_var(next_config_rom_bus);
2458
2459 ohci = fw_ohci(card);
2460
2461 /*
2462 * When the OHCI controller is enabled, the config rom update
2463 * mechanism is a bit tricky, but easy enough to use. See
2464 * section 5.5.6 in the OHCI specification.
2465 *
2466 * The OHCI controller caches the new config rom address in a
2467 * shadow register (ConfigROMmapNext) and needs a bus reset
2468 * for the changes to take place. When the bus reset is
2469 * detected, the controller loads the new values for the
2470 * ConfigRomHeader and BusOptions registers from the specified
2471 * config rom and loads ConfigROMmap from the ConfigROMmapNext
2472 * shadow register. All automatically and atomically.
2473 *
2474 * Now, there's a twist to this story. The automatic load of
2475 * ConfigRomHeader and BusOptions doesn't honor the
2476 * noByteSwapData bit, so with a be32 config rom, the
2477 * controller will load be32 values in to these registers
2478 * during the atomic update, even on little endian
2479 * architectures. The workaround we use is to put a 0 in the
2480 * header quadlet; 0 is endian agnostic and means that the
2481 * config rom isn't ready yet. In bus_reset_work() we then
2482 * set up the real values for the two registers.
2483 *
2484 * We use ohci->lock to avoid racing with the code that sets
2485 * ohci->next_config_rom to NULL (see bus_reset_work).
2486 */
2487
2488 next_config_rom =
2489 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2490 &next_config_rom_bus, GFP_KERNEL);
2491 if (next_config_rom == NULL)
2492 return -ENOMEM;
2493
2494 spin_lock_irq(&ohci->lock);
2495
2496 /*
2497 * If there is not an already pending config_rom update,
2498 * push our new allocation into the ohci->next_config_rom
2499 * and then mark the local variable as null so that we
2500 * won't deallocate the new buffer.
2501 *
2502 * OTOH, if there is a pending config_rom update, just
2503 * use that buffer with the new config_rom data, and
2504 * let this routine free the unused DMA allocation.
2505 */
2506
2507 if (ohci->next_config_rom == NULL) {
2508 ohci->next_config_rom = next_config_rom;
2509 ohci->next_config_rom_bus = next_config_rom_bus;
2510 next_config_rom = NULL;
2511 }
2512
2513 copy_config_rom(ohci->next_config_rom, config_rom, length);
2514
2515 ohci->next_header = config_rom[0];
2516 ohci->next_config_rom[0] = 0;
2517
2518 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2519
2520 spin_unlock_irq(&ohci->lock);
2521
2522 /* If we didn't use the DMA allocation, delete it. */
2523 if (next_config_rom != NULL)
2524 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2525 next_config_rom, next_config_rom_bus);
2526
2527 /*
2528 * Now initiate a bus reset to have the changes take
2529 * effect. We clean up the old config rom memory and DMA
2530 * mappings in bus_reset_work(), since the OHCI
2531 * controller could need to access it before the bus reset
2532 * takes effect.
2533 */
2534
2535 fw_schedule_bus_reset(&ohci->card, true, true);
2536
2537 return 0;
2538}
2539
2540static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
2541{
2542 struct fw_ohci *ohci = fw_ohci(card);
2543
2544 at_context_transmit(&ohci->at_request_ctx, packet);
2545}
2546
2547static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
2548{
2549 struct fw_ohci *ohci = fw_ohci(card);
2550
2551 at_context_transmit(&ohci->at_response_ctx, packet);
2552}
2553
2554static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
2555{
2556 struct fw_ohci *ohci = fw_ohci(card);
2557 struct context *ctx = &ohci->at_request_ctx;
2558 struct driver_data *driver_data = packet->driver_data;
2559 int ret = -ENOENT;
2560
2561 tasklet_disable(&ctx->tasklet);
2562
2563 if (packet->ack != 0)
2564 goto out;
2565
2566 if (packet->payload_mapped)
2567 dma_unmap_single(ohci->card.device, packet->payload_bus,
2568 packet->payload_length, DMA_TO_DEVICE);
2569
2570 log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
2571 driver_data->packet = NULL;
2572 packet->ack = RCODE_CANCELLED;
2573 packet->callback(packet, &ohci->card, packet->ack);
2574 ret = 0;
2575 out:
2576 tasklet_enable(&ctx->tasklet);
2577
2578 return ret;
2579}
2580
2581static int ohci_enable_phys_dma(struct fw_card *card,
2582 int node_id, int generation)
2583{
2584 struct fw_ohci *ohci = fw_ohci(card);
2585 unsigned long flags;
2586 int n, ret = 0;
2587
2588 if (param_remote_dma)
2589 return 0;
2590
2591 /*
2592 * FIXME: Make sure this bitmask is cleared when we clear the busReset
2593 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
2594 */
2595
2596 spin_lock_irqsave(&ohci->lock, flags);
2597
2598 if (ohci->generation != generation) {
2599 ret = -ESTALE;
2600 goto out;
2601 }
2602
2603 /*
2604 * Note, if the node ID contains a non-local bus ID, physical DMA is
2605 * enabled for _all_ nodes on remote buses.
2606 */
2607
2608 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
2609 if (n < 32)
2610 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
2611 else
2612 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
2613
2614 flush_writes(ohci);
2615 out:
2616 spin_unlock_irqrestore(&ohci->lock, flags);
2617
2618 return ret;
2619}
2620
2621static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
2622{
2623 struct fw_ohci *ohci = fw_ohci(card);
2624 unsigned long flags;
2625 u32 value;
2626
2627 switch (csr_offset) {
2628 case CSR_STATE_CLEAR:
2629 case CSR_STATE_SET:
2630 if (ohci->is_root &&
2631 (reg_read(ohci, OHCI1394_LinkControlSet) &
2632 OHCI1394_LinkControl_cycleMaster))
2633 value = CSR_STATE_BIT_CMSTR;
2634 else
2635 value = 0;
2636 if (ohci->csr_state_setclear_abdicate)
2637 value |= CSR_STATE_BIT_ABDICATE;
2638
2639 return value;
2640
2641 case CSR_NODE_IDS:
2642 return reg_read(ohci, OHCI1394_NodeID) << 16;
2643
2644 case CSR_CYCLE_TIME:
2645 return get_cycle_time(ohci);
2646
2647 case CSR_BUS_TIME:
2648 /*
2649 * We might be called just after the cycle timer has wrapped
2650 * around but just before the cycle64Seconds handler, so we
2651 * better check here, too, if the bus time needs to be updated.
2652 */
2653 spin_lock_irqsave(&ohci->lock, flags);
2654 value = update_bus_time(ohci);
2655 spin_unlock_irqrestore(&ohci->lock, flags);
2656 return value;
2657
2658 case CSR_BUSY_TIMEOUT:
2659 value = reg_read(ohci, OHCI1394_ATRetries);
2660 return (value >> 4) & 0x0ffff00f;
2661
2662 case CSR_PRIORITY_BUDGET:
2663 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
2664 (ohci->pri_req_max << 8);
2665
2666 default:
2667 WARN_ON(1);
2668 return 0;
2669 }
2670}
2671
2672static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
2673{
2674 struct fw_ohci *ohci = fw_ohci(card);
2675 unsigned long flags;
2676
2677 switch (csr_offset) {
2678 case CSR_STATE_CLEAR:
2679 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2680 reg_write(ohci, OHCI1394_LinkControlClear,
2681 OHCI1394_LinkControl_cycleMaster);
2682 flush_writes(ohci);
2683 }
2684 if (value & CSR_STATE_BIT_ABDICATE)
2685 ohci->csr_state_setclear_abdicate = false;
2686 break;
2687
2688 case CSR_STATE_SET:
2689 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2690 reg_write(ohci, OHCI1394_LinkControlSet,
2691 OHCI1394_LinkControl_cycleMaster);
2692 flush_writes(ohci);
2693 }
2694 if (value & CSR_STATE_BIT_ABDICATE)
2695 ohci->csr_state_setclear_abdicate = true;
2696 break;
2697
2698 case CSR_NODE_IDS:
2699 reg_write(ohci, OHCI1394_NodeID, value >> 16);
2700 flush_writes(ohci);
2701 break;
2702
2703 case CSR_CYCLE_TIME:
2704 reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
2705 reg_write(ohci, OHCI1394_IntEventSet,
2706 OHCI1394_cycleInconsistent);
2707 flush_writes(ohci);
2708 break;
2709
2710 case CSR_BUS_TIME:
2711 spin_lock_irqsave(&ohci->lock, flags);
2712 ohci->bus_time = (update_bus_time(ohci) & 0x40) |
2713 (value & ~0x7f);
2714 spin_unlock_irqrestore(&ohci->lock, flags);
2715 break;
2716
2717 case CSR_BUSY_TIMEOUT:
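/*
 * Replicate the 4-bit retry limit into all three ATRetries retry
 * fields and move the cycle limit up into bits 28-16.
 */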
2718 value = (value & 0xf) | ((value & 0xf) << 4) |
2719 ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
2720 reg_write(ohci, OHCI1394_ATRetries, value);
2721 flush_writes(ohci);
2722 break;
2723
2724 case CSR_PRIORITY_BUDGET:
2725 reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
2726 flush_writes(ohci);
2727 break;
2728
2729 default:
2730 WARN_ON(1);
2731 break;
2732 }
2733}
2734
2735static void flush_iso_completions(struct iso_context *ctx)
2736{
2737 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
2738 ctx->header_length, ctx->header,
2739 ctx->base.callback_data);
2740 ctx->header_length = 0;
2741}
2742
2743static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
2744{
2745 u32 *ctx_hdr;
2746
2747 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
2748 if (ctx->base.drop_overflow_headers)
2749 return;
2750 flush_iso_completions(ctx);
2751 }
2752
2753 ctx_hdr = ctx->header + ctx->header_length;
2754 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
2755
2756 /*
2757 * The two iso header quadlets are byteswapped to little
2758 * endian by the controller, but we want to present them
2759 * as big endian for consistency with the bus endianness.
2760 */
2761 if (ctx->base.header_size > 0)
2762 ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */
2763 if (ctx->base.header_size > 4)
2764 ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
2765 if (ctx->base.header_size > 8)
2766 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
2767 ctx->header_length += ctx->base.header_size;
2768}
2769
2770static int handle_ir_packet_per_buffer(struct context *context,
2771 struct descriptor *d,
2772 struct descriptor *last)
2773{
2774 struct iso_context *ctx =
2775 container_of(context, struct iso_context, context);
2776 struct descriptor *pd;
2777 u32 buffer_dma;
2778
2779 for (pd = d; pd <= last; pd++)
2780 if (pd->transfer_status)
2781 break;
2782 if (pd > last)
2783 /* Descriptor(s) not done yet, stop iteration */
2784 return 0;
2785
2786 while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
2787 d++;
2788 buffer_dma = le32_to_cpu(d->data_address);
2789 dma_sync_single_range_for_cpu(context->ohci->card.device,
2790 buffer_dma & PAGE_MASK,
2791 buffer_dma & ~PAGE_MASK,
2792 le16_to_cpu(d->req_count),
2793 DMA_FROM_DEVICE);
2794 }
2795
2796 copy_iso_headers(ctx, (u32 *) (last + 1));
2797
2798 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
2799 flush_iso_completions(ctx);
2800
2801 return 1;
2802}
2803
2804/* d == last because each descriptor block is only a single descriptor. */
2805static int handle_ir_buffer_fill(struct context *context,
2806 struct descriptor *d,
2807 struct descriptor *last)
2808{
2809 struct iso_context *ctx =
2810 container_of(context, struct iso_context, context);
2811 unsigned int req_count, res_count, completed;
2812 u32 buffer_dma;
2813
2814 req_count = le16_to_cpu(last->req_count);
2815 res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
2816 completed = req_count - res_count;
2817 buffer_dma = le32_to_cpu(last->data_address);
2818
2819 if (completed > 0) {
2820 ctx->mc_buffer_bus = buffer_dma;
2821 ctx->mc_completed = completed;
2822 }
2823
2824 if (res_count != 0)
2825 /* Descriptor(s) not done yet, stop iteration */
2826 return 0;
2827
2828 dma_sync_single_range_for_cpu(context->ohci->card.device,
2829 buffer_dma & PAGE_MASK,
2830 buffer_dma & ~PAGE_MASK,
2831 completed, DMA_FROM_DEVICE);
2832
2833 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
2834 ctx->base.callback.mc(&ctx->base,
2835 buffer_dma + completed,
2836 ctx->base.callback_data);
2837 ctx->mc_completed = 0;
2838 }
2839
2840 return 1;
2841}
2842
2843static void flush_ir_buffer_fill(struct iso_context *ctx)
2844{
2845 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
2846 ctx->mc_buffer_bus & PAGE_MASK,
2847 ctx->mc_buffer_bus & ~PAGE_MASK,
2848 ctx->mc_completed, DMA_FROM_DEVICE);
2849
2850 ctx->base.callback.mc(&ctx->base,
2851 ctx->mc_buffer_bus + ctx->mc_completed,
2852 ctx->base.callback_data);
2853 ctx->mc_completed = 0;
2854}
2855
2856static inline void sync_it_packet_for_cpu(struct context *context,
2857 struct descriptor *pd)
2858{
2859 __le16 control;
2860 u32 buffer_dma;
2861
2862 /* only packets beginning with OUTPUT_MORE* have data buffers */
2863 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
2864 return;
2865
2866 /* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
2867 pd += 2;
2868
2869 /*
2870 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
2871 * data buffer is in the context program's coherent page and must not
2872 * be synced.
2873 */
2874 if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
2875 (context->current_bus & PAGE_MASK)) {
2876 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
2877 return;
2878 pd++;
2879 }
2880
2881 do {
2882 buffer_dma = le32_to_cpu(pd->data_address);
2883 dma_sync_single_range_for_cpu(context->ohci->card.device,
2884 buffer_dma & PAGE_MASK,
2885 buffer_dma & ~PAGE_MASK,
2886 le16_to_cpu(pd->req_count),
2887 DMA_TO_DEVICE);
2888 control = pd->control;
2889 pd++;
2890 } while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
2891}
2892
2893static int handle_it_packet(struct context *context,
2894 struct descriptor *d,
2895 struct descriptor *last)
2896{
2897 struct iso_context *ctx =
2898 container_of(context, struct iso_context, context);
2899 struct descriptor *pd;
2900 __be32 *ctx_hdr;
2901
2902 for (pd = d; pd <= last; pd++)
2903 if (pd->transfer_status)
2904 break;
2905 if (pd > last)
2906 /* Descriptor(s) not done yet, stop iteration */
2907 return 0;
2908
2909 sync_it_packet_for_cpu(context, d);
2910
2911 if (ctx->header_length + 4 > PAGE_SIZE) {
2912 if (ctx->base.drop_overflow_headers)
2913 return 1;
2914 flush_iso_completions(ctx);
2915 }
2916
2917 ctx_hdr = ctx->header + ctx->header_length;
2918 ctx->last_timestamp = le16_to_cpu(last->res_count);
2919 /* Present this value as big-endian to match the receive code */
2920 *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
2921 le16_to_cpu(pd->res_count));
2922 ctx->header_length += 4;
2923
2924 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
2925 flush_iso_completions(ctx);
2926
2927 return 1;
2928}
2929
2930static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
2931{
2932 u32 hi = channels >> 32, lo = channels;
2933
2934 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
2935 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
2936 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
2937 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
2938 mmiowb();
2939 ohci->mc_channels = channels;
2940}
2941
2942static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
2943 int type, int channel, size_t header_size)
2944{
2945 struct fw_ohci *ohci = fw_ohci(card);
2946 struct iso_context *uninitialized_var(ctx);
2947 descriptor_callback_t uninitialized_var(callback);
2948 u64 *uninitialized_var(channels);
2949 u32 *uninitialized_var(mask), uninitialized_var(regs);
2950 int index, ret = -EBUSY;
2951
2952 spin_lock_irq(&ohci->lock);
2953
2954 switch (type) {
2955 case FW_ISO_CONTEXT_TRANSMIT:
2956 mask = &ohci->it_context_mask;
2957 callback = handle_it_packet;
2958 index = ffs(*mask) - 1;
2959 if (index >= 0) {
2960 *mask &= ~(1 << index);
2961 regs = OHCI1394_IsoXmitContextBase(index);
2962 ctx = &ohci->it_context_list[index];
2963 }
2964 break;
2965
2966 case FW_ISO_CONTEXT_RECEIVE:
2967 channels = &ohci->ir_context_channels;
2968 mask = &ohci->ir_context_mask;
2969 callback = handle_ir_packet_per_buffer;
2970 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
2971 if (index >= 0) {
2972 *channels &= ~(1ULL << channel);
2973 *mask &= ~(1 << index);
2974 regs = OHCI1394_IsoRcvContextBase(index);
2975 ctx = &ohci->ir_context_list[index];
2976 }
2977 break;
2978
2979 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
2980 mask = &ohci->ir_context_mask;
2981 callback = handle_ir_buffer_fill;
2982 index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
2983 if (index >= 0) {
2984 ohci->mc_allocated = true;
2985 *mask &= ~(1 << index);
2986 regs = OHCI1394_IsoRcvContextBase(index);
2987 ctx = &ohci->ir_context_list[index];
2988 }
2989 break;
2990
2991 default:
2992 index = -1;
2993 ret = -ENOSYS;
2994 }
2995
2996 spin_unlock_irq(&ohci->lock);
2997
2998 if (index < 0)
2999 return ERR_PTR(ret);
3000
3001 memset(ctx, 0, sizeof(*ctx));
3002 ctx->header_length = 0;
3003 ctx->header = (void *) __get_free_page(GFP_KERNEL);
3004 if (ctx->header == NULL) {
3005 ret = -ENOMEM;
3006 goto out;
3007 }
3008 ret = context_init(&ctx->context, ohci, regs, callback);
3009 if (ret < 0)
3010 goto out_with_header;
3011
3012 if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
3013 set_multichannel_mask(ohci, 0);
3014 ctx->mc_completed = 0;
3015 }
3016
3017 return &ctx->base;
3018
3019 out_with_header:
3020 free_page((unsigned long)ctx->header);
3021 out:
3022 spin_lock_irq(&ohci->lock);
3023
3024 switch (type) {
3025 case FW_ISO_CONTEXT_RECEIVE:
3026 *channels |= 1ULL << channel;
3027 break;
3028
3029 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3030 ohci->mc_allocated = false;
3031 break;
3032 }
3033 *mask |= 1 << index;
3034
3035 spin_unlock_irq(&ohci->lock);
3036
3037 return ERR_PTR(ret);
3038}
3039
3040static int ohci_start_iso(struct fw_iso_context *base,
3041 s32 cycle, u32 sync, u32 tags)
3042{
3043 struct iso_context *ctx = container_of(base, struct iso_context, base);
3044 struct fw_ohci *ohci = ctx->context.ohci;
3045 u32 control = IR_CONTEXT_ISOCH_HEADER, match;
3046 int index;
3047
3048 /* the controller cannot start without any queued packets */
3049 if (ctx->context.last->branch_address == 0)
3050 return -ENODATA;
3051
3052 switch (ctx->base.type) {
3053 case FW_ISO_CONTEXT_TRANSMIT:
3054 index = ctx - ohci->it_context_list;
3055 match = 0;
3056 if (cycle >= 0)
3057 match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
3058 (cycle & 0x7fff) << 16;
3059
3060 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
3061 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
3062 context_run(&ctx->context, match);
3063 break;
3064
3065 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3066 control |= IR_CONTEXT_BUFFER_FILL | IR_CONTEXT_MULTI_CHANNEL_MODE;
3067 /* fall through */
3068 case FW_ISO_CONTEXT_RECEIVE:
3069 index = ctx - ohci->ir_context_list;
3070 match = (tags << 28) | (sync << 8) | ctx->base.channel;
3071 if (cycle >= 0) {
3072 match |= (cycle & 0x07fff) << 12;
3073 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
3074 }
3075
3076 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
3077 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
3078 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
3079 context_run(&ctx->context, control);
3080
3081 ctx->sync = sync;
3082 ctx->tags = tags;
3083
3084 break;
3085 }
3086
3087 return 0;
3088}
3089
3090static int ohci_stop_iso(struct fw_iso_context *base)
3091{
3092 struct fw_ohci *ohci = fw_ohci(base->card);
3093 struct iso_context *ctx = container_of(base, struct iso_context, base);
3094 int index;
3095
3096 switch (ctx->base.type) {
3097 case FW_ISO_CONTEXT_TRANSMIT:
3098 index = ctx - ohci->it_context_list;
3099 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
3100 break;
3101
3102 case FW_ISO_CONTEXT_RECEIVE:
3103 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3104 index = ctx - ohci->ir_context_list;
3105 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
3106 break;
3107 }
3108 flush_writes(ohci);
3109 context_stop(&ctx->context);
3110 tasklet_kill(&ctx->context.tasklet);
3111
3112 return 0;
3113}
3114
3115static void ohci_free_iso_context(struct fw_iso_context *base)
3116{
3117 struct fw_ohci *ohci = fw_ohci(base->card);
3118 struct iso_context *ctx = container_of(base, struct iso_context, base);
3119 unsigned long flags;
3120 int index;
3121
3122 ohci_stop_iso(base);
3123 context_release(&ctx->context);
3124 free_page((unsigned long)ctx->header);
3125
3126 spin_lock_irqsave(&ohci->lock, flags);
3127
3128 switch (base->type) {
3129 case FW_ISO_CONTEXT_TRANSMIT:
3130 index = ctx - ohci->it_context_list;
3131 ohci->it_context_mask |= 1 << index;
3132 break;
3133
3134 case FW_ISO_CONTEXT_RECEIVE:
3135 index = ctx - ohci->ir_context_list;
3136 ohci->ir_context_mask |= 1 << index;
3137 ohci->ir_context_channels |= 1ULL << base->channel;
3138 break;
3139
3140 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3141 index = ctx - ohci->ir_context_list;
3142 ohci->ir_context_mask |= 1 << index;
3143 ohci->ir_context_channels |= ohci->mc_channels;
3144 ohci->mc_channels = 0;
3145 ohci->mc_allocated = false;
3146 break;
3147 }
3148
3149 spin_unlock_irqrestore(&ohci->lock, flags);
3150}
3151
3152static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
3153{
3154 struct fw_ohci *ohci = fw_ohci(base->card);
3155 unsigned long flags;
3156 int ret;
3157
3158 switch (base->type) {
3159 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3160
3161 spin_lock_irqsave(&ohci->lock, flags);
3162
3163 /* Don't allow multichannel to grab other contexts' channels. */
3164 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
3165 *channels = ohci->ir_context_channels;
3166 ret = -EBUSY;
3167 } else {
3168 set_multichannel_mask(ohci, *channels);
3169 ret = 0;
3170 }
3171
3172 spin_unlock_irqrestore(&ohci->lock, flags);
3173
3174 break;
3175 default:
3176 ret = -EINVAL;
3177 }
3178
3179 return ret;
3180}
3181
3182#ifdef CONFIG_PM
3183static void ohci_resume_iso_dma(struct fw_ohci *ohci)
3184{
3185 int i;
3186 struct iso_context *ctx;
3187
3188 for (i = 0 ; i < ohci->n_ir ; i++) {
3189 ctx = &ohci->ir_context_list[i];
3190 if (ctx->context.running)
3191 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
3192 }
3193
3194 for (i = 0 ; i < ohci->n_it ; i++) {
3195 ctx = &ohci->it_context_list[i];
3196 if (ctx->context.running)
3197 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
3198 }
3199}
3200#endif
3201
3202static int queue_iso_transmit(struct iso_context *ctx,
3203 struct fw_iso_packet *packet,
3204 struct fw_iso_buffer *buffer,
3205 unsigned long payload)
3206{
3207 struct descriptor *d, *last, *pd;
3208 struct fw_iso_packet *p;
3209 __le32 *header;
3210 dma_addr_t d_bus, page_bus;
3211 u32 z, header_z, payload_z, irq;
3212 u32 payload_index, payload_end_index, next_page_index;
3213 int page, end_page, i, length, offset;
3214
3215 p = packet;
3216 payload_index = payload;
3217
3218 if (p->skip)
3219 z = 1;
3220 else
3221 z = 2;
3222 if (p->header_length > 0)
3223 z++;
3224
3225 /* Determine the first page the payload isn't contained in. */
3226 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
3227 if (p->payload_length > 0)
3228 payload_z = end_page - (payload_index >> PAGE_SHIFT);
3229 else
3230 payload_z = 0;
3231
3232 z += payload_z;
3233
3234 /* Get header size in number of descriptors. */
3235 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
3236
3237 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
3238 if (d == NULL)
3239 return -ENOMEM;
3240
3241 if (!p->skip) {
3242 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
3243 d[0].req_count = cpu_to_le16(8);
3244 /*
3245 * Link the skip address to this descriptor itself. This causes
3246 * a context to skip a cycle whenever lost cycles or FIFO
3247 * overruns occur, without dropping the data. The application
3248 * should then decide whether this is an error condition or not.
3249 * FIXME: Make the context's cycle-lost behaviour configurable?
3250 */
3251 d[0].branch_address = cpu_to_le32(d_bus | z);
3252
3253 header = (__le32 *) &d[1];
3254 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
3255 IT_HEADER_TAG(p->tag) |
3256 IT_HEADER_TCODE(TCODE_STREAM_DATA) |
3257 IT_HEADER_CHANNEL(ctx->base.channel) |
3258 IT_HEADER_SPEED(ctx->base.speed));
3259 header[1] =
3260 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
3261 p->payload_length));
3262 }
3263
3264 if (p->header_length > 0) {
3265 d[2].req_count = cpu_to_le16(p->header_length);
3266 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
3267 memcpy(&d[z], p->header, p->header_length);
3268 }
3269
3270 pd = d + z - payload_z;
3271 payload_end_index = payload_index + p->payload_length;
3272 for (i = 0; i < payload_z; i++) {
3273 page = payload_index >> PAGE_SHIFT;
3274 offset = payload_index & ~PAGE_MASK;
3275 next_page_index = (page + 1) << PAGE_SHIFT;
3276 length =
3277 min(next_page_index, payload_end_index) - payload_index;
3278 pd[i].req_count = cpu_to_le16(length);
3279
3280 page_bus = page_private(buffer->pages[page]);
3281 pd[i].data_address = cpu_to_le32(page_bus + offset);
3282
3283 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3284 page_bus, offset, length,
3285 DMA_TO_DEVICE);
3286
3287 payload_index += length;
3288 }
3289
3290 if (p->interrupt)
3291 irq = DESCRIPTOR_IRQ_ALWAYS;
3292 else
3293 irq = DESCRIPTOR_NO_IRQ;
3294
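/*
 * With z == 2 the packet consists only of the immediate descriptor
 * block, so d[0] itself doubles as the last descriptor.
 */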
3295 last = z == 2 ? d : d + z - 1;
3296 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
3297 DESCRIPTOR_STATUS |
3298 DESCRIPTOR_BRANCH_ALWAYS |
3299 irq);
3300
3301 context_append(&ctx->context, d, z, header_z);
3302
3303 return 0;
3304}
3305
3306static int queue_iso_packet_per_buffer(struct iso_context *ctx,
3307 struct fw_iso_packet *packet,
3308 struct fw_iso_buffer *buffer,
3309 unsigned long payload)
3310{
3311 struct device *device = ctx->context.ohci->card.device;
3312 struct descriptor *d, *pd;
3313 dma_addr_t d_bus, page_bus;
3314 u32 z, header_z, rest;
3315 int i, j, length;
3316 int page, offset, packet_count, header_size, payload_per_buffer;
3317
3318 /*
3319 * The OHCI controller puts the isochronous header and trailer in the
3320 * buffer, so we need at least 8 bytes.
3321 */
3322 packet_count = packet->header_length / ctx->base.header_size;
3323 header_size = max(ctx->base.header_size, (size_t)8);
3324
3325 /* Get header size in number of descriptors. */
3326 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
3327 page = payload >> PAGE_SHIFT;
3328 offset = payload & ~PAGE_MASK;
3329 payload_per_buffer = packet->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
					 DESCRIPTOR_INPUT_MORE);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count = cpu_to_le16(header_size);
		d->res_count = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		pd = d;
		for (j = 1; j < z; j++) {
			pd++;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			dma_sync_single_range_for_device(device, page_bus,
							 offset, length,
							 DMA_FROM_DEVICE);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}

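/*
 * Queue a packet for a multichannel IR context in buffer-fill mode: the
 * controller packs received packets back to back into the buffer, so a
 * single INPUT_MORE descriptor per page suffices, and each descriptor is
 * appended to the context individually.
 */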
static int queue_iso_buffer_fill(struct iso_context *ctx,
				 struct fw_iso_packet *packet,
				 struct fw_iso_buffer *buffer,
				 unsigned long payload)
{
	struct descriptor *d;
	dma_addr_t d_bus, page_bus;
	int page, offset, rest, z, i, length;

	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest = packet->payload_length;

	/* We need one descriptor for each page in the buffer. */
	z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);

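	/* Buffer-fill mode requires quadlet-aligned offsets and lengths. */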
	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
		return -EFAULT;

	for (i = 0; i < z; i++) {
		d = context_get_descriptors(&ctx->context, 1, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					 DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		if (packet->interrupt && i == z - 1)
			d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;
		d->req_count = cpu_to_le16(length);
		d->res_count = d->req_count;
		d->transfer_status = 0;

		page_bus = page_private(buffer->pages[page]);
		d->data_address = cpu_to_le32(page_bus + offset);

		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
						 page_bus, offset, length,
						 DMA_FROM_DEVICE);

		rest -= length;
		offset = 0;
		page++;

		context_append(&ctx->context, d, 1, 0);
	}

	return 0;
}

static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int ret = -ENOSYS;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		ret = queue_iso_transmit(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE:
		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
		break;
	}
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return ret;
}

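/*
 * Setting the context's WAKE bit makes the controller re-fetch the branch
 * address of the current descriptor, so it notices descriptors appended
 * while it had stopped at the end of its program.
 */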
static void ohci_flush_queue_iso(struct fw_iso_context *base)
{
	struct context *ctx =
		&container_of(base, struct iso_context, base)->context;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

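/*
 * Run any pending completions synchronously.  The context tasklet is
 * disabled while its handler is invoked directly here, and the
 * flushing_completions bit keeps completion callbacks from re-entering
 * this path.
 */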
static int ohci_flush_iso_completions(struct fw_iso_context *base)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int ret = 0;

	tasklet_disable(&ctx->context.tasklet);

	if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
		context_tasklet((unsigned long)&ctx->context);

		switch (base->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
		case FW_ISO_CONTEXT_RECEIVE:
			if (ctx->header_length != 0)
				flush_iso_completions(ctx);
			break;
		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (ctx->mc_completed != 0)
				flush_ir_buffer_fill(ctx);
			break;
		default:
			ret = -ENOSYS;
		}

		clear_bit_unlock(0, &ctx->flushing_completions);
		smp_mb__after_atomic();
	}

	tasklet_enable(&ctx->context.tasklet);

	return ret;
}

static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.read_phy_reg		= ohci_read_phy_reg,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.read_csr		= ohci_read_csr,
	.write_csr		= ohci_write_csr,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.set_iso_channels	= ohci_set_iso_channels,
	.queue_iso		= ohci_queue_iso,
	.flush_queue_iso	= ohci_flush_queue_iso,
	.flush_iso_completions	= ohci_flush_iso_completions,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

#ifdef CONFIG_PPC_PMAC
static void pmac_ohci_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void pmac_ohci_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */

static int pci_probe(struct pci_dev *dev,
		     const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int i, err;
	size_t size;

	if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
		dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
		return -ENOSYS;
	}

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		err = -ENOMEM;
		goto fail;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	pmac_ohci_on(dev);

	err = pci_enable_device(dev);
	if (err) {
		dev_err(&dev->dev, "failed to enable OHCI hardware\n");
		goto fail_free;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);
	mutex_init(&ohci->phy_reg_mutex);

	INIT_WORK(&ohci->bus_reset_work, bus_reset_work);

	if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
		ohci_err(ohci, "invalid MMIO resource\n");
		err = -ENXIO;
		goto fail_disable;
	}

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		ohci_err(ohci, "MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		ohci_err(ohci, "failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

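	/*
	 * The first matching table entry wins.  PCI_ANY_ID (truncated to
	 * 16 bits) acts as a wildcard, and a listed revision covers all
	 * chip revisions up to and including that value.
	 */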
	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
		if ((ohci_quirks[i].vendor == dev->vendor) &&
		    (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].device == dev->device) &&
		    (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].revision >= dev->revision)) {
			ohci->quirks = ohci_quirks[i].flags;
			break;
		}
	if (param_quirks)
		ohci->quirks = param_quirks;

	/*
	 * Because dma_alloc_coherent() allocates at least one page,
	 * we save space by using a common buffer for the AR request/
	 * response descriptors and the self IDs buffer.
	 */
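	/*
	 * Layout of the shared page:
	 *   [0, PAGE_SIZE/4)            AR request context descriptors
	 *   [PAGE_SIZE/4, PAGE_SIZE/2)  AR response context descriptors
	 *   [PAGE_SIZE/2, PAGE_SIZE)    self-ID receive buffer
	 * The BUILD_BUG_ONs below guarantee that each region is big enough.
	 */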
	BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
	BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
	ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
					       PAGE_SIZE,
					       &ohci->misc_buffer_bus,
					       GFP_KERNEL);
	if (!ohci->misc_buffer) {
		err = -ENOMEM;
		goto fail_iounmap;
	}

	err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
			      OHCI1394_AsReqRcvContextControlSet);
	if (err < 0)
		goto fail_misc_buf;

	err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
			      OHCI1394_AsRspRcvContextControlSet);
	if (err < 0)
		goto fail_arreq_ctx;

	err = context_init(&ohci->at_request_ctx, ohci,
			   OHCI1394_AsReqTrContextControlSet, handle_at_packet);
	if (err < 0)
		goto fail_arrsp_ctx;

	err = context_init(&ohci->at_response_ctx, ohci,
			   OHCI1394_AsRspTrContextControlSet, handle_at_packet);
	if (err < 0)
		goto fail_atreq_ctx;

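	/*
	 * Discover how many isochronous contexts the controller implements
	 * by setting every interrupt mask bit and reading back which ones
	 * stick; bits for unimplemented contexts read as zero.
	 */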
	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	ohci->ir_context_mask = ohci->ir_context_support;
	ohci->n_ir = hweight32(ohci->ir_context_mask);
	size = sizeof(struct iso_context) * ohci->n_ir;
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	/* JMicron JMB38x often shows 0 at first read; just ignore it */
	if (!ohci->it_context_support) {
		ohci_notice(ohci, "overriding IsoXmitIntMask\n");
		ohci->it_context_support = 0xf;
	}
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	ohci->it_context_mask = ohci->it_context_support;
	ohci->n_it = hweight32(ohci->it_context_mask);
	size = sizeof(struct iso_context) * ohci->n_it;
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2;
	ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	if (!(ohci->quirks & QUIRK_NO_MSI))
		pci_enable_msi(dev);
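	/*
	 * MSI vectors are exclusive to this device; IRQF_SHARED is only
	 * needed when falling back to a legacy INTx line.
	 */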
	if (request_irq(dev->irq, irq_handler,
			pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
			ohci_driver_name, ohci)) {
		ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
		err = -EIO;
		goto fail_msi;
	}

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err)
		goto fail_irq;

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	ohci_notice(ohci,
		    "added OHCI v%x.%x device as card %d, "
		    "%d IR + %d IT contexts, quirks 0x%x%s\n",
		    version >> 16, version & 0xff, ohci->card.index,
		    ohci->n_ir, ohci->n_it, ohci->quirks,
		    reg_read(ohci, OHCI1394_PhyUpperBound) ?
			", physUB" : "");

	return 0;

 fail_irq:
	free_irq(dev->irq, ohci);
 fail_msi:
	pci_disable_msi(dev);
 fail_contexts:
	kfree(ohci->ir_context_list);
	kfree(ohci->it_context_list);
	context_release(&ohci->at_response_ctx);
 fail_atreq_ctx:
	context_release(&ohci->at_request_ctx);
 fail_arrsp_ctx:
	ar_context_release(&ohci->ar_response_ctx);
 fail_arreq_ctx:
	ar_context_release(&ohci->ar_request_ctx);
 fail_misc_buf:
	dma_free_coherent(ohci->card.device, PAGE_SIZE,
			  ohci->misc_buffer, ohci->misc_buffer_bus);
 fail_iounmap:
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_free:
	kfree(ohci);
	pmac_ohci_off(dev);
 fail:
	return err;
}

static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);

	/*
	 * If the removal is happening from the suspend state, LPS won't be
	 * enabled and host registers (e.g., IntMaskClear) won't be accessible.
	 */
	if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
		reg_write(ohci, OHCI1394_IntMaskClear, ~0);
		flush_writes(ohci);
	}
	cancel_work_sync(&ohci->bus_reset_work);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);

	if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->next_config_rom, ohci->next_config_rom_bus);
	if (ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
	ar_context_release(&ohci->ar_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	dma_free_coherent(ohci->card.device, PAGE_SIZE,
			  ohci->misc_buffer, ohci->misc_buffer_bus);
	context_release(&ohci->at_request_ctx);
	context_release(&ohci->at_response_ctx);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_disable_msi(dev);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	kfree(ohci);
	pmac_ohci_off(dev);

	dev_notice(&dev->dev, "removed fw-ohci device\n");
}

#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	err = pci_save_state(dev);
	if (err) {
		ohci_err(ohci, "pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
	pmac_ohci_off(dev);

	return 0;
}

static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	pmac_ohci_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		ohci_err(ohci, "pci_enable_device failed\n");
		return err;
	}

	/* Some systems don't set up the GUID register on resume from RAM */
	if (!reg_read(ohci, OHCI1394_GUIDLo) &&
	    !reg_read(ohci, OHCI1394_GUIDHi)) {
		reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
		reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
	}

	err = ohci_enable(&ohci->card, NULL, 0);
	if (err)
		return err;

	ohci_resume_iso_dma(ohci);

	return 0;
}
#endif

static const struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};

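/*
 * The self-ID workqueue is marked WQ_MEM_RECLAIM so that bus-reset
 * handling can make forward progress even under memory pressure.
 */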
static int __init fw_ohci_init(void)
{
	selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
	if (!selfid_workqueue)
		return -ENOMEM;

	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
	destroy_workqueue(selfid_workqueue);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
MODULE_ALIAS("ohci1394");