/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the pci device that allows us to send and
 * receive frames from the thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dmi.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")


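/*
 * Interrupt bits are assigned one per ring: TX ring N uses bit N, RX ring N
 * uses bit hop_count + N. ring_interrupt_index() computes that bit number,
 * ring_interrupt_active() then selects the matching dword in the
 * REG_RING_INTERRUPT_BASE array and sets or clears the bit.
 */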
static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}

/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;
	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_info(&ring->nhi->pdev->dev,
		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		 active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
					 "interrupt for %s %d is already %s\n",
					 RING_TYPE(ring), ring->hop,
					 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}

/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}
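
/*
 * Each hop owns a 16 byte block of descriptor ring registers and a 32 byte
 * block of options registers in the TX/RX register files, hence the * 16
 * and * 32 strides above.
 */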

static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite16(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

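/*
 * head is the next descriptor to be written, tail the next one to complete.
 * One slot is always left unused, so head == tail means "empty" and
 * (head + 1) % size == tail means "full".
 */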
static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}

/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
	}
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	LIST_HEAD(done);
	mutex_lock(&ring->lock);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
			if (frame->sof != 0)
				dev_WARN(&ring->nhi->pdev->dev,
					 "%s %d got unexpected SOF: %#x\n",
					 RING_TYPE(ring), ring->hop,
					 frame->sof);
			/*
			 * known flags:
			 * raw not enabled, interrupt not set: 0x2=0010
			 * raw enabled: 0xa=1010
			 * raw not enabled: 0xb=1011
			 * partial frame (>MAX_FRAME_SIZE): 0xe=1110
			 */
			if (frame->flags != 0xa)
				dev_WARN(&ring->nhi->pdev->dev,
					 "%s %d got unexpected flags: %#x\n",
					 RING_TYPE(ring), ring->hop,
					 frame->flags);
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		frame->callback(ring, frame, canceled);
	}
}

int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	int ret = 0;
	mutex_lock(&ring->lock);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	mutex_unlock(&ring->lock);
	return ret;
}
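
/*
 * Frames normally reach __ring_enqueue() through the ring_rx()/ring_tx()
 * helpers mentioned in the ring_stop() comment below. A rough usage sketch
 * (my_callback, dma and rx_ring are hypothetical caller-side names):
 *
 *	static void my_callback(struct tb_ring *ring, struct ring_frame *frame,
 *				bool canceled)
 *	{
 *		// for RX completions frame->size/eof/sof have been filled in
 *	}
 *
 *	frame->buffer_phy = dma;	// bus address of the frame buffer
 *	frame->callback = my_callback;
 *	ring_rx(rx_ring, frame);	// fails with -ESHUTDOWN once stopped
 */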

static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				  bool transmit)
{
	struct tb_ring *ring = NULL;
	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		 transmit ? "TX" : "RX", hop, size);

	mutex_lock(&nhi->lock);
	if (hop >= nhi->hop_count) {
		dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
		goto err;
	}
	if (transmit && nhi->tx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
		goto err;
	} else if (!transmit && nhi->rx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
		goto err;
	}
	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto err;

	mutex_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err;

	if (transmit)
		nhi->tx_rings[hop] = ring;
	else
		nhi->rx_rings[hop] = ring;
	mutex_unlock(&nhi->lock);
	return ring;

err:
	if (ring)
		mutex_destroy(&ring->lock);
	kfree(ring);
	mutex_unlock(&nhi->lock);
	return NULL;
}

struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
{
	return ring_alloc(nhi, hop, size, true);
}

struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
{
	return ring_alloc(nhi, hop, size, false);
}
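
/*
 * Typical life cycle for a user of this ring API (illustrative sketch, error
 * handling omitted; hop 0 and size 10 are just example values):
 *
 *	struct tb_ring *tx = ring_alloc_tx(nhi, 0, 10);
 *	struct tb_ring *rx = ring_alloc_rx(nhi, 0, 10);
 *	ring_start(tx);
 *	ring_start(rx);
 *	... enqueue frames, handle completions in the frame callbacks ...
 *	ring_stop(rx);	// cancels all outstanding frames
 *	ring_stop(tx);
 *	ring_free(rx);	// rings must be stopped before freeing
 *	ring_free(tx);
 */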

/**
 * ring_start() - enable a ring
 *
 * Must not be invoked in parallel with ring_stop().
 */
void ring_start(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
		 RING_TYPE(ring), ring->hop);

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring,
				      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
	} else {
		ring_iowrite32desc(ring,
				   (TB_FRAME_SIZE << 16) | ring->size, 12);
		ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
		ring_iowrite32options(ring,
				      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
	}
	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);
}


/**
 * ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to ring_tx/ring_rx will
 * return -ESHUTDOWN until ring_start() has been called again.
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with the canceled argument set to true (on the callback thread). This
 * method returns only after all callback invocations have finished.
 */
void ring_stop(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
		 RING_TYPE(ring), ring->hop);
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}

/*
 * ring_free() - free ring
 *
 * When this method returns all invocations of frame->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void ring_free(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;


	dev_info(&ring->nhi->pdev->dev,
		 "freeing %s %d\n",
		 RING_TYPE(ring),
		 ring->hop);

	mutex_unlock(&ring->nhi->lock);
	/*
	 * ring->work can no longer be scheduled (it is scheduled only by
	 * nhi_interrupt_work and ring_stop). Wait for it to finish before
	 * freeing the ring.
	 */
	flush_work(&ring->work);
	mutex_destroy(&ring->lock);
	kfree(ring);
}

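/*
 * Interrupt path: nhi_msi() only schedules nhi->interrupt_work.
 * nhi_interrupt_work() reads the notification registers, maps the set bits
 * to their rings and schedules each ring's work item; ring_work() then
 * completes the frames and invokes their callbacks.
 */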
static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	mutex_lock(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
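	/*
	 * For example, with hop_count == 12 there are 36 status bits in
	 * total: bits 0-11 signal TX completions, 12-23 RX completions and
	 * 24-35 RX overflows, spread over two consecutive dwords.
	 */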
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}
		/* we do not check ring->running, this is done in ring->work */
		schedule_work(&ring->work);
	}
	mutex_unlock(&nhi->lock);
}

static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}

static int nhi_suspend_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	thunderbolt_suspend(tb);
	return 0;
}

static int nhi_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	thunderbolt_resume(tb);
	return 0;
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;
	dev_info(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
	flush_work(&nhi->interrupt_work);
	mutex_destroy(&nhi->lock);
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pci_enable_msi(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	/* cannot fail - table is allocated by pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	if (nhi->hop_count != 12 && nhi->hop_count != 32)
		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
			 nhi->hop_count);
	INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	nhi_disable_interrupts(nhi); /* In case someone left them on. */
	res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
			       IRQF_NO_SUSPEND, /* must work during _noirq */
			       "thunderbolt", nhi);
	if (res) {
		dev_err(&pdev->dev, "request_irq failed, aborting\n");
		return res;
	}

	mutex_init(&nhi->lock);

	pci_set_master(pdev);

	/* magic value - clock related? */
	iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);

	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
	tb = thunderbolt_alloc_and_start(nhi);
	if (!tb) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		nhi_shutdown(nhi);
		return -EIO;
	}
	pci_set_drvdata(pdev, tb);

	return 0;
}

static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	thunderbolt_shutdown_and_free(tb);
	nhi_shutdown(nhi);
}

/*
 * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
 * the tunnels asap. A corresponding pci quirk blocks the downstream bridges
 * resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_suspend_noirq, /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
	.restore_noirq = nhi_resume_noirq,
};

static struct pci_device_id nhi_ids[] = {
	/*
	 * We have to specify class, the TB bridges use the same device and
	 * vendor (sub)id on gen 1 and gen 2 controllers.
	 */
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{ 0,}
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
	.driver.pm = &nhi_pm_ops,
};

static int __init nhi_init(void)
{
	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
		return -ENOSYS;
	return pci_register_driver(&nhi_driver);
}

static void __exit nhi_unload(void)
{
	pci_unregister_driver(&nhi_driver);
}

module_init(nhi_init);
module_exit(nhi_unload);