// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

#define NO_CLIENT		0xff		/* must be >= MAX_CLIENTS */
static struct ism_client *clients[MAX_CLIENTS];	/* use an array rather than */
						/* a list for fast mapping  */
static u8 max_client;
static DEFINE_MUTEX(clients_lock);
static bool ism_v2_capable;
struct ism_dev_list {
	struct list_head list;
	struct mutex mutex; /* protects ism device list */
};

static struct ism_dev_list ism_dev_list = {
	.list = LIST_HEAD_INIT(ism_dev_list.list),
	.mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
};

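/* Subscribe a client to interrupt and event forwarding from a device. */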
static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
{
	unsigned long flags;

	spin_lock_irqsave(&ism->lock, flags);
	ism->subs[client->id] = client;
	spin_unlock_irqrestore(&ism->lock, flags);
}

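/*
 * Claim a free slot in the clients[] array for the new client, then
 * announce every device found so far via the client's add() callback
 * and enable forwarding to it.
 */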
int ism_register_client(struct ism_client *client)
{
	struct ism_dev *ism;
	int i, rc = -ENOSPC;

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < MAX_CLIENTS; ++i) {
		if (!clients[i]) {
			clients[i] = client;
			client->id = i;
			if (i == max_client)
				max_client++;
			rc = 0;
			break;
		}
	}
	mutex_unlock(&clients_lock);

	if (i < MAX_CLIENTS) {
		/* initialize with all devices that we got so far */
		list_for_each_entry(ism, &ism_dev_list.list, list) {
			ism->priv[i] = NULL;
			client->add(ism);
			ism_setup_forwarding(client, ism);
		}
	}
	mutex_unlock(&ism_dev_list.mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(ism_register_client);

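/*
 * Stop forwarding to the client on all devices and release its slot.
 * Fails with -EBUSY if the client still owns registered DMBs.
 */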
int ism_unregister_client(struct ism_client *client)
{
	struct ism_dev *ism;
	unsigned long flags;
	int rc = 0;

	mutex_lock(&ism_dev_list.mutex);
	list_for_each_entry(ism, &ism_dev_list.list, list) {
		spin_lock_irqsave(&ism->lock, flags);
		/* Stop forwarding IRQs and events */
		ism->subs[client->id] = NULL;
		for (int i = 0; i < ISM_NR_DMBS; ++i) {
			if (ism->sba_client_arr[i] == client->id) {
				WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
				     __func__, client->name);
				rc = -EBUSY;
				goto err_reg_dmb;
			}
		}
		spin_unlock_irqrestore(&ism->lock, flags);
	}
	mutex_unlock(&ism_dev_list.mutex);

	mutex_lock(&clients_lock);
	clients[client->id] = NULL;
	if (client->id + 1 == max_client)
		max_client--;
	mutex_unlock(&clients_lock);
	return rc;

err_reg_dmb:
	spin_unlock_irqrestore(&ism->lock, flags);
	mutex_unlock(&ism_dev_list.mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ism_unregister_client);

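/*
 * Issue a command to the device: write the request payload followed by
 * the request header, then read the response back. resp->ret is preset
 * to ISM_ERROR so a response that is never written back reads as failure.
 */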
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
	__ism_write_cmd(ism, req, 0, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	__ism_read_cmd(ism, resp, 0, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

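/*
 * Log the QUERY INFO response to the debug feed; the result is used for
 * debugging only, so failures are ignored.
 */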
static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

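/*
 * Allocate one DMA page for the SBA and register it with the device.
 * The SBA carries the per-DMB interrupt bits and the event indicator
 * that ism_handle_irq() scans.
 */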
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

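/* Allocate one DMA page for the event queue (IEQ) and register it. */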
static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

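/*
 * The UNREG commands below tolerate an ISM_ERROR response so that the
 * DMA buffers are still freed and the pointers cleared.
 */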
static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->local_gid = cmd.response.gid;
out:
	return ret;
}

static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}

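/*
 * Allocate the backing memory for a DMB. An sba_idx of zero means "pick
 * any free slot"; indices below ISM_DMB_BIT_OFFSET are reserved and are
 * rejected with -EINVAL.
 */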
static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOSPC;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					   &dmb->dma_addr,
					   GFP_KERNEL | __GFP_NOWARN |
					   __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}

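/*
 * Register a DMB with the device and record the owning client in
 * sba_client_arr so ism_handle_irq() can route its interrupts.
 */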
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
		     struct ism_client *client)
{
	union ism_reg_dmb cmd;
	unsigned long flags;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
	spin_lock_irqsave(&ism->lock, flags);
	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
	spin_unlock_irqrestore(&ism->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ism_register_dmb);

int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	union ism_unreg_dmb cmd;
	unsigned long flags;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	spin_lock_irqsave(&ism->lock, flags);
	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
	spin_unlock_irqrestore(&ism->lock, flags);

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ism_unregister_dmb);

static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

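/*
 * Number of bytes that can be copied from 'start' before crossing the
 * next 'boundary'-aligned address, capped at 'len'. 'boundary' must be
 * a power of two.
 */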
static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

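/*
 * Copy 'size' bytes into a remote DMB in chunks that never cross a page
 * boundary; the signal flag 'sf' is applied to the final chunk only.
 */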
int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
	     unsigned int offset, void *data, unsigned int size)
{
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ism_move);

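/* Drain new event queue entries and fan them out to all subscribers. */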
static void ism_handle_event(struct ism_dev *ism)
{
	struct ism_event *entry;
	struct ism_client *clt;
	int i;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		for (i = 0; i < max_client; ++i) {
			clt = ism->subs[i];
			if (clt)
				clt->handle_event(ism, entry);
		}
	}
}

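/*
 * MSI handler: scan the SBA DMB bits (MSB-first bit ops), hand each
 * pending DMB to the owning client's handle_irq() callback, then
 * process the event queue if the event indicator is set.
 */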
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;
	u16 dmbemask;
	u8 client_id;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		client_id = ism->sba_client_arr[bit];
		if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
			continue;
		ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

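/*
 * Bring a device online: set up the MSI vector, the client routing
 * table, SBA, IEQ and local GID, probe V2 capability via the reserved
 * VLAN ID, then announce the new device to all registered clients.
 */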
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int i, ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
	if (!ism->sba_client_arr) {
		/* don't leak the positive vector count from pci_alloc_irq_vectors() */
		ret = -ENOMEM;
		goto free_vectors;
	}
	memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_client_arr;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
		/* hardware is V2 capable */
		ism_v2_capable = true;
	else
		ism_v2_capable = false;

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i]) {
			clients[i]->add(ism);
			ism_setup_forwarding(clients[i], ism);
		}
	}
	mutex_unlock(&clients_lock);

	list_add(&ism->list, &ism_dev_list.list);
	mutex_unlock(&ism_dev_list.mutex);

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_client_arr:
	kfree(ism->sba_client_arr);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;
	ism->dev.parent = &pdev->dev;
	device_initialize(&ism->dev);
	dev_set_name(&ism->dev, dev_name(&pdev->dev));
	ret = device_add(&ism->dev);
	if (ret)
		goto err_dev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ret = ism_dev_init(ism);
	if (ret)
		goto err_resource;

	return 0;

err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	device_del(&ism->dev);
err_dev:
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);

	return ret;
}

static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ism->lock, flags);
	for (i = 0; i < max_client; ++i)
		ism->subs[i] = NULL;
	spin_unlock_irqrestore(&ism->lock, flags);

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i])
			clients[i]->remove(ism);
	}
	mutex_unlock(&clients_lock);

	if (ism_v2_capable)
		ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	kfree(ism->sba_client_arr);
	pci_free_irq_vectors(pdev);
	list_del_init(&ism->list);
	mutex_unlock(&ism_dev_list.mutex);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	device_del(&ism->dev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}

static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
};

static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	memset(clients, 0, sizeof(clients));
	max_client = 0;
	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);

/*************************** SMC-D Implementation *****************************/

#if IS_ENABLED(CONFIG_SMC)
static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

static int smcd_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
			   u32 vid_valid, u32 vid)
{
	return ism_query_rgid(smcd->priv, rgid->gid, vid_valid, vid);
}

static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
			     struct ism_client *client)
{
	return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
}

static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
}

static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	return ism_add_vlan_id(smcd->priv, vlan_id);
}

static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	return ism_del_vlan_id(smcd->priv, vlan_id);
}

static int smcd_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int smcd_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

static int smcd_signal_ieq(struct smcd_dev *smcd, struct smcd_gid *rgid,
			   u32 trigger_irq, u32 event_code, u64 info)
{
	return ism_signal_ieq(smcd->priv, rgid->gid,
			      trigger_irq, event_code, info);
}

static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		     bool sf, unsigned int offset, void *data,
		     unsigned int size)
{
	return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
}

static int smcd_supports_v2(void)
{
	return ism_v2_capable;
}

static u64 ism_get_local_gid(struct ism_dev *ism)
{
	return ism->local_gid;
}

static void smcd_get_local_gid(struct smcd_dev *smcd,
			       struct smcd_gid *smcd_gid)
{
	smcd_gid->gid = ism_get_local_gid(smcd->priv);
	smcd_gid->gid_ext = 0;
}

static u16 ism_get_chid(struct ism_dev *ism)
{
	if (!ism || !ism->pdev)
		return 0;

	return to_zpci(ism->pdev)->pchid;
}

static u16 smcd_get_chid(struct smcd_dev *smcd)
{
	return ism_get_chid(smcd->priv);
}

static inline struct device *smcd_get_dev(struct smcd_dev *dev)
{
	struct ism_dev *ism = dev->priv;

	return &ism->dev;
}

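/*
 * smcd_ops glue: thin wrappers translating the generic SMC-D operations
 * onto this device's ISM primitives.
 */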
static const struct smcd_ops ism_ops = {
	.query_remote_gid = smcd_query_rgid,
	.register_dmb = smcd_register_dmb,
	.unregister_dmb = smcd_unregister_dmb,
	.add_vlan_id = smcd_add_vlan_id,
	.del_vlan_id = smcd_del_vlan_id,
	.set_vlan_required = smcd_set_vlan_required,
	.reset_vlan_required = smcd_reset_vlan_required,
	.signal_event = smcd_signal_ieq,
	.move_data = smcd_move,
	.supports_v2 = smcd_supports_v2,
	.get_local_gid = smcd_get_local_gid,
	.get_chid = smcd_get_chid,
	.get_dev = smcd_get_dev,
};

const struct smcd_ops *ism_get_smcd_ops(void)
{
	return &ism_ops;
}
EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
#endif