v3.1: drivers/infiniband/core/cache.c
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_gid_cache {
	int             table_len;
	union ib_gid    table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};

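/*
 * Port numbering: an IB switch exposes a single management port 0,
 * while CAs and routers number their physical ports 1..phys_port_cnt.
 * Every per-port array below is therefore indexed with
 * (port - start_port(device)).
 */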
static inline int start_port(struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static inline int end_port(struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
		0 : device->phys_port_cnt;
}

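/*
 * ib_get_cached_gid - fetch the GID at @index in @port_num's cached
 * GID table.  Reads the software cache under the read lock instead of
 * querying the hardware; returns -EINVAL if the port or index is out
 * of range.
 */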
int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.gid_cache[port_num - start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*gid = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_gid);

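/*
 * ib_find_cached_gid - scan every port's cached GID table for @gid.
 * On success returns 0 and sets *port_num (and *index when non-NULL);
 * returns -ENOENT when no port has a matching entry.
 */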
int ib_find_cached_gid(struct ib_device *device,
		       union ib_gid	*gid,
		       u8               *port_num,
		       u16              *index)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int p, i;
	int ret = -ENOENT;

	*port_num = -1;
	if (index)
		*index = -1;

	read_lock_irqsave(&device->cache.lock, flags);

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		cache = device->cache.gid_cache[p];
		for (i = 0; i < cache->table_len; ++i) {
			if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
				*port_num = p + start_port(device);
				if (index)
					*index = i;
				ret = 0;
				goto found;
			}
		}
	}
found:
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_gid);

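/*
 * ib_get_cached_pkey - fetch the P_Key at @index in @port_num's cached
 * P_Key table, or -EINVAL if the port or index is out of range.
 */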
int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

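/*
 * ib_find_cached_pkey - find the index of a cached P_Key matching
 * @pkey.  Bit 15 of a P_Key is the membership bit (1 = full member,
 * 0 = limited member), so the comparison masks it off with 0x7fff and
 * accepts either membership variant.
 */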
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

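/*
 * ib_get_cached_lmc - fetch the cached LID mask count (LMC) for
 * @port_num, or -EINVAL if the port number is out of range.
 */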
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

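/*
 * ib_cache_update - rebuild the P_Key, GID and LMC caches for one
 * port.  The port attributes are queried first, fresh tables are
 * allocated and filled via ib_query_pkey()/ib_query_gid(), and only
 * then are the new tables swapped in under the write lock, so readers
 * never observe a partially filled table.  The old tables are freed
 * after the swap.
 */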
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache       *gid_cache = NULL, *old_gid_cache;
	int                        i;
	int                        ret;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->table, GFP_KERNEL);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = tprops->gid_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	for (i = 0; i < gid_cache->table_len; ++i) {
		ret = ib_query_gid(device, port, i, gid_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;

	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(old_pkey_cache);
	kfree(old_gid_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

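/*
 * ib_cache_event - device event handler.  It may run in atomic
 * context, so the work item is allocated with GFP_ATOMIC and the
 * actual cache refresh is deferred to ib_wq via ib_cache_task().
 */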
static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}

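/*
 * ib_cache_setup_one - per-device client hook: allocate the per-port
 * cache arrays, fill every port with an initial update, then register
 * the event handler that keeps the cache current.
 */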
static void ib_cache_setup_one(struct ib_device *device)
{
	int p;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kmalloc(sizeof *device->cache.pkey_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
	device->cache.gid_cache =
		kmalloc(sizeof *device->cache.gid_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);

	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (end_port(device) -
					   start_port(device) + 1),
					  GFP_KERNEL);

	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache "
		       "for %s\n", device->name);
		goto err;
	}

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		device->cache.pkey_cache[p] = NULL;
		device->cache.gid_cache [p] = NULL;
		ib_cache_update(device, p + start_port(device));
	}

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	if (ib_register_event_handler(&device->cache.event_handler))
		goto err_cache;

	return;

err_cache:
	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

err:
	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}

static void ib_cache_cleanup_one(struct ib_device *device)
{
	int p;

	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}

static struct ib_client cache_client = {
	.name   = "cache",
	.add    = ib_cache_setup_one,
	.remove = ib_cache_cleanup_one
};

int __init ib_cache_setup(void)
{
	return ib_register_client(&cache_client);
}

void __exit ib_cache_cleanup(void)
{
	ib_unregister_client(&cache_client);
}
v3.15: drivers/infiniband/core/cache.c (ib_find_cached_pkey now prefers full-membership P_Keys; ib_find_exact_cached_pkey is new)
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_gid_cache {
	int             table_len;
	union ib_gid    table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};

static inline int start_port(struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static inline int end_port(struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
		0 : device->phys_port_cnt;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.gid_cache[port_num - start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*gid = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_gid);

int ib_find_cached_gid(struct ib_device *device,
		       union ib_gid	*gid,
		       u8               *port_num,
		       u16              *index)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int p, i;
	int ret = -ENOENT;

	*port_num = -1;
	if (index)
		*index = -1;

	read_lock_irqsave(&device->cache.lock, flags);

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		cache = device->cache.gid_cache[p];
		for (i = 0; i < cache->table_len; ++i) {
			if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
				*port_num = p + start_port(device);
				if (index)
					*index = i;
				ret = 0;
				goto found;
			}
		}
	}
found:
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

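/*
 * ib_find_cached_pkey - find the index of a cached P_Key matching
 * @pkey.  Unlike the v3.1 version above, a full-membership entry
 * (bit 15, 0x8000, set) is preferred; a limited-membership match is
 * remembered in partial_ix and returned only when no full-membership
 * entry matches.
 */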
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

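/*
 * ib_find_exact_cached_pkey - like ib_find_cached_pkey(), but matches
 * the full 16-bit value, membership bit included.
 */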
int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

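/*
 * Hedged usage sketch, not part of the kernel file: how a hypothetical
 * consumer might resolve the default P_Key 0xffff to a table index.
 * The function name is illustrative only; it relies on nothing beyond
 * the helpers defined above.
 */
static int example_resolve_default_pkey(struct ib_device *device, u8 port)
{
	u16 index;
	int ret;

	/* Prefers a full-membership 0xffff, falls back to limited 0x7fff. */
	ret = ib_find_cached_pkey(device, port, 0xffff, &index);
	if (ret)
		return ret;	/* -EINVAL or -ENOENT */

	return index;
}
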
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache       *gid_cache = NULL, *old_gid_cache;
	int                        i;
	int                        ret;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->table, GFP_KERNEL);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = tprops->gid_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	for (i = 0; i < gid_cache->table_len; ++i) {
		ret = ib_query_gid(device, port, i, gid_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;

	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(old_pkey_cache);
	kfree(old_gid_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}

static void ib_cache_setup_one(struct ib_device *device)
{
	int p;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kmalloc(sizeof *device->cache.pkey_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
	device->cache.gid_cache =
		kmalloc(sizeof *device->cache.gid_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);

	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (end_port(device) -
					   start_port(device) + 1),
					  GFP_KERNEL);

	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache "
		       "for %s\n", device->name);
		goto err;
	}

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		device->cache.pkey_cache[p] = NULL;
		device->cache.gid_cache [p] = NULL;
		ib_cache_update(device, p + start_port(device));
	}

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	if (ib_register_event_handler(&device->cache.event_handler))
		goto err_cache;

	return;

err_cache:
	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

err:
	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}

static void ib_cache_cleanup_one(struct ib_device *device)
{
	int p;

	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}

static struct ib_client cache_client = {
	.name   = "cache",
	.add    = ib_cache_setup_one,
	.remove = ib_cache_cleanup_one
};

int __init ib_cache_setup(void)
{
	return ib_register_client(&cache_client);
}

void __exit ib_cache_cleanup(void)
{
	ib_unregister_client(&cache_client);
}