v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"

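/*
 * Each TX frame is tracked by a pool-backed queue item.  An item lives on
 * exactly one of three lists: queue->queue (waiting for TX), queue->pending
 * (handed to the firmware, awaiting confirmation) or queue->free_pool.
 * The pool index of an item doubles as the item_id inside packet_id, which
 * is what makes lookup by packet_id O(1).
 */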
/* private */ struct cw1200_queue_item
{
	struct list_head	head;
	struct sk_buff		*skb;
	u32			packet_id;
	unsigned long		queue_timestamp;
	unsigned long		xmit_timestamp;
	struct cw1200_txpriv	txpriv;
	u8			generation;
};

static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n",
			 queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}

static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	BUG_ON(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}

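/*
 * packet_id is a 32-bit cookie that travels with each frame to the firmware:
 *
 *	bits 31..24  queue generation
 *	bits 23..16  queue id
 *	bits 15..8   item generation
 *	bits  7..0   item id (index into queue->pool)
 *
 * The two generation fields let stale confirmations (for a queue that has
 * since been cleared, or a pool slot that has since been reused) be
 * detected and rejected.
 */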
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
					 u8 *queue_id, u8 *item_generation,
					 u8 *item_id)
{
	*item_id		= (packet_id >>  0) & 0xFF;
	*item_generation	= (packet_id >>  8) & 0xFF;
	*queue_id		= (packet_id >> 16) & 0xFF;
	*queue_generation	= (packet_id >> 24) & 0xFF;
}

static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
					    u8 item_generation, u8 item_id)
{
	return ((u32)item_id << 0) |
		((u32)item_generation << 8) |
		((u32)queue_id << 16) |
		((u32)queue_generation << 24);
}

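/*
 * Destroy a batch of items that were collected on a gc_list.  Callers build
 * the list under queue->lock and invoke this helper only after dropping the
 * lock; the skb destructor is presumed unsafe to call under the queue
 * spinlock (in this driver it reports TX status back to mac80211).
 */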
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
				 struct list_head *gc_list)
{
	struct cw1200_queue_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, gc_list, head) {
		list_del(&item->head);
		stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
		kfree(item);
	}
}

static void cw1200_queue_register_post_gc(struct list_head *gc_list,
					  struct cw1200_queue_item *item)
{
	struct cw1200_queue_item *gc_item;
	gc_item = kmemdup(item, sizeof(struct cw1200_queue_item),
			GFP_ATOMIC);
	BUG_ON(!gc_item);
	list_add_tail(&gc_item->head, gc_list);
}

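/*
 * Walk the queue in FIFO order and expire every item whose TTL has run out.
 * Expired items are copied onto @head for deferred destruction and their
 * pool slots are returned to free_pool.  If the queue remains overfull, the
 * GC timer is re-armed for the next item due to expire.
 */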
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *iter, *tmp;
	bool wakeup_stats = false;

	list_for_each_entry_safe(iter, tmp, &queue->queue, head) {
		if (time_is_after_jiffies(iter->queue_timestamp + queue->ttl)) {
			item = iter;
			break;
		}
		--queue->num_queued;
		--queue->link_map_cache[iter->txpriv.link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[iter->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		cw1200_debug_tx_ttl(stats->priv);
		cw1200_queue_register_post_gc(head, iter);
		iter->skb = NULL;
		list_move_tail(&iter->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
			cw1200_pm_stay_awake(&stats->priv->pm_state,
					     tmo - jiffies);
		}
	}
}

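/*
 * GC timer callback: expired items are unlinked under queue->lock and then
 * destroyed via cw1200_queue_post_gc() once the lock has been dropped.
 */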
static void cw1200_queue_gc(struct timer_list *t)
{
	LIST_HEAD(list);
	struct cw1200_queue *queue =
		from_timer(queue, t, gc);

	spin_lock_bh(&queue->lock);
	__cw1200_queue_gc(queue, &list, true);
	spin_unlock_bh(&queue->lock);
	cw1200_queue_post_gc(queue->stats, &list);
}

int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
			    size_t map_capacity,
			    cw1200_queue_skb_dtor_t skb_dtor,
			    struct cw1200_common *priv)
{
	memset(stats, 0, sizeof(*stats));
	stats->map_capacity = map_capacity;
	stats->skb_dtor = skb_dtor;
	stats->priv = priv;
	spin_lock_init(&stats->lock);
	init_waitqueue_head(&stats->wait_link_id_empty);

	stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!stats->link_map_cache)
		return -ENOMEM;

	return 0;
}

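/*
 * A minimal bring-up sketch (illustrative only; the real call sites are in
 * the driver's main.c, and the names CW1200_LINK_ID_MAX, CW1200_MAX_QUEUE_SZ
 * and ttl[] below are assumptions, not definitions from this file):
 *
 *	cw1200_queue_stats_init(&priv->tx_queue_stats, CW1200_LINK_ID_MAX,
 *				cw1200_skb_dtor, priv);
 *	for (i = 0; i < 4; ++i)
 *		cw1200_queue_init(&priv->tx_queue[i], &priv->tx_queue_stats,
 *				  i, CW1200_MAX_QUEUE_SZ, ttl[i]);
 */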
int cw1200_queue_init(struct cw1200_queue *queue,
		      struct cw1200_queue_stats *stats,
		      u8 queue_id,
		      size_t capacity,
		      unsigned long ttl)
{
	size_t i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	timer_setup(&queue->gc, cw1200_queue_gc, 0);

	queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
			      GFP_KERNEL);
	if (!queue->pool)
		return -ENOMEM;

	queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!queue->link_map_cache) {
		kfree(queue->pool);
		queue->pool = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}

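/*
 * Flush everything: bump the queue generation (invalidating all outstanding
 * packet_ids), return every item to free_pool and destroy the skbs outside
 * the lock.  Used by cw1200_queue_deinit() among others.
 */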
int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item, *tmp;

	spin_lock_bh(&queue->lock);
	queue->generation++;
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		WARN_ON(!item->skb);
		cw1200_queue_register_post_gc(&gc_list, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;

	spin_lock_bh(&stats->lock);
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->lock);
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	cw1200_queue_post_gc(stats, &gc_list);
	return 0;
}

void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
	kfree(stats->link_map_cache);
	stats->link_map_cache = NULL;
}

void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}

size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->lock);
	if (link_id_map == (u32)-1) {
		ret = queue->num_queued - queue->num_pending;
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

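/*
 * Enqueue a frame.  When the queue fills to within (num_present_cpus() - 1)
 * slots of capacity it is marked overfull: the mac80211 queue is stopped and
 * the GC timer is kicked immediately so TTL expiry can make room.  The CPU
 * headroom exists because several CPUs may be enqueueing concurrently.
 */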
int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		struct cw1200_queue_item *item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		BUG_ON(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation = 0;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		item->queue_timestamp = jiffies;

		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/* TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (queue->overfull == false &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

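/*
 * Fetch the first queued frame matching @link_id_map and move it to the
 * pending list; the caller is expected to hand it to the firmware.  The
 * item stays accounted in queue->num_queued but is dropped from the link
 * map caches and the global stats.
 */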
int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if (link_id_map & BIT(item->txpriv.link_id)) {
			ret = 0;
			break;
		}
	}

	if (!WARN_ON(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packet_id = item->packet_id;
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;

		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return ret;
}

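/*
 * Return a pending frame to the TX queue, e.g. after a failed transmission
 * attempt.  The item generation is bumped and a fresh packet_id is issued,
 * so a late confirmation carrying the old id will be rejected.
 */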
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		item->generation = ++item_generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
							    queue_id,
							    item_generation,
							    item_id);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;
	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		++item->generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);

	return 0;
}

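/*
 * Remove a frame once the firmware has confirmed it.  The consistency
 * checks (queue generation, item id range, item generation) mirror
 * cw1200_queue_requeue(); on success the skb is destroyed after the lock
 * is dropped.
 */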
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct cw1200_txpriv gc_txpriv;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		++item->generation;
		/* Do not use list_move_tail here, but list_move:
		 * try to utilize cache row.
		 */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

	if (gc_skb)
		stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

	return ret;
}

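/*
 * Look up the skb and txpriv for a frame by packet_id, validating the
 * generations embedded in the id; typically used from the TX confirmation
 * path.
 */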
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

void cw1200_queue_lock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}

void cw1200_queue_unlock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}

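/*
 * Find the oldest xmit timestamp among pending frames, ignoring the frame
 * with @pending_frame_id.  Returns true if anything is pending; callers can
 * use the timestamp to detect frames stuck in the firmware.
 */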
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
				     unsigned long *timestamp,
				     u32 pending_frame_id)
{
	struct cw1200_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret) {
		list_for_each_entry(item, &queue->pending, head) {
			if (item->packet_id != pending_frame_id)
				if (time_before(item->xmit_timestamp,
						*timestamp))
					*timestamp = item->xmit_timestamp;
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

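/*
 * True when no frames are queued for any link in @link_id_map across all
 * queues sharing these stats; link_id_map == (u32)-1 means "any link".
 */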
bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
				 u32 link_id_map)
{
	bool empty = true;

	spin_lock_bh(&stats->lock);
	if (link_id_map == (u32)-1) {
		empty = stats->num_queued == 0;
	} else {
		int i;
		for (i = 0; i < stats->map_capacity; ++i) {
			if (link_id_map & BIT(i)) {
				if (stats->link_map_cache[i]) {
					empty = false;
					break;
				}
			}
		}
	}
	spin_unlock_bh(&stats->lock);

	return empty;
}