1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2020-2023 Intel Corporation
4 */
5#include <net/tso.h>
6#include <linux/tcp.h>
7
8#include "iwl-debug.h"
9#include "iwl-io.h"
10#include "fw/api/commands.h"
11#include "fw/api/tx.h"
12#include "fw/api/datapath.h"
13#include "fw/api/debug.h"
14#include "queue/tx.h"
15#include "iwl-fh.h"
16#include "iwl-scd.h"
17#include <linux/dmapool.h>
18
19/*
20 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
21 */
22static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
23 struct iwl_txq *txq, u16 byte_cnt,
24 int num_tbs)
25{
26 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
27 u8 filled_tfd_size, num_fetch_chunks;
28 u16 len = byte_cnt;
29 __le16 bc_ent;
30
31 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
32 return;
33
34 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
35 num_tbs * sizeof(struct iwl_tfh_tb);
36 /*
37 * filled_tfd_size contains the number of filled bytes in the TFD.
38 * Dividing it by 64 will give the number of chunks to fetch
39 * to SRAM - 0 for one chunk, 1 for 2 and so on.
40 * If, for example, TFD contains only 3 TBs then 32 bytes
41 * of the TFD are used, and only one chunk of 64 bytes should
42 * be fetched
43 */
44 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
45
46 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
47 struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
48
49 /* Starting from AX210, the HW expects bytes */
50 WARN_ON(trans->txqs.bc_table_dword);
51 WARN_ON(len > 0x3FFF);
52 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
53 scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
54 } else {
55 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
56
57 /* Before AX210, the HW expects DW */
58 WARN_ON(!trans->txqs.bc_table_dword);
59 len = DIV_ROUND_UP(len, 4);
60 WARN_ON(len > 0xFFF);
61 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
62 scd_bc_tbl->tfd_offset[idx] = bc_ent;
63 }
64}
65
66/*
67 * iwl_txq_inc_wr_ptr - Send new write index to hardware
68 */
69void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
70{
71 lockdep_assert_held(&txq->lock);
72
73 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
74
75 /*
76 * if not in power-save mode, uCode will never sleep when we're
77 * trying to tx (during RFKILL, we're not trying to tx).
78 */
79 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
80}
81
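/*
 * iwl_txq_gen2_get_num_tbs - return how many TBs are already set in this
 * TFD (only the low 5 bits of the num_tbs field are valid)
 */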
82static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
83 struct iwl_tfh_tfd *tfd)
84{
85 return le16_to_cpu(tfd->num_tbs) & 0x1f;
86}
87
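/*
 * iwl_txq_gen2_set_tb - append a TB (DMA address + length) to the TFD;
 * returns the index of the new TB, or -EINVAL if the TFD is already full
 */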
88int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
89 dma_addr_t addr, u16 len)
90{
91 int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
92 struct iwl_tfh_tb *tb;
93
94 /* Only WARN here so we know about the issue, but we mess up our
95 * unmap path because not every place currently checks for errors
96 * returned from this function - it can only return an error if
97 * there's no more space, and so when we know there is enough we
98 * don't always check ...
99 */
100 WARN(iwl_txq_crosses_4g_boundary(addr, len),
101 "possible DMA problem with iova:0x%llx, len:%d\n",
102 (unsigned long long)addr, len);
103
104 if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
105 return -EINVAL;
106 tb = &tfd->tbs[idx];
107
108 /* Each TFD can point to a maximum of max_tbs Tx buffers */
109 if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
110 IWL_ERR(trans, "Error can not send more than %d chunks\n",
111 trans->txqs.tfd.max_tbs);
112 return -EINVAL;
113 }
114
115 put_unaligned_le64(addr, &tb->addr);
116 tb->tb_len = cpu_to_le16(len);
117
118 tfd->num_tbs = cpu_to_le16(idx + 1);
119
120 return idx;
121}
122
123static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
124 struct iwl_tfh_tfd *tfd)
125{
126 tfd->num_tbs = 0;
127
128 iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
129 trans->invalid_tx_cmd.size);
130}
131
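/*
 * iwl_txq_gen2_tfd_unmap - unmap all TBs of the TFD (except the first one)
 * and point the TFD back at the invalid TX command buffer
 */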
132void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
133 struct iwl_tfh_tfd *tfd)
134{
135 int i, num_tbs;
136
137 /* Sanity check on number of chunks */
138 num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
139
140 if (num_tbs > trans->txqs.tfd.max_tbs) {
141 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
142 return;
143 }
144
145 /* first TB is never freed - it's the bidirectional DMA data */
146 for (i = 1; i < num_tbs; i++) {
147 if (meta->tbs & BIT(i))
148 dma_unmap_page(trans->dev,
149 le64_to_cpu(tfd->tbs[i].addr),
150 le16_to_cpu(tfd->tbs[i].tb_len),
151 DMA_TO_DEVICE);
152 else
153 dma_unmap_single(trans->dev,
154 le64_to_cpu(tfd->tbs[i].addr),
155 le16_to_cpu(tfd->tbs[i].tb_len),
156 DMA_TO_DEVICE);
157 }
158
159 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
160}
161
162void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
163{
164 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
165 * idx is bounded by n_window
166 */
167 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
168 struct sk_buff *skb;
169
170 lockdep_assert_held(&txq->lock);
171
172 if (!txq->entries)
173 return;
174
175 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
176 iwl_txq_get_tfd(trans, txq, idx));
177
178 skb = txq->entries[idx].skb;
179
180 /* Can be called from irqs-disabled context
181 * If skb is not NULL, it means that the whole queue is being
182 * freed and that the queue is not empty - free the skb
183 */
184 if (skb) {
185 iwl_op_mode_free_skb(trans->op_mode, skb);
186 txq->entries[idx].skb = NULL;
187 }
188}
189
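/*
 * Allocate a page used to copy a TB that would otherwise trigger the
 * 2^32-boundary hardware issue (see iwl_txq_gen2_set_tb_with_wa() below)
 * and chain it to any pages already attached to the skb, so that all of
 * them are released together in iwl_txq_free_tso_page().
 */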
190static struct page *get_workaround_page(struct iwl_trans *trans,
191 struct sk_buff *skb)
192{
193 struct page **page_ptr;
194 struct page *ret;
195
196 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
197
198 ret = alloc_page(GFP_ATOMIC);
199 if (!ret)
200 return NULL;
201
202 /* set the chaining pointer to the previous page, if there is one */
203 *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
204 *page_ptr = ret;
205
206 return ret;
207}
208
209/*
210 * Add a TB and if needed apply the FH HW bug workaround;
211 * meta != NULL indicates that it's a page mapping and we
212 * need to dma_unmap_page() and set the meta->tbs bit in
213 * this case.
214 */
215static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
216 struct sk_buff *skb,
217 struct iwl_tfh_tfd *tfd,
218 dma_addr_t phys, void *virt,
219 u16 len, struct iwl_cmd_meta *meta)
220{
221 dma_addr_t oldphys = phys;
222 struct page *page;
223 int ret;
224
225 if (unlikely(dma_mapping_error(trans->dev, phys)))
226 return -ENOMEM;
227
228 if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
229 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
230
231 if (ret < 0)
232 goto unmap;
233
234 if (meta)
235 meta->tbs |= BIT(ret);
236
237 ret = 0;
238 goto trace;
239 }
240
241 /*
242 * Work around a hardware bug. If (as expressed in the
243 * condition above) the TB ends on a 32-bit boundary,
244 * then the next TB may be accessed with the wrong
245 * address.
246 * To work around it, copy the data elsewhere and make
247 * a new mapping for it so the device will not fail.
248 */
249
250 if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
251 ret = -ENOBUFS;
252 goto unmap;
253 }
254
255 page = get_workaround_page(trans, skb);
256 if (!page) {
257 ret = -ENOMEM;
258 goto unmap;
259 }
260
261 memcpy(page_address(page), virt, len);
262
263 phys = dma_map_single(trans->dev, page_address(page), len,
264 DMA_TO_DEVICE);
265 if (unlikely(dma_mapping_error(trans->dev, phys)))
266 return -ENOMEM;
267 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
268 if (ret < 0) {
269 /* unmap the new allocation as single */
270 oldphys = phys;
271 meta = NULL;
272 goto unmap;
273 }
274 IWL_WARN(trans,
275 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
276 len, (unsigned long long)oldphys, (unsigned long long)phys);
277
278 ret = 0;
279unmap:
280 if (meta)
281 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
282 else
283 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
284trace:
285 trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
286
287 return ret;
288}
289
290#ifdef CONFIG_INET
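/*
 * Return the per-CPU TSO header page, allocating a fresh one if the
 * current page cannot hold another len bytes; the page is attached to the
 * skb (with an extra reference) so it is freed in iwl_txq_free_tso_page().
 */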
291struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
292 struct sk_buff *skb)
293{
294 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
295 struct page **page_ptr;
296
297 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
298
299 if (WARN_ON(*page_ptr))
300 return NULL;
301
302 if (!p->page)
303 goto alloc;
304
305 /*
306 * Check if there's enough room on this page
307 *
308 * Note that we put a page chaining pointer *last* in the
309 * page - we need it somewhere, and if it's there then we
310 * avoid DMA mapping the last bits of the page which may
311 * trigger the 32-bit boundary hardware bug.
312 *
313 * (see also get_workaround_page() in tx-gen2.c)
314 */
315 if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
316 sizeof(void *))
317 goto out;
318
319 /* We don't have enough room on this page, get a new one. */
320 __free_page(p->page);
321
322alloc:
323 p->page = alloc_page(GFP_ATOMIC);
324 if (!p->page)
325 return NULL;
326 p->pos = page_address(p->page);
327 /* set the chaining pointer to NULL */
328 *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
329out:
330 *page_ptr = p->page;
331 get_page(p->page);
332 return p;
333}
334#endif
335
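/*
 * Build the TBs for a software A-MSDU (GSO): for every subframe, write the
 * DA/SA, subframe length and SNAP/IP/TCP headers into the TSO header page,
 * map them as one TB, then map the payload chunks handed out by the TSO
 * core as further TBs.
 */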
336static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
337 struct sk_buff *skb,
338 struct iwl_tfh_tfd *tfd, int start_len,
339 u8 hdr_len,
340 struct iwl_device_tx_cmd *dev_cmd)
341{
342#ifdef CONFIG_INET
343 struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
344 struct ieee80211_hdr *hdr = (void *)skb->data;
345 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
346 unsigned int mss = skb_shinfo(skb)->gso_size;
347 u16 length, amsdu_pad;
348 u8 *start_hdr;
349 struct iwl_tso_hdr_page *hdr_page;
350 struct tso_t tso;
351
352 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
353 &dev_cmd->hdr, start_len, 0);
354
355 ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
356 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
357 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
358 amsdu_pad = 0;
359
360 /* total amount of header we may need for this A-MSDU */
361 hdr_room = DIV_ROUND_UP(total_len, mss) *
362 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
363
364 /* Our device supports 9 segments at most, so it will fit in 1 page */
365 hdr_page = get_page_hdr(trans, hdr_room, skb);
366 if (!hdr_page)
367 return -ENOMEM;
368
369 start_hdr = hdr_page->pos;
370
371 /*
372 * Pull the ieee80211 header to be able to use TSO core,
373 * we will restore it for the tx_status flow.
374 */
375 skb_pull(skb, hdr_len);
376
377 /*
378 * Remove the length of all the headers that we don't actually
379 * have in the MPDU by themselves, but that we duplicate into
380 * all the different MSDUs inside the A-MSDU.
381 */
382 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
383
384 tso_start(skb, &tso);
385
386 while (total_len) {
387 /* this is the data left for this subframe */
388 unsigned int data_left = min_t(unsigned int, mss, total_len);
389 unsigned int tb_len;
390 dma_addr_t tb_phys;
391 u8 *subf_hdrs_start = hdr_page->pos;
392
393 total_len -= data_left;
394
395 memset(hdr_page->pos, 0, amsdu_pad);
396 hdr_page->pos += amsdu_pad;
397 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
398 data_left)) & 0x3;
399 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
400 hdr_page->pos += ETH_ALEN;
401 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
402 hdr_page->pos += ETH_ALEN;
403
404 length = snap_ip_tcp_hdrlen + data_left;
405 *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
406 hdr_page->pos += sizeof(length);
407
408 /*
409 * This will copy the SNAP as well which will be considered
410 * as MAC header.
411 */
412 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
413
414 hdr_page->pos += snap_ip_tcp_hdrlen;
415
416 tb_len = hdr_page->pos - start_hdr;
417 tb_phys = dma_map_single(trans->dev, start_hdr,
418 tb_len, DMA_TO_DEVICE);
419 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
420 goto out_err;
421 /*
422 * No need for _with_wa, this is from the TSO page and
423 * we leave some space at the end of it so we can't hit
424 * the buggy scenario.
425 */
426 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
427 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
428 tb_phys, tb_len);
429 /* add this subframe's headers' length to the tx_cmd */
430 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
431
432 /* prepare the start_hdr for the next subframe */
433 start_hdr = hdr_page->pos;
434
435 /* put the payload */
436 while (data_left) {
437 int ret;
438
439 tb_len = min_t(unsigned int, tso.size, data_left);
440 tb_phys = dma_map_single(trans->dev, tso.data,
441 tb_len, DMA_TO_DEVICE);
442 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
443 tb_phys, tso.data,
444 tb_len, NULL);
445 if (ret)
446 goto out_err;
447
448 data_left -= tb_len;
449 tso_build_data(skb, &tso, tb_len);
450 }
451 }
452
453 /* re-add the WiFi header */
454 skb_push(skb, hdr_len);
455
456 return 0;
457
458out_err:
459#endif
460 return -EINVAL;
461}
462
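/*
 * Build a TFD for a GSO frame: set up TB0 (the bi-directional DMA data)
 * and TB1 (the rest of the TX command plus the 802.11 header), then let
 * iwl_txq_gen2_build_amsdu() add the subframe headers and payload.
 */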
463static struct
464iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
465 struct iwl_txq *txq,
466 struct iwl_device_tx_cmd *dev_cmd,
467 struct sk_buff *skb,
468 struct iwl_cmd_meta *out_meta,
469 int hdr_len,
470 int tx_cmd_len)
471{
472 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
473 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
474 dma_addr_t tb_phys;
475 int len;
476 void *tb1_addr;
477
478 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
479
480 /*
481 * No need for _with_wa, the first TB allocation is aligned up
482 * to a 64-byte boundary and thus can't be at the end or cross
483 * a page boundary (much less a 2^32 boundary).
484 */
485 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
486
487 /*
488 * The second TB (tb1) points to the remainder of the TX command
489 * and the 802.11 header - dword aligned size
490 * (This calculation modifies the TX command, so do it before the
491 * setup of the first TB)
492 */
493 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
494 IWL_FIRST_TB_SIZE;
495
496 /* do not align A-MSDU to dword as the subframe header aligns it */
497
498 /* map the data for TB1 */
499 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
500 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
501 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
502 goto out_err;
503 /*
504 * No need for _with_wa(), we ensure (via alignment) that the data
505 * here can never cross or end at a page boundary.
506 */
507 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
508
509 if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
510 hdr_len, dev_cmd))
511 goto out_err;
512
513 /* building the A-MSDU might have changed this data, memcpy it now */
514 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
515 return tfd;
516
517out_err:
518 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
519 return NULL;
520}
521
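/* Map each paged fragment of the skb and add it to the TFD as its own TB */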
522static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
523 struct sk_buff *skb,
524 struct iwl_tfh_tfd *tfd,
525 struct iwl_cmd_meta *out_meta)
526{
527 int i;
528
529 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
530 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
531 dma_addr_t tb_phys;
532 unsigned int fragsz = skb_frag_size(frag);
533 int ret;
534
535 if (!fragsz)
536 continue;
537
538 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
539 fragsz, DMA_TO_DEVICE);
540 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
541 skb_frag_address(frag),
542 fragsz, out_meta);
543 if (ret)
544 return ret;
545 }
546
547 return 0;
548}
549
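/*
 * Build a TFD for a regular (non-GSO) frame: TB0 holds the bi-directional
 * DMA data, TB1 the rest of the TX command plus the 802.11 header, TB2 the
 * remainder of the skb head; further TBs cover the paged fragments and any
 * frag-list skbs.
 */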
550static struct
551iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
552 struct iwl_txq *txq,
553 struct iwl_device_tx_cmd *dev_cmd,
554 struct sk_buff *skb,
555 struct iwl_cmd_meta *out_meta,
556 int hdr_len,
557 int tx_cmd_len,
558 bool pad)
559{
560 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
561 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
562 dma_addr_t tb_phys;
563 int len, tb1_len, tb2_len;
564 void *tb1_addr;
565 struct sk_buff *frag;
566
567 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
568
569 /* The first TB points to bi-directional DMA data */
570 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
571
572 /*
573 * No need for _with_wa, the first TB allocation is aligned up
574 * to a 64-byte boundary and thus can't be at the end or cross
575 * a page boundary (much less a 2^32 boundary).
576 */
577 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
578
579 /*
580 * The second TB (tb1) points to the remainder of the TX command
581 * and the 802.11 header - dword aligned size
582 * (This calculation modifies the TX command, so do it before the
583 * setup of the first TB)
584 */
585 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
586 IWL_FIRST_TB_SIZE;
587
588 if (pad)
589 tb1_len = ALIGN(len, 4);
590 else
591 tb1_len = len;
592
593 /* map the data for TB1 */
594 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
595 tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
596 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
597 goto out_err;
598 /*
599 * No need for _with_wa(), we ensure (via alignment) that the data
600 * here can never cross or end at a page boundary.
601 */
602 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
603 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
604 IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
605
606 /* set up TFD's third entry to point to remainder of skb's head */
607 tb2_len = skb_headlen(skb) - hdr_len;
608
609 if (tb2_len > 0) {
610 int ret;
611
612 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
613 tb2_len, DMA_TO_DEVICE);
614 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
615 skb->data + hdr_len, tb2_len,
616 NULL);
617 if (ret)
618 goto out_err;
619 }
620
621 if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
622 goto out_err;
623
624 skb_walk_frags(skb, frag) {
625 int ret;
626
627 tb_phys = dma_map_single(trans->dev, frag->data,
628 skb_headlen(frag), DMA_TO_DEVICE);
629 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
630 frag->data,
631 skb_headlen(frag), NULL);
632 if (ret)
633 goto out_err;
634 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
635 goto out_err;
636 }
637
638 return tfd;
639
640out_err:
641 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
642 return NULL;
643}
644
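/*
 * Build the TFD for an skb: take the A-MSDU path for GSO frames, the
 * regular path otherwise.
 */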
645static
646struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
647 struct iwl_txq *txq,
648 struct iwl_device_tx_cmd *dev_cmd,
649 struct sk_buff *skb,
650 struct iwl_cmd_meta *out_meta)
651{
652 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
653 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
654 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
655 int len, hdr_len;
656 bool amsdu;
657
658 /* There must be data left over for TB1 or this code must be changed */
659 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
660 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
661 offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
662 IWL_FIRST_TB_SIZE);
663 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
664 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
665 offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
666 IWL_FIRST_TB_SIZE);
667
668 memset(tfd, 0, sizeof(*tfd));
669
670 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
671 len = sizeof(struct iwl_tx_cmd_gen2);
672 else
673 len = sizeof(struct iwl_tx_cmd_gen3);
674
675 amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
676 (*ieee80211_get_qos_ctl(hdr) &
677 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
678
679 hdr_len = ieee80211_hdrlen(hdr->frame_control);
680
681 /*
682 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
683 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
684 * built in the higher layers already.
685 */
686 if (amsdu && skb_shinfo(skb)->gso_size)
687 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
688 out_meta, hdr_len, len);
689 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
690 hdr_len, len, !amsdu);
691}
692
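/* Return how many TFD slots are currently free in the queue */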
693int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
694{
695 unsigned int max;
696 unsigned int used;
697
698 /*
699 * To avoid ambiguity between empty and completely full queues, there
700 * should always be less than max_tfd_queue_size elements in the queue.
701 * If q->n_window is smaller than max_tfd_queue_size, there is no need
702 * to reserve any queue entries for this purpose.
703 */
704 if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
705 max = q->n_window;
706 else
707 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
708
709 /*
710 * max_tfd_queue_size is a power of 2, so the following is equivalent to
711 * modulo by max_tfd_queue_size and is well defined.
712 */
713 used = (q->write_ptr - q->read_ptr) &
714 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
715
716 if (WARN_ON(used > max))
717 return 0;
718
719 return max - used;
720}
721
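/*
 * iwl_txq_gen2_tx - gen2 TX entry point: build a TFD for the skb, fill the
 * byte-count table entry and bump the write pointer towards the hardware,
 * or park the frame on the overflow queue if the ring is almost full
 */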
722int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
723 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
724{
725 struct iwl_cmd_meta *out_meta;
726 struct iwl_txq *txq = trans->txqs.txq[txq_id];
727 u16 cmd_len;
728 int idx;
729 void *tfd;
730
731 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
732 "queue %d out of range", txq_id))
733 return -EINVAL;
734
735 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
736 "TX on unused queue %d\n", txq_id))
737 return -EINVAL;
738
739 if (skb_is_nonlinear(skb) &&
740 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
741 __skb_linearize(skb))
742 return -ENOMEM;
743
744 spin_lock(&txq->lock);
745
746 if (iwl_txq_space(trans, txq) < txq->high_mark) {
747 iwl_txq_stop(trans, txq);
748
749 /* don't put the packet on the ring if there is no room */
750 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
751 struct iwl_device_tx_cmd **dev_cmd_ptr;
752
753 dev_cmd_ptr = (void *)((u8 *)skb->cb +
754 trans->txqs.dev_cmd_offs);
755
756 *dev_cmd_ptr = dev_cmd;
757 __skb_queue_tail(&txq->overflow_q, skb);
758 spin_unlock(&txq->lock);
759 return 0;
760 }
761 }
762
763 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
764
765 /* Set up driver data for this TFD */
766 txq->entries[idx].skb = skb;
767 txq->entries[idx].cmd = dev_cmd;
768
769 dev_cmd->hdr.sequence =
770 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
771 INDEX_TO_SEQ(idx)));
772
773 /* Set up first empty entry in queue's array of Tx/cmd buffers */
774 out_meta = &txq->entries[idx].meta;
775 out_meta->flags = 0;
776
777 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
778 if (!tfd) {
779 spin_unlock(&txq->lock);
780 return -1;
781 }
782
783 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
784 struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
785 (void *)dev_cmd->payload;
786
787 cmd_len = le16_to_cpu(tx_cmd_gen3->len);
788 } else {
789 struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
790 (void *)dev_cmd->payload;
791
792 cmd_len = le16_to_cpu(tx_cmd_gen2->len);
793 }
794
795 /* Set up entry for this TFD in Tx byte-count array */
796 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
797 iwl_txq_gen2_get_num_tbs(trans, tfd));
798
799 /* start timer if queue currently empty */
800 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
801 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
802
803 /* Tell device the write index *just past* this latest filled TFD */
804 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
805 iwl_txq_inc_wr_ptr(trans, txq);
806 /*
807 * At this point the frame is "transmitted" successfully
808 * and we will get a TX status notification eventually.
809 */
810 spin_unlock(&txq->lock);
811 return 0;
812}
813
814/*************** HOST COMMAND QUEUE FUNCTIONS *****/
815
816/*
817 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
818 */
819void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
820{
821 struct iwl_txq *txq = trans->txqs.txq[txq_id];
822
823 spin_lock_bh(&txq->lock);
824 while (txq->write_ptr != txq->read_ptr) {
825 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
826 txq_id, txq->read_ptr);
827
828 if (txq_id != trans->txqs.cmd.q_id) {
829 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
830 struct sk_buff *skb = txq->entries[idx].skb;
831
832 if (!WARN_ON_ONCE(!skb))
833 iwl_txq_free_tso_page(trans, skb);
834 }
835 iwl_txq_gen2_free_tfd(trans, txq);
836 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
837 }
838
839 while (!skb_queue_empty(&txq->overflow_q)) {
840 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
841
842 iwl_op_mode_free_skb(trans->op_mode, skb);
843 }
844
845 spin_unlock_bh(&txq->lock);
846
847 /* just in case - this queue may have been stopped */
848 iwl_wake_queue(trans, txq);
849}
850
851static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
852 struct iwl_txq *txq)
853{
854 struct device *dev = trans->dev;
855
856 /* De-alloc circular buffer of TFDs */
857 if (txq->tfds) {
858 dma_free_coherent(dev,
859 trans->txqs.tfd.size * txq->n_window,
860 txq->tfds, txq->dma_addr);
861 dma_free_coherent(dev,
862 sizeof(*txq->first_tb_bufs) * txq->n_window,
863 txq->first_tb_bufs, txq->first_tb_dma);
864 }
865
866 kfree(txq->entries);
867 if (txq->bc_tbl.addr)
868 dma_pool_free(trans->txqs.bc_pool,
869 txq->bc_tbl.addr, txq->bc_tbl.dma);
870 kfree(txq);
871}
872
873/*
874 * iwl_txq_gen2_free - Deallocate DMA queue.
875 * @txq: Transmit queue to deallocate.
876 *
877 * Empty queue by removing and destroying all BD's.
878 * Free all buffers.
879 * 0-fill, but do not free "txq" descriptor structure.
880 */
881static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
882{
883 struct iwl_txq *txq;
884 int i;
885
886 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
887 "queue %d out of range", txq_id))
888 return;
889
890 txq = trans->txqs.txq[txq_id];
891
892 if (WARN_ON(!txq))
893 return;
894
895 iwl_txq_gen2_unmap(trans, txq_id);
896
897 /* De-alloc array of command/tx buffers */
898 if (txq_id == trans->txqs.cmd.q_id)
899 for (i = 0; i < txq->n_window; i++) {
900 kfree_sensitive(txq->entries[i].cmd);
901 kfree_sensitive(txq->entries[i].free_buf);
902 }
903 del_timer_sync(&txq->stuck_timer);
904
905 iwl_txq_gen2_free_memory(trans, txq);
906
907 trans->txqs.txq[txq_id] = NULL;
908
909 clear_bit(txq_id, trans->txqs.queue_used);
910}
911
912/*
913 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
914 */
915static int iwl_queue_init(struct iwl_txq *q, int slots_num)
916{
917 q->n_window = slots_num;
918
919 /* slots_num must be power-of-two size, otherwise
920 * iwl_txq_get_cmd_index is broken. */
921 if (WARN_ON(!is_power_of_2(slots_num)))
922 return -EINVAL;
923
924 q->low_mark = q->n_window / 4;
925 if (q->low_mark < 4)
926 q->low_mark = 4;
927
928 q->high_mark = q->n_window / 8;
929 if (q->high_mark < 2)
930 q->high_mark = 2;
931
932 q->write_ptr = 0;
933 q->read_ptr = 0;
934
935 return 0;
936}
937
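/*
 * iwl_txq_init - initialize an already-allocated TX queue: high/low marks,
 * read/write pointers, lock and overflow queue
 */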
938int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
939 bool cmd_queue)
940{
941 int ret;
942 u32 tfd_queue_max_size =
943 trans->trans_cfg->base_params->max_tfd_queue_size;
944
945 txq->need_update = false;
946
947 /* max_tfd_queue_size must be power-of-two size, otherwise
948 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
949 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
950 "Max tfd queue size must be a power of two, but is %d",
951 tfd_queue_max_size))
952 return -EINVAL;
953
954 /* Initialize queue's high/low-water marks, and head/tail indexes */
955 ret = iwl_queue_init(txq, slots_num);
956 if (ret)
957 return ret;
958
959 spin_lock_init(&txq->lock);
960
961 if (cmd_queue) {
962 static struct lock_class_key iwl_txq_cmd_queue_lock_class;
963
964 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
965 }
966
967 __skb_queue_head_init(&txq->overflow_q);
968
969 return 0;
970}
971
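/* Free the chain of TSO header / workaround pages attached to this skb */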
972void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
973{
974 struct page **page_ptr;
975 struct page *next;
976
977 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
978 next = *page_ptr;
979 *page_ptr = NULL;
980
981 while (next) {
982 struct page *tmp = next;
983
984 next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
985 sizeof(void *));
986 __free_page(tmp);
987 }
988}
989
990void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
991{
992 u32 txq_id = txq->id;
993 u32 status;
994 bool active;
995 u8 fifo;
996
997 if (trans->trans_cfg->gen2) {
998 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
999 txq->read_ptr, txq->write_ptr);
1000 /* TODO: access new SCD registers and dump them */
1001 return;
1002 }
1003
1004 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
1005 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
1006 active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
1007
1008 IWL_ERR(trans,
1009 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
1010 txq_id, active ? "" : "in", fifo,
1011 jiffies_to_msecs(txq->wd_timeout),
1012 txq->read_ptr, txq->write_ptr,
1013 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
1014 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1015 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
1016 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1017 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
1018}
1019
1020static void iwl_txq_stuck_timer(struct timer_list *t)
1021{
1022 struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
1023 struct iwl_trans *trans = txq->trans;
1024
1025 spin_lock(&txq->lock);
1026 /* check if triggered erroneously */
1027 if (txq->read_ptr == txq->write_ptr) {
1028 spin_unlock(&txq->lock);
1029 return;
1030 }
1031 spin_unlock(&txq->lock);
1032
1033 iwl_txq_log_scd_error(trans, txq);
1034
1035 iwl_force_nmi(trans);
1036}
1037
1038static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
1039 struct iwl_tfd *tfd)
1040{
1041 tfd->num_tbs = 0;
1042
1043 iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma,
1044 trans->invalid_tx_cmd.size);
1045}
1046
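/*
 * iwl_txq_alloc - allocate the TFD ring, the first-TB buffers and the
 * driver-side entries array for a TX queue and mark every TFD invalid;
 * no hardware programming is done here
 */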
1047int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
1048 bool cmd_queue)
1049{
1050 size_t num_entries = trans->trans_cfg->gen2 ?
1051 slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
1052 size_t tfd_sz;
1053 size_t tb0_buf_sz;
1054 int i;
1055
1056 if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
1057 return -EINVAL;
1058
1059 if (WARN_ON(txq->entries || txq->tfds))
1060 return -EINVAL;
1061
1062 tfd_sz = trans->txqs.tfd.size * num_entries;
1063
1064 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
1065 txq->trans = trans;
1066
1067 txq->n_window = slots_num;
1068
1069 txq->entries = kcalloc(slots_num,
1070 sizeof(struct iwl_pcie_txq_entry),
1071 GFP_KERNEL);
1072
1073 if (!txq->entries)
1074 goto error;
1075
1076 if (cmd_queue)
1077 for (i = 0; i < slots_num; i++) {
1078 txq->entries[i].cmd =
1079 kmalloc(sizeof(struct iwl_device_cmd),
1080 GFP_KERNEL);
1081 if (!txq->entries[i].cmd)
1082 goto error;
1083 }
1084
1085 /* Circular buffer of transmit frame descriptors (TFDs),
1086 * shared with device */
1087 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
1088 &txq->dma_addr, GFP_KERNEL);
1089 if (!txq->tfds)
1090 goto error;
1091
1092 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
1093
1094 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
1095
1096 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
1097 &txq->first_tb_dma,
1098 GFP_KERNEL);
1099 if (!txq->first_tb_bufs)
1100 goto err_free_tfds;
1101
1102 for (i = 0; i < num_entries; i++) {
1103 void *tfd = iwl_txq_get_tfd(trans, txq, i);
1104
1105 if (trans->trans_cfg->gen2)
1106 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
1107 else
1108 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
1109 }
1110
1111 return 0;
1112err_free_tfds:
1113 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
1114 txq->tfds = NULL;
1115error:
1116 if (txq->entries && cmd_queue)
1117 for (i = 0; i < slots_num; i++)
1118 kfree(txq->entries[i].cmd);
1119 kfree(txq->entries);
1120 txq->entries = NULL;
1121
1122 return -ENOMEM;
1123}
1124
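/*
 * Allocate a dynamically-sized TX queue: its byte-count table from the
 * DMA pool plus the queue structure, ring and stuck timer.
 */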
1125static struct iwl_txq *
1126iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
1127{
1128 size_t bc_tbl_size, bc_tbl_entries;
1129 struct iwl_txq *txq;
1130 int ret;
1131
1132 WARN_ON(!trans->txqs.bc_tbl_size);
1133
1134 bc_tbl_size = trans->txqs.bc_tbl_size;
1135 bc_tbl_entries = bc_tbl_size / sizeof(u16);
1136
1137 if (WARN_ON(size > bc_tbl_entries))
1138 return ERR_PTR(-EINVAL);
1139
1140 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
1141 if (!txq)
1142 return ERR_PTR(-ENOMEM);
1143
1144 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
1145 &txq->bc_tbl.dma);
1146 if (!txq->bc_tbl.addr) {
1147 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
1148 kfree(txq);
1149 return ERR_PTR(-ENOMEM);
1150 }
1151
1152 ret = iwl_txq_alloc(trans, txq, size, false);
1153 if (ret) {
1154 IWL_ERR(trans, "Tx queue alloc failed\n");
1155 goto error;
1156 }
1157 ret = iwl_txq_init(trans, txq, size, false);
1158 if (ret) {
1159 IWL_ERR(trans, "Tx queue init failed\n");
1160 goto error;
1161 }
1162
1163 txq->wd_timeout = msecs_to_jiffies(timeout);
1164
1165 return txq;
1166
1167error:
1168 iwl_txq_gen2_free_memory(trans, txq);
1169 return ERR_PTR(ret);
1170}
1171
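/*
 * Parse the firmware response to the queue allocation command: take the
 * queue id and initial write pointer from it and register the queue in the
 * transport, or free everything on error.
 */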
1172static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
1173 struct iwl_host_cmd *hcmd)
1174{
1175 struct iwl_tx_queue_cfg_rsp *rsp;
1176 int ret, qid;
1177 u32 wr_ptr;
1178
1179 if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
1180 sizeof(*rsp))) {
1181 ret = -EINVAL;
1182 goto error_free_resp;
1183 }
1184
1185 rsp = (void *)hcmd->resp_pkt->data;
1186 qid = le16_to_cpu(rsp->queue_number);
1187 wr_ptr = le16_to_cpu(rsp->write_pointer);
1188
1189 if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
1190 WARN_ONCE(1, "queue index %d unsupported", qid);
1191 ret = -EIO;
1192 goto error_free_resp;
1193 }
1194
1195 if (test_and_set_bit(qid, trans->txqs.queue_used)) {
1196 WARN_ONCE(1, "queue %d already used", qid);
1197 ret = -EIO;
1198 goto error_free_resp;
1199 }
1200
1201 if (WARN_ONCE(trans->txqs.txq[qid],
1202 "queue %d already allocated\n", qid)) {
1203 ret = -EIO;
1204 goto error_free_resp;
1205 }
1206
1207 txq->id = qid;
1208 trans->txqs.txq[qid] = txq;
1209 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
1210
1211 /* Place first TFD at index corresponding to start sequence number */
1212 txq->read_ptr = wr_ptr;
1213 txq->write_ptr = wr_ptr;
1214
1215 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1216
1217 iwl_free_resp(hcmd);
1218 return qid;
1219
1220error_free_resp:
1221 iwl_free_resp(hcmd);
1222 iwl_txq_gen2_free_memory(trans, txq);
1223 return ret;
1224}
1225
1226int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
1227 u8 tid, int size, unsigned int timeout)
1228{
1229 struct iwl_txq *txq;
1230 union {
1231 struct iwl_tx_queue_cfg_cmd old;
1232 struct iwl_scd_queue_cfg_cmd new;
1233 } cmd;
1234 struct iwl_host_cmd hcmd = {
1235 .flags = CMD_WANT_SKB,
1236 };
1237 int ret;
1238
1239 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
1240 trans->hw_rev_step == SILICON_A_STEP)
1241 size = 4096;
1242
1243 txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
1244 if (IS_ERR(txq))
1245 return PTR_ERR(txq);
1246
1247 if (trans->txqs.queue_alloc_cmd_ver == 0) {
1248 memset(&cmd.old, 0, sizeof(cmd.old));
1249 cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
1250 cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1251 cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1252 cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
1253 cmd.old.tid = tid;
1254
1255 if (hweight32(sta_mask) != 1) {
1256 ret = -EINVAL;
1257 goto error;
1258 }
1259 cmd.old.sta_id = ffs(sta_mask) - 1;
1260
1261 hcmd.id = SCD_QUEUE_CFG;
1262 hcmd.len[0] = sizeof(cmd.old);
1263 hcmd.data[0] = &cmd.old;
1264 } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
1265 memset(&cmd.new, 0, sizeof(cmd.new));
1266 cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
1267 cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
1268 cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
1269 cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1270 cmd.new.u.add.flags = cpu_to_le32(flags);
1271 cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
1272 cmd.new.u.add.tid = tid;
1273
1274 hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
1275 hcmd.len[0] = sizeof(cmd.new);
1276 hcmd.data[0] = &cmd.new;
1277 } else {
1278 ret = -EOPNOTSUPP;
1279 goto error;
1280 }
1281
1282 ret = iwl_trans_send_cmd(trans, &hcmd);
1283 if (ret)
1284 goto error;
1285
1286 return iwl_txq_alloc_response(trans, txq, &hcmd);
1287
1288error:
1289 iwl_txq_gen2_free_memory(trans, txq);
1290 return ret;
1291}
1292
1293void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
1294{
1295 if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
1296 "queue %d out of range", queue))
1297 return;
1298
1299 /*
1300 * Upon HW Rfkill - we stop the device, and then stop the queues
1301 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1302 * allow the op_mode to call txq_disable after it already called
1303 * stop_device.
1304 */
1305 if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
1306 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1307 "queue %d not used", queue);
1308 return;
1309 }
1310
1311 iwl_txq_gen2_free(trans, queue);
1312
1313 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1314}
1315
1316void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
1317{
1318 int i;
1319
1320 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
1321
1322 /* Free all TX queues */
1323 for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
1324 if (!trans->txqs.txq[i])
1325 continue;
1326
1327 iwl_txq_gen2_free(trans, i);
1328 }
1329}
1330
1331int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
1332{
1333 struct iwl_txq *queue;
1334 int ret;
1335
1336 /* alloc and init the tx queue */
1337 if (!trans->txqs.txq[txq_id]) {
1338 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1339 if (!queue) {
1340 IWL_ERR(trans, "Not enough memory for tx queue\n");
1341 return -ENOMEM;
1342 }
1343 trans->txqs.txq[txq_id] = queue;
1344 ret = iwl_txq_alloc(trans, queue, queue_size, true);
1345 if (ret) {
1346 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1347 goto error;
1348 }
1349 } else {
1350 queue = trans->txqs.txq[txq_id];
1351 }
1352
1353 ret = iwl_txq_init(trans, queue, queue_size,
1354 (txq_id == trans->txqs.cmd.q_id));
1355 if (ret) {
1356 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1357 goto error;
1358 }
1359 trans->txqs.txq[txq_id]->id = txq_id;
1360 set_bit(txq_id, trans->txqs.queue_used);
1361
1362 return 0;
1363
1364error:
1365 iwl_txq_gen2_tx_free(trans);
1366 return ret;
1367}
1368
1369static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
1370 struct iwl_tfd *tfd, u8 idx)
1371{
1372 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
1373 dma_addr_t addr;
1374 dma_addr_t hi_len;
1375
1376 addr = get_unaligned_le32(&tb->lo);
1377
1378 if (sizeof(dma_addr_t) <= sizeof(u32))
1379 return addr;
1380
1381 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
1382
1383 /*
1384 * shift by 16 twice to avoid warnings on 32-bit
1385 * (where this code never runs anyway due to the
1386 * if statement above)
1387 */
1388 return addr | ((hi_len << 16) << 16);
1389}
1390
1391void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
1392 struct iwl_cmd_meta *meta,
1393 struct iwl_txq *txq, int index)
1394{
1395 int i, num_tbs;
1396 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
1397
1398 /* Sanity check on number of chunks */
1399 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
1400
1401 if (num_tbs > trans->txqs.tfd.max_tbs) {
1402 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
1403 /* @todo issue fatal error, it is quite a serious situation */
1404 return;
1405 }
1406
1407 /* first TB is never freed - it's the bidirectional DMA data */
1408
1409 for (i = 1; i < num_tbs; i++) {
1410 if (meta->tbs & BIT(i))
1411 dma_unmap_page(trans->dev,
1412 iwl_txq_gen1_tfd_tb_get_addr(trans,
1413 tfd, i),
1414 iwl_txq_gen1_tfd_tb_get_len(trans,
1415 tfd, i),
1416 DMA_TO_DEVICE);
1417 else
1418 dma_unmap_single(trans->dev,
1419 iwl_txq_gen1_tfd_tb_get_addr(trans,
1420 tfd, i),
1421 iwl_txq_gen1_tfd_tb_get_len(trans,
1422 tfd, i),
1423 DMA_TO_DEVICE);
1424 }
1425
1426 meta->tbs = 0;
1427
1428 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
1429}
1430
1431#define IWL_TX_CRC_SIZE 4
1432#define IWL_TX_DELIMITER_SIZE 4
1433
1434/*
1435 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1436 */
1437void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
1438 struct iwl_txq *txq, u16 byte_cnt,
1439 int num_tbs)
1440{
1441 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
1442 int write_ptr = txq->write_ptr;
1443 int txq_id = txq->id;
1444 u8 sec_ctl = 0;
1445 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1446 __le16 bc_ent;
1447 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
1448 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1449 u8 sta_id = tx_cmd->sta_id;
1450
1451 scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1452
1453 sec_ctl = tx_cmd->sec_ctl;
1454
1455 switch (sec_ctl & TX_CMD_SEC_MSK) {
1456 case TX_CMD_SEC_CCM:
1457 len += IEEE80211_CCMP_MIC_LEN;
1458 break;
1459 case TX_CMD_SEC_TKIP:
1460 len += IEEE80211_TKIP_ICV_LEN;
1461 break;
1462 case TX_CMD_SEC_WEP:
1463 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
1464 break;
1465 }
1466 if (trans->txqs.bc_table_dword)
1467 len = DIV_ROUND_UP(len, 4);
1468
1469 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
1470 return;
1471
1472 bc_ent = cpu_to_le16(len | (sta_id << 12));
1473
1474 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1475
1476 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1477 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1478 bc_ent;
1479}
1480
1481void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
1482 struct iwl_txq *txq)
1483{
1484 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1485 int txq_id = txq->id;
1486 int read_ptr = txq->read_ptr;
1487 u8 sta_id = 0;
1488 __le16 bc_ent;
1489 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
1490 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1491
1492 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
1493
1494 if (txq_id != trans->txqs.cmd.q_id)
1495 sta_id = tx_cmd->sta_id;
1496
1497 bc_ent = cpu_to_le16(1 | (sta_id << 12));
1498
1499 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
1500
1501 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
1502 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
1503 bc_ent;
1504}
1505
1506/*
1507 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
1508 * @trans - transport private data
1509 * @txq - tx queue
1511 *
1512 * Does NOT advance any TFD circular buffer read/write indexes
1513 * Does NOT free the TFD itself (which is within circular buffer)
1514 */
1515void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
1516{
1517 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
1518 * idx is bounded by n_window
1519 */
1520 int rd_ptr = txq->read_ptr;
1521 int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
1522 struct sk_buff *skb;
1523
1524 lockdep_assert_held(&txq->lock);
1525
1526 if (!txq->entries)
1527 return;
1528
1529 /* We have only q->n_window txq->entries, but we use
1530 * TFD_QUEUE_SIZE_MAX tfds
1531 */
1532 if (trans->trans_cfg->gen2)
1533 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
1534 iwl_txq_get_tfd(trans, txq, rd_ptr));
1535 else
1536 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
1537 txq, rd_ptr);
1538
1539 /* free SKB */
1540 skb = txq->entries[idx].skb;
1541
1542 /* Can be called from irqs-disabled context
1543 * If skb is not NULL, it means that the whole queue is being
1544 * freed and that the queue is not empty - free the skb
1545 */
1546 if (skb) {
1547 iwl_op_mode_free_skb(trans->op_mode, skb);
1548 txq->entries[idx].skb = NULL;
1549 }
1550}
1551
1552void iwl_txq_progress(struct iwl_txq *txq)
1553{
1554 lockdep_assert_held(&txq->lock);
1555
1556 if (!txq->wd_timeout)
1557 return;
1558
1559 /*
1560 * station is asleep and we send data - that must
1561 * be uAPSD or PS-Poll. Don't rearm the timer.
1562 */
1563 if (txq->frozen)
1564 return;
1565
1566 /*
1567 * if empty delete timer, otherwise move timer forward
1568 * since we're making progress on this queue
1569 */
1570 if (txq->read_ptr == txq->write_ptr)
1571 del_timer(&txq->stuck_timer);
1572 else
1573 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1574}
1575
1576/* Frees buffers until index _not_ inclusive */
1577void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1578 struct sk_buff_head *skbs, bool is_flush)
1579{
1580 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1581 int tfd_num, read_ptr, last_to_free;
1582
1583 /* This function is not meant to release cmd queue*/
1584 if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
1585 return;
1586
1587 if (WARN_ON(!txq))
1588 return;
1589
1590 tfd_num = iwl_txq_get_cmd_index(txq, ssn);
1591 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1592
1593 spin_lock_bh(&txq->lock);
1594
1595 if (!test_bit(txq_id, trans->txqs.queue_used)) {
1596 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
1597 txq_id, ssn);
1598 goto out;
1599 }
1600
1601 if (read_ptr == tfd_num)
1602 goto out;
1603
1604 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1605 txq_id, txq->read_ptr, tfd_num, ssn);
1606
1607 /* Since we free until index _not_ inclusive, the one before index is
1608 * the last we will free. This one must be used */
1609 last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
1610
1611 if (!iwl_txq_used(txq, last_to_free)) {
1612 IWL_ERR(trans,
1613 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
1614 __func__, txq_id, last_to_free,
1615 trans->trans_cfg->base_params->max_tfd_queue_size,
1616 txq->write_ptr, txq->read_ptr);
1617
1618 iwl_op_mode_time_point(trans->op_mode,
1619 IWL_FW_INI_TIME_POINT_FAKE_TX,
1620 NULL);
1621 goto out;
1622 }
1623
1624 if (WARN_ON(!skb_queue_empty(skbs)))
1625 goto out;
1626
1627 for (;
1628 read_ptr != tfd_num;
1629 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
1630 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
1631 struct sk_buff *skb = txq->entries[read_ptr].skb;
1632
1633 if (WARN_ON_ONCE(!skb))
1634 continue;
1635
1636 iwl_txq_free_tso_page(trans, skb);
1637
1638 __skb_queue_tail(skbs, skb);
1639
1640 txq->entries[read_ptr].skb = NULL;
1641
1642 if (!trans->trans_cfg->gen2)
1643 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
1644
1645 iwl_txq_free_tfd(trans, txq);
1646 }
1647
1648 iwl_txq_progress(txq);
1649
1650 if (iwl_txq_space(trans, txq) > txq->low_mark &&
1651 test_bit(txq_id, trans->txqs.queue_stopped)) {
1652 struct sk_buff_head overflow_skbs;
1653 struct sk_buff *skb;
1654
1655 __skb_queue_head_init(&overflow_skbs);
1656 skb_queue_splice_init(&txq->overflow_q,
1657 is_flush ? skbs : &overflow_skbs);
1658
1659 /*
1660 * We are going to transmit from the overflow queue.
1661 * Remember this state so that wait_for_txq_empty will know we
1662 * are adding more packets to the TFD queue. It cannot rely on
1663 * the state of &txq->overflow_q, as we just emptied it, but
1664 * haven't TXed the content yet.
1665 */
1666 txq->overflow_tx = true;
1667
1668 /*
1669 * This is tricky: we are in the reclaim path, which is not
1670 * re-entrant, so no one will try to access the
1671 * txq data from that path. We stopped tx, so we can't
1672 * have tx as well. Bottom line, we can unlock and re-lock
1673 * later.
1674 */
1675 spin_unlock_bh(&txq->lock);
1676
1677 while ((skb = __skb_dequeue(&overflow_skbs))) {
1678 struct iwl_device_tx_cmd *dev_cmd_ptr;
1679
1680 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
1681 trans->txqs.dev_cmd_offs);
1682
1683 /*
1684 * Note that we can very well be overflowing again.
1685 * In that case, iwl_txq_space will be small again
1686 * and we won't wake mac80211's queue.
1687 */
1688 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
1689 }
1690
1691 if (iwl_txq_space(trans, txq) > txq->low_mark)
1692 iwl_wake_queue(trans, txq);
1693
1694 spin_lock_bh(&txq->lock);
1695 txq->overflow_tx = false;
1696 }
1697
1698out:
1699 spin_unlock_bh(&txq->lock);
1700}
1701
1702/* Set wr_ptr of specific device and txq */
1703void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
1704{
1705 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1706
1707 spin_lock_bh(&txq->lock);
1708
1709 txq->write_ptr = ptr;
1710 txq->read_ptr = txq->write_ptr;
1711
1712 spin_unlock_bh(&txq->lock);
1713}
1714
1715void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
1716 bool freeze)
1717{
1718 int queue;
1719
1720 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
1721 struct iwl_txq *txq = trans->txqs.txq[queue];
1722 unsigned long now;
1723
1724 spin_lock_bh(&txq->lock);
1725
1726 now = jiffies;
1727
1728 if (txq->frozen == freeze)
1729 goto next_queue;
1730
1731 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
1732 freeze ? "Freezing" : "Waking", queue);
1733
1734 txq->frozen = freeze;
1735
1736 if (txq->read_ptr == txq->write_ptr)
1737 goto next_queue;
1738
1739 if (freeze) {
1740 if (unlikely(time_after(now,
1741 txq->stuck_timer.expires))) {
1742 /*
1743 * The timer should have fired, maybe it is
1744 * spinning right now on the lock.
1745 */
1746 goto next_queue;
1747 }
1748 /* remember how long until the timer fires */
1749 txq->frozen_expiry_remainder =
1750 txq->stuck_timer.expires - now;
1751 del_timer(&txq->stuck_timer);
1752 goto next_queue;
1753 }
1754
1755 /*
1756 * Wake a non-empty queue -> arm timer with the
1757 * remainder before it froze
1758 */
1759 mod_timer(&txq->stuck_timer,
1760 now + txq->frozen_expiry_remainder);
1761
1762next_queue:
1763 spin_unlock_bh(&txq->lock);
1764 }
1765}
1766
1767#define HOST_COMPLETE_TIMEOUT (2 * HZ)
1768
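/*
 * Send a host command and block until the firmware completes it, or until
 * HOST_COMPLETE_TIMEOUT expires, in which case the command is cancelled
 * and -ETIMEDOUT returned.
 */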
1769static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
1770 struct iwl_host_cmd *cmd)
1771{
1772 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
1773 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1774 int cmd_idx;
1775 int ret;
1776
1777 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
1778
1779 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
1780 &trans->status),
1781 "Command %s: a command is already active!\n", cmd_str))
1782 return -EIO;
1783
1784 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
1785
1786 cmd_idx = trans->ops->send_cmd(trans, cmd);
1787 if (cmd_idx < 0) {
1788 ret = cmd_idx;
1789 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1790 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
1791 cmd_str, ret);
1792 return ret;
1793 }
1794
1795 ret = wait_event_timeout(trans->wait_command_queue,
1796 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
1797 &trans->status),
1798 HOST_COMPLETE_TIMEOUT);
1799 if (!ret) {
1800 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
1801 cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1802
1803 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
1804 txq->read_ptr, txq->write_ptr);
1805
1806 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1807 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1808 cmd_str);
1809 ret = -ETIMEDOUT;
1810
1811 iwl_trans_sync_nmi(trans);
1812 goto cancel;
1813 }
1814
1815 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
1816 if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
1817 &trans->status)) {
1818 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
1819 dump_stack();
1820 }
1821 ret = -EIO;
1822 goto cancel;
1823 }
1824
1825 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1826 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1827 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1828 ret = -ERFKILL;
1829 goto cancel;
1830 }
1831
1832 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1833 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
1834 ret = -EIO;
1835 goto cancel;
1836 }
1837
1838 return 0;
1839
1840cancel:
1841 if (cmd->flags & CMD_WANT_SKB) {
1842 /*
1843 * Cancel the CMD_WANT_SKB flag for the cmd in the
1844 * TX cmd queue. Otherwise, if the cmd comes
1845 * in later, it may set an invalid
1846 * address (cmd->meta.source).
1847 */
1848 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1849 }
1850
1851 if (cmd->resp_pkt) {
1852 iwl_free_resp(cmd);
1853 cmd->resp_pkt = NULL;
1854 }
1855
1856 return ret;
1857}
1858
1859int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
1860 struct iwl_host_cmd *cmd)
1861{
1862 /* Make sure the NIC is still alive in the bus */
1863 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1864 return -ENODEV;
1865
1866 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1867 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1868 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1869 cmd->id);
1870 return -ERFKILL;
1871 }
1872
1873 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
1874 !(cmd->flags & CMD_SEND_IN_D3))) {
1875 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
1876 return -EHOSTDOWN;
1877 }
1878
1879 if (cmd->flags & CMD_ASYNC) {
1880 int ret;
1881
1882 /* An asynchronous command cannot expect an SKB to be set. */
1883 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1884 return -EINVAL;
1885
1886 ret = trans->ops->send_cmd(trans, cmd);
1887 if (ret < 0) {
1888 IWL_ERR(trans,
1889 "Error sending %s: enqueue_hcmd failed: %d\n",
1890 iwl_get_cmd_string(trans, cmd->id), ret);
1891 return ret;
1892 }
1893 return 0;
1894 }
1895
1896 return iwl_trans_txq_send_hcmd_sync(trans, cmd);
1897}
1898
1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2020-2024 Intel Corporation
4 */
5#include <net/tso.h>
6#include <linux/tcp.h>
7
8#include "iwl-debug.h"
9#include "iwl-io.h"
10#include "fw/api/commands.h"
11#include "fw/api/tx.h"
12#include "fw/api/datapath.h"
13#include "fw/api/debug.h"
14#include "queue/tx.h"
15#include "iwl-fh.h"
16#include "iwl-scd.h"
17#include <linux/dmapool.h>
18
19/*
20 * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
21 */
22static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
23 struct iwl_txq *txq, u16 byte_cnt,
24 int num_tbs)
25{
26 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
27 u8 filled_tfd_size, num_fetch_chunks;
28 u16 len = byte_cnt;
29 __le16 bc_ent;
30
31 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
32 return;
33
34 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
35 num_tbs * sizeof(struct iwl_tfh_tb);
36 /*
37 * filled_tfd_size contains the number of filled bytes in the TFD.
38 * Dividing it by 64 will give the number of chunks to fetch
39 * to SRAM- 0 for one chunk, 1 for 2 and so on.
40 * If, for example, TFD contains only 3 TBs then 32 bytes
41 * of the TFD are used, and only one chunk of 64 bytes should
42 * be fetched
43 */
44 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
45
46 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
47 struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
48
49 /* Starting from AX210, the HW expects bytes */
50 WARN_ON(trans->txqs.bc_table_dword);
51 WARN_ON(len > 0x3FFF);
52 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
53 scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
54 } else {
55 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
56
57 /* Before AX210, the HW expects DW */
58 WARN_ON(!trans->txqs.bc_table_dword);
59 len = DIV_ROUND_UP(len, 4);
60 WARN_ON(len > 0xFFF);
61 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
62 scd_bc_tbl->tfd_offset[idx] = bc_ent;
63 }
64}
65
66/*
67 * iwl_txq_inc_wr_ptr - Send new write index to hardware
68 */
69void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
70{
71 lockdep_assert_held(&txq->lock);
72
73 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
74
75 /*
76 * if not in power-save mode, uCode will never sleep when we're
77 * trying to tx (during RFKILL, we're not trying to tx).
78 */
79 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
80}
81
82static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
83 struct iwl_tfh_tfd *tfd)
84{
85 return le16_to_cpu(tfd->num_tbs) & 0x1f;
86}
87
88int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
89 dma_addr_t addr, u16 len)
90{
91 int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
92 struct iwl_tfh_tb *tb;
93
94 /* Only WARN here so we know about the issue, but we mess up our
95 * unmap path because not every place currently checks for errors
96 * returned from this function - it can only return an error if
97 * there's no more space, and so when we know there is enough we
98 * don't always check ...
99 */
100 WARN(iwl_txq_crosses_4g_boundary(addr, len),
101 "possible DMA problem with iova:0x%llx, len:%d\n",
102 (unsigned long long)addr, len);
103
104 if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
105 return -EINVAL;
106 tb = &tfd->tbs[idx];
107
108 /* Each TFD can point to a maximum max_tbs Tx buffers */
109 if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
110 IWL_ERR(trans, "Error can not send more than %d chunks\n",
111 trans->txqs.tfd.max_tbs);
112 return -EINVAL;
113 }
114
115 put_unaligned_le64(addr, &tb->addr);
116 tb->tb_len = cpu_to_le16(len);
117
118 tfd->num_tbs = cpu_to_le16(idx + 1);
119
120 return idx;
121}
122
123static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
124 struct iwl_tfh_tfd *tfd)
125{
126 tfd->num_tbs = 0;
127
128 iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
129 trans->invalid_tx_cmd.size);
130}
131
132void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
133 struct iwl_tfh_tfd *tfd)
134{
135 int i, num_tbs;
136
137 /* Sanity check on number of chunks */
138 num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
139
140 if (num_tbs > trans->txqs.tfd.max_tbs) {
141 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
142 return;
143 }
144
145 /* first TB is never freed - it's the bidirectional DMA data */
146 for (i = 1; i < num_tbs; i++) {
147 if (meta->tbs & BIT(i))
148 dma_unmap_page(trans->dev,
149 le64_to_cpu(tfd->tbs[i].addr),
150 le16_to_cpu(tfd->tbs[i].tb_len),
151 DMA_TO_DEVICE);
152 else
153 dma_unmap_single(trans->dev,
154 le64_to_cpu(tfd->tbs[i].addr),
155 le16_to_cpu(tfd->tbs[i].tb_len),
156 DMA_TO_DEVICE);
157 }
158
159 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
160}
161
162void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
163{
164 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
165 * idx is bounded by n_window
166 */
167 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
168 struct sk_buff *skb;
169
170 lockdep_assert_held(&txq->lock);
171
172 if (!txq->entries)
173 return;
174
175 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
176 iwl_txq_get_tfd(trans, txq, idx));
177
178 skb = txq->entries[idx].skb;
179
180 /* Can be called from irqs-disabled context
181 * If skb is not NULL, it means that the whole queue is being
182 * freed and that the queue is not empty - free the skb
183 */
184 if (skb) {
185 iwl_op_mode_free_skb(trans->op_mode, skb);
186 txq->entries[idx].skb = NULL;
187 }
188}
189
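/*
 * Allocate a page used to hold a copy of a TB that would otherwise
 * trigger the 2^32-boundary hardware issue (see the workaround in
 * iwl_txq_gen2_set_tb_with_wa() below). Pages are chained through a
 * pointer stored in their last bytes and referenced from the skb's cb,
 * so iwl_txq_free_tso_page() can free the whole chain later.
 */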
190static struct page *get_workaround_page(struct iwl_trans *trans,
191 struct sk_buff *skb)
192{
193 struct page **page_ptr;
194 struct page *ret;
195
196 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
197
198 ret = alloc_page(GFP_ATOMIC);
199 if (!ret)
200 return NULL;
201
202 /* set the chaining pointer to the previous page if there */
203 *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
204 *page_ptr = ret;
205
206 return ret;
207}
208
209/*
210 * Add a TB and if needed apply the FH HW bug workaround;
211 * meta != NULL indicates that it's a page mapping and we
212 * need to dma_unmap_page() and set the meta->tbs bit in
213 * this case.
214 */
215static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
216 struct sk_buff *skb,
217 struct iwl_tfh_tfd *tfd,
218 dma_addr_t phys, void *virt,
219 u16 len, struct iwl_cmd_meta *meta)
220{
221 dma_addr_t oldphys = phys;
222 struct page *page;
223 int ret;
224
225 if (unlikely(dma_mapping_error(trans->dev, phys)))
226 return -ENOMEM;
227
228 if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
229 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
230
231 if (ret < 0)
232 goto unmap;
233
234 if (meta)
235 meta->tbs |= BIT(ret);
236
237 ret = 0;
238 goto trace;
239 }
240
241 /*
242 * Work around a hardware bug. If (as expressed in the
243 * condition above) the TB ends on or crosses a 2^32 (4 GB)
244 * address boundary, then the next TB may be accessed with the
245 * wrong address.
246 * To work around it, copy the data elsewhere and make
247 * a new mapping for it so the device will not fail.
248 */
249
250 if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
251 ret = -ENOBUFS;
252 goto unmap;
253 }
254
255 page = get_workaround_page(trans, skb);
256 if (!page) {
257 ret = -ENOMEM;
258 goto unmap;
259 }
260
261 memcpy(page_address(page), virt, len);
262
263 phys = dma_map_single(trans->dev, page_address(page), len,
264 DMA_TO_DEVICE);
265 if (unlikely(dma_mapping_error(trans->dev, phys)))
266 return -ENOMEM;
267 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
268 if (ret < 0) {
269 /* unmap the new allocation as single */
270 oldphys = phys;
271 meta = NULL;
272 goto unmap;
273 }
274 IWL_DEBUG_TX(trans,
275 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
276 len, (unsigned long long)oldphys,
277 (unsigned long long)phys);
278
279 ret = 0;
280unmap:
281 if (meta)
282 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
283 else
284 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
285trace:
286 trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
287
288 return ret;
289}
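
/*
 * Illustrative sketch (not driver code): the iwl_txq_crosses_4g_boundary()
 * test used above can be pictured as comparing the upper 32 bits of the
 * buffer's start and end addresses, roughly:
 *
 *	bool crosses_4g(u64 addr, u16 len)
 *	{
 *		return (addr >> 32) != ((addr + len) >> 32);
 *	}
 *
 * e.g. addr = 0xfffff000 with len = 0x2000 runs past 0x100000000 and
 * would be bounced through a workaround page by the code above.
 */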
290
291#ifdef CONFIG_INET
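/*
 * Get room for 'len' bytes in the per-CPU TSO header page, allocating a
 * fresh page when the current one doesn't have enough space left before
 * its chaining pointer. The page is referenced from the skb's cb and
 * released in iwl_txq_free_tso_page() when the frame is reclaimed.
 */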
292struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
293 struct sk_buff *skb)
294{
295 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
296 struct page **page_ptr;
297
298 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
299
300 if (WARN_ON(*page_ptr))
301 return NULL;
302
303 if (!p->page)
304 goto alloc;
305
306 /*
307 * Check if there's enough room on this page
308 *
309 * Note that we put a page chaining pointer *last* in the
310 * page - we need it somewhere, and if it's there then we
311 * avoid DMA mapping the last bits of the page which may
312 * trigger the 32-bit boundary hardware bug.
313 *
314 * (see also get_workaround_page() above)
315 */
316 if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
317 sizeof(void *))
318 goto out;
319
320 /* We don't have enough room on this page, get a new one. */
321 __free_page(p->page);
322
323alloc:
324 p->page = alloc_page(GFP_ATOMIC);
325 if (!p->page)
326 return NULL;
327 p->pos = page_address(p->page);
328 /* set the chaining pointer to NULL */
329 *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
330out:
331 *page_ptr = p->page;
332 get_page(p->page);
333 return p;
334}
335#endif
336
337static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
338 struct sk_buff *skb,
339 struct iwl_tfh_tfd *tfd, int start_len,
340 u8 hdr_len,
341 struct iwl_device_tx_cmd *dev_cmd)
342{
343#ifdef CONFIG_INET
344 struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
345 struct ieee80211_hdr *hdr = (void *)skb->data;
346 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
347 unsigned int mss = skb_shinfo(skb)->gso_size;
348 u16 length, amsdu_pad;
349 u8 *start_hdr;
350 struct iwl_tso_hdr_page *hdr_page;
351 struct tso_t tso;
352
353 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
354 &dev_cmd->hdr, start_len, 0);
355
356 ip_hdrlen = skb_network_header_len(skb);
357 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
358 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
359 amsdu_pad = 0;
360
361 /* total amount of header we may need for this A-MSDU */
362 hdr_room = DIV_ROUND_UP(total_len, mss) *
363 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
364
365 /* Our device supports 9 segments at most, so it will fit in one page */
366 hdr_page = get_page_hdr(trans, hdr_room, skb);
367 if (!hdr_page)
368 return -ENOMEM;
369
370 start_hdr = hdr_page->pos;
371
372 /*
373 * Pull the ieee80211 header to be able to use TSO core,
374 * we will restore it for the tx_status flow.
375 */
376 skb_pull(skb, hdr_len);
377
378 /*
379 * Remove the length of all the headers that we don't actually
380 * have in the MPDU by themselves, but that we duplicate into
381 * all the different MSDUs inside the A-MSDU.
382 */
383 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
384
385 tso_start(skb, &tso);
386
387 while (total_len) {
388 /* this is the data left for this subframe */
389 unsigned int data_left = min_t(unsigned int, mss, total_len);
390 unsigned int tb_len;
391 dma_addr_t tb_phys;
392 u8 *subf_hdrs_start = hdr_page->pos;
393
394 total_len -= data_left;
395
396 memset(hdr_page->pos, 0, amsdu_pad);
397 hdr_page->pos += amsdu_pad;
398 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
399 data_left)) & 0x3;
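/* each A-MSDU subframe except the last is padded to a 4-byte
 * boundary; the padding computed here is written at the top of
 * the next loop iteration
 */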
400 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
401 hdr_page->pos += ETH_ALEN;
402 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
403 hdr_page->pos += ETH_ALEN;
404
405 length = snap_ip_tcp_hdrlen + data_left;
406 *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
407 hdr_page->pos += sizeof(length);
408
409 /*
410 * This will copy the SNAP as well which will be considered
411 * as MAC header.
412 */
413 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
414
415 hdr_page->pos += snap_ip_tcp_hdrlen;
416
417 tb_len = hdr_page->pos - start_hdr;
418 tb_phys = dma_map_single(trans->dev, start_hdr,
419 tb_len, DMA_TO_DEVICE);
420 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
421 goto out_err;
422 /*
423 * No need for _with_wa, this is from the TSO page and
424 * we leave some space at the end of it, so we can't hit
425 * the buggy scenario.
426 */
427 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
428 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
429 tb_phys, tb_len);
430 /* add this subframe's headers' length to the tx_cmd */
431 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
432
433 /* prepare the start_hdr for the next subframe */
434 start_hdr = hdr_page->pos;
435
436 /* put the payload */
437 while (data_left) {
438 int ret;
439
440 tb_len = min_t(unsigned int, tso.size, data_left);
441 tb_phys = dma_map_single(trans->dev, tso.data,
442 tb_len, DMA_TO_DEVICE);
443 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
444 tb_phys, tso.data,
445 tb_len, NULL);
446 if (ret)
447 goto out_err;
448
449 data_left -= tb_len;
450 tso_build_data(skb, &tso, tb_len);
451 }
452 }
453
454 /* re-add the WiFi header */
455 skb_push(skb, hdr_len);
456
457 return 0;
458
459out_err:
460#endif
461 return -EINVAL;
462}
463
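/*
 * Build a TFD for a GSO frame: TB0 carries the first IWL_FIRST_TB_SIZE
 * bytes of the device command (bi-directional DMA data), TB1 the rest
 * of the TX command plus the 802.11 header, and the remaining TBs are
 * filled by iwl_txq_gen2_build_amsdu() with per-subframe headers and
 * payload.
 */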
464static struct
465iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
466 struct iwl_txq *txq,
467 struct iwl_device_tx_cmd *dev_cmd,
468 struct sk_buff *skb,
469 struct iwl_cmd_meta *out_meta,
470 int hdr_len,
471 int tx_cmd_len)
472{
473 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
474 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
475 dma_addr_t tb_phys;
476 int len;
477 void *tb1_addr;
478
479 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
480
481 /*
482 * No need for _with_wa, the first TB allocation is aligned up
483 * to a 64-byte boundary and thus can't be at the end or cross
484 * a page boundary (much less a 2^32 boundary).
485 */
486 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
487
488 /*
489 * The second TB (tb1) points to the remainder of the TX command
490 * and the 802.11 header - dword aligned size
491 * (This calculation modifies the TX command, so do it before the
492 * setup of the first TB)
493 */
494 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
495 IWL_FIRST_TB_SIZE;
496
497 /* do not align A-MSDU to dword as the subframe header aligns it */
498
499 /* map the data for TB1 */
500 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
501 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
502 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
503 goto out_err;
504 /*
505 * No need for _with_wa(), we ensure (via alignment) that the data
506 * here can never cross or end at a page boundary.
507 */
508 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
509
510 if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
511 hdr_len, dev_cmd))
512 goto out_err;
513
514 /* building the A-MSDU might have changed this data, memcpy it now */
515 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
516 return tfd;
517
518out_err:
519 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
520 return NULL;
521}
522
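/*
 * Map each paged fragment of the skb and add it as a TB, applying the
 * 2^32-boundary workaround where needed; out_meta->tbs records which
 * TBs must later be unmapped with dma_unmap_page().
 */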
523static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
524 struct sk_buff *skb,
525 struct iwl_tfh_tfd *tfd,
526 struct iwl_cmd_meta *out_meta)
527{
528 int i;
529
530 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
531 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
532 dma_addr_t tb_phys;
533 unsigned int fragsz = skb_frag_size(frag);
534 int ret;
535
536 if (!fragsz)
537 continue;
538
539 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
540 fragsz, DMA_TO_DEVICE);
541 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
542 skb_frag_address(frag),
543 fragsz, out_meta);
544 if (ret)
545 return ret;
546 }
547
548 return 0;
549}
550
551static struct
552iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
553 struct iwl_txq *txq,
554 struct iwl_device_tx_cmd *dev_cmd,
555 struct sk_buff *skb,
556 struct iwl_cmd_meta *out_meta,
557 int hdr_len,
558 int tx_cmd_len,
559 bool pad)
560{
561 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
562 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
563 dma_addr_t tb_phys;
564 int len, tb1_len, tb2_len;
565 void *tb1_addr;
566 struct sk_buff *frag;
567
568 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
569
570 /* The first TB points to bi-directional DMA data */
571 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
572
573 /*
574 * No need for _with_wa, the first TB allocation is aligned up
575 * to a 64-byte boundary and thus can't be at the end or cross
576 * a page boundary (much less a 2^32 boundary).
577 */
578 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
579
580 /*
581 * The second TB (tb1) points to the remainder of the TX command
582 * and the 802.11 header - dword aligned size
583 * (This calculation modifies the TX command, so do it before the
584 * setup of the first TB)
585 */
586 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
587 IWL_FIRST_TB_SIZE;
588
589 if (pad)
590 tb1_len = ALIGN(len, 4);
591 else
592 tb1_len = len;
593
594 /* map the data for TB1 */
595 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
596 tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
597 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
598 goto out_err;
599 /*
600 * No need for _with_wa(), we ensure (via alignment) that the data
601 * here can never cross or end at a page boundary.
602 */
603 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
604 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
605 IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
606
607 /* set up TFD's third entry to point to remainder of skb's head */
608 tb2_len = skb_headlen(skb) - hdr_len;
609
610 if (tb2_len > 0) {
611 int ret;
612
613 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
614 tb2_len, DMA_TO_DEVICE);
615 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
616 skb->data + hdr_len, tb2_len,
617 NULL);
618 if (ret)
619 goto out_err;
620 }
621
622 if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
623 goto out_err;
624
625 skb_walk_frags(skb, frag) {
626 int ret;
627
628 tb_phys = dma_map_single(trans->dev, frag->data,
629 skb_headlen(frag), DMA_TO_DEVICE);
630 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
631 frag->data,
632 skb_headlen(frag), NULL);
633 if (ret)
634 goto out_err;
635 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
636 goto out_err;
637 }
638
639 return tfd;
640
641out_err:
642 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
643 return NULL;
644}
645
646static
647struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
648 struct iwl_txq *txq,
649 struct iwl_device_tx_cmd *dev_cmd,
650 struct sk_buff *skb,
651 struct iwl_cmd_meta *out_meta)
652{
653 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
654 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
655 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
656 int len, hdr_len;
657 bool amsdu;
658
659 /* There must be data left over for TB1 or this code must be changed */
660 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
661 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
662 offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
663 IWL_FIRST_TB_SIZE);
664 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
665 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
666 offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
667 IWL_FIRST_TB_SIZE);
668
669 memset(tfd, 0, sizeof(*tfd));
670
671 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
672 len = sizeof(struct iwl_tx_cmd_gen2);
673 else
674 len = sizeof(struct iwl_tx_cmd_gen3);
675
676 amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
677 (*ieee80211_get_qos_ctl(hdr) &
678 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
679
680 hdr_len = ieee80211_hdrlen(hdr->frame_control);
681
682 /*
683 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
684 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
685 * built in the higher layers already.
686 */
687 if (amsdu && skb_shinfo(skb)->gso_size)
688 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
689 out_meta, hdr_len, len);
690 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
691 hdr_len, len, !amsdu);
692}
693
694int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
695{
696 unsigned int max;
697 unsigned int used;
698
699 /*
700 * To avoid ambiguity between empty and completely full queues, there
701 * should always be less than max_tfd_queue_size elements in the queue.
702 * If q->n_window is smaller than max_tfd_queue_size, there is no need
703 * to reserve any queue entries for this purpose.
704 */
705 if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
706 max = q->n_window;
707 else
708 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
709
710 /*
711 * max_tfd_queue_size is a power of 2, so the following is equivalent to
712 * modulo by max_tfd_queue_size and is well defined.
713 */
714 used = (q->write_ptr - q->read_ptr) &
715 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
716
717 if (WARN_ON(used > max))
718 return 0;
719
720 return max - used;
721}
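
/*
 * Illustrative example (not driver code): with max_tfd_queue_size = 256
 * and n_window = 256, max is 255; if read_ptr = 250 and write_ptr = 5,
 * then used = (5 - 250) & 255 = 11 and iwl_txq_space() returns 244.
 */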
722
723int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
724 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
725{
726 struct iwl_cmd_meta *out_meta;
727 struct iwl_txq *txq = trans->txqs.txq[txq_id];
728 u16 cmd_len;
729 int idx;
730 void *tfd;
731
732 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
733 "queue %d out of range", txq_id))
734 return -EINVAL;
735
736 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
737 "TX on unused queue %d\n", txq_id))
738 return -EINVAL;
739
740 if (skb_is_nonlinear(skb) &&
741 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
742 __skb_linearize(skb))
743 return -ENOMEM;
744
745 spin_lock(&txq->lock);
746
747 if (iwl_txq_space(trans, txq) < txq->high_mark) {
748 iwl_txq_stop(trans, txq);
749
750 /* don't put the packet on the ring if there is no room */
751 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
752 struct iwl_device_tx_cmd **dev_cmd_ptr;
753
754 dev_cmd_ptr = (void *)((u8 *)skb->cb +
755 trans->txqs.dev_cmd_offs);
756
757 *dev_cmd_ptr = dev_cmd;
758 __skb_queue_tail(&txq->overflow_q, skb);
759 spin_unlock(&txq->lock);
760 return 0;
761 }
762 }
763
764 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
765
766 /* Set up driver data for this TFD */
767 txq->entries[idx].skb = skb;
768 txq->entries[idx].cmd = dev_cmd;
769
770 dev_cmd->hdr.sequence =
771 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
772 INDEX_TO_SEQ(idx)));
773
774 /* Set up first empty entry in queue's array of Tx/cmd buffers */
775 out_meta = &txq->entries[idx].meta;
776 out_meta->flags = 0;
777
778 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
779 if (!tfd) {
780 spin_unlock(&txq->lock);
781 return -1;
782 }
783
784 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
785 struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
786 (void *)dev_cmd->payload;
787
788 cmd_len = le16_to_cpu(tx_cmd_gen3->len);
789 } else {
790 struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
791 (void *)dev_cmd->payload;
792
793 cmd_len = le16_to_cpu(tx_cmd_gen2->len);
794 }
795
796 /* Set up entry for this TFD in Tx byte-count array */
797 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
798 iwl_txq_gen2_get_num_tbs(trans, tfd));
799
800 /* start timer if queue currently empty */
801 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
802 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
803
804 /* Tell device the write index *just past* this latest filled TFD */
805 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
806 iwl_txq_inc_wr_ptr(trans, txq);
807 /*
808 * At this point the frame is "transmitted" successfully
809 * and we will get a TX status notification eventually.
810 */
811 spin_unlock(&txq->lock);
812 return 0;
813}
814
815/*************** HOST COMMAND QUEUE FUNCTIONS *****/
816
817/*
818 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
819 */
820void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
821{
822 struct iwl_txq *txq = trans->txqs.txq[txq_id];
823
824 spin_lock_bh(&txq->lock);
825 while (txq->write_ptr != txq->read_ptr) {
826 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
827 txq_id, txq->read_ptr);
828
829 if (txq_id != trans->txqs.cmd.q_id) {
830 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
831 struct sk_buff *skb = txq->entries[idx].skb;
832
833 if (!WARN_ON_ONCE(!skb))
834 iwl_txq_free_tso_page(trans, skb);
835 }
836 iwl_txq_gen2_free_tfd(trans, txq);
837 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
838 }
839
840 while (!skb_queue_empty(&txq->overflow_q)) {
841 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
842
843 iwl_op_mode_free_skb(trans->op_mode, skb);
844 }
845
846 spin_unlock_bh(&txq->lock);
847
848 /* just in case - this queue may have been stopped */
849 iwl_wake_queue(trans, txq);
850}
851
852static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
853 struct iwl_txq *txq)
854{
855 struct device *dev = trans->dev;
856
857 /* De-alloc circular buffer of TFDs */
858 if (txq->tfds) {
859 dma_free_coherent(dev,
860 trans->txqs.tfd.size * txq->n_window,
861 txq->tfds, txq->dma_addr);
862 dma_free_coherent(dev,
863 sizeof(*txq->first_tb_bufs) * txq->n_window,
864 txq->first_tb_bufs, txq->first_tb_dma);
865 }
866
867 kfree(txq->entries);
868 if (txq->bc_tbl.addr)
869 dma_pool_free(trans->txqs.bc_pool,
870 txq->bc_tbl.addr, txq->bc_tbl.dma);
871 kfree(txq);
872}
873
874/*
875 * iwl_txq_gen2_free - Deallocate DMA queue.
876 * @txq_id: index of the transmit queue to deallocate.
877 *
878 * Empty the queue by removing and destroying all BDs.
879 * Free all buffers.
880 * Free the queue descriptor structure itself as well.
881 */
882static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
883{
884 struct iwl_txq *txq;
885 int i;
886
887 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
888 "queue %d out of range", txq_id))
889 return;
890
891 txq = trans->txqs.txq[txq_id];
892
893 if (WARN_ON(!txq))
894 return;
895
896 iwl_txq_gen2_unmap(trans, txq_id);
897
898 /* De-alloc array of command/tx buffers */
899 if (txq_id == trans->txqs.cmd.q_id)
900 for (i = 0; i < txq->n_window; i++) {
901 kfree_sensitive(txq->entries[i].cmd);
902 kfree_sensitive(txq->entries[i].free_buf);
903 }
904 del_timer_sync(&txq->stuck_timer);
905
906 iwl_txq_gen2_free_memory(trans, txq);
907
908 trans->txqs.txq[txq_id] = NULL;
909
910 clear_bit(txq_id, trans->txqs.queue_used);
911}
912
913/*
914 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
915 */
916static int iwl_queue_init(struct iwl_txq *q, int slots_num)
917{
918 q->n_window = slots_num;
919
920 /* slots_num must be a power of two, otherwise
921 * iwl_txq_get_cmd_index is broken. */
922 if (WARN_ON(!is_power_of_2(slots_num)))
923 return -EINVAL;
924
925 q->low_mark = q->n_window / 4;
926 if (q->low_mark < 4)
927 q->low_mark = 4;
928
929 q->high_mark = q->n_window / 8;
930 if (q->high_mark < 2)
931 q->high_mark = 2;
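/* e.g. a 256-slot window gives low_mark = 64 and high_mark = 32 */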
932
933 q->write_ptr = 0;
934 q->read_ptr = 0;
935
936 return 0;
937}
938
939int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
940 bool cmd_queue)
941{
942 int ret;
943 u32 tfd_queue_max_size =
944 trans->trans_cfg->base_params->max_tfd_queue_size;
945
946 txq->need_update = false;
947
948 /* max_tfd_queue_size must be a power of two, otherwise
949 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
950 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
951 "Max tfd queue size must be a power of two, but is %d",
952 tfd_queue_max_size))
953 return -EINVAL;
954
955 /* Initialize queue's high/low-water marks, and head/tail indexes */
956 ret = iwl_queue_init(txq, slots_num);
957 if (ret)
958 return ret;
959
960 spin_lock_init(&txq->lock);
961
962 if (cmd_queue) {
963 static struct lock_class_key iwl_txq_cmd_queue_lock_class;
964
965 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
966 }
967
968 __skb_queue_head_init(&txq->overflow_q);
969
970 return 0;
971}
972
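/*
 * Free the chain of TSO header / workaround pages attached to the skb's
 * cb (see get_page_hdr() and get_workaround_page()); the pages are
 * linked through a pointer stored in their last bytes.
 */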
973void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
974{
975 struct page **page_ptr;
976 struct page *next;
977
978 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
979 next = *page_ptr;
980 *page_ptr = NULL;
981
982 while (next) {
983 struct page *tmp = next;
984
985 next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
986 sizeof(void *));
987 __free_page(tmp);
988 }
989}
990
991void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
992{
993 u32 txq_id = txq->id;
994 u32 status;
995 bool active;
996 u8 fifo;
997
998 if (trans->trans_cfg->gen2) {
999 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
1000 txq->read_ptr, txq->write_ptr);
1001 /* TODO: access new SCD registers and dump them */
1002 return;
1003 }
1004
1005 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
1006 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
1007 active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
1008
1009 IWL_ERR(trans,
1010 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
1011 txq_id, active ? "" : "in", fifo,
1012 jiffies_to_msecs(txq->wd_timeout),
1013 txq->read_ptr, txq->write_ptr,
1014 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
1015 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1016 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
1017 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1018 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
1019}
1020
1021static void iwl_txq_stuck_timer(struct timer_list *t)
1022{
1023 struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
1024 struct iwl_trans *trans = txq->trans;
1025
1026 spin_lock(&txq->lock);
1027 /* check if triggered erroneously */
1028 if (txq->read_ptr == txq->write_ptr) {
1029 spin_unlock(&txq->lock);
1030 return;
1031 }
1032 spin_unlock(&txq->lock);
1033
1034 iwl_txq_log_scd_error(trans, txq);
1035
1036 iwl_force_nmi(trans);
1037}
1038
1039static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
1040 struct iwl_tfd *tfd)
1041{
1042 tfd->num_tbs = 0;
1043
1044 iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma,
1045 trans->invalid_tx_cmd.size);
1046}
1047
1048int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
1049 bool cmd_queue)
1050{
1051 size_t num_entries = trans->trans_cfg->gen2 ?
1052 slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
1053 size_t tfd_sz;
1054 size_t tb0_buf_sz;
1055 int i;
1056
1057 if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
1058 return -EINVAL;
1059
1060 if (WARN_ON(txq->entries || txq->tfds))
1061 return -EINVAL;
1062
1063 tfd_sz = trans->txqs.tfd.size * num_entries;
1064
1065 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
1066 txq->trans = trans;
1067
1068 txq->n_window = slots_num;
1069
1070 txq->entries = kcalloc(slots_num,
1071 sizeof(struct iwl_pcie_txq_entry),
1072 GFP_KERNEL);
1073
1074 if (!txq->entries)
1075 goto error;
1076
1077 if (cmd_queue)
1078 for (i = 0; i < slots_num; i++) {
1079 txq->entries[i].cmd =
1080 kmalloc(sizeof(struct iwl_device_cmd),
1081 GFP_KERNEL);
1082 if (!txq->entries[i].cmd)
1083 goto error;
1084 }
1085
1086 /* Circular buffer of transmit frame descriptors (TFDs),
1087 * shared with device */
1088 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
1089 &txq->dma_addr, GFP_KERNEL);
1090 if (!txq->tfds)
1091 goto error;
1092
1093 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
1094
1095 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
1096
1097 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
1098 &txq->first_tb_dma,
1099 GFP_KERNEL);
1100 if (!txq->first_tb_bufs)
1101 goto err_free_tfds;
1102
1103 for (i = 0; i < num_entries; i++) {
1104 void *tfd = iwl_txq_get_tfd(trans, txq, i);
1105
1106 if (trans->trans_cfg->gen2)
1107 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
1108 else
1109 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
1110 }
1111
1112 return 0;
1113err_free_tfds:
1114 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
1115 txq->tfds = NULL;
1116error:
1117 if (txq->entries && cmd_queue)
1118 for (i = 0; i < slots_num; i++)
1119 kfree(txq->entries[i].cmd);
1120 kfree(txq->entries);
1121 txq->entries = NULL;
1122
1123 return -ENOMEM;
1124}
1125
1126static struct iwl_txq *
1127iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
1128{
1129 size_t bc_tbl_size, bc_tbl_entries;
1130 struct iwl_txq *txq;
1131 int ret;
1132
1133 WARN_ON(!trans->txqs.bc_tbl_size);
1134
1135 bc_tbl_size = trans->txqs.bc_tbl_size;
1136 bc_tbl_entries = bc_tbl_size / sizeof(u16);
1137
1138 if (WARN_ON(size > bc_tbl_entries))
1139 return ERR_PTR(-EINVAL);
1140
1141 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
1142 if (!txq)
1143 return ERR_PTR(-ENOMEM);
1144
1145 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
1146 &txq->bc_tbl.dma);
1147 if (!txq->bc_tbl.addr) {
1148 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
1149 kfree(txq);
1150 return ERR_PTR(-ENOMEM);
1151 }
1152
1153 ret = iwl_txq_alloc(trans, txq, size, false);
1154 if (ret) {
1155 IWL_ERR(trans, "Tx queue alloc failed\n");
1156 goto error;
1157 }
1158 ret = iwl_txq_init(trans, txq, size, false);
1159 if (ret) {
1160 IWL_ERR(trans, "Tx queue init failed\n");
1161 goto error;
1162 }
1163
1164 txq->wd_timeout = msecs_to_jiffies(timeout);
1165
1166 return txq;
1167
1168error:
1169 iwl_txq_gen2_free_memory(trans, txq);
1170 return ERR_PTR(ret);
1171}
1172
1173static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
1174 struct iwl_host_cmd *hcmd)
1175{
1176 struct iwl_tx_queue_cfg_rsp *rsp;
1177 int ret, qid;
1178 u32 wr_ptr;
1179
1180 if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
1181 sizeof(*rsp))) {
1182 ret = -EINVAL;
1183 goto error_free_resp;
1184 }
1185
1186 rsp = (void *)hcmd->resp_pkt->data;
1187 qid = le16_to_cpu(rsp->queue_number);
1188 wr_ptr = le16_to_cpu(rsp->write_pointer);
1189
1190 if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
1191 WARN_ONCE(1, "queue index %d unsupported", qid);
1192 ret = -EIO;
1193 goto error_free_resp;
1194 }
1195
1196 if (test_and_set_bit(qid, trans->txqs.queue_used)) {
1197 WARN_ONCE(1, "queue %d already used", qid);
1198 ret = -EIO;
1199 goto error_free_resp;
1200 }
1201
1202 if (WARN_ONCE(trans->txqs.txq[qid],
1203 "queue %d already allocated\n", qid)) {
1204 ret = -EIO;
1205 goto error_free_resp;
1206 }
1207
1208 txq->id = qid;
1209 trans->txqs.txq[qid] = txq;
1210 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
1211
1212 /* Place first TFD at index corresponding to start sequence number */
1213 txq->read_ptr = wr_ptr;
1214 txq->write_ptr = wr_ptr;
1215
1216 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1217
1218 iwl_free_resp(hcmd);
1219 return qid;
1220
1221error_free_resp:
1222 iwl_free_resp(hcmd);
1223 iwl_txq_gen2_free_memory(trans, txq);
1224 return ret;
1225}
1226
1227int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
1228 u8 tid, int size, unsigned int timeout)
1229{
1230 struct iwl_txq *txq;
1231 union {
1232 struct iwl_tx_queue_cfg_cmd old;
1233 struct iwl_scd_queue_cfg_cmd new;
1234 } cmd;
1235 struct iwl_host_cmd hcmd = {
1236 .flags = CMD_WANT_SKB,
1237 };
1238 int ret;
1239
1240 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
1241 trans->hw_rev_step == SILICON_A_STEP)
1242 size = 4096;
1243
1244 txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
1245 if (IS_ERR(txq))
1246 return PTR_ERR(txq);
1247
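/*
 * Command version 0 uses the legacy per-station SCD_QUEUE_CFG command
 * (a single sta_id), version 3 the station-mask based
 * SCD_QUEUE_CONFIG_CMD in the data path group; any other version is
 * rejected below.
 */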
1248 if (trans->txqs.queue_alloc_cmd_ver == 0) {
1249 memset(&cmd.old, 0, sizeof(cmd.old));
1250 cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
1251 cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1252 cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1253 cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
1254 cmd.old.tid = tid;
1255
1256 if (hweight32(sta_mask) != 1) {
1257 ret = -EINVAL;
1258 goto error;
1259 }
1260 cmd.old.sta_id = ffs(sta_mask) - 1;
1261
1262 hcmd.id = SCD_QUEUE_CFG;
1263 hcmd.len[0] = sizeof(cmd.old);
1264 hcmd.data[0] = &cmd.old;
1265 } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
1266 memset(&cmd.new, 0, sizeof(cmd.new));
1267 cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
1268 cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
1269 cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
1270 cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1271 cmd.new.u.add.flags = cpu_to_le32(flags);
1272 cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
1273 cmd.new.u.add.tid = tid;
1274
1275 hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
1276 hcmd.len[0] = sizeof(cmd.new);
1277 hcmd.data[0] = &cmd.new;
1278 } else {
1279 ret = -EOPNOTSUPP;
1280 goto error;
1281 }
1282
1283 ret = iwl_trans_send_cmd(trans, &hcmd);
1284 if (ret)
1285 goto error;
1286
1287 return iwl_txq_alloc_response(trans, txq, &hcmd);
1288
1289error:
1290 iwl_txq_gen2_free_memory(trans, txq);
1291 return ret;
1292}
1293
1294void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
1295{
1296 if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
1297 "queue %d out of range", queue))
1298 return;
1299
1300 /*
1301 * Upon HW Rfkill - we stop the device, and then stop the queues
1302 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1303 * allow the op_mode to call txq_disable after it already called
1304 * stop_device.
1305 */
1306 if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
1307 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1308 "queue %d not used", queue);
1309 return;
1310 }
1311
1312 iwl_txq_gen2_free(trans, queue);
1313
1314 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1315}
1316
1317void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
1318{
1319 int i;
1320
1321 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
1322
1323 /* Free all TX queues */
1324 for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
1325 if (!trans->txqs.txq[i])
1326 continue;
1327
1328 iwl_txq_gen2_free(trans, i);
1329 }
1330}
1331
1332int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
1333{
1334 struct iwl_txq *queue;
1335 int ret;
1336
1337 /* alloc and init the tx queue */
1338 if (!trans->txqs.txq[txq_id]) {
1339 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1340 if (!queue) {
1341 IWL_ERR(trans, "Not enough memory for tx queue\n");
1342 return -ENOMEM;
1343 }
1344 trans->txqs.txq[txq_id] = queue;
1345 ret = iwl_txq_alloc(trans, queue, queue_size, true);
1346 if (ret) {
1347 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1348 goto error;
1349 }
1350 } else {
1351 queue = trans->txqs.txq[txq_id];
1352 }
1353
1354 ret = iwl_txq_init(trans, queue, queue_size,
1355 (txq_id == trans->txqs.cmd.q_id));
1356 if (ret) {
1357 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1358 goto error;
1359 }
1360 trans->txqs.txq[txq_id]->id = txq_id;
1361 set_bit(txq_id, trans->txqs.queue_used);
1362
1363 return 0;
1364
1365error:
1366 iwl_txq_gen2_tx_free(trans);
1367 return ret;
1368}
1369
1370static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
1371 struct iwl_tfd *tfd, u8 idx)
1372{
1373 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
1374 dma_addr_t addr;
1375 dma_addr_t hi_len;
1376
1377 addr = get_unaligned_le32(&tb->lo);
1378
1379 if (sizeof(dma_addr_t) <= sizeof(u32))
1380 return addr;
1381
1382 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
1383
1384 /*
1385 * shift by 16 twice to avoid warnings on 32-bit
1386 * (where this code never runs anyway due to the
1387 * if statement above)
1388 */
1389 return addr | ((hi_len << 16) << 16);
1390}
1391
1392void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
1393 struct iwl_cmd_meta *meta,
1394 struct iwl_txq *txq, int index)
1395{
1396 int i, num_tbs;
1397 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
1398
1399 /* Sanity check on number of chunks */
1400 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
1401
1402 if (num_tbs > trans->txqs.tfd.max_tbs) {
1403 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
1404 /* @todo issue fatal error, this is quite a serious situation */
1405 return;
1406 }
1407
1408 /* first TB is never freed - it's the bidirectional DMA data */
1409
1410 for (i = 1; i < num_tbs; i++) {
1411 if (meta->tbs & BIT(i))
1412 dma_unmap_page(trans->dev,
1413 iwl_txq_gen1_tfd_tb_get_addr(trans,
1414 tfd, i),
1415 iwl_txq_gen1_tfd_tb_get_len(trans,
1416 tfd, i),
1417 DMA_TO_DEVICE);
1418 else
1419 dma_unmap_single(trans->dev,
1420 iwl_txq_gen1_tfd_tb_get_addr(trans,
1421 tfd, i),
1422 iwl_txq_gen1_tfd_tb_get_len(trans,
1423 tfd, i),
1424 DMA_TO_DEVICE);
1425 }
1426
1427 meta->tbs = 0;
1428
1429 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
1430}
1431
1432#define IWL_TX_CRC_SIZE 4
1433#define IWL_TX_DELIMITER_SIZE 4
1434
1435/*
1436 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1437 */
1438void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
1439 struct iwl_txq *txq, u16 byte_cnt,
1440 int num_tbs)
1441{
1442 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
1443 int write_ptr = txq->write_ptr;
1444 int txq_id = txq->id;
1445 u8 sec_ctl = 0;
1446 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1447 __le16 bc_ent;
1448 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
1449 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1450 u8 sta_id = tx_cmd->sta_id;
1451
1452 scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1453
1454 sec_ctl = tx_cmd->sec_ctl;
1455
1456 switch (sec_ctl & TX_CMD_SEC_MSK) {
1457 case TX_CMD_SEC_CCM:
1458 len += IEEE80211_CCMP_MIC_LEN;
1459 break;
1460 case TX_CMD_SEC_TKIP:
1461 len += IEEE80211_TKIP_ICV_LEN;
1462 break;
1463 case TX_CMD_SEC_WEP:
1464 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
1465 break;
1466 }
1467 if (trans->txqs.bc_table_dword)
1468 len = DIV_ROUND_UP(len, 4);
1469
1470 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
1471 return;
1472
1473 bc_ent = cpu_to_le16(len | (sta_id << 12));
1474
1475 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1476
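/*
 * The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored beyond
 * TFD_QUEUE_SIZE_MAX, presumably so that hardware reads that run past
 * the wrap-around point still see valid byte counts.
 */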
1477 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1478 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1479 bc_ent;
1480}
1481
1482void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
1483 struct iwl_txq *txq)
1484{
1485 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1486 int txq_id = txq->id;
1487 int read_ptr = txq->read_ptr;
1488 u8 sta_id = 0;
1489 __le16 bc_ent;
1490 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
1491 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1492
1493 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
1494
1495 if (txq_id != trans->txqs.cmd.q_id)
1496 sta_id = tx_cmd->sta_id;
1497
1498 bc_ent = cpu_to_le16(1 | (sta_id << 12));
1499
1500 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
1501
1502 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
1503 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
1504 bc_ent;
1505}
1506
1507/*
1508 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->read_ptr]
1509 * @trans: transport private data
1510 * @txq: tx queue
1511 *
1512 * Does NOT advance any TFD circular buffer read/write indexes
1513 * Does NOT free the TFD itself (which is within circular buffer)
1514 * Handles both the gen1 and gen2 TFD layouts.
1515 */
1516void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
1517{
1518 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
1519 * idx is bounded by n_window
1520 */
1521 int rd_ptr = txq->read_ptr;
1522 int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
1523 struct sk_buff *skb;
1524
1525 lockdep_assert_held(&txq->lock);
1526
1527 if (!txq->entries)
1528 return;
1529
1530 /* We have only q->n_window txq->entries, but we use
1531 * TFD_QUEUE_SIZE_MAX tfds
1532 */
1533 if (trans->trans_cfg->gen2)
1534 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
1535 iwl_txq_get_tfd(trans, txq, rd_ptr));
1536 else
1537 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
1538 txq, rd_ptr);
1539
1540 /* free SKB */
1541 skb = txq->entries[idx].skb;
1542
1543 /* Can be called from irqs-disabled context
1544 * If skb is not NULL, it means that the whole queue is being
1545 * freed and that the queue is not empty - free the skb
1546 */
1547 if (skb) {
1548 iwl_op_mode_free_skb(trans->op_mode, skb);
1549 txq->entries[idx].skb = NULL;
1550 }
1551}
1552
1553void iwl_txq_progress(struct iwl_txq *txq)
1554{
1555 lockdep_assert_held(&txq->lock);
1556
1557 if (!txq->wd_timeout)
1558 return;
1559
1560 /*
1561 * station is asleep and we send data - that must
1562 * be uAPSD or PS-Poll. Don't rearm the timer.
1563 */
1564 if (txq->frozen)
1565 return;
1566
1567 /*
1568 * if empty delete timer, otherwise move timer forward
1569 * since we're making progress on this queue
1570 */
1571 if (txq->read_ptr == txq->write_ptr)
1572 del_timer(&txq->stuck_timer);
1573 else
1574 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1575}
1576
1577/* Frees buffers until index _not_ inclusive */
1578void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1579 struct sk_buff_head *skbs, bool is_flush)
1580{
1581 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1582 int tfd_num, read_ptr, last_to_free;
1583
1584 /* This function is not meant to release the cmd queue */
1585 if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
1586 return;
1587
1588 if (WARN_ON(!txq))
1589 return;
1590
1591 tfd_num = iwl_txq_get_cmd_index(txq, ssn);
1592
1593 spin_lock_bh(&txq->lock);
1594 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1595
1596 if (!test_bit(txq_id, trans->txqs.queue_used)) {
1597 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
1598 txq_id, ssn);
1599 goto out;
1600 }
1601
1602 if (read_ptr == tfd_num)
1603 goto out;
1604
1605 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
1606 txq_id, read_ptr, txq->read_ptr, tfd_num, ssn);
1607
1608 /* Since we free until index _not_ inclusive, the one before index is
1609 * the last we will free; that entry must currently be in use */
1610 last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
1611
1612 if (!iwl_txq_used(txq, last_to_free)) {
1613 IWL_ERR(trans,
1614 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
1615 __func__, txq_id, last_to_free,
1616 trans->trans_cfg->base_params->max_tfd_queue_size,
1617 txq->write_ptr, txq->read_ptr);
1618
1619 iwl_op_mode_time_point(trans->op_mode,
1620 IWL_FW_INI_TIME_POINT_FAKE_TX,
1621 NULL);
1622 goto out;
1623 }
1624
1625 if (WARN_ON(!skb_queue_empty(skbs)))
1626 goto out;
1627
1628 for (;
1629 read_ptr != tfd_num;
1630 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
1631 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
1632 struct sk_buff *skb = txq->entries[read_ptr].skb;
1633
1634 if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
1635 read_ptr, txq->read_ptr, txq_id))
1636 continue;
1637
1638 iwl_txq_free_tso_page(trans, skb);
1639
1640 __skb_queue_tail(skbs, skb);
1641
1642 txq->entries[read_ptr].skb = NULL;
1643
1644 if (!trans->trans_cfg->gen2)
1645 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
1646
1647 iwl_txq_free_tfd(trans, txq);
1648 }
1649
1650 iwl_txq_progress(txq);
1651
1652 if (iwl_txq_space(trans, txq) > txq->low_mark &&
1653 test_bit(txq_id, trans->txqs.queue_stopped)) {
1654 struct sk_buff_head overflow_skbs;
1655 struct sk_buff *skb;
1656
1657 __skb_queue_head_init(&overflow_skbs);
1658 skb_queue_splice_init(&txq->overflow_q,
1659 is_flush ? skbs : &overflow_skbs);
1660
1661 /*
1662 * We are going to transmit from the overflow queue.
1663 * Remember this state so that wait_for_txq_empty will know we
1664 * are adding more packets to the TFD queue. It cannot rely on
1665 * the state of &txq->overflow_q, as we just emptied it, but
1666 * haven't TXed the content yet.
1667 */
1668 txq->overflow_tx = true;
1669
1670 /*
1671 * This is tricky: we are in the reclaim path, which is not
1672 * re-entrant, so no one will try to access the txq data
1673 * from that path. We stopped tx, so we can't race with
1674 * tx either. Bottom line: we can unlock and re-lock
1675 * later.
1676 */
1677 spin_unlock_bh(&txq->lock);
1678
1679 while ((skb = __skb_dequeue(&overflow_skbs))) {
1680 struct iwl_device_tx_cmd *dev_cmd_ptr;
1681
1682 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
1683 trans->txqs.dev_cmd_offs);
1684
1685 /*
1686 * Note that we can very well be overflowing again.
1687 * In that case, iwl_txq_space will be small again
1688 * and we won't wake mac80211's queue.
1689 */
1690 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
1691 }
1692
1693 if (iwl_txq_space(trans, txq) > txq->low_mark)
1694 iwl_wake_queue(trans, txq);
1695
1696 spin_lock_bh(&txq->lock);
1697 txq->overflow_tx = false;
1698 }
1699
1700out:
1701 spin_unlock_bh(&txq->lock);
1702}
1703
1704/* Set wr_ptr of specific device and txq */
1705void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
1706{
1707 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1708
1709 spin_lock_bh(&txq->lock);
1710
1711 txq->write_ptr = ptr;
1712 txq->read_ptr = txq->write_ptr;
1713
1714 spin_unlock_bh(&txq->lock);
1715}
1716
1717void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
1718 bool freeze)
1719{
1720 int queue;
1721
1722 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
1723 struct iwl_txq *txq = trans->txqs.txq[queue];
1724 unsigned long now;
1725
1726 spin_lock_bh(&txq->lock);
1727
1728 now = jiffies;
1729
1730 if (txq->frozen == freeze)
1731 goto next_queue;
1732
1733 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
1734 freeze ? "Freezing" : "Waking", queue);
1735
1736 txq->frozen = freeze;
1737
1738 if (txq->read_ptr == txq->write_ptr)
1739 goto next_queue;
1740
1741 if (freeze) {
1742 if (unlikely(time_after(now,
1743 txq->stuck_timer.expires))) {
1744 /*
1745 * The timer should have fired, maybe it is
1746 * spinning right now on the lock.
1747 */
1748 goto next_queue;
1749 }
1750 /* remember how long until the timer fires */
1751 txq->frozen_expiry_remainder =
1752 txq->stuck_timer.expires - now;
1753 del_timer(&txq->stuck_timer);
1754 goto next_queue;
1755 }
1756
1757 /*
1758 * Wake a non-empty queue -> arm timer with the
1759 * remainder before it froze
1760 */
1761 mod_timer(&txq->stuck_timer,
1762 now + txq->frozen_expiry_remainder);
1763
1764next_queue:
1765 spin_unlock_bh(&txq->lock);
1766 }
1767}
1768
1769#define HOST_COMPLETE_TIMEOUT (2 * HZ)
1770
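/*
 * Send a host command synchronously: enqueue it through the transport,
 * then sleep until STATUS_SYNC_HCMD_ACTIVE is cleared by the completion
 * path or HOST_COMPLETE_TIMEOUT expires. Timeouts, FW errors and RFKILL
 * all cancel the command and return an error to the caller.
 */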
1771static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
1772 struct iwl_host_cmd *cmd)
1773{
1774 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
1775 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1776 int cmd_idx;
1777 int ret;
1778
1779 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
1780
1781 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
1782 &trans->status),
1783 "Command %s: a command is already active!\n", cmd_str))
1784 return -EIO;
1785
1786 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
1787
1788 cmd_idx = trans->ops->send_cmd(trans, cmd);
1789 if (cmd_idx < 0) {
1790 ret = cmd_idx;
1791 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1792 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
1793 cmd_str, ret);
1794 return ret;
1795 }
1796
1797 ret = wait_event_timeout(trans->wait_command_queue,
1798 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
1799 &trans->status),
1800 HOST_COMPLETE_TIMEOUT);
1801 if (!ret) {
1802 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
1803 cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1804
1805 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
1806 txq->read_ptr, txq->write_ptr);
1807
1808 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1809 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1810 cmd_str);
1811 ret = -ETIMEDOUT;
1812
1813 iwl_trans_sync_nmi(trans);
1814 goto cancel;
1815 }
1816
1817 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
1818 if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
1819 &trans->status)) {
1820 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
1821 dump_stack();
1822 }
1823 ret = -EIO;
1824 goto cancel;
1825 }
1826
1827 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1828 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1829 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1830 ret = -ERFKILL;
1831 goto cancel;
1832 }
1833
1834 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1835 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
1836 ret = -EIO;
1837 goto cancel;
1838 }
1839
1840 return 0;
1841
1842cancel:
1843 if (cmd->flags & CMD_WANT_SKB) {
1844 /*
1845 * Cancel the CMD_WANT_SKB flag for the cmd in the
1846 * TX cmd queue. Otherwise, if the response comes in
1847 * later, it could write through an invalid address
1848 * (cmd->meta.source).
1849 */
1850 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1851 }
1852
1853 if (cmd->resp_pkt) {
1854 iwl_free_resp(cmd);
1855 cmd->resp_pkt = NULL;
1856 }
1857
1858 return ret;
1859}
1860
1861int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
1862 struct iwl_host_cmd *cmd)
1863{
1864 /* Make sure the NIC is still alive in the bus */
1865 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1866 return -ENODEV;
1867
1868 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1869 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1870 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1871 cmd->id);
1872 return -ERFKILL;
1873 }
1874
1875 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
1876 !(cmd->flags & CMD_SEND_IN_D3))) {
1877 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
1878 return -EHOSTDOWN;
1879 }
1880
1881 if (cmd->flags & CMD_ASYNC) {
1882 int ret;
1883
1884 /* An asynchronous command cannot expect an SKB to be set. */
1885 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1886 return -EINVAL;
1887
1888 ret = trans->ops->send_cmd(trans, cmd);
1889 if (ret < 0) {
1890 IWL_ERR(trans,
1891 "Error sending %s: enqueue_hcmd failed: %d\n",
1892 iwl_get_cmd_string(trans, cmd->id), ret);
1893 return ret;
1894 }
1895 return 0;
1896 }
1897
1898 return iwl_trans_txq_send_hcmd_sync(trans, cmd);
1899}
1900