// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021-2023 Intel Corporation
 */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mei_cl_bus.h>
#include <linux/rcupdate.h>
#include <linux/debugfs.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <net/cfg80211.h>

#include "internal.h"
#include "iwl-mei.h"
#include "trace.h"
#include "trace-data.h"
#include "sap.h"

MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface");
MODULE_LICENSE("GPL");

#define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \
			      0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65)

/* After CSME takes ownership, it won't release it for 60 seconds to avoid
 * frequent ownership transitions.
 */
#define MEI_OWNERSHIP_RETAKE_TIMEOUT_MS	msecs_to_jiffies(60000)

/*
 * Since iwlwifi calls iwlmei without any context, hold a pointer to the
 * mei_cl_device structure here.
 * Define a mutex that will synchronize all the flows between iwlwifi and
 * iwlmei.
 * Note that iwlmei can't have several instances, so it is OK to have
 * static variables here.
 */
static struct mei_cl_device *iwl_mei_global_cldev;
static DEFINE_MUTEX(iwl_mei_mutex);
static unsigned long iwl_mei_status;

enum iwl_mei_status_bits {
	IWL_MEI_STATUS_SAP_CONNECTED,
};

bool iwl_mei_is_connected(void)
{
	return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
}
EXPORT_SYMBOL_GPL(iwl_mei_is_connected);

#define SAP_VERSION	3
#define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */

struct iwl_sap_q_ctrl_blk {
	__le32 wr_ptr;
	__le32 rd_ptr;
	__le32 size;
};

enum iwl_sap_q_idx {
	SAP_QUEUE_IDX_NOTIF = 0,
	SAP_QUEUE_IDX_DATA,
	SAP_QUEUE_IDX_MAX,
};

struct iwl_sap_dir {
	__le32 reserved;
	struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX];
};

enum iwl_sap_dir_idx {
	SAP_DIRECTION_HOST_TO_ME = 0,
	SAP_DIRECTION_ME_TO_HOST,
	SAP_DIRECTION_MAX,
};

struct iwl_sap_shared_mem_ctrl_blk {
	__le32 sap_id;
	__le32 size;
	struct iwl_sap_dir dir[SAP_DIRECTION_MAX];
};
/*
 * The shared area has the following layout (the queues appear in
 * iwl_sap_q_idx order, i.e. notif before data, which is the order
 * iwl_mei_init_shared_mem() lays them out in):
 *
 * +-----------------------------------+
 * |struct iwl_sap_shared_mem_ctrl_blk |
 * +-----------------------------------+
 * |Host -> ME notif queue             |
 * +-----------------------------------+
 * |Host -> ME data queue              |
 * +-----------------------------------+
 * |ME -> Host notif queue             |
 * +-----------------------------------+
 * |ME -> Host data queue              |
 * +-----------------------------------+
 * |SAP control block id (SAP!)        |
 * +-----------------------------------+
 */

#define SAP_H2M_DATA_Q_SZ	48256
#define SAP_M2H_DATA_Q_SZ	24128
#define SAP_H2M_NOTIF_Q_SZ	2240
#define SAP_M2H_NOTIF_Q_SZ	62720

#define _IWL_MEI_SAP_SHARED_MEM_SZ \
	(sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
	 SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \
	 SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)

#define IWL_MEI_SAP_SHARED_MEM_SZ \
	(roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE))
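
/*
 * A worked size example (illustration only, assuming 4 KiB pages): the
 * control block above is 64 bytes of packed __le32 words, so
 * _IWL_MEI_SAP_SHARED_MEM_SZ is 64 + 48256 + 2240 + 24128 + 62720 + 4 =
 * 137412 bytes, where the trailing 4 bytes hold the SAP! marker that
 * iwl_mei_init_shared_mem() writes after the last queue.
 * IWL_MEI_SAP_SHARED_MEM_SZ then rounds this up to 34 pages (139264 bytes).
 */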

struct iwl_mei_shared_mem_ptrs {
	struct iwl_sap_shared_mem_ctrl_blk *ctrl;
	void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
	size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
};

struct iwl_mei_filters {
	struct rcu_head rcu_head;
	struct iwl_sap_oob_filters filters;
};

/**
 * struct iwl_mei - holds the private data for iwl_mei
 *
 * @get_nvm_wq: the wait queue for the get_nvm flow
 * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA
 *	message. Used so that we can send CHECK_SHARED_AREA from atomic
 *	contexts.
 * @get_ownership_wq: the wait queue for the get_ownership flow
 * @shared_mem: the memory that is shared between CSME and the host
 * @cldev: the pointer to the MEI client device
 * @nvm: the data returned by the CSME for the NVM
 * @filters: the filters sent by CSME
 * @got_ownership: true if we own the device
 * @amt_enabled: true if CSME has wireless enabled
 * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI
 *	bus, but rather need to wait until send_csa_msg_wk runs
 * @csme_taking_ownership: true when CSME is taking ownership. Used to remember
 *	to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
 *	flow.
 * @link_prot_state: true when we are in link protection PASSIVE
 * @device_down: true if the device is down. Used to remember to send
 *	CSME_OWNERSHIP_CONFIRMED when the driver is already down.
 * @csa_throttle_end_wk: used when &csa_throttled is true
 * @pldr_wq: the wait queue for the PLDR flow
 * @pldr_active: PLDR flow is in progress
 * @data_q_lock: protects the access to the data queues which are
 *	accessed without the mutex.
 * @netdev_work: used to defer registering and unregistering of the netdev to
 *	avoid taking the rtnl lock in the SAP messages handlers.
 * @ownership_dwork: used to re-ask for NIC ownership after ownership was taken
 *	by CSME or when a previous ownership request failed.
 * @sap_seq_no: the sequence number for the SAP messages in the shared area
 * @seq_no: the sequence number for the SAP ME messages sent over the MEI bus
 * @dbgfs_dir: the debugfs dir entry
 */
struct iwl_mei {
	wait_queue_head_t get_nvm_wq;
	struct work_struct send_csa_msg_wk;
	wait_queue_head_t get_ownership_wq;
	struct iwl_mei_shared_mem_ptrs shared_mem;
	struct mei_cl_device *cldev;
	struct iwl_mei_nvm *nvm;
	struct iwl_mei_filters __rcu *filters;
	bool got_ownership;
	bool amt_enabled;
	bool csa_throttled;
	bool csme_taking_ownership;
	bool link_prot_state;
	bool device_down;
	struct delayed_work csa_throttle_end_wk;
	wait_queue_head_t pldr_wq;
	bool pldr_active;
	spinlock_t data_q_lock;
	struct work_struct netdev_work;
	struct delayed_work ownership_dwork;

	atomic_t sap_seq_no;
	atomic_t seq_no;

	struct dentry *dbgfs_dir;
};

/**
 * struct iwl_mei_cache - cache for the parameters from iwlwifi
 * @ops: Callbacks to iwlwifi.
 * @netdev: The netdev that will be used to transmit / receive packets.
 * @conn_info: The connection info message triggered by iwlwifi's association.
 * @power_limit: pointer to an array of 10 elements (le16) representing the
 *	power restrictions per chain.
 * @rf_kill: rf kill state.
 * @mcc: MCC info
 * @mac_address: interface MAC address.
 * @nvm_address: NVM MAC address.
 * @priv: A pointer to iwlwifi.
 *
 * This is used to cache the configuration coming from iwlwifi. The data is
 * cached here so that we can buffer the configuration even if we don't have
 * a binding from the mei bus and hence no iwl_mei structure.
 */
struct iwl_mei_cache {
	const struct iwl_mei_ops *ops;
	struct net_device __rcu *netdev;
	const struct iwl_sap_notif_connection_info *conn_info;
	const __le16 *power_limit;
	u32 rf_kill;
	u16 mcc;
	u8 mac_address[6];
	u8 nvm_address[6];
	void *priv;
};

static struct iwl_mei_cache iwl_mei_cache = {
	.rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED
};

static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	if (mei_cldev_dma_unmap(cldev))
		dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n");
	memset(&mei->shared_mem, 0, sizeof(mei->shared_mem));
}

#define HBM_DMA_BUF_ID_WLAN	1

static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;

	mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN,
				      IWL_MEI_SAP_SHARED_MEM_SZ);

	if (IS_ERR(mem->ctrl)) {
		int ret = PTR_ERR(mem->ctrl);

		mem->ctrl = NULL;

		return ret;
	}

	memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ);

	return 0;
}

static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
{
	struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
	struct iwl_sap_dir *h2m;
	struct iwl_sap_dir *m2h;
	int dir, queue;
	u8 *q_head;

	mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID);

	mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl));

	h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST];

	h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
		cpu_to_le32(SAP_H2M_DATA_Q_SZ);
	h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
		cpu_to_le32(SAP_H2M_NOTIF_Q_SZ);
	m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
		cpu_to_le32(SAP_M2H_DATA_Q_SZ);
	m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
		cpu_to_le32(SAP_M2H_NOTIF_Q_SZ);

	/* q_head points to the start of the first queue */
	q_head = (void *)(mem->ctrl + 1);

	/* Initialize the queue heads */
	for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) {
		for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) {
			mem->q_head[dir][queue] = q_head;
			q_head +=
				le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
			mem->q_size[dir][queue] =
				le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
		}
	}

	*(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
}

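/*
 * A worked example of the cyclic-buffer convention used below (illustration
 * only): wr_ptr and rd_ptr are byte offsets into a queue of q_sz bytes; the
 * writer advances wr_ptr and the reader advances rd_ptr, both modulo q_sz.
 * With q_sz = 16, rd = 10 and wr = 14, the room left is
 * q_sz - wr + rd = 12 bytes, and a 6-byte message is written as 2 bytes at
 * offset 14 plus 4 bytes at offset 0, leaving wr = (14 + 6) % 16 = 4.
 */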
static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
					struct iwl_sap_q_ctrl_blk *notif_q,
					u8 *q_head,
					const struct iwl_sap_hdr *hdr,
					u32 q_sz)
{
	u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	size_t room_in_buf;
	size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len);

	if (rd > q_sz || wr > q_sz) {
		dev_err(&cldev->dev,
			"Pointers are past the end of the buffer\n");
		return -EINVAL;
	}

	room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;

	/* we don't have enough room for the data to write */
	if (room_in_buf < tx_sz) {
		dev_err(&cldev->dev,
			"Not enough room in the buffer\n");
		return -ENOSPC;
	}

	if (wr + tx_sz <= q_sz) {
		memcpy(q_head + wr, hdr, tx_sz);
	} else {
		memcpy(q_head + wr, hdr, q_sz - wr);
		memcpy(q_head, (const u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
	}

	WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
	return 0;
}

static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei)
{
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct iwl_sap_dir *dir;

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];

	if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr))
		return true;

	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr);
}

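/*
 * CHECK_SHARED_AREA is throttled: after one message is sent, csa_throttled
 * blocks further sends for 100 ms, and csa_throttle_end_wk re-checks the
 * queues once the window expires, so CSME is poked at most ten times a
 * second no matter how fast the host fills the shared area.
 */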
static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_me_msg_start msg = {
		.hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA),
		.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
	};
	int ret;

	lockdep_assert_held(&iwl_mei_mutex);

	if (mei->csa_throttled)
		return 0;

	trace_iwlmei_me_msg(&msg.hdr, true);
	ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		dev_err(&cldev->dev,
			"failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n",
			ret);
		return ret;
	}

	mei->csa_throttled = true;

	schedule_delayed_work(&mei->csa_throttle_end_wk,
			      msecs_to_jiffies(100));

	return 0;
}

static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk)
{
	struct iwl_mei *mei =
		container_of(wk, struct iwl_mei, csa_throttle_end_wk.work);

	mutex_lock(&iwl_mei_mutex);

	mei->csa_throttled = false;

	if (iwl_mei_host_to_me_data_pending(mei))
		iwl_mei_send_check_shared_area(mei->cldev);

	mutex_unlock(&iwl_mei_mutex);
}

static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev,
					struct iwl_sap_hdr *hdr)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct iwl_sap_dir *dir;
	void *q_head;
	u32 q_sz;
	int ret;

	lockdep_assert_held(&iwl_mei_mutex);

	if (!mei->shared_mem.ctrl) {
		dev_err(&cldev->dev,
			"No shared memory, can't send any SAP message\n");
		return -EINVAL;
	}

	if (!iwl_mei_is_connected()) {
		dev_err(&cldev->dev,
			"Can't send a SAP message if we're not connected\n");
		return -ENODEV;
	}

	hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
	dev_dbg(&cldev->dev, "Sending %d\n", hdr->type);

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
	ret = iwl_mei_write_cyclic_buf(cldev, notif_q, q_head, hdr, q_sz);

	if (ret < 0)
		return ret;

	trace_iwlmei_sap_cmd(hdr, true);

	return iwl_mei_send_check_shared_area(cldev);
}

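/*
 * Copy an skb into the host-to-ME data queue, prepending the SAP data
 * header (or the larger control-block Tx header for the DHCP packets CSME
 * wants to see). Called from the Rx path for packets destined to CSME and,
 * rarely, from the Tx path; the caller is expected to follow up with a
 * CHECK_SHARED_AREA so that CSME knows to look at the ring.
 */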
void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
{
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct iwl_sap_dir *dir;
	struct iwl_mei *mei;
	size_t room_in_buf;
	size_t tx_sz;
	size_t hdr_sz;
	u32 q_sz;
	u32 rd;
	u32 wr;
	u8 *q_head;

	if (!iwl_mei_global_cldev)
		return;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	/*
	 * We access this path for Rx packets (the more common case) and from
	 * the Tx path when we send DHCP packets; the latter is very unlikely.
	 * Take the lock already here to make sure we see that remove()
	 * might have cleared the IWL_MEI_STATUS_SAP_CONNECTED bit.
	 */
	spin_lock_bh(&mei->data_q_lock);

	if (!iwl_mei_is_connected()) {
		spin_unlock_bh(&mei->data_q_lock);
		return;
	}

	/*
	 * We are in an RCU critical section and the remove from the CSME bus
	 * which would free this memory waits for the readers to complete
	 * (this is done in netdev_rx_handler_unregister).
	 */
	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];

	rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) :
			 sizeof(struct iwl_sap_hdr);
	tx_sz = skb->len + hdr_sz;

	if (rd > q_sz || wr > q_sz) {
		dev_err(&mei->cldev->dev,
			"can't write the data: pointers are past the end of the buffer\n");
		goto out;
	}

	room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;

	/* we don't have enough room for the data to write */
	if (room_in_buf < tx_sz) {
		dev_err(&mei->cldev->dev,
			"Not enough room in the buffer for this data\n");
		goto out;
	}

	if (skb_headroom(skb) < hdr_sz) {
		dev_err(&mei->cldev->dev,
			"Not enough headroom in the skb to write the SAP header\n");
		goto out;
	}

	if (cb_tx) {
		struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr));

		memset(cb_hdr, 0, sizeof(*cb_hdr));
		cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET);
		cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr));
		cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
		cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX));
		cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr));
		trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP);
	} else {
		struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr));

		hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET);
		hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
		hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
		trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR);
	}

	if (wr + tx_sz <= q_sz) {
		skb_copy_bits(skb, 0, q_head + wr, tx_sz);
	} else {
		skb_copy_bits(skb, 0, q_head + wr, q_sz - wr);
		skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr));
	}

	WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));

out:
	spin_unlock_bh(&mei->data_q_lock);
}

static int
iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type)
{
	struct iwl_sap_hdr msg = {
		.type = cpu_to_le16(type),
	};

	return iwl_mei_send_sap_msg_payload(cldev, &msg);
}

static void iwl_mei_send_csa_msg_wk(struct work_struct *wk)
{
	struct iwl_mei *mei =
		container_of(wk, struct iwl_mei, send_csa_msg_wk);

	if (!iwl_mei_is_connected())
		return;

	mutex_lock(&iwl_mei_mutex);

	iwl_mei_send_check_shared_area(mei->cldev);

	mutex_unlock(&iwl_mei_mutex);
}

/* Called in an RCU read critical section from netif_receive_skb */
static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct iwl_mei *mei =
		rcu_dereference(skb->dev->rx_handler_data);
	struct iwl_mei_filters *filters = rcu_dereference(mei->filters);
	bool rx_for_csme = false;
	rx_handler_result_t res;

	/*
	 * remove() unregisters this handler and calls synchronize_net(),
	 * so this should never happen.
	 */
	if (!iwl_mei_is_connected()) {
		dev_err(&mei->cldev->dev,
			"Got an Rx packet, but we're not connected to SAP?\n");
		return RX_HANDLER_PASS;
	}

	if (filters)
		res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme);
	else
		res = RX_HANDLER_PASS;

	/*
	 * The data is already on the ring of the shared area, all we
	 * need to do is to tell the CSME firmware to check what we have
	 * there.
	 */
	if (rx_for_csme)
		schedule_work(&mei->send_csa_msg_wk);

	if (res != RX_HANDLER_PASS) {
		trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR);
		dev_kfree_skb(skb);
	}

	return res;
}

static void iwl_mei_netdev_work(struct work_struct *wk)
{
	struct iwl_mei *mei =
		container_of(wk, struct iwl_mei, netdev_work);
	struct net_device *netdev;

	/*
	 * First take rtnl and only then the mutex to avoid an ABBA
	 * with iwl_mei_set_netdev()
	 */
	rtnl_lock();
	mutex_lock(&iwl_mei_mutex);

	netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
					   lockdep_is_held(&iwl_mei_mutex));
	if (netdev) {
		if (mei->amt_enabled)
			netdev_rx_handler_register(netdev, iwl_mei_rx_handler,
						   mei);
		else
			netdev_rx_handler_unregister(netdev);
	}

	mutex_unlock(&iwl_mei_mutex);
	rtnl_unlock();
}

static void
iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
			   const struct iwl_sap_me_msg_start_ok *rsp,
			   ssize_t len)
{
	if (len != sizeof(*rsp)) {
		dev_err(&cldev->dev,
			"got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
		dev_err(&cldev->dev,
			"size is incorrect: %zd instead of %zu\n",
			len, sizeof(*rsp));
		return;
	}

	if (rsp->supported_version != SAP_VERSION) {
		dev_err(&cldev->dev,
			"didn't get the expected version: got %d\n",
			rsp->supported_version);
		return;
	}

	mutex_lock(&iwl_mei_mutex);
	set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
	/*
	 * We'll receive the AMT_STATE SAP message in a bit and
	 * that will continue the flow.
	 */
	mutex_unlock(&iwl_mei_mutex);
}

static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev,
					const struct iwl_sap_csme_filters *filters)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
	struct iwl_mei_filters *new_filters;
	struct iwl_mei_filters *old_filters;

	old_filters =
		rcu_dereference_protected(mei->filters,
					  lockdep_is_held(&iwl_mei_mutex));

	new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL);
	if (!new_filters)
		return;

	/* Copy the OOB filters */
	new_filters->filters = filters->filters;

	rcu_assign_pointer(mei->filters, new_filters);

	if (old_filters)
		kfree_rcu(old_filters, rcu_head);
}

static void
iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
			   const struct iwl_sap_notif_conn_status *status)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_mei_conn_info conn_info = {
		.lp_state = le32_to_cpu(status->link_prot_state),
		.ssid_len = le32_to_cpu(status->conn_info.ssid_len),
		.channel = status->conn_info.channel,
		.band = status->conn_info.band,
		.auth_mode = le32_to_cpu(status->conn_info.auth_mode),
		.pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher),
	};

	if (!iwl_mei_cache.ops ||
	    conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid))
		return;

	memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len);
	ether_addr_copy(conn_info.bssid, status->conn_info.bssid);

	iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);

	mei->link_prot_state = status->link_prot_state;

	/*
	 * Update the Rfkill state in case the host does not own the device:
	 * if we are in Link Protection, ask not to touch the device;
	 * otherwise, unblock rfkill.
	 * If the host owns the device, inform the user space whether it can
	 * roam.
	 */
	if (mei->got_ownership)
		iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv,
						     status->link_prot_state);
	else
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv,
					  status->link_prot_state, false);
}

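/*
 * Replay the configuration that iwlwifi handed us while there was no SAP
 * connection (or while AMT was disabled): the cached link state, country
 * code, SAR limits, NIC addresses and rfkill state are pushed to CSME in
 * one go once it reports that AMT is enabled.
 */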
static void iwl_mei_set_init_conf(struct iwl_mei *mei)
{
	struct iwl_sap_notif_host_link_up link_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
		.hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)),
	};
	struct iwl_sap_notif_country_code mcc_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
		.hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)),
		.mcc = cpu_to_le16(iwl_mei_cache.mcc),
	};
	struct iwl_sap_notif_sar_limits sar_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
		.hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)),
	};
	struct iwl_sap_notif_host_nic_info nic_info_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
		.hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)),
	};
	struct iwl_sap_msg_dw rfkill_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
		.hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)),
		.val = cpu_to_le32(iwl_mei_cache.rf_kill),
	};

	/* wifi driver has registered already */
	if (iwl_mei_cache.ops) {
		iwl_mei_send_sap_msg(mei->cldev,
				     SAP_MSG_NOTIF_WIFIDR_UP);
		iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
	}

	iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);

	if (iwl_mei_cache.conn_info) {
		link_msg.conn_info = *iwl_mei_cache.conn_info;
		iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr);
	}

	iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr);

	if (iwl_mei_cache.power_limit) {
		memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit,
		       sizeof(sar_msg.sar_chain_info_table));
		iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr);
	}

	if (is_valid_ether_addr(iwl_mei_cache.mac_address)) {
		ether_addr_copy(nic_info_msg.mac_address,
				iwl_mei_cache.mac_address);
		ether_addr_copy(nic_info_msg.nvm_address,
				iwl_mei_cache.nvm_address);
		iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr);
	}

	iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr);
}

static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
				     const struct iwl_sap_msg_dw *dw)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	mutex_lock(&iwl_mei_mutex);

	if (mei->amt_enabled == !!le32_to_cpu(dw->val))
		goto out;

	mei->amt_enabled = !!le32_to_cpu(dw->val);

	if (mei->amt_enabled)
		iwl_mei_set_init_conf(mei);
	else if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);

	schedule_work(&mei->netdev_work);

out:
	mutex_unlock(&iwl_mei_mutex);
}

static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
				     const struct iwl_sap_msg_dw *dw)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME);
}

static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev,
						 const void *payload)
{
	/* We can get ownership and the driver is registered, go ahead */
	if (iwl_mei_cache.ops)
		iwl_mei_send_sap_msg(cldev,
				     SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
}

static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
						 const void *payload)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	dev_info(&cldev->dev, "CSME takes ownership\n");

	mei->got_ownership = false;

	if (iwl_mei_cache.ops && !mei->device_down) {
		/*
		 * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi
		 * driver is finished taking the device down.
		 */
		mei->csme_taking_ownership = true;

		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true, true);
	} else {
		iwl_mei_send_sap_msg(cldev,
				     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
		schedule_delayed_work(&mei->ownership_dwork,
				      MEI_OWNERSHIP_RETAKE_TIMEOUT_MS);
	}
}

static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
			       const struct iwl_sap_nvm *sap_nvm)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm;
	int i;

	kfree(mei->nvm);
	mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL);
	if (!mei->nvm)
		return;

	ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr);
	mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs;
	mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg);
	mei->nvm->caps = le32_to_cpu(sap_nvm->caps);
	mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version);

	for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++)
		mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]);

	wake_up_all(&mei->get_nvm_wq);
}

static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev,
					   const struct iwl_sap_msg_dw *dw)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	/*
	 * This means that we can't use the wifi device right now, CSME is not
	 * ready to let us use it.
	 */
	if (!dw->val) {
		dev_info(&cldev->dev, "Ownership req denied\n");
		return;
	}

	mei->got_ownership = true;
	wake_up_all(&mei->get_ownership_wq);

	iwl_mei_send_sap_msg(cldev,
			     SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED);

	/* We can now start the connection, unblock rfkill */
	if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
}

static void iwl_mei_handle_pldr_ack(struct mei_cl_device *cldev,
				    const struct iwl_sap_pldr_ack_data *ack)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	mei->pldr_active = le32_to_cpu(ack->status) == SAP_PLDR_STATUS_SUCCESS;
	wake_up_all(&mei->pldr_wq);
}

static void iwl_mei_handle_ping(struct mei_cl_device *cldev,
				const struct iwl_sap_hdr *hdr)
{
	iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG);
}

static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev,
				   const struct iwl_sap_hdr *hdr)
{
	u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr);
	u16 type = le16_to_cpu(hdr->type);

	dev_dbg(&cldev->dev,
		"Got a new SAP message: type %d, len %d, seq %d\n",
		le16_to_cpu(hdr->type), len,
		le32_to_cpu(hdr->seq_num));

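/*
 * Each SAP_MSG_HANDLER() invocation in the switch below expands to a full
 * case label: SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0) becomes
 * "case SAP_MSG_NOTIF_PING:" with a length check followed by a call to
 * iwl_mei_handle_ping() under iwl_mei_mutex. The _NO_LOCK variant is for
 * handlers that deal with the mutex themselves, and _NO_HANDLER only
 * validates the size and swallows the message.
 */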
#define SAP_MSG_HANDLER(_cmd, _handler, _sz)				\
	case SAP_MSG_NOTIF_ ## _cmd:					\
		if (len < _sz) {					\
			dev_err(&cldev->dev,				\
				"Bad size for %d: %u < %u\n",		\
				le16_to_cpu(hdr->type),			\
				(unsigned int)len,			\
				(unsigned int)_sz);			\
			break;						\
		}							\
		mutex_lock(&iwl_mei_mutex);				\
		_handler(cldev, (const void *)hdr);			\
		mutex_unlock(&iwl_mei_mutex);				\
		break

#define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz)			\
	case SAP_MSG_NOTIF_ ## _cmd:					\
		if (len < _sz) {					\
			dev_err(&cldev->dev,				\
				"Bad size for %d: %u < %u\n",		\
				le16_to_cpu(hdr->type),			\
				(unsigned int)len,			\
				(unsigned int)_sz);			\
			break;						\
		}							\
		_handler(cldev, (const void *)hdr);			\
		break

#define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz)				\
	case SAP_MSG_NOTIF_ ## _cmd:					\
		if (len < _sz) {					\
			dev_err(&cldev->dev,				\
				"Bad size for %d: %u < %u\n",		\
				le16_to_cpu(hdr->type),			\
				(unsigned int)len,			\
				(unsigned int)_sz);			\
			break;						\
		}							\
		break

	switch (type) {
	SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0);
	SAP_MSG_HANDLER(CSME_FILTERS,
			iwl_mei_handle_csme_filters,
			sizeof(struct iwl_sap_csme_filters));
	SAP_MSG_HANDLER(CSME_CONN_STATUS,
			iwl_mei_handle_conn_status,
			sizeof(struct iwl_sap_notif_conn_status));
	SAP_MSG_HANDLER_NO_LOCK(AMT_STATE,
				iwl_mei_handle_amt_state,
				sizeof(struct iwl_sap_msg_dw));
	SAP_MSG_HANDLER_NO_HANDLER(PONG, 0);
	SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm,
			sizeof(struct iwl_sap_nvm));
	SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ,
			iwl_mei_handle_rx_host_own_req,
			sizeof(struct iwl_sap_msg_dw));
	SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner,
			sizeof(struct iwl_sap_msg_dw));
	SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP,
			iwl_mei_handle_can_release_ownership, 0);
	SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP,
			iwl_mei_handle_csme_taking_ownership, 0);
	SAP_MSG_HANDLER(PLDR_ACK, iwl_mei_handle_pldr_ack,
			sizeof(struct iwl_sap_pldr_ack_data));
	default:
	/*
	 * This is not really an error; there are messages that we decided
	 * to ignore, yet it is useful to be able to leave a note here when
	 * debug is enabled.
	 */
		dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n",
			le16_to_cpu(hdr->type), len);
	}

#undef SAP_MSG_HANDLER
#undef SAP_MSG_HANDLER_NO_LOCK
#undef SAP_MSG_HANDLER_NO_HANDLER
}

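/*
 * Read len bytes out of the cyclic buffer starting at *_rd, handling the
 * wrap back to the start of the queue, and advance *_rd past what was
 * consumed. This is the read-side mirror of iwl_mei_write_cyclic_buf().
 */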
static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz,
				u32 *_rd, u32 wr,
				void *_buf, u32 len)
{
	u8 *buf = _buf;
	u32 rd = *_rd;

	if (rd + len <= q_sz) {
		memcpy(buf, q_head + rd, len);
		rd += len;
	} else {
		memcpy(buf, q_head + rd, q_sz - rd);
		memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd));
		rd = len - (q_sz - rd);
	}

	*_rd = rd;
}

#define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) + \
			     IEEE80211_TKIP_IV_LEN + \
			     sizeof(rfc1042_header) + ETH_TLEN)

static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev,
				    const u8 *q_head, u32 q_sz,
				    u32 rd, u32 wr, ssize_t valid_rx_sz,
				    struct sk_buff_head *tx_skbs)
{
	struct iwl_sap_hdr hdr;
	struct net_device *netdev =
		rcu_dereference_protected(iwl_mei_cache.netdev,
					  lockdep_is_held(&iwl_mei_mutex));

	if (!netdev)
		return;

	while (valid_rx_sz >= sizeof(hdr)) {
		struct ethhdr *ethhdr;
		unsigned char *data;
		struct sk_buff *skb;
		u16 len;

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr));
		valid_rx_sz -= sizeof(hdr);
		len = le16_to_cpu(hdr.len);

		if (valid_rx_sz < len) {
			dev_err(&cldev->dev,
				"Data queue is corrupted: valid data len %zd, len %d\n",
				valid_rx_sz, len);
			break;
		}

		if (len < sizeof(*ethhdr)) {
			dev_err(&cldev->dev,
				"Data len is smaller than an ethernet header? len = %d\n",
				len);
		}

		valid_rx_sz -= len;

		if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) {
			dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n",
				le16_to_cpu(hdr.type), len);
			continue;
		}

		/* We need enough room for the WiFi header + SNAP + IV */
		skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN);
		if (!skb)
			continue;

		skb_reserve(skb, QOS_HDR_IV_SNAP_LEN);
		ethhdr = skb_push(skb, sizeof(*ethhdr));

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr,
				    ethhdr, sizeof(*ethhdr));
		len -= sizeof(*ethhdr);

		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = ethhdr->h_proto;

		data = skb_put(skb, len);
		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len);

		/*
		 * Enqueue the skb here so that it can be sent later when we
		 * do not hold the mutex. TX'ing a packet with a mutex held
		 * is possible, but it wouldn't be nice to forbid the TX path
		 * from calling any of iwlmei's functions, since every API
		 * from iwlmei needs the mutex.
		 */
		__skb_queue_tail(tx_skbs, skb);
	}
}

static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev,
				      const u8 *q_head, u32 q_sz,
				      u32 rd, u32 wr, ssize_t valid_rx_sz)
{
	struct page *p = alloc_page(GFP_KERNEL);
	struct iwl_sap_hdr *hdr;

	if (!p)
		return;

	hdr = page_address(p);

	while (valid_rx_sz >= sizeof(*hdr)) {
		u16 len;

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr));
		valid_rx_sz -= sizeof(*hdr);
		len = le16_to_cpu(hdr->len);

		if (valid_rx_sz < len)
			break;

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len);

		trace_iwlmei_sap_cmd(hdr, false);
		iwl_mei_handle_sap_msg(cldev, hdr);
		valid_rx_sz -= len;
	}

	/* valid_rx_sz must be 0 now... */
	if (valid_rx_sz)
		dev_err(&cldev->dev,
			"More data in the buffer although we read it all\n");

	__free_page(p);
}

static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev,
				  struct iwl_sap_q_ctrl_blk *notif_q,
				  const u8 *q_head,
				  struct sk_buff_head *skbs,
				  u32 q_sz)
{
	u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	ssize_t valid_rx_sz;

	if (rd > q_sz || wr > q_sz) {
		dev_err(&cldev->dev,
			"Pointers are past the buffer limit\n");
		return;
	}

	if (rd == wr)
		return;

	valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr;

	if (skbs)
		iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr,
					valid_rx_sz, skbs);
	else
		iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr,
					  valid_rx_sz);

	/* Increment the read pointer to point to the write pointer */
	WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr));
}

static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct sk_buff_head tx_skbs;
	struct iwl_sap_dir *dir;
	void *q_head;
	u32 q_sz;

	if (!mei->shared_mem.ctrl)
		return;

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];

	/*
	 * Do not hold the mutex here; rather, each and every message
	 * handler takes it itself.
	 * This allows a handler to take it only for the time it needs it.
	 */
	iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL, q_sz);

	mutex_lock(&iwl_mei_mutex);
	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];

	__skb_queue_head_init(&tx_skbs);

	iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs, q_sz);

	if (skb_queue_empty(&tx_skbs)) {
		mutex_unlock(&iwl_mei_mutex);
		return;
	}

	/*
	 * Take the RCU read lock before we unlock the mutex to make sure
	 * that even if the netdev is replaced by another non-NULL netdev
	 * right after we unlock the mutex, the old netdev will still be
	 * valid when we transmit the frames. We can't allow the netdev to
	 * be replaced here because the skbs hold a pointer to it.
	 */
	rcu_read_lock();

	mutex_unlock(&iwl_mei_mutex);

	if (!rcu_access_pointer(iwl_mei_cache.netdev)) {
		dev_err(&cldev->dev, "Can't Tx without a netdev\n");
		skb_queue_purge(&tx_skbs);
		goto out;
	}

	while (!skb_queue_empty(&tx_skbs)) {
		struct sk_buff *skb = __skb_dequeue(&tx_skbs);

		trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR);
		dev_queue_xmit(skb);
	}

out:
	rcu_read_unlock();
}

static void iwl_mei_rx(struct mei_cl_device *cldev)
{
	struct iwl_sap_me_msg_hdr *hdr;
	u8 msg[100];
	ssize_t ret;

	ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg));
	if (ret < 0) {
		dev_err(&cldev->dev, "failed to receive data: %zd\n", ret);
		return;
	}

	if (ret == 0) {
		dev_err(&cldev->dev, "got an empty response\n");
		return;
	}

	hdr = (void *)msg;
	trace_iwlmei_me_msg(hdr, false);

	switch (le32_to_cpu(hdr->type)) {
	case SAP_ME_MSG_START_OK:
		BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) >
			     sizeof(msg));

		iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret);
		break;
	case SAP_ME_MSG_CHECK_SHARED_AREA:
		iwl_mei_handle_check_shared_area(cldev);
		break;
	default:
		dev_err(&cldev->dev, "got an unexpected RX notification: %d\n",
			le32_to_cpu(hdr->type));
		break;
	}
}

static int iwl_mei_send_start(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_me_msg_start msg = {
		.hdr.type = cpu_to_le32(SAP_ME_MSG_START),
		.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
		.hdr.len = cpu_to_le32(sizeof(msg)),
		.supported_versions[0] = SAP_VERSION,
		.init_data_seq_num = cpu_to_le16(0x100),
		.init_notif_seq_num = cpu_to_le16(0x800),
	};
	int ret;

	trace_iwlmei_me_msg(&msg.hdr, true);
	ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		dev_err(&cldev->dev,
			"failed to send the SAP_ME_MSG_START message %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int iwl_mei_enable(struct mei_cl_device *cldev)
{
	int ret;

	ret = mei_cldev_enable(cldev);
	if (ret < 0) {
		dev_err(&cldev->dev, "failed to enable the device: %d\n", ret);
		return ret;
	}

	ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx);
	if (ret) {
		dev_err(&cldev->dev,
			"failed to register to the rx cb: %d\n", ret);
		mei_cldev_disable(cldev);
		return ret;
	}

	return 0;
}

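/*
 * Ask CSME for its NVM snapshot: send SAP_MSG_NOTIF_GET_NVM and sleep on
 * get_nvm_wq until iwl_mei_handle_nvm() fills mei->nvm, giving up after
 * two seconds. The caller owns (and must kfree()) the returned copy.
 */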
struct iwl_mei_nvm *iwl_mei_get_nvm(void)
{
	struct iwl_mei_nvm *nvm = NULL;
	struct iwl_mei *mei;
	int ret;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev,
				   SAP_MSG_NOTIF_GET_NVM);
	if (ret)
		goto out;

	mutex_unlock(&iwl_mei_mutex);

	ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ);
	if (!ret)
		return NULL;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	if (mei->nvm)
		nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL);

out:
	mutex_unlock(&iwl_mei_mutex);
	return nvm;
}
EXPORT_SYMBOL_GPL(iwl_mei_get_nvm);

#define IWL_MEI_PLDR_NUM_RETRIES	3

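/*
 * PLDR (product level device reset) request: ask CSME to prepare for the
 * wifi device reset, retrying up to IWL_MEI_PLDR_NUM_RETRIES times and
 * waiting half a second for the PLDR_ACK each time. Note the locking
 * dance below: the mutex is dropped while we sleep on pldr_wq and
 * re-taken for the next iteration.
 */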
int iwl_mei_pldr_req(void)
{
	struct iwl_mei *mei;
	int ret;
	struct iwl_sap_pldr_data msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_PLDR),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};
	int i;

	mutex_lock(&iwl_mei_mutex);

	/* In case we didn't have a bind */
	if (!iwl_mei_is_connected()) {
		ret = 0;
		goto out;
	}

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei) {
		ret = -ENODEV;
		goto out;
	}

	if (!mei->amt_enabled) {
		ret = 0;
		goto out;
	}

	for (i = 0; i < IWL_MEI_PLDR_NUM_RETRIES; i++) {
		ret = iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
		mutex_unlock(&iwl_mei_mutex);
		if (ret)
			return ret;

		ret = wait_event_timeout(mei->pldr_wq, mei->pldr_active, HZ / 2);
		if (ret)
			break;

		/* Take the mutex for the next iteration */
		mutex_lock(&iwl_mei_mutex);
	}

	if (ret)
		return 0;

	ret = -ETIMEDOUT;
out:
	mutex_unlock(&iwl_mei_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iwl_mei_pldr_req);

int iwl_mei_get_ownership(void)
{
	struct iwl_mei *mei;
	int ret;

	mutex_lock(&iwl_mei_mutex);

	/* In case we didn't have a bind */
	if (!iwl_mei_is_connected()) {
		ret = 0;
		goto out;
	}

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei) {
		ret = -ENODEV;
		goto out;
	}

	if (!mei->amt_enabled) {
		ret = 0;
		goto out;
	}

	if (mei->got_ownership) {
		ret = 0;
		goto out;
	}

	ret = iwl_mei_send_sap_msg(mei->cldev,
				   SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
	if (ret)
		goto out;

	mutex_unlock(&iwl_mei_mutex);

	ret = wait_event_timeout(mei->get_ownership_wq,
				 mei->got_ownership, HZ / 2);
	if (!ret) {
		schedule_delayed_work(&mei->ownership_dwork,
				      MEI_OWNERSHIP_RETAKE_TIMEOUT_MS);
		return -ETIMEDOUT;
	}

	return 0;
out:
	mutex_unlock(&iwl_mei_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iwl_mei_get_ownership);

void iwl_mei_alive_notif(bool success)
{
	struct iwl_mei *mei;
	struct iwl_sap_pldr_end_data msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_PLDR_END),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.status = success ? cpu_to_le32(SAP_PLDR_STATUS_SUCCESS) :
				    cpu_to_le32(SAP_PLDR_STATUS_FAILURE),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
	if (!mei || !mei->pldr_active)
		goto out;

	mei->pldr_active = false;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
out:
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_alive_notif);

void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
			     const struct iwl_mei_colloc_info *colloc_info)
{
	struct iwl_sap_notif_host_link_up msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.conn_info = {
			.ssid_len = cpu_to_le32(conn_info->ssid_len),
			.channel = conn_info->channel,
			.band = conn_info->band,
			.pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher),
			.auth_mode = cpu_to_le32(conn_info->auth_mode),
		},
	};
	struct iwl_mei *mei;

	if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid))
		return;

	memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len);
	memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN);

	if (colloc_info) {
		msg.colloc_channel = colloc_info->channel;
		msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1;
		memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN);
	}

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	kfree(iwl_mei_cache.conn_info);
	iwl_mei_cache.conn_info =
		kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL);
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_host_associated);

void iwl_mei_host_disassociated(void)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_host_link_down msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.type = HOST_LINK_DOWN_TYPE_TEMPORARY,
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	kfree(iwl_mei_cache.conn_info);
	iwl_mei_cache.conn_info = NULL;
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated);

void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
{
	struct iwl_mei *mei;
	u32 rfkill_state = 0;
	struct iwl_sap_msg_dw msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};

	if (!sw_rfkill)
		rfkill_state |= SAP_SW_RFKILL_DEASSERTED;

	if (!hw_rfkill)
		rfkill_state |= SAP_HW_RFKILL_DEASSERTED;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	msg.val = cpu_to_le32(rfkill_state);

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	iwl_mei_cache.rf_kill = rfkill_state;
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state);

void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_host_nic_info msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	ether_addr_copy(msg.mac_address, mac_address);
	ether_addr_copy(msg.nvm_address, nvm_address);

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	ether_addr_copy(iwl_mei_cache.mac_address, mac_address);
	ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address);
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info);

void iwl_mei_set_country_code(u16 mcc)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_country_code msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.mcc = cpu_to_le16(mcc),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	iwl_mei_cache.mcc = mcc;
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_country_code);

void iwl_mei_set_power_limit(const __le16 *power_limit)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_sar_limits msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table));

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	kfree(iwl_mei_cache.power_limit);
	iwl_mei_cache.power_limit = kmemdup(power_limit,
					    sizeof(msg.sar_chain_info_table), GFP_KERNEL);
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit);

void iwl_mei_set_netdev(struct net_device *netdev)
{
	struct iwl_mei *mei;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected()) {
		rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
		goto out;
	}

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	if (!netdev) {
		struct net_device *dev =
			rcu_dereference_protected(iwl_mei_cache.netdev,
						  lockdep_is_held(&iwl_mei_mutex));

		if (!dev)
			goto out;

		netdev_rx_handler_unregister(dev);
	}

	rcu_assign_pointer(iwl_mei_cache.netdev, netdev);

	if (netdev && mei->amt_enabled)
		netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);

out:
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);

void iwl_mei_device_state(bool up)
{
	struct iwl_mei *mei;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	mei->device_down = !up;

	if (up || !mei->csme_taking_ownership)
		goto out;

	iwl_mei_send_sap_msg(mei->cldev,
			     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
	mei->csme_taking_ownership = false;
	schedule_delayed_work(&mei->ownership_dwork,
			      MEI_OWNERSHIP_RETAKE_TIMEOUT_MS);
out:
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_device_state);

int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
{
	int ret;

	/*
	 * We must have a non-NULL priv pointer to not crash when there are
	 * multiple WiFi devices.
	 */
	if (!priv)
		return -EINVAL;

	mutex_lock(&iwl_mei_mutex);

	/* do not allow registration if someone else already registered */
	if (iwl_mei_cache.priv || iwl_mei_cache.ops) {
		ret = -EBUSY;
		goto out;
	}

	iwl_mei_cache.priv = priv;
	iwl_mei_cache.ops = ops;

	if (iwl_mei_global_cldev) {
		struct iwl_mei *mei =
			mei_cldev_get_drvdata(iwl_mei_global_cldev);

		/* we already have a SAP connection */
1795 if (iwl_mei_is_connected()) {
1796 if (mei->amt_enabled)
1797 iwl_mei_send_sap_msg(mei->cldev,
1798 SAP_MSG_NOTIF_WIFIDR_UP);
1799 ops->rfkill(priv, mei->link_prot_state, false);
1800 }
1801 }
1802 ret = 0;
1803
1804out:
1805 mutex_unlock(&iwl_mei_mutex);
1806 return ret;
1807}
1808EXPORT_SYMBOL_GPL(iwl_mei_register);
1809
1810void iwl_mei_start_unregister(void)
1811{
1812 mutex_lock(&iwl_mei_mutex);
1813
1814 /* At this point, the wifi driver should have removed the netdev */
1815 if (rcu_access_pointer(iwl_mei_cache.netdev))
1816 pr_err("Still had a netdev pointer set upon unregister\n");
1817
1818 kfree(iwl_mei_cache.conn_info);
1819 iwl_mei_cache.conn_info = NULL;
1820 kfree(iwl_mei_cache.power_limit);
1821 iwl_mei_cache.power_limit = NULL;
1822 iwl_mei_cache.ops = NULL;
1823 /* leave iwl_mei_cache.priv non-NULL to prevent any new registration */
1824
1825 mutex_unlock(&iwl_mei_mutex);
1826}
1827EXPORT_SYMBOL_GPL(iwl_mei_start_unregister);
1828
1829void iwl_mei_unregister_complete(void)
1830{
1831 mutex_lock(&iwl_mei_mutex);
1832
1833 iwl_mei_cache.priv = NULL;
1834
1835 if (iwl_mei_global_cldev) {
1836 struct iwl_mei *mei =
1837 mei_cldev_get_drvdata(iwl_mei_global_cldev);
1838
1839 if (mei->amt_enabled)
1840 iwl_mei_send_sap_msg(mei->cldev,
1841 SAP_MSG_NOTIF_WIFIDR_DOWN);
1842 mei->got_ownership = false;
1843 }
1844
1845 mutex_unlock(&iwl_mei_mutex);
1846}
1847EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete);
1848
1849#if IS_ENABLED(CONFIG_DEBUG_FS)
1850
1851static ssize_t
1852iwl_mei_dbgfs_send_start_message_write(struct file *file,
1853 const char __user *user_buf,
1854 size_t count, loff_t *ppos)
1855{
1856 int ret;
1857
1858 mutex_lock(&iwl_mei_mutex);
1859
1860 if (!iwl_mei_global_cldev) {
1861 ret = -ENODEV;
1862 goto out;
1863 }
1864
1865 ret = iwl_mei_send_start(iwl_mei_global_cldev);
1866
1867out:
1868 mutex_unlock(&iwl_mei_mutex);
1869 return ret ?: count;
1870}
1871
1872static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = {
1873 .write = iwl_mei_dbgfs_send_start_message_write,
1874 .open = simple_open,
1875 .llseek = default_llseek,
1876};
1877
1878static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file,
1879 const char __user *user_buf,
1880 size_t count, loff_t *ppos)
1881{
1882 iwl_mei_get_ownership();
1883
1884 return count;
1885}
1886
1887static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = {
1888 .write = iwl_mei_dbgfs_req_ownership_write,
1889 .open = simple_open,
1890 .llseek = default_llseek,
1891};
1892
1893static void iwl_mei_dbgfs_register(struct iwl_mei *mei)
1894{
1895 mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1896
1897 if (!mei->dbgfs_dir)
1898 return;
1899
1900 debugfs_create_ulong("status", S_IRUSR,
1901 mei->dbgfs_dir, &iwl_mei_status);
1902 debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir,
1903 mei, &iwl_mei_dbgfs_send_start_message_ops);
1904 debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir,
1905 mei, &iwl_mei_dbgfs_req_ownership_ops);
1906}
1907
1908static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei)
1909{
1910 debugfs_remove_recursive(mei->dbgfs_dir);
1911 mei->dbgfs_dir = NULL;
1912}
1913
1914#else
1915
1916static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {}
1917static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
1918
1919#endif /* CONFIG_DEBUG_FS */
1920
1921static void iwl_mei_ownership_dwork(struct work_struct *wk)
1922{
1923 iwl_mei_get_ownership();
1924}
1925
1926#define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3
1927
1928/*
1929 * iwl_mei_probe - the probe function called by the mei bus enumeration
1930 *
1931 * This allocates the data needed by iwlmei and sets a pointer to this data
1932 * into the mei_cl_device's drvdata.
1933 * It starts the SAP protocol by sending the SAP_ME_MSG_START without
1934 * waiting for the answer. The answer will be caught later by the Rx callback.
1935 */
1936static int iwl_mei_probe(struct mei_cl_device *cldev,
1937 const struct mei_cl_device_id *id)
1938{
1939 int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
1940 struct iwl_mei *mei;
1941 int ret;
1942
1943 mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL);
1944 if (!mei)
1945 return -ENOMEM;
1946
1947 init_waitqueue_head(&mei->get_nvm_wq);
1948 INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk);
1949 INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
1950 iwl_mei_csa_throttle_end_wk);
1951 init_waitqueue_head(&mei->get_ownership_wq);
1952 init_waitqueue_head(&mei->pldr_wq);
1953 spin_lock_init(&mei->data_q_lock);
1954 INIT_WORK(&mei->netdev_work, iwl_mei_netdev_work);
1955 INIT_DELAYED_WORK(&mei->ownership_dwork, iwl_mei_ownership_dwork);
1956
1957 mei_cldev_set_drvdata(cldev, mei);
1958 mei->cldev = cldev;
1959 mei->device_down = true;
1960
	do {
		ret = iwl_mei_alloc_shared_mem(cldev);
		if (!ret)
			break;
		/*
		 * The CSME firmware needs to boot the internal WLAN client.
		 * This can take time in certain configurations (usually
		 * upon resume and when the whole CSME firmware is shut down
		 * during suspend).
		 *
		 * Wait a bit before retrying and hope we'll succeed next time.
		 */
		dev_dbg(&cldev->dev,
			"Couldn't allocate the shared memory: %d, attempt %d / %d\n",
			ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
		msleep(100);
		alloc_retry--;
	} while (alloc_retry);

	if (ret) {
		dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
			ret);
		goto free;
	}

	iwl_mei_init_shared_mem(mei);

	ret = iwl_mei_enable(cldev);
	if (ret)
		goto free_shared_mem;

	iwl_mei_dbgfs_register(mei);

	/*
	 * We now have an Rx function in place; start the SAP protocol.
	 * We expect to get the SAP_ME_MSG_START_OK response later on.
	 */
	mutex_lock(&iwl_mei_mutex);
	ret = iwl_mei_send_start(cldev);
	mutex_unlock(&iwl_mei_mutex);
	if (ret)
		goto debugfs_unregister;

	/* must be last */
	iwl_mei_global_cldev = cldev;

	return 0;

debugfs_unregister:
	iwl_mei_dbgfs_unregister(mei);
	mei_cldev_disable(cldev);
free_shared_mem:
	iwl_mei_free_shared_mem(cldev);
free:
	mei_cldev_set_drvdata(cldev, NULL);
	devm_kfree(&cldev->dev, mei);

	return ret;
}

#define SEND_SAP_MAX_WAIT_ITERATION 10
#define IWLMEI_DEVICE_DOWN_WAIT_ITERATION 50
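/* The wait loops below poll every 20ms (SAP ACK) and every 1ms (device
 * down), bounding the waits to roughly 200ms and 50ms respectively.
 */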

static void iwl_mei_remove(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	int i;

	/*
	 * We are being removed while the bus is active; it means we are
	 * going to suspend/shutdown, so the NIC will disappear.
	 */
	if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops) {
		unsigned int iter = IWLMEI_DEVICE_DOWN_WAIT_ITERATION;
		bool down = false;

		/*
		 * In case of suspend, wait for the mac to stop and don't remove
		 * the interface. This will allow the interface to come back
		 * on resume.
		 */
		while (!down && iter--) {
			mdelay(1);

			mutex_lock(&iwl_mei_mutex);
			down = mei->device_down;
			mutex_unlock(&iwl_mei_mutex);
		}

		if (!down)
			iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);
	}

	if (rcu_access_pointer(iwl_mei_cache.netdev)) {
		struct net_device *dev;

		/*
		 * First take rtnl and only then the mutex to avoid an ABBA
		 * with iwl_mei_set_netdev()
		 */
		rtnl_lock();
		mutex_lock(&iwl_mei_mutex);

		/*
		 * If we are suspending and the wifi driver hasn't removed its
		 * netdev yet, do it now. In any case, don't change the
		 * cache.netdev pointer.
		 */
		dev = rcu_dereference_protected(iwl_mei_cache.netdev,
						lockdep_is_held(&iwl_mei_mutex));

		netdev_rx_handler_unregister(dev);
		mutex_unlock(&iwl_mei_mutex);
		rtnl_unlock();
	}

	mutex_lock(&iwl_mei_mutex);

	/* Tell CSME that we are going down so that it won't access the
	 * memory anymore; make sure this message goes through immediately.
	 */
	mei->csa_throttled = false;
	iwl_mei_send_sap_msg(mei->cldev,
			     SAP_MSG_NOTIF_HOST_GOES_DOWN);

	for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
		if (!iwl_mei_host_to_me_data_pending(mei))
			break;

		msleep(20);
	}

	/* If we couldn't make sure that CSME saw the HOST_GOES_DOWN
	 * message, it will probably keep reading memory that we are going
	 * to unmap and free; expect IOMMU error messages.
	 */
	if (i == SEND_SAP_MAX_WAIT_ITERATION)
		dev_err(&mei->cldev->dev,
			"Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");

	mutex_unlock(&iwl_mei_mutex);

	/*
	 * This looks strange, but this lock is taken here to make sure that
	 * iwl_mei_add_data_to_ring called from the Tx path sees that we
	 * clear the IWL_MEI_STATUS_SAP_CONNECTED bit.
	 * Rx isn't a problem because the rx_handler can't be called after
	 * having been unregistered.
	 */
	spin_lock_bh(&mei->data_q_lock);
	clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
	spin_unlock_bh(&mei->data_q_lock);

	if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);

	/*
	 * mei_cldev_disable will return only after all the MEI Rx is done.
	 * It must be called when iwl_mei_mutex is *not* held, since it waits
	 * for our Rx handler to complete.
	 * After it returns, no new Rx will start.
	 */
	mei_cldev_disable(cldev);

	/*
	 * The netdev was already removed, and its removal includes a call to
	 * synchronize_net(), so we know there won't be any new Rx that will
	 * trigger the following workers.
	 */
	cancel_work_sync(&mei->send_csa_msg_wk);
	cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
	cancel_work_sync(&mei->netdev_work);
	cancel_delayed_work_sync(&mei->ownership_dwork);

	/*
	 * If someone waits for the ownership, let them know that we are
	 * going down and that we are not connected anymore. They'll be able
	 * to take the device.
	 */
	wake_up_all(&mei->get_ownership_wq);
	wake_up_all(&mei->pldr_wq);

	mutex_lock(&iwl_mei_mutex);

	iwl_mei_global_cldev = NULL;

	wake_up_all(&mei->get_nvm_wq);

	iwl_mei_free_shared_mem(cldev);

	iwl_mei_dbgfs_unregister(mei);

	mei_cldev_set_drvdata(cldev, NULL);

	kfree(mei->nvm);

	kfree(rcu_access_pointer(mei->filters));

	devm_kfree(&cldev->dev, mei);

	mutex_unlock(&iwl_mei_mutex);
}

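/* Match the CSME WLAN client on the MEI bus by its UUID */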
static const struct mei_cl_device_id iwl_mei_tbl[] = {
	{
		.name = KBUILD_MODNAME,
		.uuid = MEI_WLAN_UUID,
		.version = MEI_CL_VERSION_ANY,
	},

	/* required last entry */
	{ }
};

/*
 * Do not export the device table: this module is loaded through iwlwifi's
 * module dependency rather than by device-table matching.
 */

static struct mei_cl_driver iwl_mei_cl_driver = {
	.id_table = iwl_mei_tbl,
	.name = KBUILD_MODNAME,
	.probe = iwl_mei_probe,
	.remove = iwl_mei_remove,
};

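/*
 * module_mei_cl_driver() expands to the module init/exit boilerplate that
 * registers this driver on the MEI client bus.
 */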
module_mei_cl_driver(iwl_mei_cl_driver);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2021-2022 Intel Corporation
4 */
5
6#include <linux/etherdevice.h>
7#include <linux/netdevice.h>
8#include <linux/ieee80211.h>
9#include <linux/rtnetlink.h>
10#include <linux/module.h>
11#include <linux/moduleparam.h>
12#include <linux/mei_cl_bus.h>
13#include <linux/rcupdate.h>
14#include <linux/debugfs.h>
15#include <linux/skbuff.h>
16#include <linux/wait.h>
17#include <linux/slab.h>
18#include <linux/mm.h>
19
20#include <net/cfg80211.h>
21
22#include "internal.h"
23#include "iwl-mei.h"
24#include "trace.h"
25#include "trace-data.h"
26#include "sap.h"
27
28MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface");
29MODULE_LICENSE("GPL");
30
31#define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \
32 0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65)
33
34/*
35 * Since iwlwifi calls iwlmei without any context, hold a pointer to the
36 * mei_cl_device structure here.
37 * Define a mutex that will synchronize all the flows between iwlwifi and
38 * iwlmei.
39 * Note that iwlmei can't have several instances, so it ok to have static
40 * variables here.
41 */
42static struct mei_cl_device *iwl_mei_global_cldev;
43static DEFINE_MUTEX(iwl_mei_mutex);
44static unsigned long iwl_mei_status;
45
46enum iwl_mei_status_bits {
47 IWL_MEI_STATUS_SAP_CONNECTED,
48};
49
50bool iwl_mei_is_connected(void)
51{
52 return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
53}
54EXPORT_SYMBOL_GPL(iwl_mei_is_connected);
55
56#define SAP_VERSION 3
57#define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */
58
59struct iwl_sap_q_ctrl_blk {
60 __le32 wr_ptr;
61 __le32 rd_ptr;
62 __le32 size;
63};
64
65enum iwl_sap_q_idx {
66 SAP_QUEUE_IDX_NOTIF = 0,
67 SAP_QUEUE_IDX_DATA,
68 SAP_QUEUE_IDX_MAX,
69};
70
71struct iwl_sap_dir {
72 __le32 reserved;
73 struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX];
74};
75
76enum iwl_sap_dir_idx {
77 SAP_DIRECTION_HOST_TO_ME = 0,
78 SAP_DIRECTION_ME_TO_HOST,
79 SAP_DIRECTION_MAX,
80};
81
82struct iwl_sap_shared_mem_ctrl_blk {
83 __le32 sap_id;
84 __le32 size;
85 struct iwl_sap_dir dir[SAP_DIRECTION_MAX];
86};
87
88/*
89 * The shared area has the following layout:
90 *
91 * +-----------------------------------+
92 * |struct iwl_sap_shared_mem_ctrl_blk |
93 * +-----------------------------------+
94 * |Host -> ME data queue |
95 * +-----------------------------------+
96 * |Host -> ME notif queue |
97 * +-----------------------------------+
98 * |ME -> Host data queue |
99 * +-----------------------------------+
100 * |ME -> host notif queue |
101 * +-----------------------------------+
102 * |SAP control block id (SAP!) |
103 * +-----------------------------------+
104 */
105
106#define SAP_H2M_DATA_Q_SZ 48256
107#define SAP_M2H_DATA_Q_SZ 24128
108#define SAP_H2M_NOTIF_Q_SZ 2240
109#define SAP_M2H_NOTIF_Q_SZ 62720
110
111#define _IWL_MEI_SAP_SHARED_MEM_SZ \
112 (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
113 SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \
114 SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)
115
116#define IWL_MEI_SAP_SHARED_MEM_SZ \
117 (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE))
118
119struct iwl_mei_shared_mem_ptrs {
120 struct iwl_sap_shared_mem_ctrl_blk *ctrl;
121 void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
122 size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
123};
124
125struct iwl_mei_filters {
126 struct rcu_head rcu_head;
127 struct iwl_sap_oob_filters filters;
128};
129
130/**
131 * struct iwl_mei - holds the private date for iwl_mei
132 *
133 * @get_nvm_wq: the wait queue for the get_nvm flow
134 * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA
135 * message. Used so that we can send CHECK_SHARED_AREA from atomic
136 * contexts.
137 * @get_ownership_wq: the wait queue for the get_ownership_flow
138 * @shared_mem: the memory that is shared between CSME and the host
139 * @cldev: the pointer to the MEI client device
140 * @nvm: the data returned by the CSME for the NVM
141 * @filters: the filters sent by CSME
142 * @got_ownership: true if we own the device
143 * @amt_enabled: true if CSME has wireless enabled
144 * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI
145 * bus, but rather need to wait until send_csa_msg_wk runs
146 * @csme_taking_ownership: true when CSME is taking ownership. Used to remember
147 * to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
148 * flow.
149 * @link_prot_state: true when we are in link protection PASSIVE
150 * @device_down: true if the device is down. Used to remember to send
151 * CSME_OWNERSHIP_CONFIRMED when the driver is already down.
152 * @csa_throttle_end_wk: used when &csa_throttled is true
153 * @pldr_wq: the wait queue for PLDR flow
154 * @pldr_active: PLDR flow is in progress
155 * @data_q_lock: protects the access to the data queues which are
156 * accessed without the mutex.
157 * @netdev_work: used to defer registering and unregistering of the netdev to
158 * avoid taking the rtnl lock in the SAP messages handlers.
159 * @sap_seq_no: the sequence number for the SAP messages
160 * @seq_no: the sequence number for the SAP messages
161 * @dbgfs_dir: the debugfs dir entry
162 */
163struct iwl_mei {
164 wait_queue_head_t get_nvm_wq;
165 struct work_struct send_csa_msg_wk;
166 wait_queue_head_t get_ownership_wq;
167 struct iwl_mei_shared_mem_ptrs shared_mem;
168 struct mei_cl_device *cldev;
169 struct iwl_mei_nvm *nvm;
170 struct iwl_mei_filters __rcu *filters;
171 bool got_ownership;
172 bool amt_enabled;
173 bool csa_throttled;
174 bool csme_taking_ownership;
175 bool link_prot_state;
176 bool device_down;
177 struct delayed_work csa_throttle_end_wk;
178 wait_queue_head_t pldr_wq;
179 bool pldr_active;
180 spinlock_t data_q_lock;
181 struct work_struct netdev_work;
182
183 atomic_t sap_seq_no;
184 atomic_t seq_no;
185
186 struct dentry *dbgfs_dir;
187};
188
189/**
190 * struct iwl_mei_cache - cache for the parameters from iwlwifi
191 * @ops: Callbacks to iwlwifi.
192 * @netdev: The netdev that will be used to transmit / receive packets.
193 * @conn_info: The connection info message triggered by iwlwifi's association.
194 * @power_limit: pointer to an array of 10 elements (le16) represents the power
195 * restrictions per chain.
196 * @rf_kill: rf kill state.
197 * @mcc: MCC info
198 * @mac_address: interface MAC address.
199 * @nvm_address: NVM MAC address.
200 * @priv: A pointer to iwlwifi.
201 *
202 * This used to cache the configurations coming from iwlwifi's way. The data
203 * is cached here so that we can buffer the configuration even if we don't have
204 * a bind from the mei bus and hence, on iwl_mei structure.
205 */
206struct iwl_mei_cache {
207 const struct iwl_mei_ops *ops;
208 struct net_device __rcu *netdev;
209 const struct iwl_sap_notif_connection_info *conn_info;
210 const __le16 *power_limit;
211 u32 rf_kill;
212 u16 mcc;
213 u8 mac_address[6];
214 u8 nvm_address[6];
215 void *priv;
216};
217
218static struct iwl_mei_cache iwl_mei_cache = {
219 .rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED
220};
221
222static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev)
223{
224 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
225
226 if (mei_cldev_dma_unmap(cldev))
227 dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n");
228 memset(&mei->shared_mem, 0, sizeof(mei->shared_mem));
229}
230
231#define HBM_DMA_BUF_ID_WLAN 1
232
233static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
234{
235 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
236 struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
237
238 mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN,
239 IWL_MEI_SAP_SHARED_MEM_SZ);
240
241 if (IS_ERR(mem->ctrl)) {
242 int ret = PTR_ERR(mem->ctrl);
243
244 mem->ctrl = NULL;
245
246 return ret;
247 }
248
249 memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ);
250
251 return 0;
252}
253
254static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
255{
256 struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
257 struct iwl_sap_dir *h2m;
258 struct iwl_sap_dir *m2h;
259 int dir, queue;
260 u8 *q_head;
261
262 mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
263
264 mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl));
265
266 h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
267 m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
268
269 h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
270 cpu_to_le32(SAP_H2M_DATA_Q_SZ);
271 h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
272 cpu_to_le32(SAP_H2M_NOTIF_Q_SZ);
273 m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
274 cpu_to_le32(SAP_M2H_DATA_Q_SZ);
275 m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
276 cpu_to_le32(SAP_M2H_NOTIF_Q_SZ);
277
278 /* q_head points to the start of the first queue */
279 q_head = (void *)(mem->ctrl + 1);
280
281 /* Initialize the queue heads */
282 for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) {
283 for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) {
284 mem->q_head[dir][queue] = q_head;
285 q_head +=
286 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
287 mem->q_size[dir][queue] =
288 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
289 }
290 }
291
292 *(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
293}
294
295static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
296 struct iwl_sap_q_ctrl_blk *notif_q,
297 u8 *q_head,
298 const struct iwl_sap_hdr *hdr,
299 u32 q_sz)
300{
301 u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
302 u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
303 size_t room_in_buf;
304 size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len);
305
306 if (rd > q_sz || wr > q_sz) {
307 dev_err(&cldev->dev,
308 "Pointers are past the end of the buffer\n");
309 return -EINVAL;
310 }
311
312 room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;
313
314 /* we don't have enough room for the data to write */
315 if (room_in_buf < tx_sz) {
316 dev_err(&cldev->dev,
317 "Not enough room in the buffer\n");
318 return -ENOSPC;
319 }
320
321 if (wr + tx_sz <= q_sz) {
322 memcpy(q_head + wr, hdr, tx_sz);
323 } else {
324 memcpy(q_head + wr, hdr, q_sz - wr);
325 memcpy(q_head, (const u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
326 }
327
328 WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
329 return 0;
330}
331
332static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei)
333{
334 struct iwl_sap_q_ctrl_blk *notif_q;
335 struct iwl_sap_dir *dir;
336
337 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
338 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
339
340 if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr))
341 return true;
342
343 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
344 return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr);
345}
346
347static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev)
348{
349 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
350 struct iwl_sap_me_msg_start msg = {
351 .hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA),
352 .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
353 };
354 int ret;
355
356 lockdep_assert_held(&iwl_mei_mutex);
357
358 if (mei->csa_throttled)
359 return 0;
360
361 trace_iwlmei_me_msg(&msg.hdr, true);
362 ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
363 if (ret != sizeof(msg)) {
364 dev_err(&cldev->dev,
365 "failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n",
366 ret);
367 return ret;
368 }
369
370 mei->csa_throttled = true;
371
372 schedule_delayed_work(&mei->csa_throttle_end_wk,
373 msecs_to_jiffies(100));
374
375 return 0;
376}
377
378static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk)
379{
380 struct iwl_mei *mei =
381 container_of(wk, struct iwl_mei, csa_throttle_end_wk.work);
382
383 mutex_lock(&iwl_mei_mutex);
384
385 mei->csa_throttled = false;
386
387 if (iwl_mei_host_to_me_data_pending(mei))
388 iwl_mei_send_check_shared_area(mei->cldev);
389
390 mutex_unlock(&iwl_mei_mutex);
391}
392
393static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev,
394 struct iwl_sap_hdr *hdr)
395{
396 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
397 struct iwl_sap_q_ctrl_blk *notif_q;
398 struct iwl_sap_dir *dir;
399 void *q_head;
400 u32 q_sz;
401 int ret;
402
403 lockdep_assert_held(&iwl_mei_mutex);
404
405 if (!mei->shared_mem.ctrl) {
406 dev_err(&cldev->dev,
407 "No shared memory, can't send any SAP message\n");
408 return -EINVAL;
409 }
410
411 if (!iwl_mei_is_connected()) {
412 dev_err(&cldev->dev,
413 "Can't send a SAP message if we're not connected\n");
414 return -ENODEV;
415 }
416
417 hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
418 dev_dbg(&cldev->dev, "Sending %d\n", hdr->type);
419
420 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
421 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
422 q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
423 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
424 ret = iwl_mei_write_cyclic_buf(q_head, notif_q, q_head, hdr, q_sz);
425
426 if (ret < 0)
427 return ret;
428
429 trace_iwlmei_sap_cmd(hdr, true);
430
431 return iwl_mei_send_check_shared_area(cldev);
432}
433
434void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
435{
436 struct iwl_sap_q_ctrl_blk *notif_q;
437 struct iwl_sap_dir *dir;
438 struct iwl_mei *mei;
439 size_t room_in_buf;
440 size_t tx_sz;
441 size_t hdr_sz;
442 u32 q_sz;
443 u32 rd;
444 u32 wr;
445 u8 *q_head;
446
447 if (!iwl_mei_global_cldev)
448 return;
449
450 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
451
452 /*
453 * We access this path for Rx packets (the more common case)
454 * and from Tx path when we send DHCP packets, the latter is
455 * very unlikely.
456 * Take the lock already here to make sure we see that remove()
457 * might have cleared the IWL_MEI_STATUS_SAP_CONNECTED bit.
458 */
459 spin_lock_bh(&mei->data_q_lock);
460
461 if (!iwl_mei_is_connected()) {
462 spin_unlock_bh(&mei->data_q_lock);
463 return;
464 }
465
466 /*
467 * We are in a RCU critical section and the remove from the CSME bus
468 * which would free this memory waits for the readers to complete (this
469 * is done in netdev_rx_handler_unregister).
470 */
471 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
472 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
473 q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
474 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
475
476 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
477 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
478 hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) :
479 sizeof(struct iwl_sap_hdr);
480 tx_sz = skb->len + hdr_sz;
481
482 if (rd > q_sz || wr > q_sz) {
483 dev_err(&mei->cldev->dev,
484 "can't write the data: pointers are past the end of the buffer\n");
485 goto out;
486 }
487
488 room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;
489
490 /* we don't have enough room for the data to write */
491 if (room_in_buf < tx_sz) {
492 dev_err(&mei->cldev->dev,
493 "Not enough room in the buffer for this data\n");
494 goto out;
495 }
496
497 if (skb_headroom(skb) < hdr_sz) {
498 dev_err(&mei->cldev->dev,
499 "Not enough headroom in the skb to write the SAP header\n");
500 goto out;
501 }
502
503 if (cb_tx) {
504 struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr));
505
506 memset(cb_hdr, 0, sizeof(*cb_hdr));
507 cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET);
508 cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr));
509 cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
510 cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX));
511 cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr));
512 trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP);
513 } else {
514 struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr));
515
516 hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET);
517 hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
518 hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
519 trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR);
520 }
521
522 if (wr + tx_sz <= q_sz) {
523 skb_copy_bits(skb, 0, q_head + wr, tx_sz);
524 } else {
525 skb_copy_bits(skb, 0, q_head + wr, q_sz - wr);
526 skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr));
527 }
528
529 WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
530
531out:
532 spin_unlock_bh(&mei->data_q_lock);
533}
534
535static int
536iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type)
537{
538 struct iwl_sap_hdr msg = {
539 .type = cpu_to_le16(type),
540 };
541
542 return iwl_mei_send_sap_msg_payload(cldev, &msg);
543}
544
545static void iwl_mei_send_csa_msg_wk(struct work_struct *wk)
546{
547 struct iwl_mei *mei =
548 container_of(wk, struct iwl_mei, send_csa_msg_wk);
549
550 if (!iwl_mei_is_connected())
551 return;
552
553 mutex_lock(&iwl_mei_mutex);
554
555 iwl_mei_send_check_shared_area(mei->cldev);
556
557 mutex_unlock(&iwl_mei_mutex);
558}
559
560/* Called in a RCU read critical section from netif_receive_skb */
561static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
562{
563 struct sk_buff *skb = *pskb;
564 struct iwl_mei *mei =
565 rcu_dereference(skb->dev->rx_handler_data);
566 struct iwl_mei_filters *filters = rcu_dereference(mei->filters);
567 bool rx_for_csme = false;
568 rx_handler_result_t res;
569
570 /*
571 * remove() unregisters this handler and synchronize_net, so this
572 * should never happen.
573 */
574 if (!iwl_mei_is_connected()) {
575 dev_err(&mei->cldev->dev,
576 "Got an Rx packet, but we're not connected to SAP?\n");
577 return RX_HANDLER_PASS;
578 }
579
580 if (filters)
581 res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme);
582 else
583 res = RX_HANDLER_PASS;
584
585 /*
586 * The data is already on the ring of the shared area, all we
587 * need to do is to tell the CSME firmware to check what we have
588 * there.
589 */
590 if (rx_for_csme)
591 schedule_work(&mei->send_csa_msg_wk);
592
593 if (res != RX_HANDLER_PASS) {
594 trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR);
595 dev_kfree_skb(skb);
596 }
597
598 return res;
599}
600
601static void iwl_mei_netdev_work(struct work_struct *wk)
602{
603 struct iwl_mei *mei =
604 container_of(wk, struct iwl_mei, netdev_work);
605 struct net_device *netdev;
606
607 /*
608 * First take rtnl and only then the mutex to avoid an ABBA
609 * with iwl_mei_set_netdev()
610 */
611 rtnl_lock();
612 mutex_lock(&iwl_mei_mutex);
613
614 netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
615 lockdep_is_held(&iwl_mei_mutex));
616 if (netdev) {
617 if (mei->amt_enabled)
618 netdev_rx_handler_register(netdev, iwl_mei_rx_handler,
619 mei);
620 else
621 netdev_rx_handler_unregister(netdev);
622 }
623
624 mutex_unlock(&iwl_mei_mutex);
625 rtnl_unlock();
626}
627
628static void
629iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
630 const struct iwl_sap_me_msg_start_ok *rsp,
631 ssize_t len)
632{
633 if (len != sizeof(*rsp)) {
634 dev_err(&cldev->dev,
635 "got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
636 dev_err(&cldev->dev,
637 "size is incorrect: %zd instead of %zu\n",
638 len, sizeof(*rsp));
639 return;
640 }
641
642 if (rsp->supported_version != SAP_VERSION) {
643 dev_err(&cldev->dev,
644 "didn't get the expected version: got %d\n",
645 rsp->supported_version);
646 return;
647 }
648
649 mutex_lock(&iwl_mei_mutex);
650 set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
651 /*
652 * We'll receive AMT_STATE SAP message in a bit and
653 * that will continue the flow
654 */
655 mutex_unlock(&iwl_mei_mutex);
656}
657
658static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev,
659 const struct iwl_sap_csme_filters *filters)
660{
661 struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
662 struct iwl_mei_filters *new_filters;
663 struct iwl_mei_filters *old_filters;
664
665 old_filters =
666 rcu_dereference_protected(mei->filters,
667 lockdep_is_held(&iwl_mei_mutex));
668
669 new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL);
670 if (!new_filters)
671 return;
672
673 /* Copy the OOB filters */
674 new_filters->filters = filters->filters;
675
676 rcu_assign_pointer(mei->filters, new_filters);
677
678 if (old_filters)
679 kfree_rcu(old_filters, rcu_head);
680}
681
682static void
683iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
684 const struct iwl_sap_notif_conn_status *status)
685{
686 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
687 struct iwl_mei_conn_info conn_info = {
688 .lp_state = le32_to_cpu(status->link_prot_state),
689 .ssid_len = le32_to_cpu(status->conn_info.ssid_len),
690 .channel = status->conn_info.channel,
691 .band = status->conn_info.band,
692 .auth_mode = le32_to_cpu(status->conn_info.auth_mode),
693 .pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher),
694 };
695
696 if (!iwl_mei_cache.ops ||
697 conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid))
698 return;
699
700 memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len);
701 ether_addr_copy(conn_info.bssid, status->conn_info.bssid);
702
703 iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
704
705 mei->link_prot_state = status->link_prot_state;
706
707 /*
708 * Update the Rfkill state in case the host does not own the device:
709 * if we are in Link Protection, ask to not touch the device, else,
710 * unblock rfkill.
711 * If the host owns the device, inform the user space whether it can
712 * roam.
713 */
714 if (mei->got_ownership)
715 iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv,
716 status->link_prot_state);
717 else
718 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv,
719 status->link_prot_state);
720}
721
722static void iwl_mei_set_init_conf(struct iwl_mei *mei)
723{
724 struct iwl_sap_notif_host_link_up link_msg = {
725 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
726 .hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)),
727 };
728 struct iwl_sap_notif_country_code mcc_msg = {
729 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
730 .hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)),
731 .mcc = cpu_to_le16(iwl_mei_cache.mcc),
732 };
733 struct iwl_sap_notif_sar_limits sar_msg = {
734 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
735 .hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)),
736 };
737 struct iwl_sap_notif_host_nic_info nic_info_msg = {
738 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
739 .hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)),
740 };
741 struct iwl_sap_msg_dw rfkill_msg = {
742 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
743 .hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)),
744 .val = cpu_to_le32(iwl_mei_cache.rf_kill),
745 };
746
747 /* wifi driver has registered already */
748 if (iwl_mei_cache.ops) {
749 iwl_mei_send_sap_msg(mei->cldev,
750 SAP_MSG_NOTIF_WIFIDR_UP);
751 iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
752 }
753
754 iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);
755
756 if (iwl_mei_cache.conn_info) {
757 link_msg.conn_info = *iwl_mei_cache.conn_info;
758 iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr);
759 }
760
761 iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr);
762
763 if (iwl_mei_cache.power_limit) {
764 memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit,
765 sizeof(sar_msg.sar_chain_info_table));
766 iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr);
767 }
768
769 ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address);
770 ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address);
771 iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr);
772
773 iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr);
774}
775
776static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
777 const struct iwl_sap_msg_dw *dw)
778{
779 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
780
781 mutex_lock(&iwl_mei_mutex);
782
783 if (mei->amt_enabled == !!le32_to_cpu(dw->val))
784 goto out;
785
786 mei->amt_enabled = dw->val;
787
788 if (mei->amt_enabled)
789 iwl_mei_set_init_conf(mei);
790 else if (iwl_mei_cache.ops)
791 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
792
793 schedule_work(&mei->netdev_work);
794
795out:
796 mutex_unlock(&iwl_mei_mutex);
797}
798
799static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
800 const struct iwl_sap_msg_dw *dw)
801{
802 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
803
804 mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME);
805}
806
807static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev,
808 const void *payload)
809{
810 /* We can get ownership and driver is registered, go ahead */
811 if (iwl_mei_cache.ops)
812 iwl_mei_send_sap_msg(cldev,
813 SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
814}
815
816static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
817 const void *payload)
818{
819 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
820
821 dev_info(&cldev->dev, "CSME takes ownership\n");
822
823 mei->got_ownership = false;
824
825 if (iwl_mei_cache.ops && !mei->device_down) {
826 /*
827 * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi
828 * driver is finished taking the device down.
829 */
830 mei->csme_taking_ownership = true;
831
832 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true, true);
833 } else {
834 iwl_mei_send_sap_msg(cldev,
835 SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
836 }
837}
838
839static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
840 const struct iwl_sap_nvm *sap_nvm)
841{
842 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
843 const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm;
844 int i;
845
846 kfree(mei->nvm);
847 mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL);
848 if (!mei->nvm)
849 return;
850
851 ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr);
852 mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs;
853 mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg);
854 mei->nvm->caps = le32_to_cpu(sap_nvm->caps);
855 mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version);
856
857 for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++)
858 mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]);
859
860 wake_up_all(&mei->get_nvm_wq);
861}
862
863static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev,
864 const struct iwl_sap_msg_dw *dw)
865{
866 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
867
868 /*
869 * This means that we can't use the wifi device right now, CSME is not
870 * ready to let us use it.
871 */
872 if (!dw->val) {
873 dev_info(&cldev->dev, "Ownership req denied\n");
874 return;
875 }
876
877 mei->got_ownership = true;
878 wake_up_all(&mei->get_ownership_wq);
879
880 iwl_mei_send_sap_msg(cldev,
881 SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED);
882
883 /* We can now start the connection, unblock rfkill */
884 if (iwl_mei_cache.ops)
885 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
886}
887
888static void iwl_mei_handle_pldr_ack(struct mei_cl_device *cldev,
889 const struct iwl_sap_pldr_ack_data *ack)
890{
891 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
892
893 mei->pldr_active = le32_to_cpu(ack->status) == SAP_PLDR_STATUS_SUCCESS;
894 wake_up_all(&mei->pldr_wq);
895}
896
897static void iwl_mei_handle_ping(struct mei_cl_device *cldev,
898 const struct iwl_sap_hdr *hdr)
899{
900 iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG);
901}
902
903static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev,
904 const struct iwl_sap_hdr *hdr)
905{
906 u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr);
907 u16 type = le16_to_cpu(hdr->type);
908
909 dev_dbg(&cldev->dev,
910 "Got a new SAP message: type %d, len %d, seq %d\n",
911 le16_to_cpu(hdr->type), len,
912 le32_to_cpu(hdr->seq_num));
913
914#define SAP_MSG_HANDLER(_cmd, _handler, _sz) \
915 case SAP_MSG_NOTIF_ ## _cmd: \
916 if (len < _sz) { \
917 dev_err(&cldev->dev, \
918 "Bad size for %d: %u < %u\n", \
919 le16_to_cpu(hdr->type), \
920 (unsigned int)len, \
921 (unsigned int)_sz); \
922 break; \
923 } \
924 mutex_lock(&iwl_mei_mutex); \
925 _handler(cldev, (const void *)hdr); \
926 mutex_unlock(&iwl_mei_mutex); \
927 break
928
929#define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz) \
930 case SAP_MSG_NOTIF_ ## _cmd: \
931 if (len < _sz) { \
932 dev_err(&cldev->dev, \
933 "Bad size for %d: %u < %u\n", \
934 le16_to_cpu(hdr->type), \
935 (unsigned int)len, \
936 (unsigned int)_sz); \
937 break; \
938 } \
939 _handler(cldev, (const void *)hdr); \
940 break
941
942#define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz) \
943 case SAP_MSG_NOTIF_ ## _cmd: \
944 if (len < _sz) { \
945 dev_err(&cldev->dev, \
946 "Bad size for %d: %u < %u\n", \
947 le16_to_cpu(hdr->type), \
948 (unsigned int)len, \
949 (unsigned int)_sz); \
950 break; \
951 } \
952 break
953
954 switch (type) {
955 SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0);
956 SAP_MSG_HANDLER(CSME_FILTERS,
957 iwl_mei_handle_csme_filters,
958 sizeof(struct iwl_sap_csme_filters));
959 SAP_MSG_HANDLER(CSME_CONN_STATUS,
960 iwl_mei_handle_conn_status,
961 sizeof(struct iwl_sap_notif_conn_status));
962 SAP_MSG_HANDLER_NO_LOCK(AMT_STATE,
963 iwl_mei_handle_amt_state,
964 sizeof(struct iwl_sap_msg_dw));
965 SAP_MSG_HANDLER_NO_HANDLER(PONG, 0);
966 SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm,
967 sizeof(struct iwl_sap_nvm));
968 SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ,
969 iwl_mei_handle_rx_host_own_req,
970 sizeof(struct iwl_sap_msg_dw));
971 SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner,
972 sizeof(struct iwl_sap_msg_dw));
973 SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP,
974 iwl_mei_handle_can_release_ownership, 0);
975 SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP,
976 iwl_mei_handle_csme_taking_ownership, 0);
977 SAP_MSG_HANDLER(PLDR_ACK, iwl_mei_handle_pldr_ack,
978 sizeof(struct iwl_sap_pldr_ack_data));
979 default:
980 /*
981 * This is not really an error, there are message that we decided
982 * to ignore, yet, it is useful to be able to leave a note if debug
983 * is enabled.
984 */
985 dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n",
986 le16_to_cpu(hdr->type), len);
987 }
988
989#undef SAP_MSG_HANDLER
990#undef SAP_MSG_HANDLER_NO_LOCK
991}
992
993static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz,
994 u32 *_rd, u32 wr,
995 void *_buf, u32 len)
996{
997 u8 *buf = _buf;
998 u32 rd = *_rd;
999
1000 if (rd + len <= q_sz) {
1001 memcpy(buf, q_head + rd, len);
1002 rd += len;
1003 } else {
1004 memcpy(buf, q_head + rd, q_sz - rd);
1005 memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd));
1006 rd = len - (q_sz - rd);
1007 }
1008
1009 *_rd = rd;
1010}
1011
1012#define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) + \
1013 IEEE80211_TKIP_IV_LEN + \
1014 sizeof(rfc1042_header) + ETH_TLEN)
1015
1016static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev,
1017 const u8 *q_head, u32 q_sz,
1018 u32 rd, u32 wr, ssize_t valid_rx_sz,
1019 struct sk_buff_head *tx_skbs)
1020{
1021 struct iwl_sap_hdr hdr;
1022 struct net_device *netdev =
1023 rcu_dereference_protected(iwl_mei_cache.netdev,
1024 lockdep_is_held(&iwl_mei_mutex));
1025
1026 if (!netdev)
1027 return;
1028
1029 while (valid_rx_sz >= sizeof(hdr)) {
1030 struct ethhdr *ethhdr;
1031 unsigned char *data;
1032 struct sk_buff *skb;
1033 u16 len;
1034
1035 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr));
1036 valid_rx_sz -= sizeof(hdr);
1037 len = le16_to_cpu(hdr.len);
1038
1039 if (valid_rx_sz < len) {
1040 dev_err(&cldev->dev,
1041 "Data queue is corrupted: valid data len %zd, len %d\n",
1042 valid_rx_sz, len);
1043 break;
1044 }
1045
1046 if (len < sizeof(*ethhdr)) {
1047 dev_err(&cldev->dev,
1048 "Data len is smaller than an ethernet header? len = %d\n",
1049 len);
1050 }
1051
1052 valid_rx_sz -= len;
1053
1054 if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) {
1055 dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n",
1056 le16_to_cpu(hdr.type), len);
1057 continue;
1058 }
1059
1060 /* We need enough room for the WiFi header + SNAP + IV */
1061 skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN);
1062 if (!skb)
1063 continue;
1064
1065 skb_reserve(skb, QOS_HDR_IV_SNAP_LEN);
1066 ethhdr = skb_push(skb, sizeof(*ethhdr));
1067
1068 iwl_mei_read_from_q(q_head, q_sz, &rd, wr,
1069 ethhdr, sizeof(*ethhdr));
1070 len -= sizeof(*ethhdr);
1071
1072 skb_reset_mac_header(skb);
1073 skb_reset_network_header(skb);
1074 skb->protocol = ethhdr->h_proto;
1075
1076 data = skb_put(skb, len);
1077 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len);
1078
1079 /*
1080 * Enqueue the skb here so that it can be sent later when we
1081 * do not hold the mutex. TX'ing a packet with a mutex held is
1082 * possible, but it wouldn't be nice to forbid the TX path to
1083 * call any of iwlmei's functions, since every API from iwlmei
1084 * needs the mutex.
1085 */
1086 __skb_queue_tail(tx_skbs, skb);
1087 }
1088}
1089
1090static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev,
1091 const u8 *q_head, u32 q_sz,
1092 u32 rd, u32 wr, ssize_t valid_rx_sz)
1093{
1094 struct page *p = alloc_page(GFP_KERNEL);
1095 struct iwl_sap_hdr *hdr;
1096
1097 if (!p)
1098 return;
1099
1100 hdr = page_address(p);
1101
1102 while (valid_rx_sz >= sizeof(*hdr)) {
1103 u16 len;
1104
1105 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr));
1106 valid_rx_sz -= sizeof(*hdr);
1107 len = le16_to_cpu(hdr->len);
1108
1109 if (valid_rx_sz < len)
1110 break;
1111
1112 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len);
1113
1114 trace_iwlmei_sap_cmd(hdr, false);
1115 iwl_mei_handle_sap_msg(cldev, hdr);
1116 valid_rx_sz -= len;
1117 }
1118
1119 /* valid_rx_sz must be 0 now... */
1120 if (valid_rx_sz)
1121 dev_err(&cldev->dev,
1122 "More data in the buffer although we read it all\n");
1123
1124 __free_page(p);
1125}
1126
1127static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev,
1128 struct iwl_sap_q_ctrl_blk *notif_q,
1129 const u8 *q_head,
1130 struct sk_buff_head *skbs,
1131 u32 q_sz)
1132{
1133 u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
1134 u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
1135 ssize_t valid_rx_sz;
1136
1137 if (rd > q_sz || wr > q_sz) {
1138 dev_err(&cldev->dev,
1139 "Pointers are past the buffer limit\n");
1140 return;
1141 }
1142
1143 if (rd == wr)
1144 return;
1145
1146 valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr;
1147
1148 if (skbs)
1149 iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr,
1150 valid_rx_sz, skbs);
1151 else
1152 iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr,
1153 valid_rx_sz);
1154
1155 /* Increment the read pointer to point to the write pointer */
1156 WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr));
1157}
1158
1159static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev)
1160{
1161 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1162 struct iwl_sap_q_ctrl_blk *notif_q;
1163 struct sk_buff_head tx_skbs;
1164 struct iwl_sap_dir *dir;
1165 void *q_head;
1166 u32 q_sz;
1167
1168 if (!mei->shared_mem.ctrl)
1169 return;
1170
1171 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
1172 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
1173 q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
1174 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
1175
1176 /*
1177 * Do not hold the mutex here, but rather each and every message
1178 * handler takes it.
1179 * This allows message handlers to take it at a certain time.
1180 */
1181 iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL, q_sz);
1182
1183 mutex_lock(&iwl_mei_mutex);
1184 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
1185 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
1186 q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
1187 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
1188
1189 __skb_queue_head_init(&tx_skbs);
1190
1191 iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs, q_sz);
1192
1193 if (skb_queue_empty(&tx_skbs)) {
1194 mutex_unlock(&iwl_mei_mutex);
1195 return;
1196 }
1197
1198 /*
1199 * Take the RCU read lock before we unlock the mutex to make sure that
1200 * even if the netdev is replaced by another non-NULL netdev right after
1201 * we unlock the mutex, the old netdev will still be valid when we
1202 * transmit the frames. We can't allow to replace the netdev here because
1203 * the skbs hold a pointer to the netdev.
1204 */
1205 rcu_read_lock();
1206
1207 mutex_unlock(&iwl_mei_mutex);
1208
1209 if (!rcu_access_pointer(iwl_mei_cache.netdev)) {
1210 dev_err(&cldev->dev, "Can't Tx without a netdev\n");
1211 skb_queue_purge(&tx_skbs);
1212 goto out;
1213 }
1214
1215 while (!skb_queue_empty(&tx_skbs)) {
1216 struct sk_buff *skb = __skb_dequeue(&tx_skbs);
1217
1218 trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR);
1219 dev_queue_xmit(skb);
1220 }
1221
1222out:
1223 rcu_read_unlock();
1224}
1225
1226static void iwl_mei_rx(struct mei_cl_device *cldev)
1227{
1228 struct iwl_sap_me_msg_hdr *hdr;
1229 u8 msg[100];
1230 ssize_t ret;
1231
1232 ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg));
1233 if (ret < 0) {
1234 dev_err(&cldev->dev, "failed to receive data: %zd\n", ret);
1235 return;
1236 }
1237
1238 if (ret == 0) {
1239 dev_err(&cldev->dev, "got an empty response\n");
1240 return;
1241 }
1242
1243 hdr = (void *)msg;
1244 trace_iwlmei_me_msg(hdr, false);
1245
1246 switch (le32_to_cpu(hdr->type)) {
1247 case SAP_ME_MSG_START_OK:
1248 BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) >
1249 sizeof(msg));
1250
1251 iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret);
1252 break;
1253 case SAP_ME_MSG_CHECK_SHARED_AREA:
1254 iwl_mei_handle_check_shared_area(cldev);
1255 break;
1256 default:
1257 dev_err(&cldev->dev, "got a RX notification: %d\n",
1258 le32_to_cpu(hdr->type));
1259 break;
1260 }
1261}
1262
1263static int iwl_mei_send_start(struct mei_cl_device *cldev)
1264{
1265 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1266 struct iwl_sap_me_msg_start msg = {
1267 .hdr.type = cpu_to_le32(SAP_ME_MSG_START),
1268 .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
1269 .hdr.len = cpu_to_le32(sizeof(msg)),
1270 .supported_versions[0] = SAP_VERSION,
1271 .init_data_seq_num = cpu_to_le16(0x100),
1272 .init_notif_seq_num = cpu_to_le16(0x800),
1273 };
1274 int ret;
1275
1276 trace_iwlmei_me_msg(&msg.hdr, true);
1277 ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
1278 if (ret != sizeof(msg)) {
1279 dev_err(&cldev->dev,
1280 "failed to send the SAP_ME_MSG_START message %d\n",
1281 ret);
1282 return ret;
1283 }
1284
1285 return 0;
1286}
1287
1288static int iwl_mei_enable(struct mei_cl_device *cldev)
1289{
1290 int ret;
1291
1292 ret = mei_cldev_enable(cldev);
1293 if (ret < 0) {
1294 dev_err(&cldev->dev, "failed to enable the device: %d\n", ret);
1295 return ret;
1296 }
1297
1298 ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx);
1299 if (ret) {
1300 dev_err(&cldev->dev,
1301 "failed to register to the rx cb: %d\n", ret);
1302 mei_cldev_disable(cldev);
1303 return ret;
1304 }
1305
1306 return 0;
1307}
1308
1309struct iwl_mei_nvm *iwl_mei_get_nvm(void)
1310{
1311 struct iwl_mei_nvm *nvm = NULL;
1312 struct iwl_mei *mei;
1313 int ret;
1314
1315 mutex_lock(&iwl_mei_mutex);
1316
1317 if (!iwl_mei_is_connected())
1318 goto out;
1319
1320 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1321
1322 if (!mei)
1323 goto out;
1324
1325 ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev,
1326 SAP_MSG_NOTIF_GET_NVM);
1327 if (ret)
1328 goto out;
1329
1330 mutex_unlock(&iwl_mei_mutex);
1331
1332 ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ);
1333 if (!ret)
1334 return NULL;
1335
1336 mutex_lock(&iwl_mei_mutex);
1337
1338 if (!iwl_mei_is_connected())
1339 goto out;
1340
1341 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1342
1343 if (!mei)
1344 goto out;
1345
1346 if (mei->nvm)
1347 nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL);
1348
1349out:
1350 mutex_unlock(&iwl_mei_mutex);
1351 return nvm;
1352}
1353EXPORT_SYMBOL_GPL(iwl_mei_get_nvm);
1354
1355#define IWL_MEI_PLDR_NUM_RETRIES 3
1356
1357int iwl_mei_pldr_req(void)
1358{
1359 struct iwl_mei *mei;
1360 int ret;
1361 struct iwl_sap_pldr_data msg = {
1362 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_PLDR),
1363 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1364 };
1365 int i;
1366
1367 mutex_lock(&iwl_mei_mutex);
1368
1369 /* In case we didn't have a bind */
1370 if (!iwl_mei_is_connected()) {
1371 ret = 0;
1372 goto out;
1373 }
1374
1375 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1376
1377 if (!mei) {
1378 ret = -ENODEV;
1379 goto out;
1380 }
1381
1382 if (!mei->amt_enabled) {
1383 ret = 0;
1384 goto out;
1385 }
1386
1387 for (i = 0; i < IWL_MEI_PLDR_NUM_RETRIES; i++) {
1388 ret = iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1389 mutex_unlock(&iwl_mei_mutex);
1390 if (ret)
1391 return ret;
1392
1393 ret = wait_event_timeout(mei->pldr_wq, mei->pldr_active, HZ / 2);
1394 if (ret)
1395 break;
1396
1397 /* Take the mutex for the next iteration */
1398 mutex_lock(&iwl_mei_mutex);
1399 }
1400
1401 if (ret)
1402 return 0;
1403
1404 ret = -ETIMEDOUT;
1405out:
1406 mutex_unlock(&iwl_mei_mutex);
1407 return ret;
1408}
1409EXPORT_SYMBOL_GPL(iwl_mei_pldr_req);
1410
1411int iwl_mei_get_ownership(void)
1412{
1413 struct iwl_mei *mei;
1414 int ret;
1415
1416 mutex_lock(&iwl_mei_mutex);
1417
1418 /* In case we didn't have a bind */
1419 if (!iwl_mei_is_connected()) {
1420 ret = 0;
1421 goto out;
1422 }
1423
1424 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1425
1426 if (!mei) {
1427 ret = -ENODEV;
1428 goto out;
1429 }
1430
1431 if (!mei->amt_enabled) {
1432 ret = 0;
1433 goto out;
1434 }
1435
1436 if (mei->got_ownership) {
1437 ret = 0;
1438 goto out;
1439 }
1440
1441 ret = iwl_mei_send_sap_msg(mei->cldev,
1442 SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
1443 if (ret)
1444 goto out;
1445
1446 mutex_unlock(&iwl_mei_mutex);
1447
1448 ret = wait_event_timeout(mei->get_ownership_wq,
1449 mei->got_ownership, HZ / 2);
1450 return (!ret) ? -ETIMEDOUT : 0;
1451out:
1452 mutex_unlock(&iwl_mei_mutex);
1453 return ret;
1454}
1455EXPORT_SYMBOL_GPL(iwl_mei_get_ownership);
1456
1457void iwl_mei_alive_notif(bool success)
1458{
1459 struct iwl_mei *mei;
1460 struct iwl_sap_pldr_end_data msg = {
1461 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_PLDR_END),
1462 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1463 .status = success ? cpu_to_le32(SAP_PLDR_STATUS_SUCCESS) :
1464 cpu_to_le32(SAP_PLDR_STATUS_FAILURE),
1465 };
1466
1467 mutex_lock(&iwl_mei_mutex);
1468
1469 if (!iwl_mei_is_connected())
1470 goto out;
1471
1472 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1473 if (!mei || !mei->pldr_active)
1474 goto out;
1475
1476 mei->pldr_active = false;
1477
1478 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1479out:
1480 mutex_unlock(&iwl_mei_mutex);
1481}
1482EXPORT_SYMBOL_GPL(iwl_mei_alive_notif);
1483
1484void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
1485 const struct iwl_mei_colloc_info *colloc_info)
1486{
1487 struct iwl_sap_notif_host_link_up msg = {
1488 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
1489 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1490 .conn_info = {
1491 .ssid_len = cpu_to_le32(conn_info->ssid_len),
1492 .channel = conn_info->channel,
1493 .band = conn_info->band,
1494 .pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher),
1495 .auth_mode = cpu_to_le32(conn_info->auth_mode),
1496 },
1497 };
1498 struct iwl_mei *mei;
1499
1500 if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid))
1501 return;
1502
1503 memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len);
1504 memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN);
1505
1506 if (colloc_info) {
1507 msg.colloc_channel = colloc_info->channel;
1508 msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1;
1509 memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN);
1510 }
1511
1512 mutex_lock(&iwl_mei_mutex);
1513
1514 if (!iwl_mei_is_connected())
1515 goto out;
1516
1517 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1518
1519 if (!mei && !mei->amt_enabled)
1520 goto out;
1521
1522 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1523
1524out:
1525 kfree(iwl_mei_cache.conn_info);
1526 iwl_mei_cache.conn_info =
1527 kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL);
1528 mutex_unlock(&iwl_mei_mutex);
1529}
1530EXPORT_SYMBOL_GPL(iwl_mei_host_associated);
1531
1532void iwl_mei_host_disassociated(void)
1533{
1534 struct iwl_mei *mei;
1535 struct iwl_sap_notif_host_link_down msg = {
1536 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN),
1537 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1538 .type = HOST_LINK_DOWN_TYPE_TEMPORARY,
1539 };
1540
1541 mutex_lock(&iwl_mei_mutex);
1542
1543 if (!iwl_mei_is_connected())
1544 goto out;
1545
1546 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1547
1548 if (!mei && !mei->amt_enabled)
1549 goto out;
1550
1551 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1552
1553out:
1554 kfree(iwl_mei_cache.conn_info);
1555 iwl_mei_cache.conn_info = NULL;
1556 mutex_unlock(&iwl_mei_mutex);
1557}
1558EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated);
1559
1560void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
1561{
1562 struct iwl_mei *mei;
1563 u32 rfkill_state = 0;
1564 struct iwl_sap_msg_dw msg = {
1565 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
1566 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1567 };
1568
1569 if (!sw_rfkill)
1570 rfkill_state |= SAP_SW_RFKILL_DEASSERTED;
1571
1572 if (!hw_rfkill)
1573 rfkill_state |= SAP_HW_RFKILL_DEASSERTED;
1574
1575 mutex_lock(&iwl_mei_mutex);
1576
1577 if (!iwl_mei_is_connected())
1578 goto out;
1579
1580 msg.val = cpu_to_le32(rfkill_state);
1581
1582 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1583
1584 if (!mei && !mei->amt_enabled)
1585 goto out;
1586
1587 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1588
1589out:
1590 iwl_mei_cache.rf_kill = rfkill_state;
1591 mutex_unlock(&iwl_mei_mutex);
1592}
1593EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state);
1594
1595void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
1596{
1597 struct iwl_mei *mei;
1598 struct iwl_sap_notif_host_nic_info msg = {
1599 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
1600 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1601 };
1602
1603 mutex_lock(&iwl_mei_mutex);
1604
1605 if (!iwl_mei_is_connected())
1606 goto out;
1607
1608 ether_addr_copy(msg.mac_address, mac_address);
1609 ether_addr_copy(msg.nvm_address, nvm_address);
1610
1611 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1612
1613 if (!mei && !mei->amt_enabled)
1614 goto out;
1615
1616 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1617
1618out:
1619 ether_addr_copy(iwl_mei_cache.mac_address, mac_address);
1620 ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address);
1621 mutex_unlock(&iwl_mei_mutex);
1622}
1623EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info);
1624
1625void iwl_mei_set_country_code(u16 mcc)
1626{
1627 struct iwl_mei *mei;
1628 struct iwl_sap_notif_country_code msg = {
1629 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
1630 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1631 .mcc = cpu_to_le16(mcc),
1632 };
1633
1634 mutex_lock(&iwl_mei_mutex);
1635
1636 if (!iwl_mei_is_connected())
1637 goto out;
1638
1639 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1640
1641 if (!mei && !mei->amt_enabled)
1642 goto out;
1643
1644 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1645
1646out:
1647 iwl_mei_cache.mcc = mcc;
1648 mutex_unlock(&iwl_mei_mutex);
1649}
1650EXPORT_SYMBOL_GPL(iwl_mei_set_country_code);
1651
1652void iwl_mei_set_power_limit(const __le16 *power_limit)
1653{
1654 struct iwl_mei *mei;
1655 struct iwl_sap_notif_sar_limits msg = {
1656 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
1657 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1658 };
1659
1660 mutex_lock(&iwl_mei_mutex);
1661
1662 if (!iwl_mei_is_connected())
1663 goto out;
1664
1665 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1666
1667 if (!mei && !mei->amt_enabled)
1668 goto out;
1669
1670 memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table));
1671
1672 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1673
1674out:
1675 kfree(iwl_mei_cache.power_limit);
1676 iwl_mei_cache.power_limit = kmemdup(power_limit,
1677 sizeof(msg.sar_chain_info_table), GFP_KERNEL);
1678 mutex_unlock(&iwl_mei_mutex);
1679}
1680EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit);
1681
1682void iwl_mei_set_netdev(struct net_device *netdev)
1683{
1684 struct iwl_mei *mei;
1685
1686 mutex_lock(&iwl_mei_mutex);
1687
1688 if (!iwl_mei_is_connected()) {
1689 rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1690 goto out;
1691 }
1692
1693 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1694
1695 if (!mei)
1696 goto out;
1697
1698 if (!netdev) {
1699 struct net_device *dev =
1700 rcu_dereference_protected(iwl_mei_cache.netdev,
1701 lockdep_is_held(&iwl_mei_mutex));
1702
1703 if (!dev)
1704 goto out;
1705
1706 netdev_rx_handler_unregister(dev);
1707 }
1708
1709 rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1710
1711 if (netdev && mei->amt_enabled)
1712 netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
1713
1714out:
1715 mutex_unlock(&iwl_mei_mutex);
1716}
1717EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);
1718
1719void iwl_mei_device_state(bool up)
1720{
1721 struct iwl_mei *mei;
1722
1723 mutex_lock(&iwl_mei_mutex);
1724
1725 if (!iwl_mei_is_connected())
1726 goto out;
1727
1728 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1729
1730 if (!mei)
1731 goto out;
1732
1733 mei->device_down = !up;
1734
1735 if (up || !mei->csme_taking_ownership)
1736 goto out;
1737
1738 iwl_mei_send_sap_msg(mei->cldev,
1739 SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
1740 mei->csme_taking_ownership = false;
1741out:
1742 mutex_unlock(&iwl_mei_mutex);
1743}
1744EXPORT_SYMBOL_GPL(iwl_mei_device_state);

int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
{
	int ret;

	/*
	 * We must have a non-NULL priv pointer so that we don't crash when
	 * there are multiple WiFi devices.
	 */
	if (!priv)
		return -EINVAL;

	mutex_lock(&iwl_mei_mutex);

	/* do not allow registration if someone else already registered */
	if (iwl_mei_cache.priv || iwl_mei_cache.ops) {
		ret = -EBUSY;
		goto out;
	}

	iwl_mei_cache.priv = priv;
	iwl_mei_cache.ops = ops;

	if (iwl_mei_global_cldev) {
		struct iwl_mei *mei =
			mei_cldev_get_drvdata(iwl_mei_global_cldev);

		/* we already have a SAP connection */
		if (iwl_mei_is_connected()) {
			if (mei->amt_enabled)
				iwl_mei_send_sap_msg(mei->cldev,
						     SAP_MSG_NOTIF_WIFIDR_UP);
			ops->rfkill(priv, mei->link_prot_state, false);
		}
	}
	ret = 0;

out:
	mutex_unlock(&iwl_mei_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iwl_mei_register);

void iwl_mei_start_unregister(void)
{
	mutex_lock(&iwl_mei_mutex);

	/* At this point, the wifi driver should have removed the netdev */
	if (rcu_access_pointer(iwl_mei_cache.netdev))
		pr_err("Still had a netdev pointer set upon unregister\n");

	kfree(iwl_mei_cache.conn_info);
	iwl_mei_cache.conn_info = NULL;
	kfree(iwl_mei_cache.power_limit);
	iwl_mei_cache.power_limit = NULL;
	iwl_mei_cache.ops = NULL;
	/* leave iwl_mei_cache.priv non-NULL to prevent any new registration */

	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_start_unregister);

void iwl_mei_unregister_complete(void)
{
	mutex_lock(&iwl_mei_mutex);

	iwl_mei_cache.priv = NULL;

	if (iwl_mei_global_cldev) {
		struct iwl_mei *mei =
			mei_cldev_get_drvdata(iwl_mei_global_cldev);

		iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN);
		mei->got_ownership = false;
	}

	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete);
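
/*
 * Lifecycle sketch (hypothetical wifi-driver flow, for illustration
 * only): registration happens once per system, and unregistration is a
 * two-step dance so that the ops/priv pair is never used after the
 * caller has started tearing down.
 *
 *	err = iwl_mei_register(our_priv, &our_mei_ops);
 *	if (err)
 *		return err;	(-EBUSY if someone already registered)
 *	...
 *	iwl_mei_start_unregister();	ops dropped; priv kept non-NULL
 *					to block new registrations
 *	... wait for in-flight callbacks to drain ...
 *	iwl_mei_unregister_complete();	priv cleared, CSME told the wifi
 *					driver is down
 *
 * (our_priv and our_mei_ops are hypothetical names.)
 */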

#if IS_ENABLED(CONFIG_DEBUG_FS)

static ssize_t
iwl_mei_dbgfs_send_start_message_write(struct file *file,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	int ret;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_global_cldev) {
		ret = -ENODEV;
		goto out;
	}

	ret = iwl_mei_send_start(iwl_mei_global_cldev);

out:
	mutex_unlock(&iwl_mei_mutex);
	return ret ?: count;
}

static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = {
	.write = iwl_mei_dbgfs_send_start_message_write,
	.open = simple_open,
	.llseek = default_llseek,
};

static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file,
						 const char __user *user_buf,
						 size_t count, loff_t *ppos)
{
	iwl_mei_get_ownership();

	return count;
}

static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = {
	.write = iwl_mei_dbgfs_req_ownership_write,
	.open = simple_open,
	.llseek = default_llseek,
};

static void iwl_mei_dbgfs_register(struct iwl_mei *mei)
{
	mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	if (!mei->dbgfs_dir)
		return;

	debugfs_create_ulong("status", S_IRUSR,
			     mei->dbgfs_dir, &iwl_mei_status);
	debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir,
			    mei, &iwl_mei_dbgfs_send_start_message_ops);
	debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir,
			    mei, &iwl_mei_dbgfs_req_ownership_ops);
}
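
/*
 * Userspace sketch (assuming debugfs is mounted at the conventional
 * /sys/kernel/debug; the directory name comes from KBUILD_MODNAME):
 *
 *	cat /sys/kernel/debug/iwlmei/status
 *	echo 1 > /sys/kernel/debug/iwlmei/req_ownership
 *	echo 1 > /sys/kernel/debug/iwlmei/send_start_message
 *
 * Any write triggers the action; the written value itself is ignored by
 * the handlers above.
 */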

static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei)
{
	debugfs_remove_recursive(mei->dbgfs_dir);
	mei->dbgfs_dir = NULL;
}

#else

static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {}
static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}

#endif /* CONFIG_DEBUG_FS */

#define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3

/*
 * iwl_mei_probe - the probe function called by the mei bus enumeration
 *
 * This allocates the data needed by iwlmei and stores a pointer to that
 * data in the mei_cl_device's drvdata.
 * It starts the SAP protocol by sending the SAP_ME_MSG_START without
 * waiting for the answer. The answer will be caught later by the Rx callback.
 */
static int iwl_mei_probe(struct mei_cl_device *cldev,
			 const struct mei_cl_device_id *id)
{
	int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
	struct iwl_mei *mei;
	int ret;

	mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL);
	if (!mei)
		return -ENOMEM;

	init_waitqueue_head(&mei->get_nvm_wq);
	INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk);
	INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
			  iwl_mei_csa_throttle_end_wk);
	init_waitqueue_head(&mei->get_ownership_wq);
	init_waitqueue_head(&mei->pldr_wq);
	spin_lock_init(&mei->data_q_lock);
	INIT_WORK(&mei->netdev_work, iwl_mei_netdev_work);

	mei_cldev_set_drvdata(cldev, mei);
	mei->cldev = cldev;
	mei->device_down = true;

	do {
		ret = iwl_mei_alloc_shared_mem(cldev);
		if (!ret)
			break;
		/*
		 * The CSME firmware needs to boot the internal WLAN client.
		 * This can take time in certain configurations (usually
		 * upon resume and when the whole CSME firmware is shut down
		 * during suspend).
		 *
		 * Wait a bit before retrying and hope we'll succeed next time.
		 */

		dev_dbg(&cldev->dev,
			"Couldn't allocate the shared memory: %d, attempt %d / %d\n",
			ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
		msleep(100);
		alloc_retry--;
	} while (alloc_retry);

	if (ret) {
		dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
			ret);
		goto free;
	}

	iwl_mei_init_shared_mem(mei);

	ret = iwl_mei_enable(cldev);
	if (ret)
		goto free_shared_mem;

	iwl_mei_dbgfs_register(mei);

	/*
	 * We now have an Rx function in place; start the SAP protocol.
	 * We expect to get the SAP_ME_MSG_START_OK response later on.
	 */
	mutex_lock(&iwl_mei_mutex);
	ret = iwl_mei_send_start(cldev);
	mutex_unlock(&iwl_mei_mutex);
	if (ret)
		goto debugfs_unregister;

	/* must be last */
	iwl_mei_global_cldev = cldev;

	return 0;

debugfs_unregister:
	iwl_mei_dbgfs_unregister(mei);
	mei_cldev_disable(cldev);
free_shared_mem:
	iwl_mei_free_shared_mem(cldev);
free:
	mei_cldev_set_drvdata(cldev, NULL);
	devm_kfree(&cldev->dev, mei);

	return ret;
}

#define SEND_SAP_MAX_WAIT_ITERATION 10
#define IWLMEI_DEVICE_DOWN_WAIT_ITERATION 50

static void iwl_mei_remove(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	int i;

	/*
	 * We are being removed while the bus is active. This means we are
	 * going to suspend / shutdown, so the NIC will disappear.
	 */
	if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops) {
		unsigned int iter = IWLMEI_DEVICE_DOWN_WAIT_ITERATION;
		bool down = false;

		/*
		 * In case of suspend, wait for the mac to stop and don't
		 * remove the interface. This will allow the interface to
		 * come back on resume.
		 */
		while (!down && iter--) {
			mdelay(1);

			mutex_lock(&iwl_mei_mutex);
			down = mei->device_down;
			mutex_unlock(&iwl_mei_mutex);
		}

		if (!down)
			iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);
	}

	if (rcu_access_pointer(iwl_mei_cache.netdev)) {
		struct net_device *dev;

		/*
		 * First take the rtnl and only then the mutex to avoid an
		 * ABBA deadlock with iwl_mei_set_netdev().
		 */
		rtnl_lock();
		mutex_lock(&iwl_mei_mutex);

		/*
		 * If we are suspending and the wifi driver hasn't removed
		 * its netdev yet, do it now. In any case, don't change the
		 * cache.netdev pointer.
		 */
		dev = rcu_dereference_protected(iwl_mei_cache.netdev,
						lockdep_is_held(&iwl_mei_mutex));

		netdev_rx_handler_unregister(dev);
		mutex_unlock(&iwl_mei_mutex);
		rtnl_unlock();
	}

	mutex_lock(&iwl_mei_mutex);

	if (mei->amt_enabled) {
		/*
		 * Tell CSME that we are going down so that it won't access
		 * the memory anymore. Make sure this message goes through
		 * immediately.
		 */
		mei->csa_throttled = false;
		iwl_mei_send_sap_msg(mei->cldev,
				     SAP_MSG_NOTIF_HOST_GOES_DOWN);

		for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
			if (!iwl_mei_host_to_me_data_pending(mei))
				break;

			msleep(20);
		}

		/*
		 * If we couldn't make sure that CSME saw the HOST_GOES_DOWN
		 * message, it means that it will probably keep reading memory
		 * that we are going to unmap and free; expect IOMMU error
		 * messages.
		 */
		if (i == SEND_SAP_MAX_WAIT_ITERATION)
			dev_err(&mei->cldev->dev,
				"Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
	}

	mutex_unlock(&iwl_mei_mutex);

	/*
	 * This looks strange, but this lock is taken here to make sure that
	 * iwl_mei_add_data_to_ring called from the Tx path sees that we
	 * clear the IWL_MEI_STATUS_SAP_CONNECTED bit.
	 * Rx isn't a problem because the rx_handler can't be called after
	 * having been unregistered.
	 */
	spin_lock_bh(&mei->data_q_lock);
	clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
	spin_unlock_bh(&mei->data_q_lock);

	if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);

	/*
	 * mei_cldev_disable will return only after all the MEI Rx is done.
	 * It must be called when iwl_mei_mutex is *not* held, since it waits
	 * for our Rx handler to complete.
	 * After it returns, no new Rx will start.
	 */
	mei_cldev_disable(cldev);

	/*
	 * The netdev was already removed, and its removal includes a call to
	 * synchronize_net(), so we know there won't be any new Rx that would
	 * trigger the following workers.
	 */
	cancel_work_sync(&mei->send_csa_msg_wk);
	cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
	cancel_work_sync(&mei->netdev_work);

	/*
	 * If someone waits for the ownership, let him know that we are going
	 * down and that we are not connected anymore. He'll be able to take
	 * the device.
	 */
	wake_up_all(&mei->get_ownership_wq);
	wake_up_all(&mei->pldr_wq);

	mutex_lock(&iwl_mei_mutex);

	iwl_mei_global_cldev = NULL;

	wake_up_all(&mei->get_nvm_wq);

	iwl_mei_free_shared_mem(cldev);

	iwl_mei_dbgfs_unregister(mei);

	mei_cldev_set_drvdata(cldev, NULL);

	kfree(mei->nvm);

	kfree(rcu_access_pointer(mei->filters));

	devm_kfree(&cldev->dev, mei);

	mutex_unlock(&iwl_mei_mutex);
}

static const struct mei_cl_device_id iwl_mei_tbl[] = {
	{
		.name = KBUILD_MODNAME,
		.uuid = MEI_WLAN_UUID,
		.version = MEI_CL_VERSION_ANY,
	},

	/* required last entry */
	{ }
};

/*
 * Do not export the device table because this module is loaded by
 * iwlwifi's dependency.
 */
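
/*
 * For reference (deliberately not compiled in, per the note above): a
 * standalone mei client module would normally advertise its id table
 * for module autoloading like this:
 *
 *	MODULE_DEVICE_TABLE(mei, iwl_mei_tbl);
 *
 * Omitting it keeps udev from loading iwlmei on its own; iwlwifi pulls
 * it in as a dependency instead.
 */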

static struct mei_cl_driver iwl_mei_cl_driver = {
	.id_table = iwl_mei_tbl,
	.name = KBUILD_MODNAME,
	.probe = iwl_mei_probe,
	.remove = iwl_mei_remove,
};

module_mei_cl_driver(iwl_mei_cl_driver);