Loading...
1/*
2 * Ultra Wide Band
3 * Dynamic Reservation Protocol handling
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21#include <linux/kthread.h>
22#include <linux/freezer.h>
23#include <linux/slab.h>
24#include <linux/delay.h>
25#include "uwb-internal.h"
26
27
/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */
enum uwb_drp_conflict_action {
	/* Reservation is maintained, no action needed */
	/* (sic: "MANTAIN" is a long-standing misspelling kept because the
	 * name is used throughout this file) */
	UWB_DRP_CONFLICT_MANTAIN = 0,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. If the device is the reservation
	 * target, it shall also set the Reason Code in its DRP IE to
	 * Conflict in its beacon in the following superframe.
	 */
	UWB_DRP_CONFLICT_ACT1,

	/* the device shall not set the Reservation Status bit to ONE
	 * and shall not transmit frames in conflicting MASs. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT2,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. It shall remove the conflicting
	 * MASs from the reservation or set the Reservation Status to
	 * ZERO in its beacon in the following superframe. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT3,
};
56
57
58static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
59 struct uwb_rceb *reply, ssize_t reply_size)
60{
61 struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
62
63 if (r != NULL) {
64 if (r->bResultCode != UWB_RC_RES_SUCCESS)
65 dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
66 uwb_rc_strerror(r->bResultCode), r->bResultCode);
67 } else
68 dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
69
70 spin_lock_bh(&rc->rsvs_lock);
71 if (rc->set_drp_ie_pending > 1) {
72 rc->set_drp_ie_pending = 0;
73 uwb_rsv_queue_update(rc);
74 } else {
75 rc->set_drp_ie_pending = 0;
76 }
77 spin_unlock_bh(&rc->rsvs_lock);
78}
79
80/**
81 * Construct and send the SET DRP IE
82 *
83 * @rc: UWB Host controller
84 * @returns: >= 0 number of bytes still available in the beacon
85 * < 0 errno code on error.
86 *
87 * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
88 * device to include in its beacon at the same time. We thus have to
89 * traverse all reservations and include the DRP IEs of all PENDING
90 * and NEGOTIATED reservations in a SET DRP command for transmission.
91 *
92 * A DRP Availability IE is appended.
93 *
94 * rc->rsvs_mutex is held
95 *
96 * FIXME We currently ignore the returned value indicating the remaining space
97 * in beacon. This could be used to deny reservation requests earlier if
98 * determined that they would cause the beacon space to be exceeded.
99 */
int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
{
	int result;
	struct uwb_rc_cmd_set_drp_ie *cmd;
	struct uwb_rsv *rsv;
	struct uwb_rsv_move *mv;
	int num_bytes = 0;	/* total payload: all DRP IEs + DRP avail IE */
	u8 *IEDataptr;

	result = -ENOMEM;
	/* First traverse all reservations to determine memory needed. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			/* +2 covers the IE header (element ID + length octet) */
			num_bytes += rsv->drp_ie->hdr.length + 2;
			/* A reservation being moved carries a second,
			 * "companion" DRP IE. */
			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				num_bytes += mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}
	num_bytes += sizeof(rc->drp_avail.ie);
	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
	if (cmd == NULL)
		goto error;
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
	cmd->wIELength = num_bytes;
	IEDataptr = (u8 *)&cmd->IEData[0];

	/* FIXME: DRP avail IE is not always needed */
	/* put DRP avail IE first */
	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
	IEDataptr += sizeof(struct uwb_ie_drp_avail);

	/* Next traverse all reservations to place IEs in allocated memory. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			memcpy(IEDataptr, rsv->drp_ie,
			       rsv->drp_ie->hdr.length + 2);
			IEDataptr += rsv->drp_ie->hdr.length + 2;

			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				memcpy(IEDataptr, mv->companion_drp_ie,
				       mv->companion_drp_ie->hdr.length + 2);
				IEDataptr += mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}

	/* Fire-and-forget: completion is handled in
	 * uwb_rc_set_drp_cmd_done(). */
	result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes,
				  UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
				  uwb_rc_set_drp_cmd_done, NULL);

	rc->set_drp_ie_pending = 1;

	/* the command payload was copied by uwb_rc_cmd_async(), so the
	 * local buffer can be freed here */
	kfree(cmd);
error:
	return result;
}
162
163/*
164 * Evaluate the action to perform using conflict resolution rules
165 *
166 * Return a uwb_drp_conflict_action.
167 */
static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
				    struct uwb_rsv *rsv, int our_status)
{
	/* Our side of the conflict: tie-breaker bit, reservation type and
	 * the beacon slot of our radio controller. */
	int our_tie_breaker = rsv->tiebreaker;
	int our_type        = rsv->type;
	int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;

	/* The external (conflicting) DRP IE's fields. */
	int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
	int ext_status      = uwb_ie_drp_status(ext_drp_ie);
	int ext_type        = uwb_ie_drp_type(ext_drp_ie);


	/* Rules are evaluated in the order given by the spec; the first
	 * matching rule decides the action. */

	/* [ECMA-368 2nd Edition] 17.4.6 */
	if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-1 */
	if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-2 */
	if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
		/* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
		return UWB_DRP_CONFLICT_ACT1;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-3 */
	if (our_status == 0 && ext_status == 1) {
		return UWB_DRP_CONFLICT_ACT2;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-4 */
	if (our_status == 1 && ext_status == 0) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* From here on both statuses are equal; the tie-breaker bits and
	 * beacon slot ordering decide. */

	/* [ECMA-368 2nd Edition] 17.4.6-5a */
	if (our_tie_breaker == ext_tie_breaker &&
	    our_beacon_slot <  ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5b */
	if (our_tie_breaker != ext_tie_breaker &&
	    our_beacon_slot >  ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	if (our_status == 0) {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-6a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		} else  {
			/* [ECMA-368 2nd Edition] 17.4.6-6b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		}
	} else {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-7a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-7b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		}
	}
	/* No rule selected a corrective action: keep the reservation. */
	return UWB_DRP_CONFLICT_MANTAIN;
}
245
246static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
247 int ext_beacon_slot,
248 struct uwb_rsv *rsv,
249 struct uwb_mas_bm *conflicting_mas)
250{
251 struct uwb_rc *rc = rsv->rc;
252 struct uwb_rsv_move *mv = &rsv->mv;
253 struct uwb_drp_backoff_win *bow = &rc->bow;
254 int action;
255
256 action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));
257
258 if (uwb_rsv_is_owner(rsv)) {
259 switch(action) {
260 case UWB_DRP_CONFLICT_ACT2:
261 /* try move */
262 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
263 if (bow->can_reserve_extra_mases == false)
264 uwb_rsv_backoff_win_increment(rc);
265
266 break;
267 case UWB_DRP_CONFLICT_ACT3:
268 uwb_rsv_backoff_win_increment(rc);
269 /* drop some mases with reason modified */
270 /* put in the companion the mases to be dropped */
271 bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
272 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
273 default:
274 break;
275 }
276 } else {
277 switch(action) {
278 case UWB_DRP_CONFLICT_ACT2:
279 case UWB_DRP_CONFLICT_ACT3:
280 uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
281 default:
282 break;
283 }
284
285 }
286
287}
288
/*
 * Resolve a conflict against a reservation that is being moved
 * (carries a companion DRP IE).
 *
 * @companion_only: true if only the companion part of the reservation
 *	conflicts; false if the base part conflicts too.
 */
static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
				      struct uwb_rsv *rsv, bool companion_only,
				      struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv_move *mv = &rsv->mv;
	int action;

	if (companion_only) {
		/* status of companion is 0 at this point */
		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
		if (uwb_rsv_is_owner(rsv)) {
			switch (action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				/* Give up the expansion: fall back to the
				 * established base reservation and release
				 * the companion MASs. */
				uwb_rsv_set_state(rsv,
						  UWB_RSV_STATE_O_ESTABLISHED);
				rsv->needs_release_companion_mas = false;
				if (bow->can_reserve_extra_mases == false)
					uwb_rsv_backoff_win_increment(rc);
				uwb_drp_avail_release(rsv->rc,
						      &rsv->mv.companion_mas);
			}
		} else { /* rsv is target */
			switch (action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_CONFLICT);
				/* send_drp_avail_ie = true; */
			}
		}
	} else { /* also base part of the reservation is conflicting */
		if (uwb_rsv_is_owner(rsv)) {
			uwb_rsv_backoff_win_increment(rc);
			/* remove companion part */
			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);

			/* drop some mases with reason modified */

			/* put in the companion the mases to be dropped */
			/* NOTE(review): this uses bitmap_andnot() (keeps the
			 * MASs NOT in conflict) while handle_conflict_normal()
			 * uses bitmap_and() for the same stated purpose —
			 * confirm which polarity is intended here. */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		} else { /* it is a target rsv */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			/* send_drp_avail_ie = true; */
		}
	}
}
336
337static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
338 struct uwb_rc_evt_drp *drp_evt,
339 struct uwb_ie_drp *drp_ie,
340 struct uwb_mas_bm *conflicting_mas)
341{
342 struct uwb_rsv_move *mv;
343
344 /* check if the conflicting reservation has two drp_ies */
345 if (uwb_rsv_has_two_drp_ies(rsv)) {
346 mv = &rsv->mv;
347 if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
348 handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
349 rsv, false, conflicting_mas);
350 } else {
351 if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
352 handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
353 rsv, true, conflicting_mas);
354 }
355 }
356 } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
357 handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas);
358 }
359}
360
361static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
362 struct uwb_rc_evt_drp *drp_evt,
363 struct uwb_ie_drp *drp_ie,
364 struct uwb_mas_bm *conflicting_mas)
365{
366 struct uwb_rsv *rsv;
367
368 list_for_each_entry(rsv, &rc->reservations, rc_node) {
369 uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas);
370 }
371}
372
373/*
374 * Based on the DRP IE, transition a target reservation to a new
375 * state.
376 */
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
				   struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;		/* MAS bitmap decoded from the IE */

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	switch (reason_code) {
	case UWB_DRP_REASON_ACCEPTED:

		/* Re-assert the conflict state (refreshes the state
		 * machine) while a conflict is being resolved. */
		if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			break;
		}

		if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
			/* drp_ie is companion */
			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS))
				/* stroke companion */
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		} else {
			/* The owner's IE covers more/other MASs than we
			 * have: the owner is expanding the reservation. */
			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
				if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) {
					/* FIXME: there is a conflict, find
					 * the conflicting reservations and
					 * take a sensible action. Consider
					 * that in drp_ie there is the
					 * "neighbour" */
					uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
				} else {
					/* accept the extra reservation */
					bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS);
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
				}
			} else {
				if (status) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
				}
			}

		}
		break;

	case UWB_DRP_REASON_MODIFIED:
		/* check to see if we have already modified the reservation */
		if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
			break;
		}

		/* find if the owner wants to expand or reduce */
		if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
			/* owner is reducing */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS);
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
		}

		/* adopt the owner's new MAS set */
		bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
		break;
	default:
		dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
			 reason_code, status);
	}
}
448
449/*
450 * Based on the DRP IE, transition an owner reservation to a new
451 * state.
452 */
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
				  struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
				  struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;		/* MAS bitmap decoded from the IE */

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	if (status) {
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			/* The target accepted; which state we move to
			 * depends on where we are in the negotiation. */
			switch (rsv->state) {
			case UWB_RSV_STATE_O_PENDING:
			case UWB_RSV_STATE_O_INITIATED:
			case UWB_RSV_STATE_O_ESTABLISHED:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				break;
			case UWB_RSV_STATE_O_MODIFIED:
				/* established only once the target echoes
				 * our exact MAS set back */
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
				}
				break;

			case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn' t be a problem */
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
				}
				break;
			case UWB_RSV_STATE_O_MOVE_EXPANDING:
				if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
					/* Companion reservation accepted */
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
				}
				break;
			case UWB_RSV_STATE_O_MOVE_COMBINING:
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS))
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
				else
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
				break;
			default:
				break;
			}
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		/* status == 0: the target has not (or no longer) accepted */
		switch (reason_code) {
		case UWB_DRP_REASON_PENDING:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
			/* resolve the conflict */
			/* the conflicting MASs are those the source does
			 * NOT advertise as available */
			bitmap_complement(mas.bm, src->last_availability_bm,
					  UWB_NUM_MAS);
			uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}
533
534static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
535{
536 unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
537 mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
538}
539
/*
 * Work item run when an alien-BP conflict entry expires: remove the
 * entry, rebuild the radio controller's global conflicting-MAS bitmap
 * from the remaining entries and schedule a reservation update.
 */
static void uwb_cnflt_update_work(struct work_struct *work)
{
	struct uwb_cnflt_alien *cnflt = container_of(work,
						     struct uwb_cnflt_alien,
						     cnflt_update_work);
	struct uwb_cnflt_alien *c;
	struct uwb_rc *rc = cnflt->rc;

	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	/* rsvs_mutex protects the conflict list and the global bitmap */
	mutex_lock(&rc->rsvs_mutex);

	list_del(&cnflt->rc_node);

	/* update rc global conflicting alien bitmap */
	bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);

	/* OR together the bitmaps of all remaining conflict entries */
	list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
		bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS);
	}

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	kfree(cnflt);
	mutex_unlock(&rc->rsvs_mutex);
}
566
567static void uwb_cnflt_timer(unsigned long arg)
568{
569 struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;
570
571 queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
572}
573
574/*
575 * We have received an DRP_IE of type Alien BP and we need to make
576 * sure we do not transmit in conflicting MASs.
577 */
578static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
579{
580 struct device *dev = &rc->uwb_dev.dev;
581 struct uwb_mas_bm mas;
582 struct uwb_cnflt_alien *cnflt;
583 char buf[72];
584 unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
585
586 uwb_drp_ie_to_bm(&mas, drp_ie);
587 bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS);
588
589 list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
590 if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
591 /* Existing alien BP reservation conflicting
592 * bitmap, just reset the timer */
593 uwb_cnflt_alien_stroke_timer(cnflt);
594 return;
595 }
596 }
597
598 /* New alien BP reservation conflicting bitmap */
599
600 /* alloc and initialize new uwb_cnflt_alien */
601 cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
602 if (!cnflt)
603 dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
604 INIT_LIST_HEAD(&cnflt->rc_node);
605 init_timer(&cnflt->timer);
606 cnflt->timer.function = uwb_cnflt_timer;
607 cnflt->timer.data = (unsigned long)cnflt;
608
609 cnflt->rc = rc;
610 INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);
611
612 bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);
613
614 list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);
615
616 /* update rc global conflicting alien bitmap */
617 bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);
618
619 queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));
620
621 /* start the timer */
622 uwb_cnflt_alien_stroke_timer(cnflt);
623}
624
625static void uwb_drp_process_not_involved(struct uwb_rc *rc,
626 struct uwb_rc_evt_drp *drp_evt,
627 struct uwb_ie_drp *drp_ie)
628{
629 struct uwb_mas_bm mas;
630
631 uwb_drp_ie_to_bm(&mas, drp_ie);
632 uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
633}
634
635static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
636 struct uwb_rc_evt_drp *drp_evt,
637 struct uwb_ie_drp *drp_ie)
638{
639 struct uwb_rsv *rsv;
640
641 rsv = uwb_rsv_find(rc, src, drp_ie);
642 if (!rsv) {
643 /*
644 * No reservation? It's either for a recently
645 * terminated reservation; or the DRP IE couldn't be
646 * processed (e.g., an invalid IE or out of memory).
647 */
648 return;
649 }
650
651 /*
652 * Do nothing with DRP IEs for reservations that have been
653 * terminated.
654 */
655 if (rsv->state == UWB_RSV_STATE_NONE) {
656 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
657 return;
658 }
659
660 if (uwb_ie_drp_owner(drp_ie))
661 uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
662 else
663 uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);
664
665}
666
667
668static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
669{
670 return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
671}
672
673/*
674 * Process a received DRP IE.
675 */
676static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
677 struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
678{
679 if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
680 uwb_drp_handle_alien_drp(rc, drp_ie);
681 else if (uwb_drp_involves_us(rc, drp_ie))
682 uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
683 else
684 uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
685}
686
687/*
688 * Process a received DRP Availability IE
689 */
690static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
691 struct uwb_ie_drp_avail *drp_availability_ie)
692{
693 bitmap_copy(src->last_availability_bm,
694 drp_availability_ie->bmp, UWB_NUM_MAS);
695}
696
697/*
698 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
699 * from a device.
700 */
701static
702void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
703 size_t ielen, struct uwb_dev *src_dev)
704{
705 struct device *dev = &rc->uwb_dev.dev;
706 struct uwb_ie_hdr *ie_hdr;
707 void *ptr;
708
709 ptr = drp_evt->ie_data;
710 for (;;) {
711 ie_hdr = uwb_ie_next(&ptr, &ielen);
712 if (!ie_hdr)
713 break;
714
715 switch (ie_hdr->element_id) {
716 case UWB_IE_DRP_AVAILABILITY:
717 uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
718 break;
719 case UWB_IE_DRP:
720 uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
721 break;
722 default:
723 dev_warn(dev, "unexpected IE in DRP notification\n");
724 break;
725 }
726 }
727
728 if (ielen > 0)
729 dev_warn(dev, "%d octets remaining in DRP notification\n",
730 (int)ielen);
731}
732
733/**
734 * uwbd_evt_handle_rc_drp - handle a DRP_IE event
735 * @evt: the DRP_IE event from the radio controller
736 *
737 * This processes DRP notifications from the radio controller, either
738 * initiating a new reservation or transitioning an existing
739 * reservation into a different state.
740 *
741 * DRP notifications can occur for three different reasons:
742 *
743 * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
744 * the target or source have been received.
745 *
746 * These DRP IEs could be new or for an existing reservation.
747 *
748 * If the DRP IE for an existing reservation ceases to be to
749 * received for at least mMaxLostBeacons, the reservation should be
750 * considered to be terminated. Note that the TERMINATE reason (see
751 * below) may not always be signalled (e.g., the remote device has
752 * two or more reservations established with the RC).
753 *
754 * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
755 * group conflict with the RC's reservations.
756 *
757 * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
758 * from a device (i.e., it's terminated all reservations).
759 *
760 * Only the software state of the reservations is changed; the setting
761 * of the radio controller's DRP IEs is done after all the events in
762 * an event buffer are processed. This saves waiting multiple times
763 * for the SET_DRP_IE command to complete.
764 */
765int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
766{
767 struct device *dev = &evt->rc->uwb_dev.dev;
768 struct uwb_rc *rc = evt->rc;
769 struct uwb_rc_evt_drp *drp_evt;
770 size_t ielength, bytes_left;
771 struct uwb_dev_addr src_addr;
772 struct uwb_dev *src_dev;
773
774 /* Is there enough data to decode the event (and any IEs in
775 its payload)? */
776 if (evt->notif.size < sizeof(*drp_evt)) {
777 dev_err(dev, "DRP event: Not enough data to decode event "
778 "[%zu bytes left, %zu needed]\n",
779 evt->notif.size, sizeof(*drp_evt));
780 return 0;
781 }
782 bytes_left = evt->notif.size - sizeof(*drp_evt);
783 drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
784 ielength = le16_to_cpu(drp_evt->ie_length);
785 if (bytes_left != ielength) {
786 dev_err(dev, "DRP event: Not enough data in payload [%zu"
787 "bytes left, %zu declared in the event]\n",
788 bytes_left, ielength);
789 return 0;
790 }
791
792 memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
793 src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
794 if (!src_dev) {
795 /*
796 * A DRP notification from an unrecognized device.
797 *
798 * This is probably from a WUSB device that doesn't
799 * have an EUI-48 and therefore doesn't show up in the
800 * UWB device database. It's safe to simply ignore
801 * these.
802 */
803 return 0;
804 }
805
806 mutex_lock(&rc->rsvs_mutex);
807
808 /* We do not distinguish from the reason */
809 uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
810
811 mutex_unlock(&rc->rsvs_mutex);
812
813 uwb_dev_put(src_dev);
814 return 0;
815}
1/*
2 * Ultra Wide Band
3 * Dynamic Reservation Protocol handling
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21#include <linux/kthread.h>
22#include <linux/freezer.h>
23#include <linux/slab.h>
24#include <linux/delay.h>
25#include "uwb-internal.h"
26
27
/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */
/* (duplicate copy of the enum from the first half of this paste) */
enum uwb_drp_conflict_action {
	/* Reservation is maintained, no action needed */
	UWB_DRP_CONFLICT_MANTAIN = 0,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. If the device is the reservation
	 * target, it shall also set the Reason Code in its DRP IE to
	 * Conflict in its beacon in the following superframe.
	 */
	UWB_DRP_CONFLICT_ACT1,

	/* the device shall not set the Reservation Status bit to ONE
	 * and shall not transmit frames in conflicting MASs. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT2,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. It shall remove the conflicting
	 * MASs from the reservation or set the Reservation Status to
	 * ZERO in its beacon in the following superframe. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT3,
};
56
57
/*
 * Completion callback for the asynchronous SET-DRP-IE command: log
 * failure/timeout, clear the pending flag and re-queue a reservation
 * update if one was requested while the command was in flight.
 * Uses the IRQ-safe lock variant since it may run in IRQ context.
 */
static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
				    struct uwb_rceb *reply, ssize_t reply_size)
{
	struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
	unsigned long flags;

	if (r != NULL) {
		if (r->bResultCode != UWB_RC_RES_SUCCESS)
			dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
				uwb_rc_strerror(r->bResultCode), r->bResultCode);
	} else
		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");

	spin_lock_irqsave(&rc->rsvs_lock, flags);
	if (rc->set_drp_ie_pending > 1) {
		rc->set_drp_ie_pending = 0;
		uwb_rsv_queue_update(rc);
	} else {
		rc->set_drp_ie_pending = 0;
	}
	spin_unlock_irqrestore(&rc->rsvs_lock, flags);
}
80
81/**
82 * Construct and send the SET DRP IE
83 *
84 * @rc: UWB Host controller
85 * @returns: >= 0 number of bytes still available in the beacon
86 * < 0 errno code on error.
87 *
88 * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
89 * device to include in its beacon at the same time. We thus have to
90 * traverse all reservations and include the DRP IEs of all PENDING
91 * and NEGOTIATED reservations in a SET DRP command for transmission.
92 *
93 * A DRP Availability IE is appended.
94 *
95 * rc->rsvs_mutex is held
96 *
97 * FIXME We currently ignore the returned value indicating the remaining space
98 * in beacon. This could be used to deny reservation requests earlier if
99 * determined that they would cause the beacon space to be exceeded.
100 */
int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
{
	int result;
	struct uwb_rc_cmd_set_drp_ie *cmd;
	struct uwb_rsv *rsv;
	struct uwb_rsv_move *mv;
	int num_bytes = 0;	/* total payload: all DRP IEs + DRP avail IE */
	u8 *IEDataptr;

	result = -ENOMEM;
	/* First traverse all reservations to determine memory needed. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			/* +2 covers the IE header (element ID + length octet) */
			num_bytes += rsv->drp_ie->hdr.length + 2;
			/* A reservation being moved carries a second,
			 * "companion" DRP IE. */
			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				num_bytes +=
					mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}
	num_bytes += sizeof(rc->drp_avail.ie);
	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
	if (cmd == NULL)
		goto error;
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
	cmd->wIELength = num_bytes;
	IEDataptr = (u8 *)&cmd->IEData[0];

	/* FIXME: DRP avail IE is not always needed */
	/* put DRP avail IE first */
	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
	IEDataptr += sizeof(struct uwb_ie_drp_avail);

	/* Next traverse all reservations to place IEs in allocated memory. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			memcpy(IEDataptr, rsv->drp_ie,
			       rsv->drp_ie->hdr.length + 2);
			IEDataptr += rsv->drp_ie->hdr.length + 2;

			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				memcpy(IEDataptr, mv->companion_drp_ie,
				       mv->companion_drp_ie->hdr.length + 2);
				IEDataptr +=
					mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}

	/* Fire-and-forget: completion is handled in
	 * uwb_rc_set_drp_cmd_done(). */
	result = uwb_rc_cmd_async(rc, "SET-DRP-IE",
				  &cmd->rccb, sizeof(*cmd) + num_bytes,
				  UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
				  uwb_rc_set_drp_cmd_done, NULL);

	rc->set_drp_ie_pending = 1;

	/* the command payload was copied by uwb_rc_cmd_async(), so the
	 * local buffer can be freed here */
	kfree(cmd);
error:
	return result;
}
166
167/*
168 * Evaluate the action to perform using conflict resolution rules
169 *
170 * Return a uwb_drp_conflict_action.
171 */
172static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
173 struct uwb_rsv *rsv, int our_status)
174{
175 int our_tie_breaker = rsv->tiebreaker;
176 int our_type = rsv->type;
177 int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;
178
179 int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
180 int ext_status = uwb_ie_drp_status(ext_drp_ie);
181 int ext_type = uwb_ie_drp_type(ext_drp_ie);
182
183
184 /* [ECMA-368 2nd Edition] 17.4.6 */
185 if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
186 return UWB_DRP_CONFLICT_MANTAIN;
187 }
188
189 /* [ECMA-368 2nd Edition] 17.4.6-1 */
190 if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
191 return UWB_DRP_CONFLICT_MANTAIN;
192 }
193
194 /* [ECMA-368 2nd Edition] 17.4.6-2 */
195 if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
196 /* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
197 return UWB_DRP_CONFLICT_ACT1;
198 }
199
200 /* [ECMA-368 2nd Edition] 17.4.6-3 */
201 if (our_status == 0 && ext_status == 1) {
202 return UWB_DRP_CONFLICT_ACT2;
203 }
204
205 /* [ECMA-368 2nd Edition] 17.4.6-4 */
206 if (our_status == 1 && ext_status == 0) {
207 return UWB_DRP_CONFLICT_MANTAIN;
208 }
209
210 /* [ECMA-368 2nd Edition] 17.4.6-5a */
211 if (our_tie_breaker == ext_tie_breaker &&
212 our_beacon_slot < ext_beacon_slot) {
213 return UWB_DRP_CONFLICT_MANTAIN;
214 }
215
216 /* [ECMA-368 2nd Edition] 17.4.6-5b */
217 if (our_tie_breaker != ext_tie_breaker &&
218 our_beacon_slot > ext_beacon_slot) {
219 return UWB_DRP_CONFLICT_MANTAIN;
220 }
221
222 if (our_status == 0) {
223 if (our_tie_breaker == ext_tie_breaker) {
224 /* [ECMA-368 2nd Edition] 17.4.6-6a */
225 if (our_beacon_slot > ext_beacon_slot) {
226 return UWB_DRP_CONFLICT_ACT2;
227 }
228 } else {
229 /* [ECMA-368 2nd Edition] 17.4.6-6b */
230 if (our_beacon_slot < ext_beacon_slot) {
231 return UWB_DRP_CONFLICT_ACT2;
232 }
233 }
234 } else {
235 if (our_tie_breaker == ext_tie_breaker) {
236 /* [ECMA-368 2nd Edition] 17.4.6-7a */
237 if (our_beacon_slot > ext_beacon_slot) {
238 return UWB_DRP_CONFLICT_ACT3;
239 }
240 } else {
241 /* [ECMA-368 2nd Edition] 17.4.6-7b */
242 if (our_beacon_slot < ext_beacon_slot) {
243 return UWB_DRP_CONFLICT_ACT3;
244 }
245 }
246 }
247 return UWB_DRP_CONFLICT_MANTAIN;
248}
249
250static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
251 int ext_beacon_slot,
252 struct uwb_rsv *rsv,
253 struct uwb_mas_bm *conflicting_mas)
254{
255 struct uwb_rc *rc = rsv->rc;
256 struct uwb_rsv_move *mv = &rsv->mv;
257 struct uwb_drp_backoff_win *bow = &rc->bow;
258 int action;
259
260 action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));
261
262 if (uwb_rsv_is_owner(rsv)) {
263 switch(action) {
264 case UWB_DRP_CONFLICT_ACT2:
265 /* try move */
266 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
267 if (bow->can_reserve_extra_mases == false)
268 uwb_rsv_backoff_win_increment(rc);
269
270 break;
271 case UWB_DRP_CONFLICT_ACT3:
272 uwb_rsv_backoff_win_increment(rc);
273 /* drop some mases with reason modified */
274 /* put in the companion the mases to be dropped */
275 bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
276 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
277 default:
278 break;
279 }
280 } else {
281 switch(action) {
282 case UWB_DRP_CONFLICT_ACT2:
283 case UWB_DRP_CONFLICT_ACT3:
284 uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
285 default:
286 break;
287 }
288
289 }
290
291}
292
/*
 * Resolve a conflict on a reservation that has a companion DRP IE
 * (i.e. a move/expand is in progress).
 *
 * @companion_only: true when only the companion (extra) MASs are in
 *	conflict; false when the established base part of the
 *	reservation conflicts as well.
 */
static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
				      struct uwb_rsv *rsv, bool companion_only,
				      struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv_move *mv = &rsv->mv;
	int action;

	if (companion_only) {
		/* status of companion is 0 at this point */
		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
		if (uwb_rsv_is_owner(rsv)) {
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				/* abandon the expansion: fall back to the
				 * established base reservation and give the
				 * companion MASs back */
				uwb_rsv_set_state(rsv,
						UWB_RSV_STATE_O_ESTABLISHED);
				rsv->needs_release_companion_mas = false;
				if (bow->can_reserve_extra_mases == false)
					uwb_rsv_backoff_win_increment(rc);
				uwb_drp_avail_release(rsv->rc,
						&rsv->mv.companion_mas);
			}
		} else { /* rsv is target */
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_CONFLICT);
				/* send_drp_avail_ie = true; */
			}
		}
	} else { /* also base part of the reservation is conflicting */
		if (uwb_rsv_is_owner(rsv)) {
			uwb_rsv_backoff_win_increment(rc);
			/* remove companion part */
			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);

			/* drop some mases with reason modified */

			/* put in the companion the mases to be dropped */
			/* NOTE(review): handle_conflict_normal() fills the
			 * companion with bitmap_and() (the conflicting MASs)
			 * while this uses bitmap_andnot() (the
			 * non-conflicting ones) -- confirm which set the
			 * O_MODIFIED state expects in the companion. */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm,
					conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		} else { /* it is a target rsv */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			/* send_drp_avail_ie = true; */
		}
	}
}
344
345static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
346 struct uwb_rc_evt_drp *drp_evt,
347 struct uwb_ie_drp *drp_ie,
348 struct uwb_mas_bm *conflicting_mas)
349{
350 struct uwb_rsv_move *mv;
351
352 /* check if the conflicting reservation has two drp_ies */
353 if (uwb_rsv_has_two_drp_ies(rsv)) {
354 mv = &rsv->mv;
355 if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
356 UWB_NUM_MAS)) {
357 handle_conflict_expanding(drp_ie,
358 drp_evt->beacon_slot_number,
359 rsv, false, conflicting_mas);
360 } else {
361 if (bitmap_intersects(mv->companion_mas.bm,
362 conflicting_mas->bm, UWB_NUM_MAS)) {
363 handle_conflict_expanding(
364 drp_ie, drp_evt->beacon_slot_number,
365 rsv, true, conflicting_mas);
366 }
367 }
368 } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
369 UWB_NUM_MAS)) {
370 handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number,
371 rsv, conflicting_mas);
372 }
373}
374
375static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
376 struct uwb_rc_evt_drp *drp_evt,
377 struct uwb_ie_drp *drp_ie,
378 struct uwb_mas_bm *conflicting_mas)
379{
380 struct uwb_rsv *rsv;
381
382 list_for_each_entry(rsv, &rc->reservations, rc_node) {
383 uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie,
384 conflicting_mas);
385 }
386}
387
/*
 * Handle an ACCEPTED reason code in a DRP IE for a reservation in
 * which we are the target.
 *
 * @mas: the MAS bitmap decoded from the received DRP IE.
 *
 * NOTE(review): several branches call uwb_rsv_set_state() with the
 * state the reservation is already in -- presumably this "strokes"
 * (restarts) the reservation timers rather than being a no-op;
 * confirm against uwb_rsv_set_state().
 */
static void uwb_drp_process_target_accepted(struct uwb_rc *rc,
	struct uwb_rsv *rsv, struct uwb_rc_evt_drp *drp_evt,
	struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *mas)
{
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;

	status = uwb_ie_drp_status(drp_ie);

	if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
		/* re-assert the conflict state (timer stroke) */
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		return;
	}

	if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
		/* drp_ie is companion */
		if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
			/* stroke companion */
			uwb_rsv_set_state(rsv,
				UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		}
	} else {
		if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
			/* the IE asks for MASs beyond the current base
			 * reservation: an expansion attempt by the owner */
			if (uwb_drp_avail_reserve_pending(rc, mas) == -EBUSY) {
				/* FIXME: there is a conflict, find
				 * the conflicting reservations and
				 * take a sensible action. Consider
				 * that in drp_ie there is the
				 * "neighbour" */
				uwb_drp_handle_all_conflict_rsv(rc, drp_evt,
						drp_ie, mas);
			} else {
				/* accept the extra reservation */
				bitmap_copy(mv->companion_mas.bm, mas->bm,
						UWB_NUM_MAS);
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
			}
		} else {
			if (status) {
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_ACCEPTED);
			}
		}

	}
}
435
436/*
437 * Based on the DRP IE, transition a target reservation to a new
438 * state.
439 */
440static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
441 struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
442{
443 struct device *dev = &rc->uwb_dev.dev;
444 struct uwb_rsv_move *mv = &rsv->mv;
445 int status;
446 enum uwb_drp_reason reason_code;
447 struct uwb_mas_bm mas;
448
449 status = uwb_ie_drp_status(drp_ie);
450 reason_code = uwb_ie_drp_reason_code(drp_ie);
451 uwb_drp_ie_to_bm(&mas, drp_ie);
452
453 switch (reason_code) {
454 case UWB_DRP_REASON_ACCEPTED:
455 uwb_drp_process_target_accepted(rc, rsv, drp_evt, drp_ie, &mas);
456 break;
457
458 case UWB_DRP_REASON_MODIFIED:
459 /* check to see if we have already modified the reservation */
460 if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
461 uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
462 break;
463 }
464
465 /* find if the owner wants to expand or reduce */
466 if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
467 /* owner is reducing */
468 bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm,
469 UWB_NUM_MAS);
470 uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
471 }
472
473 bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
474 uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
475 break;
476 default:
477 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
478 reason_code, status);
479 }
480}
481
482static void uwb_drp_process_owner_accepted(struct uwb_rsv *rsv,
483 struct uwb_mas_bm *mas)
484{
485 struct uwb_rsv_move *mv = &rsv->mv;
486
487 switch (rsv->state) {
488 case UWB_RSV_STATE_O_PENDING:
489 case UWB_RSV_STATE_O_INITIATED:
490 case UWB_RSV_STATE_O_ESTABLISHED:
491 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
492 break;
493 case UWB_RSV_STATE_O_MODIFIED:
494 if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
495 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
496 else
497 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
498 break;
499
500 case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn' t be a problem */
501 if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
502 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
503 else
504 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
505 break;
506 case UWB_RSV_STATE_O_MOVE_EXPANDING:
507 if (bitmap_equal(mas->bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
508 /* Companion reservation accepted */
509 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
510 } else {
511 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
512 }
513 break;
514 case UWB_RSV_STATE_O_MOVE_COMBINING:
515 if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
516 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
517 else
518 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
519 break;
520 default:
521 break;
522 }
523}
524/*
525 * Based on the DRP IE, transition an owner reservation to a new
526 * state.
527 */
528static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
529 struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
530 struct uwb_rc_evt_drp *drp_evt)
531{
532 struct device *dev = &rc->uwb_dev.dev;
533 int status;
534 enum uwb_drp_reason reason_code;
535 struct uwb_mas_bm mas;
536
537 status = uwb_ie_drp_status(drp_ie);
538 reason_code = uwb_ie_drp_reason_code(drp_ie);
539 uwb_drp_ie_to_bm(&mas, drp_ie);
540
541 if (status) {
542 switch (reason_code) {
543 case UWB_DRP_REASON_ACCEPTED:
544 uwb_drp_process_owner_accepted(rsv, &mas);
545 break;
546 default:
547 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
548 reason_code, status);
549 }
550 } else {
551 switch (reason_code) {
552 case UWB_DRP_REASON_PENDING:
553 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
554 break;
555 case UWB_DRP_REASON_DENIED:
556 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
557 break;
558 case UWB_DRP_REASON_CONFLICT:
559 /* resolve the conflict */
560 bitmap_complement(mas.bm, src->last_availability_bm,
561 UWB_NUM_MAS);
562 uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
563 break;
564 default:
565 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
566 reason_code, status);
567 }
568 }
569}
570
571static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
572{
573 unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
574 mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
575}
576
/*
 * Work item run when an alien BP conflict entry's timer expires: the
 * conflicting DRP IE has not been seen for the timeout window, so
 * drop the entry and recompute the radio controller's global
 * conflicting-alien bitmap from the remaining entries.
 */
static void uwb_cnflt_update_work(struct work_struct *work)
{
	struct uwb_cnflt_alien *cnflt = container_of(work,
						     struct uwb_cnflt_alien,
						     cnflt_update_work);
	struct uwb_cnflt_alien *c;
	struct uwb_rc *rc = cnflt->rc;

	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	mutex_lock(&rc->rsvs_mutex);

	list_del(&cnflt->rc_node);

	/* update rc global conflicting alien bitmap */
	bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);

	/* OR together the bitmaps of the entries still on the list */
	list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
		bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm,
			  c->mas.bm, UWB_NUM_MAS);
	}

	/* let the reservation logic react to the changed bitmap */
	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work,
			   usecs_to_jiffies(delay_us));

	/* NOTE(review): cnflt is freed without del_timer_sync(); this
	 * is safe only if the timer cannot be re-armed once this work
	 * has been queued -- confirm against the stroke path. */
	kfree(cnflt);
	mutex_unlock(&rc->rsvs_mutex);
}
605
606static void uwb_cnflt_timer(unsigned long arg)
607{
608 struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;
609
610 queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
611}
612
613/*
 * We have received a DRP IE of type Alien BP and we need to make
615 * sure we do not transmit in conflicting MASs.
616 */
617static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
618{
619 struct device *dev = &rc->uwb_dev.dev;
620 struct uwb_mas_bm mas;
621 struct uwb_cnflt_alien *cnflt;
622 unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
623
624 uwb_drp_ie_to_bm(&mas, drp_ie);
625
626 list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
627 if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
628 /* Existing alien BP reservation conflicting
629 * bitmap, just reset the timer */
630 uwb_cnflt_alien_stroke_timer(cnflt);
631 return;
632 }
633 }
634
635 /* New alien BP reservation conflicting bitmap */
636
637 /* alloc and initialize new uwb_cnflt_alien */
638 cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
639 if (!cnflt) {
640 dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
641 return;
642 }
643
644 INIT_LIST_HEAD(&cnflt->rc_node);
645 setup_timer(&cnflt->timer, uwb_cnflt_timer, (unsigned long)cnflt);
646
647 cnflt->rc = rc;
648 INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);
649
650 bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);
651
652 list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);
653
654 /* update rc global conflicting alien bitmap */
655 bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);
656
657 queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));
658
659 /* start the timer */
660 uwb_cnflt_alien_stroke_timer(cnflt);
661}
662
663static void uwb_drp_process_not_involved(struct uwb_rc *rc,
664 struct uwb_rc_evt_drp *drp_evt,
665 struct uwb_ie_drp *drp_ie)
666{
667 struct uwb_mas_bm mas;
668
669 uwb_drp_ie_to_bm(&mas, drp_ie);
670 uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
671}
672
673static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
674 struct uwb_rc_evt_drp *drp_evt,
675 struct uwb_ie_drp *drp_ie)
676{
677 struct uwb_rsv *rsv;
678
679 rsv = uwb_rsv_find(rc, src, drp_ie);
680 if (!rsv) {
681 /*
682 * No reservation? It's either for a recently
683 * terminated reservation; or the DRP IE couldn't be
684 * processed (e.g., an invalid IE or out of memory).
685 */
686 return;
687 }
688
689 /*
690 * Do nothing with DRP IEs for reservations that have been
691 * terminated.
692 */
693 if (rsv->state == UWB_RSV_STATE_NONE) {
694 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
695 return;
696 }
697
698 if (uwb_ie_drp_owner(drp_ie))
699 uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
700 else
701 uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);
702
703}
704
705
706static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
707{
708 return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
709}
710
711/*
712 * Process a received DRP IE.
713 */
714static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
715 struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
716{
717 if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
718 uwb_drp_handle_alien_drp(rc, drp_ie);
719 else if (uwb_drp_involves_us(rc, drp_ie))
720 uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
721 else
722 uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
723}
724
725/*
726 * Process a received DRP Availability IE
727 */
728static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
729 struct uwb_ie_drp_avail *drp_availability_ie)
730{
731 bitmap_copy(src->last_availability_bm,
732 drp_availability_ie->bmp, UWB_NUM_MAS);
733}
734
735/*
736 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
737 * from a device.
738 */
739static
740void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
741 size_t ielen, struct uwb_dev *src_dev)
742{
743 struct device *dev = &rc->uwb_dev.dev;
744 struct uwb_ie_hdr *ie_hdr;
745 void *ptr;
746
747 ptr = drp_evt->ie_data;
748 for (;;) {
749 ie_hdr = uwb_ie_next(&ptr, &ielen);
750 if (!ie_hdr)
751 break;
752
753 switch (ie_hdr->element_id) {
754 case UWB_IE_DRP_AVAILABILITY:
755 uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
756 break;
757 case UWB_IE_DRP:
758 uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
759 break;
760 default:
761 dev_warn(dev, "unexpected IE in DRP notification\n");
762 break;
763 }
764 }
765
766 if (ielen > 0)
767 dev_warn(dev, "%d octets remaining in DRP notification\n",
768 (int)ielen);
769}
770
771/**
772 * uwbd_evt_handle_rc_drp - handle a DRP_IE event
773 * @evt: the DRP_IE event from the radio controller
774 *
775 * This processes DRP notifications from the radio controller, either
776 * initiating a new reservation or transitioning an existing
777 * reservation into a different state.
778 *
779 * DRP notifications can occur for three different reasons:
780 *
781 * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
782 * the target or source have been received.
783 *
784 * These DRP IEs could be new or for an existing reservation.
785 *
 *    If the DRP IE for an existing reservation ceases to be
 *    received for at least mMaxLostBeacons, the reservation should be
788 * considered to be terminated. Note that the TERMINATE reason (see
789 * below) may not always be signalled (e.g., the remote device has
790 * two or more reservations established with the RC).
791 *
792 * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
793 * group conflict with the RC's reservations.
794 *
795 * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
796 * from a device (i.e., it's terminated all reservations).
797 *
798 * Only the software state of the reservations is changed; the setting
799 * of the radio controller's DRP IEs is done after all the events in
800 * an event buffer are processed. This saves waiting multiple times
801 * for the SET_DRP_IE command to complete.
802 */
803int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
804{
805 struct device *dev = &evt->rc->uwb_dev.dev;
806 struct uwb_rc *rc = evt->rc;
807 struct uwb_rc_evt_drp *drp_evt;
808 size_t ielength, bytes_left;
809 struct uwb_dev_addr src_addr;
810 struct uwb_dev *src_dev;
811
812 /* Is there enough data to decode the event (and any IEs in
813 its payload)? */
814 if (evt->notif.size < sizeof(*drp_evt)) {
815 dev_err(dev, "DRP event: Not enough data to decode event "
816 "[%zu bytes left, %zu needed]\n",
817 evt->notif.size, sizeof(*drp_evt));
818 return 0;
819 }
820 bytes_left = evt->notif.size - sizeof(*drp_evt);
821 drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
822 ielength = le16_to_cpu(drp_evt->ie_length);
823 if (bytes_left != ielength) {
824 dev_err(dev, "DRP event: Not enough data in payload [%zu"
825 "bytes left, %zu declared in the event]\n",
826 bytes_left, ielength);
827 return 0;
828 }
829
830 memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
831 src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
832 if (!src_dev) {
833 /*
834 * A DRP notification from an unrecognized device.
835 *
836 * This is probably from a WUSB device that doesn't
837 * have an EUI-48 and therefore doesn't show up in the
838 * UWB device database. It's safe to simply ignore
839 * these.
840 */
841 return 0;
842 }
843
844 mutex_lock(&rc->rsvs_mutex);
845
846 /* We do not distinguish from the reason */
847 uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
848
849 mutex_unlock(&rc->rsvs_mutex);
850
851 uwb_dev_put(src_dev);
852 return 0;
853}