Linux Audio

Check our new training course

Loading...
v6.2
   1// SPDX-License-Identifier: ISC
   2/*
   3 * Copyright (c) 2005-2011 Atheros Communications Inc.
   4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 
 
 
 
 
 
 
 
 
 
 
 
   5 */
   6
   7#include "core.h"
   8#include "hif.h"
   9#include "debug.h"
  10
  11/********/
  12/* Send */
  13/********/
  14
/* TX-complete handler for the HTC control endpoint: control skbs are
 * allocated and owned by HTC itself, so just free them here.
 */
static void ath10k_htc_control_tx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	kfree_skb(skb);
}
  20
  21static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
  22{
  23	struct sk_buff *skb;
  24	struct ath10k_skb_cb *skb_cb;
  25
  26	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
  27	if (!skb)
  28		return NULL;
  29
  30	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
  31	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
  32
  33	skb_cb = ATH10K_SKB_CB(skb);
  34	memset(skb_cb, 0, sizeof(*skb_cb));
  35
  36	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
  37	return skb;
  38}
  39
  40static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
  41					     struct sk_buff *skb)
  42{
  43	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
  44
  45	if (htc->ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
  46		dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
  47	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
  48}
  49
/* Complete a transmitted HTC frame: unmap it, strip the HTC header and
 * either hand the skb to the endpoint's ep_tx_complete callback or free
 * it here (no callback registered, or the frame was part of a TX bundle).
 * In all paths ownership of @skb leaves the caller.
 */
void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
				     struct sk_buff *skb)
{
	struct ath10k *ar = ep->htc->ar;
	struct ath10k_htc_hdr *hdr;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
		   ep->eid, skb);

	/* A corner case where the copy completion is reaching to host but still
	 * copy engine is processing it due to which host unmaps corresponding
	 * memory and causes SMMU fault, hence as workaround adding delay
	 * the unmapping memory to avoid SMMU faults.
	 */
	if (ar->hw_params.delay_unmap_buffer &&
	    ep->ul_pipe_id == 3)
		mdelay(2);

	/* grab the header pointer before restore_tx_skb pulls it off;
	 * the header bytes remain readable in the skb headroom
	 */
	hdr = (struct ath10k_htc_hdr *)skb->data;
	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	/* bundled frames are not reported individually to the upper layer */
	if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
		dev_kfree_skb_any(skb);
		return;
	}

	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
  85
  86static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
  87				      struct sk_buff *skb)
  88{
  89	struct ath10k_htc_hdr *hdr;
  90
  91	hdr = (struct ath10k_htc_hdr *)skb->data;
  92	memset(hdr, 0, sizeof(struct ath10k_htc_hdr));
  93
  94	hdr->eid = ep->eid;
  95	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
  96	hdr->flags = 0;
  97	if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
  98		hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
  99
 100	spin_lock_bh(&ep->htc->tx_lock);
 101	hdr->seq_no = ep->seq_no++;
 102	spin_unlock_bh(&ep->htc->tx_lock);
 103}
 104
 105static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
 106				     unsigned int len,
 107				     bool consume)
 108{
 109	struct ath10k_htc *htc = ep->htc;
 110	struct ath10k *ar = htc->ar;
 111	enum ath10k_htc_ep_id eid = ep->eid;
 112	int credits, ret = 0;
 113
 114	if (!ep->tx_credit_flow_enabled)
 115		return 0;
 116
 117	credits = DIV_ROUND_UP(len, ep->tx_credit_size);
 118	spin_lock_bh(&htc->tx_lock);
 119
 120	if (ep->tx_credits < credits) {
 121		ath10k_dbg(ar, ATH10K_DBG_HTC,
 122			   "htc insufficient credits ep %d required %d available %d consume %d\n",
 123			   eid, credits, ep->tx_credits, consume);
 124		ret = -EAGAIN;
 125		goto unlock;
 126	}
 127
 128	if (consume) {
 129		ep->tx_credits -= credits;
 130		ath10k_dbg(ar, ATH10K_DBG_HTC,
 131			   "htc ep %d consumed %d credits total %d\n",
 132			   eid, credits, ep->tx_credits);
 133	}
 134
 135unlock:
 136	spin_unlock_bh(&htc->tx_lock);
 137	return ret;
 138}
 139
 140static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
 141{
 142	struct ath10k_htc *htc = ep->htc;
 143	struct ath10k *ar = htc->ar;
 144	enum ath10k_htc_ep_id eid = ep->eid;
 145	int credits;
 146
 147	if (!ep->tx_credit_flow_enabled)
 148		return;
 149
 150	credits = DIV_ROUND_UP(len, ep->tx_credit_size);
 151	spin_lock_bh(&htc->tx_lock);
 152	ep->tx_credits += credits;
 153	ath10k_dbg(ar, ATH10K_DBG_HTC,
 154		   "htc ep %d reverted %d credits back total %d\n",
 155		   eid, credits, ep->tx_credits);
 156	spin_unlock_bh(&htc->tx_lock);
 157
 158	if (ep->ep_ops.ep_tx_credits)
 159		ep->ep_ops.ep_tx_credits(htc->ar);
 160}
 161
/* Transmit one HTC frame on endpoint @eid.
 *
 * Pushes the HTC header onto @skb, consumes TX credits (if the endpoint
 * uses credit flow control), DMA-maps the frame on low-latency buses and
 * hands it to the HIF layer.  On failure every step is unwound (header
 * popped, mapping released, credits returned) so the caller retains
 * ownership of @skb in its original state.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int ret;
	unsigned int skb_len;

	/* refuse traffic once the device is declared wedged */
	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	/* snapshot length incl. HTC header for symmetric credit accounting */
	skb_len = skb->len;
	ret = ath10k_htc_consume_credit(ep, skb_len, true);
	if (ret)
		goto err_pull;

	ath10k_htc_prepare_tx_skb(ep, skb);

	skb_cb->eid = eid;
	/* high-latency buses (HL) do not DMA-map the frame */
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
		skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
					       DMA_TO_DEVICE);
		ret = dma_mapping_error(dev, skb_cb->paddr);
		if (ret) {
			ret = -EIO;
			goto err_credits;
		}
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	ath10k_htc_release_credit(ep, skb_len);
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}
 223
 224void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
 225{
 226	struct ath10k_htc *htc = &ar->htc;
 227	struct ath10k_skb_cb *skb_cb;
 228	struct ath10k_htc_ep *ep;
 229
 230	if (WARN_ON_ONCE(!skb))
 231		return;
 232
 233	skb_cb = ATH10K_SKB_CB(skb);
 234	ep = &htc->endpoint[skb_cb->eid];
 235
 236	ath10k_htc_notify_tx_completion(ep, skb);
 237	/* the skb now belongs to the completion handler */
 238}
 239EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
 240
 241/***********/
 242/* Receive */
 243/***********/
 244
 245static void
 246ath10k_htc_process_credit_report(struct ath10k_htc *htc,
 247				 const struct ath10k_htc_credit_report *report,
 248				 int len,
 249				 enum ath10k_htc_ep_id eid)
 250{
 251	struct ath10k *ar = htc->ar;
 252	struct ath10k_htc_ep *ep;
 253	int i, n_reports;
 254
 255	if (len % sizeof(*report))
 256		ath10k_warn(ar, "Uneven credit report len %d", len);
 257
 258	n_reports = len / sizeof(*report);
 259
 260	spin_lock_bh(&htc->tx_lock);
 261	for (i = 0; i < n_reports; i++, report++) {
 262		if (report->eid >= ATH10K_HTC_EP_COUNT)
 263			break;
 264
 265		ep = &htc->endpoint[report->eid];
 266		ep->tx_credits += report->credits;
 267
 268		ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
 269			   report->eid, report->credits, ep->tx_credits);
 270
 271		if (ep->ep_ops.ep_tx_credits) {
 272			spin_unlock_bh(&htc->tx_lock);
 273			ep->ep_ops.ep_tx_credits(htc->ar);
 274			spin_lock_bh(&htc->tx_lock);
 275		}
 276	}
 277	spin_unlock_bh(&htc->tx_lock);
 278}
 279
 280static int
 281ath10k_htc_process_lookahead(struct ath10k_htc *htc,
 282			     const struct ath10k_htc_lookahead_report *report,
 283			     int len,
 284			     enum ath10k_htc_ep_id eid,
 285			     void *next_lookaheads,
 286			     int *next_lookaheads_len)
 287{
 288	struct ath10k *ar = htc->ar;
 289
 290	/* Invalid lookahead flags are actually transmitted by
 291	 * the target in the HTC control message.
 292	 * Since this will happen at every boot we silently ignore
 293	 * the lookahead in this case
 294	 */
 295	if (report->pre_valid != ((~report->post_valid) & 0xFF))
 296		return 0;
 297
 298	if (next_lookaheads && next_lookaheads_len) {
 299		ath10k_dbg(ar, ATH10K_DBG_HTC,
 300			   "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
 301			   report->pre_valid, report->post_valid);
 302
 303		/* look ahead bytes are valid, copy them over */
 304		memcpy((u8 *)next_lookaheads, report->lookahead, 4);
 305
 306		*next_lookaheads_len = 1;
 307	}
 308
 309	return 0;
 310}
 311
 312static int
 313ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
 314				    const struct ath10k_htc_lookahead_bundle *report,
 315				    int len,
 316				    enum ath10k_htc_ep_id eid,
 317				    void *next_lookaheads,
 318				    int *next_lookaheads_len)
 319{
 320	struct ath10k *ar = htc->ar;
 321	int bundle_cnt = len / sizeof(*report);
 322
 323	if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
 324		ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
 325			    bundle_cnt);
 326		return -EINVAL;
 327	}
 328
 329	if (next_lookaheads && next_lookaheads_len) {
 330		int i;
 331
 332		for (i = 0; i < bundle_cnt; i++) {
 333			memcpy(((u8 *)next_lookaheads) + 4 * i,
 334			       report->lookahead, 4);
 335			report++;
 336		}
 337
 338		*next_lookaheads_len = bundle_cnt;
 339	}
 340
 341	return 0;
 342}
 343
/* Walk the trailer appended by the target to some RX frames.  The
 * trailer is a sequence of TLV-style records: credit reports, lookahead
 * reports and lookahead bundles.  Parsing stops at the first malformed
 * record with -EINVAL, in which case the whole trailer is dumped to the
 * HTC debug log.  Returns 0 when all records were processed.
 */
int ath10k_htc_process_trailer(struct ath10k_htc *htc,
			       u8 *buffer,
			       int length,
			       enum ath10k_htc_ep_id src_eid,
			       void *next_lookaheads,
			       int *next_lookaheads_len)
{
	struct ath10k_htc_lookahead_bundle *bundle;
	struct ath10k *ar = htc->ar;
	int status = 0;
	struct ath10k_htc_record *record;
	u8 *orig_buffer;
	int orig_length;
	size_t len;

	/* keep the untouched trailer around for the error dump below */
	orig_buffer = buffer;
	orig_length = length;

	while (length > 0) {
		record = (struct ath10k_htc_record *)buffer;

		/* must at least hold a record header */
		if (length < sizeof(record->hdr)) {
			status = -EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			ath10k_warn(ar, "Invalid record length: %d\n",
				    record->hdr.len);
			status = -EINVAL;
			break;
		}

		switch (record->hdr.id) {
		case ATH10K_HTC_RECORD_CREDITS:
			len = sizeof(struct ath10k_htc_credit_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Credit report too long\n");
				status = -EINVAL;
				break;
			}
			ath10k_htc_process_credit_report(htc,
							 record->credit_report,
							 record->hdr.len,
							 src_eid);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD:
			len = sizeof(struct ath10k_htc_lookahead_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Lookahead report too long\n");
				status = -EINVAL;
				break;
			}
			status = ath10k_htc_process_lookahead(htc,
							      record->lookahead_report,
							      record->hdr.len,
							      src_eid,
							      next_lookaheads,
							      next_lookaheads_len);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
			bundle = record->lookahead_bundle;
			status = ath10k_htc_process_lookahead_bundle(htc,
								     bundle,
								     record->hdr.len,
								     src_eid,
								     next_lookaheads,
								     next_lookaheads_len);
			break;
		default:
			/* unknown record ids are skipped, not fatal */
			ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
				    record->hdr.id, record->hdr.len);
			break;
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	if (status)
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
				orig_buffer, orig_length);

	return status;
}
EXPORT_SYMBOL(ath10k_htc_process_trailer);
 435
/* HIF RX completion: validate the HTC header of @skb, process an
 * optional trailer (credit/lookahead records) and deliver the remaining
 * payload to the endpoint's ep_rx_complete callback.  Ownership of the
 * skb passes to the callback on delivery; on any validation failure the
 * skb is freed here.
 */
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	/* hdr keeps pointing at the header bytes after the pull */
	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	ep = &htc->endpoint[eid];
	if (ep->service_id == ATH10K_HTC_SVC_ID_UNUSED) {
		ath10k_warn(ar, "htc rx endpoint %d is not connected\n", eid);
		goto out;
	}

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		/* trailer must hold at least one record header and fit
		 * inside the payload
		 */
		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			goto out;
		}

		/* the trailer occupies the last trailer_len bytes of the
		 * payload
		 */
		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid,
						    NULL, NULL);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
 527
/* RX handler for the HTC control endpoint (EP0).
 *
 * READY and CONNECT_SERVICE responses are copied into
 * control_resp_buffer (truncated to ATH10K_HTC_MAX_CTRL_MSG_LEN) and the
 * ctl_resp completion is signalled so waiters such as
 * ath10k_htc_wait_target() can proceed; suspend-complete indications are
 * forwarded to the registered htc_ops callback.  The skb is always
 * consumed here.
 */
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

	switch (__le16_to_cpu(msg->hdr.message_id)) {
	case ATH10K_HTC_MSG_READY_ID:
	case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
		/* handle HTC control message */
		if (completion_done(&htc->ctl_resp)) {
			/* this is a fatal error, target should not be
			 * sending unsolicited messages on the ep 0
			 */
			ath10k_warn(ar, "HTC rx ctrl still processing\n");
			complete(&htc->ctl_resp);
			goto out;
		}

		htc->control_resp_len =
			min_t(int, skb->len,
			      ATH10K_HTC_MAX_CTRL_MSG_LEN);

		memcpy(htc->control_resp_buffer, skb->data,
		       htc->control_resp_len);

		complete(&htc->ctl_resp);
		break;
	case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
		htc->htc_ops.target_send_suspend_complete(ar);
		break;
	default:
		ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
		break;
	}

out:
	kfree_skb(skb);
}
 567
 568/***************/
 569/* Init/Deinit */
 570/***************/
 571
 572static const char *htc_service_name(enum ath10k_htc_svc_id id)
 573{
 574	switch (id) {
 575	case ATH10K_HTC_SVC_ID_RESERVED:
 576		return "Reserved";
 577	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
 578		return "Control";
 579	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
 580		return "WMI";
 581	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
 582		return "DATA BE";
 583	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
 584		return "DATA BK";
 585	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
 586		return "DATA VI";
 587	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
 588		return "DATA VO";
 589	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
 590		return "NMI Control";
 591	case ATH10K_HTC_SVC_ID_NMI_DATA:
 592		return "NMI Data";
 593	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
 594		return "HTT Data";
 595	case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
 596		return "HTT Data";
 597	case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
 598		return "HTT Data";
 599	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
 600		return "RAW";
 601	case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
 602		return "PKTLOG";
 603	}
 604
 605	return "Unknown";
 606}
 607
 608static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
 609{
 610	struct ath10k_htc_ep *ep;
 611	int i;
 612
 613	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
 614		ep = &htc->endpoint[i];
 615		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
 616		ep->max_ep_message_len = 0;
 617		ep->max_tx_queue_depth = 0;
 618		ep->eid = i;
 619		ep->htc = htc;
 620		ep->tx_credit_flow_enabled = true;
 621	}
 622}
 623
 624static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
 625					   u16 service_id)
 626{
 627	u8 allocation = 0;
 628
 629	/* The WMI control service is the only service with flow control.
 630	 * Let it have all transmit credits.
 631	 */
 632	if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
 633		allocation = htc->total_transmit_credits;
 634
 635	return allocation;
 636}
 637
/* Transmit an assembled bundle buffer and account for its member frames.
 *
 * @bundle_skb holds the concatenated HTC frames; @tx_save_head the
 * original member skbs.  On failure the members get their HTC headers
 * stripped and are pushed back onto tx_req_head for retry; on success
 * they are queued on tx_complete_head and completed asynchronously by
 * the tx-complete worker.
 */
static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
				  struct sk_buff *bundle_skb,
				  struct sk_buff_head *tx_save_head)
{
	struct ath10k_hif_sg_item sg_item;
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int ret, cn = 0;
	unsigned int skb_len;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
	skb_len = bundle_skb->len;
	ret = ath10k_htc_consume_credit(ep, skb_len, true);

	if (!ret) {
		/* NOTE(review): sg_item.paddr is deliberately left unset;
		 * presumably bundling is only used on buses that don't
		 * DMA-map here - confirm against the HIF implementation
		 */
		sg_item.transfer_id = ep->eid;
		sg_item.transfer_context = bundle_skb;
		sg_item.vaddr = bundle_skb->data;
		sg_item.len = bundle_skb->len;

		ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
		if (ret)
			ath10k_htc_release_credit(ep, skb_len);
	}

	if (ret)
		dev_kfree_skb_any(bundle_skb);

	/* distribute the member skbs according to the overall outcome */
	for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
		if (ret) {
			skb_pull(skb, sizeof(struct ath10k_htc_hdr));
			skb_queue_head(&ep->tx_req_head, skb);
		} else {
			skb_queue_tail(&ep->tx_complete_head, skb);
		}
	}

	if (!ret)
		queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "bundle tx status %d eid %d req count %d count %d len %d\n",
		   ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
	return ret;
}
 684
 685static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
 686{
 687	struct ath10k_htc *htc = ep->htc;
 688	struct ath10k *ar = htc->ar;
 689	int ret;
 690
 691	ret = ath10k_htc_send(htc, ep->eid, skb);
 692
 693	if (ret)
 694		skb_queue_head(&ep->tx_req_head, skb);
 695
 696	ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
 697		   ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
 698}
 699
/* Drain the endpoint's tx_req_head queue, packing frames into bundle
 * buffers of ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE credits' worth of space
 * and flushing each full bundle via ath10k_htc_send_bundle().  Frames
 * that cannot be sent (no credits, allocation failure, send error) are
 * returned to tx_req_head.  Returns 0 or a negative errno.
 */
static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
{
	struct ath10k_htc *htc = ep->htc;
	struct sk_buff *bundle_skb, *skb;
	struct sk_buff_head tx_save_head;
	struct ath10k_htc_hdr *hdr;
	u8 *bundle_buf;
	int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	/* don't bother bundling below the minimum credit threshold */
	if (ep->tx_credit_flow_enabled &&
	    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
		return 0;

	bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
	bundle_skb = dev_alloc_skb(bundles_left);

	if (!bundle_skb)
		return -ENOMEM;

	bundle_buf = bundle_skb->data;
	skb_queue_head_init(&tx_save_head);

	while (true) {
		skb = skb_dequeue(&ep->tx_req_head);
		if (!skb)
			break;

		credit_pad = 0;
		trans_len = skb->len + sizeof(*hdr);
		credit_remainder = trans_len % ep->tx_credit_size;

		/* each frame occupies a whole number of credits; pad up */
		if (credit_remainder != 0) {
			credit_pad = ep->tx_credit_size - credit_remainder;
			trans_len += credit_pad;
		}

		/* dry-run credit check (consume=false) for the bundle
		 * built so far plus this frame
		 */
		ret = ath10k_htc_consume_credit(ep,
						bundle_buf + trans_len - bundle_skb->data,
						false);
		if (ret) {
			skb_queue_head(&ep->tx_req_head, skb);
			break;
		}

		if (bundles_left < trans_len) {
			/* current bundle buffer is full - flush it */
			bundle_skb->len = bundle_buf - bundle_skb->data;
			ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);

			if (ret) {
				skb_queue_head(&ep->tx_req_head, skb);
				return ret;
			}

			if (skb_queue_len(&ep->tx_req_head) == 0) {
				/* this was the last pending frame - send it
				 * on its own instead of starting a bundle
				 */
				ath10k_htc_send_one_skb(ep, skb);
				return ret;
			}

			if (ep->tx_credit_flow_enabled &&
			    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
				skb_queue_head(&ep->tx_req_head, skb);
				return 0;
			}

			/* start a fresh bundle buffer */
			bundles_left =
				ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
			bundle_skb = dev_alloc_skb(bundles_left);

			if (!bundle_skb) {
				skb_queue_head(&ep->tx_req_head, skb);
				return -ENOMEM;
			}
			bundle_buf = bundle_skb->data;
			skb_queue_head_init(&tx_save_head);
		}

		/* append this frame (with its HTC header) to the bundle */
		skb_push(skb, sizeof(struct ath10k_htc_hdr));
		ath10k_htc_prepare_tx_skb(ep, skb);

		memcpy(bundle_buf, skb->data, skb->len);
		hdr = (struct ath10k_htc_hdr *)bundle_buf;
		hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
		hdr->pad_len = __cpu_to_le16(credit_pad);
		bundle_buf += trans_len;
		bundles_left -= trans_len;
		skb_queue_tail(&tx_save_head, skb);
	}

	/* flush the final (possibly partial) bundle, if non-empty */
	if (bundle_buf != bundle_skb->data) {
		bundle_skb->len = bundle_buf - bundle_skb->data;
		ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
	} else {
		dev_kfree_skb_any(bundle_skb);
	}

	return ret;
}
 800
 801static void ath10k_htc_bundle_tx_work(struct work_struct *work)
 802{
 803	struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
 804	struct ath10k_htc_ep *ep;
 805	struct sk_buff *skb;
 806	int i;
 807
 808	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
 809		ep = &ar->htc.endpoint[i];
 810
 811		if (!ep->bundle_tx)
 812			continue;
 813
 814		ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
 815			   ep->eid, skb_queue_len(&ep->tx_req_head));
 816
 817		if (skb_queue_len(&ep->tx_req_head) >=
 818		    ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
 819			ath10k_htc_send_bundle_skbs(ep);
 820		} else {
 821			skb = skb_dequeue(&ep->tx_req_head);
 822
 823			if (!skb)
 824				continue;
 825			ath10k_htc_send_one_skb(ep, skb);
 826		}
 827	}
 828}
 829
/* Worker that completes bundled TX frames: for the bundling-enabled HTT
 * data endpoint it drains tx_complete_head and runs the regular TX
 * completion path for every saved member frame.
 */
static void ath10k_htc_tx_complete_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
	struct ath10k_htc_ep *ep;
	enum ath10k_htc_ep_id eid;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
		ep = &ar->htc.endpoint[i];
		eid = ep->eid;
		/* only the HTT endpoint uses tx_complete_head */
		if (ep->bundle_tx && eid == ar->htt.eid) {
			ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count%d\n",
				   ep->eid, skb_queue_len(&ep->tx_complete_head));

			while (true) {
				skb = skb_dequeue(&ep->tx_complete_head);
				if (!skb)
					break;
				ath10k_htc_notify_tx_completion(ep, skb);
			}
		}
	}
}
 854
 855int ath10k_htc_send_hl(struct ath10k_htc *htc,
 856		       enum ath10k_htc_ep_id eid,
 857		       struct sk_buff *skb)
 858{
 859	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
 860	struct ath10k *ar = htc->ar;
 861
 862	if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
 863		ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
 864		return -ENOMEM;
 865	}
 866
 867	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
 868		   eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);
 869
 870	if (ep->bundle_tx) {
 871		skb_queue_tail(&ep->tx_req_head, skb);
 872		queue_work(ar->workqueue, &ar->bundle_tx_work);
 873		return 0;
 874	} else {
 875		return ath10k_htc_send(htc, eid, skb);
 876	}
 877}
 878
 879void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
 880{
 881	if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
 882	    !ep->bundle_tx) {
 883		ep->bundle_tx = true;
 884		skb_queue_head_init(&ep->tx_req_head);
 885		skb_queue_head_init(&ep->tx_complete_head);
 886	}
 887}
 888
 889void ath10k_htc_stop_hl(struct ath10k *ar)
 890{
 891	struct ath10k_htc_ep *ep;
 892	int i;
 893
 894	cancel_work_sync(&ar->bundle_tx_work);
 895	cancel_work_sync(&ar->tx_complete_work);
 896
 897	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
 898		ep = &ar->htc.endpoint[i];
 899
 900		if (!ep->bundle_tx)
 901			continue;
 902
 903		ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
 904			   ep->eid, skb_queue_len(&ep->tx_req_head));
 905
 906		skb_queue_purge(&ep->tx_req_head);
 907	}
 908}
 909
/* Wait for the target's HTC READY message on EP0 and record the
 * advertised credit pool and credit size.  Also parses the optional
 * extended ready message (max RX bundle size, alternate data credit
 * size) and initializes the HL bundling work items.
 *
 * Returns 0 on success, -ETIMEDOUT if no response arrives, or -ECOMM on
 * a malformed/unexpected message.
 */
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	int i, status = 0;
	unsigned long time_left;
	struct ath10k_htc_msg *msg;
	u16 message_id;

	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (!time_left) {
		/* Workaround: In some cases the PCI HIF doesn't
		 * receive interrupt for the control response message
		 * even if the buffer was completed. It is suspected
		 * iomap writes unmasking PCI CE irqs aren't propagated
		 * properly in KVM PCI-passthrough sometimes.
		 */
		ath10k_warn(ar, "failed to receive control response completion, polling..\n");

		/* poll every copy engine once, then wait again */
		for (i = 0; i < CE_COUNT; i++)
			ath10k_hif_send_complete_check(htc->ar, i, 1);

		time_left =
		wait_for_completion_timeout(&htc->ctl_resp,
					    ATH10K_HTC_WAIT_TIMEOUT_HZ);

		if (!time_left)
			status = -ETIMEDOUT;
	}

	if (status < 0) {
		ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
		return status;
	}

	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
			   htc->control_resp_len);
		return -ECOMM;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id   = __le16_to_cpu(msg->hdr.message_id);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
		return -ECOMM;
	}

	/* use the firmware-reported credit count only when hw_params says
	 * it can be trusted; otherwise fall back to a single credit
	 */
	if (ar->hw_params.use_fw_tx_credits)
		htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
	else
		htc->total_transmit_credits = 1;

	htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size:%d actual credits:%d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size,
		   msg->ready.credit_count);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		ath10k_err(ar, "Invalid credit size received\n");
		return -ECOMM;
	}

	/* The only way to determine if the ready message is an extended
	 * message is from the size.
	 */
	if (htc->control_resp_len >=
	    sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
		htc->alt_data_credit_size =
			__le16_to_cpu(msg->ready_ext.reserved) &
			ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
		htc->max_msgs_per_htc_bundle =
			min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
			      HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "Extended ready message RX bundle size %d alt size %d\n",
			   htc->max_msgs_per_htc_bundle,
			   htc->alt_data_credit_size);
	}

	INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
	INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);

	return 0;
}
 
 
 
 
 
1000
1001void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
1002				      enum ath10k_htc_ep_id eid,
1003				      bool enable)
1004{
1005	struct ath10k *ar = htc->ar;
1006	struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid];
 
1007
1008	ep->tx_credit_flow_enabled = enable;
1009}
1010
/* Connect an HTC service to the target and set up its host-side endpoint.
 *
 * For the pseudo control service (RSVD_CTRL) no message exchange happens:
 * endpoint 0 is assigned locally and we jump straight to endpoint setup.
 * For every other service a CONNECT_SERVICE message is sent on EP0 and the
 * response is awaited via htc->ctl_resp; the response carries the assigned
 * endpoint id and maximum message size.
 *
 * Returns 0 on success, or a negative errno (-ENOMEM, -ETIMEDOUT, -EPROTO,
 * or a send/pipe-mapping error) on failure.
 */
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	/* dummy keeps resp_msg valid on the RSVD_CTRL shortcut path */
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	unsigned long time_left;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err(ar, "Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	/* arm the completion before sending so the response can't be missed */
	reinit_completion(&htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (!time_left) {
		ath10k_err(ar, "Service connect timeout\n");
		return -ETIMEDOUT;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	/* an endpoint may only be bound to one service at a time */
	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;
	ep->tx_credit_size = htc->target_credit_size;

	/* HTT data may use an alternate (larger) credit size when the
	 * target advertises one
	 */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
	    htc->alt_data_credit_size != 0)
		ep->tx_credit_size = htc->alt_data_credit_size;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id);
	if (status) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
			   ep->service_id);
		return status;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}
1175
1176struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
1177{
1178	struct sk_buff *skb;
1179
1180	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
1181	if (!skb)
1182		return NULL;
1183
1184	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
1185
1186	/* FW/HTC requires 4-byte aligned streams */
1187	if (!IS_ALIGNED((unsigned long)skb->data, 4))
1188		ath10k_warn(ar, "Unaligned HTC tx skb\n");
1189
1190	return skb;
1191}
1192
/* Rx completion for the pktlog service: hand the payload to the
 * tracepoint and free the buffer.
 */
static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
1198
1199static int ath10k_htc_pktlog_connect(struct ath10k *ar)
1200{
1201	struct ath10k_htc_svc_conn_resp conn_resp;
1202	struct ath10k_htc_svc_conn_req conn_req;
1203	int status;
1204
1205	memset(&conn_req, 0, sizeof(conn_req));
1206	memset(&conn_resp, 0, sizeof(conn_resp));
1207
1208	conn_req.ep_ops.ep_tx_complete = NULL;
1209	conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
1210	conn_req.ep_ops.ep_tx_credits = NULL;
1211
1212	/* connect to control service */
1213	conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
1214	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
1215	if (status) {
1216		ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
1217			    status);
1218		return status;
1219	}
1220
1221	return 0;
1222}
1223
1224static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
1225{
1226	u8 ul_pipe_id;
1227	u8 dl_pipe_id;
1228	int status;
1229
1230	status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
1231						&ul_pipe_id,
1232						&dl_pipe_id);
1233	if (status) {
1234		ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
1235			   ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
1236
1237		return false;
1238	}
1239
1240	return true;
1241}
1242
/* Send the SETUP_COMPLETE_EX message to the target, telling it that all
 * host-side HTC setup is done, then connect the pktlog service when the
 * bus supports it. Returns 0 or a negative errno.
 */
int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	if (ar->hif.bus == ATH10K_BUS_SDIO) {
		/* Extra setup params used by SDIO */
		msg->setup_complete_ext.flags =
			__cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
		msg->setup_complete_ext.max_msgs_per_bundled_recv =
			htc->max_msgs_per_htc_bundle;
	}
	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		/* send failed; ownership of the skb stays with us */
		kfree_skb(skb);
		return status;
	}

	if (ath10k_htc_pktlog_svc_supported(ar)) {
		status = ath10k_htc_pktlog_connect(ar);
		if (status) {
			ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
			return status;
		}
	}

	return 0;
}
1286
/* registered target arrival callback from the HIF layer */
/* Initialize HTC state and bind the pseudo control service to EP0.
 * Connecting RSVD_CTRL takes the local shortcut in
 * ath10k_htc_connect_service() (no target message exchange), so
 * init_completion() afterwards is safe. Returns 0 or a negative errno.
 */
int ath10k_htc_init(struct ath10k *ar)
{
	int status;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;

	spin_lock_init(&htc->tx_lock);

	ath10k_htc_reset_endpoint_states(htc);

	htc->ar = ar;

	/* setup our pseudo HTC control endpoint connection */
	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));
	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;

	/* connect fake service */
	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_err(ar, "could not connect to htc service (%d)\n",
			   status);
		return status;
	}

	init_completion(&htc->ctl_resp);

	return 0;
}
v4.6
 
  1/*
  2 * Copyright (c) 2005-2011 Atheros Communications Inc.
  3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
  4 *
  5 * Permission to use, copy, modify, and/or distribute this software for any
  6 * purpose with or without fee is hereby granted, provided that the above
  7 * copyright notice and this permission notice appear in all copies.
  8 *
  9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 16 */
 17
 18#include "core.h"
 19#include "hif.h"
 20#include "debug.h"
 21
 22/********/
 23/* Send */
 24/********/
 25
/* Tx completion for control-endpoint skbs: HTC owns these buffers, so
 * simply free them.
 */
static void ath10k_htc_control_tx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	kfree_skb(skb);
}
 31
/* Allocate and zero-init a control-message skb. The 20-byte reserve
 * predates this code and its exact rationale is unknown (see FIXME);
 * alignment of the resulting data pointer is only warned about, not
 * enforced. Returns NULL on allocation failure.
 */
static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;

	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
	if (!skb)
		return NULL;

	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	/* clear the driver control block so stale eid/paddr can't leak in */
	skb_cb = ATH10K_SKB_CB(skb);
	memset(skb_cb, 0, sizeof(*skb_cb));

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
	return skb;
}
 50
/* Undo what ath10k_htc_send() did to a tx skb: release the DMA mapping
 * recorded in the skb control block and strip the HTC header, restoring
 * the skb to the state the caller originally handed in.
 */
static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
					     struct sk_buff *skb)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);

	dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
 59
/* Deliver a completed tx skb to the endpoint's registered completion
 * handler. The DMA mapping and HTC header are removed first; if no
 * handler was registered the skb is dropped here, otherwise ownership
 * passes to ep_tx_complete.
 */
static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
					    struct sk_buff *skb)
{
	struct ath10k *ar = ep->htc->ar;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
		   ep->eid, skb);

	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	/* skb ownership transfers to the endpoint's completion callback */
	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
 
 78
/* Fill in the HTC header at the front of a tx skb: endpoint id, payload
 * length, flags and a per-endpoint sequence number. The caller must have
 * already pushed room for the header (ath10k_htc_send() does).
 */
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
				      struct sk_buff *skb)
{
	struct ath10k_htc_hdr *hdr;

	hdr = (struct ath10k_htc_hdr *)skb->data;

	hdr->eid = ep->eid;
	/* hdr->len covers the payload only, not the header itself */
	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
	hdr->flags = 0;
	hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;

	/* tx_lock serializes seq_no allocation across concurrent senders */
	spin_lock_bh(&ep->htc->tx_lock);
	hdr->seq_no = ep->seq_no++;
	spin_unlock_bh(&ep->htc->tx_lock);
}
 95
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Transmit an skb on the given HTC endpoint.
 *
 * Pushes the HTC header, consumes tx credits when flow control is
 * enabled for the endpoint, DMA-maps the frame and hands it to the HIF
 * layer. On success the skb is owned by the tx path until the completion
 * handler runs. On failure every step is unwound in reverse order
 * (unmap -> refund credits -> strip header) and the skb is returned to
 * the caller untouched.
 *
 * Returns 0 on success; -ECOMM if the device is wedged, -ENOENT for a
 * bad endpoint id, -EAGAIN when out of credits, -EIO on DMA mapping
 * failure, or the HIF send error.
 */
int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int credits = 0;
	int ret;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	if (ep->tx_credit_flow_enabled) {
		/* credits are consumed per target_credit_size chunk of the
		 * whole frame, header included
		 */
		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
		spin_lock_bh(&htc->tx_lock);
		if (ep->tx_credits < credits) {
			spin_unlock_bh(&htc->tx_lock);
			ret = -EAGAIN;
			goto err_pull;
		}
		ep->tx_credits -= credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d consumed %d credits (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);
	}

	ath10k_htc_prepare_tx_skb(ep, skb);

	/* stash eid/paddr in the cb so the completion path can unmap */
	skb_cb->eid = eid;
	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, skb_cb->paddr);
	if (ret) {
		ret = -EIO;
		goto err_credits;
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	if (ep->tx_credit_flow_enabled) {
		spin_lock_bh(&htc->tx_lock);
		ep->tx_credits += credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d reverted %d credits back (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);

		/* refunded credits may unblock a waiting client */
		if (ep->ep_ops.ep_tx_credits)
			ep->ep_ops.ep_tx_credits(htc->ar);
	}
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}
173
/* HIF-level tx completion entry point: look up the endpoint recorded in
 * the skb control block at send time and forward the skb to the HTC
 * completion logic.
 */
void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_skb_cb *skb_cb;
	struct ath10k_htc_ep *ep;

	if (WARN_ON_ONCE(!skb))
		return;

	skb_cb = ATH10K_SKB_CB(skb);
	ep = &htc->endpoint[skb_cb->eid];

	ath10k_htc_notify_tx_completion(ep, skb);
	/* the skb now belongs to the completion handler */
}
EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
190
191/***********/
192/* Receive */
193/***********/
194
/* Apply a target credit report: replenish tx credits on each endpoint
 * named in the report and invoke the endpoint's credit callback so
 * stalled senders can resume.
 *
 * NOTE: tx_lock is dropped around the ep_tx_credits callback (the
 * callback may itself take the lock or sleep-ish work in softirq
 * context), so the report array must stay valid across the unlock.
 */
static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
				 const struct ath10k_htc_credit_report *report,
				 int len,
				 enum ath10k_htc_ep_id eid)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep;
	int i, n_reports;

	if (len % sizeof(*report))
		ath10k_warn(ar, "Uneven credit report len %d", len);

	n_reports = len / sizeof(*report);

	spin_lock_bh(&htc->tx_lock);
	for (i = 0; i < n_reports; i++, report++) {
		/* stop on the first malformed entry */
		if (report->eid >= ATH10K_HTC_EP_COUNT)
			break;

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
			   report->eid, report->credits, ep->tx_credits);

		if (ep->ep_ops.ep_tx_credits) {
			spin_unlock_bh(&htc->tx_lock);
			ep->ep_ops.ep_tx_credits(htc->ar);
			spin_lock_bh(&htc->tx_lock);
		}
	}
	spin_unlock_bh(&htc->tx_lock);
}
229
/* Parse the trailer records appended to an rx frame.
 *
 * The trailer is a sequence of (record header, payload) pairs. Each
 * record is validated against the remaining buffer length before use;
 * only credit reports are handled, unknown record ids are logged and
 * skipped. On any malformed record the whole trailer is dumped (at
 * debug level) and -EINVAL is returned.
 */
static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
				      u8 *buffer,
				      int length,
				      enum ath10k_htc_ep_id src_eid)
{
	struct ath10k *ar = htc->ar;
	int status = 0;
	struct ath10k_htc_record *record;
	u8 *orig_buffer;
	int orig_length;
	size_t len;

	/* keep the original span for the error dump below */
	orig_buffer = buffer;
	orig_length = length;

	while (length > 0) {
		record = (struct ath10k_htc_record *)buffer;

		if (length < sizeof(record->hdr)) {
			status = -EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			ath10k_warn(ar, "Invalid record length: %d\n",
				    record->hdr.len);
			status = -EINVAL;
			break;
		}

		switch (record->hdr.id) {
		case ATH10K_HTC_RECORD_CREDITS:
			len = sizeof(struct ath10k_htc_credit_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Credit report too long\n");
				status = -EINVAL;
				break;
			}
			ath10k_htc_process_credit_report(htc,
							 record->credit_report,
							 record->hdr.len,
							 src_eid);
			break;
		default:
			ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
				    record->hdr.id, record->hdr.len);
			break;
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	if (status)
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
				orig_buffer, orig_length);

	return status;
}
 
294
/* HIF-level rx completion entry point.
 *
 * Validates the HTC header (endpoint id, lengths), processes and strips
 * any trailing record block, then dispatches: EP0 control messages are
 * handled inline (ready / connect responses complete htc->ctl_resp,
 * suspend-complete calls into htc_ops), all other endpoints get the skb
 * via their ep_rx_complete callback. The skb is freed here on every
 * path except a successful ep_rx_complete dispatch, where ownership
 * transfers to the callback.
 */
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	/* hdr stays valid after skb_pull - the header bytes aren't erased */
	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	ep = &htc->endpoint[eid];

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			goto out;
		}

		/* the trailer occupies the last trailer_len bytes of the
		 * payload
		 */
		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	if (eid == ATH10K_HTC_EP_0) {
		struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

		switch (__le16_to_cpu(msg->hdr.message_id)) {
		case ATH10K_HTC_MSG_READY_ID:
		case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
			/* handle HTC control message */
			if (completion_done(&htc->ctl_resp)) {
				/*
				 * this is a fatal error, target should not be
				 * sending unsolicited messages on the ep 0
				 */
				ath10k_warn(ar, "HTC rx ctrl still processing\n");
				complete(&htc->ctl_resp);
				goto out;
			}

			htc->control_resp_len =
				min_t(int, skb->len,
				      ATH10K_HTC_MAX_CTRL_MSG_LEN);

			memcpy(htc->control_resp_buffer, skb->data,
			       htc->control_resp_len);

			complete(&htc->ctl_resp);
			break;
		case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			htc->htc_ops.target_send_suspend_complete(ar);
			break;
		default:
			ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
			break;
		}
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
417
/* Rx handler for the pseudo control endpoint. This is unexpected: FW is
 * not supposed to send regular rx on this endpoint (control responses
 * are intercepted in ath10k_htc_rx_completion_handler), so just warn
 * and drop.
 */
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_warn(ar, "unexpected htc rx\n");
	kfree_skb(skb);
}
426
427/***************/
428/* Init/Deinit */
429/***************/
430
431static const char *htc_service_name(enum ath10k_htc_svc_id id)
432{
433	switch (id) {
434	case ATH10K_HTC_SVC_ID_RESERVED:
435		return "Reserved";
436	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
437		return "Control";
438	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
439		return "WMI";
440	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
441		return "DATA BE";
442	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
443		return "DATA BK";
444	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
445		return "DATA VI";
446	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
447		return "DATA VO";
448	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
449		return "NMI Control";
450	case ATH10K_HTC_SVC_ID_NMI_DATA:
451		return "NMI Data";
452	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
453		return "HTT Data";
 
 
 
 
454	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
455		return "RAW";
 
 
456	}
457
458	return "Unknown";
459}
460
461static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
462{
463	struct ath10k_htc_ep *ep;
464	int i;
465
466	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
467		ep = &htc->endpoint[i];
468		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
469		ep->max_ep_message_len = 0;
470		ep->max_tx_queue_depth = 0;
471		ep->eid = i;
472		ep->htc = htc;
473		ep->tx_credit_flow_enabled = true;
474	}
475}
476
/* Populate the per-service tx credit allocation table. Entry 0 is left
 * untouched (the entry++ skips it); the WMI control service is given
 * the entire target credit pool.
 */
static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
{
	struct ath10k_htc_svc_tx_credits *entry;

	entry = &htc->service_tx_alloc[0];

	/*
	 * for PCIE allocate all credits/HTC buffers to WMI.
	 * no buffers are used/required for data. data always
	 * remains on host.
	 */
	entry++;
	entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
	entry->credit_allocation = htc->total_transmit_credits;
}
492
493static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
494					   u16 service_id)
495{
496	u8 allocation = 0;
 
 
 
497	int i;
498
499	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
500		if (htc->service_tx_alloc[i].service_id == service_id)
501			allocation =
502			    htc->service_tx_alloc[i].credit_allocation;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
503	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
504
505	return allocation;
 
 
 
 
 
 
 
506}
507
/* Wait for the target's HTC READY message, record the advertised credit
 * pool, then connect the pseudo control service.
 *
 * If the first wait times out, the copy engines are polled manually and
 * the wait retried once - a workaround for missed CE interrupts seen
 * with KVM PCI passthrough (see inline comment).
 *
 * Returns 0 on success; -ETIMEDOUT if no READY arrives, -ECOMM on a
 * malformed or zero-credit READY message, or the control-service
 * connect error.
 */
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	int i, status = 0;
	unsigned long time_left;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;
	struct ath10k_htc_msg *msg;
	u16 message_id;
	u16 credit_count;
	u16 credit_size;

	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (!time_left) {
		/* Workaround: In some cases the PCI HIF doesn't
		 * receive interrupt for the control response message
		 * even if the buffer was completed. It is suspected
		 * iomap writes unmasking PCI CE irqs aren't propagated
		 * properly in KVM PCI-passthrough sometimes.
		 */
		ath10k_warn(ar, "failed to receive control response completion, polling..\n");

		for (i = 0; i < CE_COUNT; i++)
			ath10k_hif_send_complete_check(htc->ar, i, 1);

		time_left =
		wait_for_completion_timeout(&htc->ctl_resp,
					    ATH10K_HTC_WAIT_TIMEOUT_HZ);

		if (!time_left)
			status = -ETIMEDOUT;
	}

	if (status < 0) {
		ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
		return status;
	}

	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
			   htc->control_resp_len);
		return -ECOMM;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id   = __le16_to_cpu(msg->hdr.message_id);
	credit_count = __le16_to_cpu(msg->ready.credit_count);
	credit_size  = __le16_to_cpu(msg->ready.credit_size);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
		return -ECOMM;
	}

	htc->total_transmit_credits = credit_count;
	htc->target_credit_size = credit_size;

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size:%d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		ath10k_err(ar, "Invalid credit size received\n");
		return -ECOMM;
	}

	ath10k_htc_setup_target_buffer_assignments(htc);

	/* setup our pseudo HTC control endpoint connection */
	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));
	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;

	/* connect fake service */
	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_err(ar, "could not connect to htc service (%d)\n",
			   status);
		return status;
	}

	return 0;
}
597
/* Connect an HTC service to the target and set up its host-side endpoint.
 *
 * For the pseudo control service (RSVD_CTRL) no message exchange happens:
 * endpoint 0 is assigned locally and we jump straight to endpoint setup.
 * For other services a CONNECT_SERVICE message is sent on EP0 and the
 * response awaited via htc->ctl_resp; the response carries the assigned
 * endpoint id and maximum message size.
 *
 * Returns 0 on success, or a negative errno (-ENOMEM, -ETIMEDOUT,
 * -EPROTO, or a send/pipe-mapping error) on failure.
 */
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	/* dummy keeps resp_msg valid on the RSVD_CTRL shortcut path */
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	unsigned long time_left;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err(ar, "Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	/* arm the completion before sending so the response can't be missed */
	reinit_completion(&htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (!time_left) {
		ath10k_err(ar, "Service connect timeout\n");
		return -ETIMEDOUT;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	/* an endpoint may only be bound to one service at a time */
	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;
	ep->tx_credit_size = htc->target_credit_size;
	/* round up: a max-size message may need a partial extra credit */
	ep->tx_credits_per_max_message = ep->max_ep_message_len /
					 htc->target_credit_size;

	if (ep->max_ep_message_len % htc->target_credit_size)
		ep->tx_credits_per_max_message++;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id);
	if (status)
		return status;

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}
760
/* Allocate a tx skb with headroom reserved for the HTC header that
 * ath10k_htc_send() will later push. Returns NULL on allocation failure.
 */
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
	if (!skb)
		return NULL;

	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));

	/* FW/HTC requires 4-byte aligned streams */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "Unaligned HTC tx skb\n");

	return skb;
}
777
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Send the SETUP_COMPLETE_EX message to the target, telling it that all
 * host-side HTC setup is done. Returns 0 or a negative errno.
 */
int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		/* send failed; ownership of the skb stays with us */
		kfree_skb(skb);
		return status;
	}

	return 0;
}
806
/* registered target arrival callback from the HIF layer */
/* Initialize HTC bookkeeping: reset endpoint table, bind EP0 to the
 * HIF default pipe pair for control-message exchange, and prepare the
 * ctl_resp completion. Always returns 0.
 */
int ath10k_htc_init(struct ath10k *ar)
{
	struct ath10k_htc_ep *ep = NULL;
	struct ath10k_htc *htc = &ar->htc;

	spin_lock_init(&htc->tx_lock);

	ath10k_htc_reset_endpoint_states(htc);

	htc->ar = ar;

	/* Get HIF default pipe for HTC message exchange */
	ep = &htc->endpoint[ATH10K_HTC_EP_0];

	ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);

	init_completion(&htc->ctl_resp);

	return 0;
}