Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: ISC
   2/*
   3 * Copyright (c) 2005-2011 Atheros Communications Inc.
   4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
   5 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
   6 */
   7
   8#include "core.h"
   9#include "hif.h"
  10#include "debug.h"
  11
  12/********/
  13/* Send */
  14/********/
  15
/* TX-complete callback for the ep0 control service: the control skb has
 * no further use once sent, so just free it.
 */
static void ath10k_htc_control_tx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	kfree_skb(skb);
}
  21
  22static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
  23{
  24	struct sk_buff *skb;
  25	struct ath10k_skb_cb *skb_cb;
  26
  27	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
  28	if (!skb)
  29		return NULL;
  30
  31	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
  32	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
  33
  34	skb_cb = ATH10K_SKB_CB(skb);
  35	memset(skb_cb, 0, sizeof(*skb_cb));
  36
  37	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
  38	return skb;
  39}
  40
/* Undo the TX-side preparation done by ath10k_htc_send(): release the DMA
 * mapping (non-HL buses only) and strip the HTC header that was pushed in
 * front of the payload, restoring the skb to its pre-send shape.
 */
static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
					     struct sk_buff *skb)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);

	/* HL (ATH10K_DEV_TYPE_HL) buses never DMA-mapped the skb on send */
	if (htc->ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
  50
/* Deliver a TX completion for @skb on endpoint @ep.
 *
 * Unmaps/unwraps the skb and then either frees it (no handler registered,
 * or the frame's header carries ATH10K_HTC_FLAG_SEND_BUNDLE) or passes
 * ownership to ep->ep_ops.ep_tx_complete.
 */
void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
				     struct sk_buff *skb)
{
	struct ath10k *ar = ep->htc->ar;
	struct ath10k_htc_hdr *hdr;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
		   ep->eid, skb);

	/* A corner case where the copy completion is reaching to host but still
	 * copy engine is processing it due to which host unmaps corresponding
	 * memory and causes SMMU fault, hence as workaround adding delay
	 * the unmapping memory to avoid SMMU faults.
	 */
	if (ar->hw_params.delay_unmap_buffer &&
	    ep->ul_pipe_id == 3)
		mdelay(2);

	/* Grab the header pointer before restore_tx_skb() pulls it off the
	 * skb; the bytes themselves stay in place, so reading hdr->flags
	 * after the pull is still valid.
	 */
	hdr = (struct ath10k_htc_hdr *)skb->data;
	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	/* Bundle buffers (flagged SEND_BUNDLE by the bundle TX path) are
	 * aggregates; the original frames complete separately, so just free.
	 */
	if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
		dev_kfree_skb_any(skb);
		return;
	}

	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
  86
/* Fill in the HTC header at the front of @skb. The caller must already
 * have skb_push()ed sizeof(struct ath10k_htc_hdr) bytes of headroom.
 */
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
				      struct sk_buff *skb)
{
	struct ath10k_htc_hdr *hdr;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	memset(hdr, 0, sizeof(struct ath10k_htc_hdr));

	hdr->eid = ep->eid;
	/* hdr->len covers the payload only, not the header itself */
	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
	hdr->flags = 0;
	/* credit updates are only requested on flow-controlled endpoints
	 * that are not in bundle-TX mode
	 */
	if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
		hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;

	/* seq_no is shared between TX paths; serialize the increment */
	spin_lock_bh(&ep->htc->tx_lock);
	hdr->seq_no = ep->seq_no++;
	spin_unlock_bh(&ep->htc->tx_lock);
}
 105
 106static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
 107				     unsigned int len,
 108				     bool consume)
 109{
 110	struct ath10k_htc *htc = ep->htc;
 111	struct ath10k *ar = htc->ar;
 112	enum ath10k_htc_ep_id eid = ep->eid;
 113	int credits, ret = 0;
 114
 115	if (!ep->tx_credit_flow_enabled)
 116		return 0;
 117
 118	credits = DIV_ROUND_UP(len, ep->tx_credit_size);
 119	spin_lock_bh(&htc->tx_lock);
 120
 121	if (ep->tx_credits < credits) {
 122		ath10k_dbg(ar, ATH10K_DBG_HTC,
 123			   "htc insufficient credits ep %d required %d available %d consume %d\n",
 124			   eid, credits, ep->tx_credits, consume);
 125		ret = -EAGAIN;
 126		goto unlock;
 127	}
 128
 129	if (consume) {
 130		ep->tx_credits -= credits;
 131		ath10k_dbg(ar, ATH10K_DBG_HTC,
 132			   "htc ep %d consumed %d credits total %d\n",
 133			   eid, credits, ep->tx_credits);
 134	}
 135
 136unlock:
 137	spin_unlock_bh(&htc->tx_lock);
 138	return ret;
 139}
 140
/* Return the credits that were consumed for a @len-byte transfer back to
 * @ep (e.g. after a send failure) and notify the endpoint's ep_tx_credits
 * callback so any stalled TX work can resume. No-op when credit flow
 * control is disabled.
 */
static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
{
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	enum ath10k_htc_ep_id eid = ep->eid;
	int credits;

	if (!ep->tx_credit_flow_enabled)
		return;

	/* must mirror the rounding used by ath10k_htc_consume_credit() */
	credits = DIV_ROUND_UP(len, ep->tx_credit_size);
	spin_lock_bh(&htc->tx_lock);
	ep->tx_credits += credits;
	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "htc ep %d reverted %d credits back total %d\n",
		   eid, credits, ep->tx_credits);
	spin_unlock_bh(&htc->tx_lock);

	/* callback runs unlocked; it may trigger further TX */
	if (ep->ep_ops.ep_tx_credits)
		ep->ep_ops.ep_tx_credits(htc->ar);
}
 162
/* Transmit @skb on endpoint @eid.
 *
 * Pushes and fills an HTC header, consumes TX credits (when flow control
 * is enabled on the endpoint), DMA-maps the buffer on non-HL buses and
 * hands it to the HIF layer. On success the skb belongs to the
 * TX-completion path; on failure everything is unwound and the caller
 * keeps ownership of the (unmodified) skb.
 *
 * Returns 0 on success, or a negative errno: -ECOMM when the device is
 * wedged, -ENOENT for an invalid eid, -EAGAIN when out of credits, -EIO
 * on DMA-mapping failure, or the HIF send error.
 */
int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int ret;
	unsigned int skb_len;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	/* credits are accounted on the full frame including the HTC header;
	 * remember the length so the release on error matches the consume
	 */
	skb_len = skb->len;
	ret = ath10k_htc_consume_credit(ep, skb_len, true);
	if (ret)
		goto err_pull;

	ath10k_htc_prepare_tx_skb(ep, skb);

	skb_cb->eid = eid;
	/* HL buses take the data by copy; no DMA mapping needed */
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
		skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
					       DMA_TO_DEVICE);
		ret = dma_mapping_error(dev, skb_cb->paddr);
		if (ret) {
			ret = -EIO;
			goto err_credits;
		}
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

	/* unwind in reverse order of the setup above */
err_unmap:
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	ath10k_htc_release_credit(ep, skb_len);
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}
 224
 225void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
 226{
 227	struct ath10k_htc *htc = &ar->htc;
 228	struct ath10k_skb_cb *skb_cb;
 229	struct ath10k_htc_ep *ep;
 230
 231	if (WARN_ON_ONCE(!skb))
 232		return;
 233
 234	skb_cb = ATH10K_SKB_CB(skb);
 235	ep = &htc->endpoint[skb_cb->eid];
 236
 237	ath10k_htc_notify_tx_completion(ep, skb);
 238	/* the skb now belongs to the completion handler */
 239}
 240EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
 241
 242/***********/
 243/* Receive */
 244/***********/
 245
/* Apply a credit-report record from an RX trailer: add the reported
 * credits to each listed endpoint and invoke its ep_tx_credits callback.
 * Parsing stops at the first entry with an out-of-range eid.
 */
static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
				 const struct ath10k_htc_credit_report *report,
				 int len,
				 enum ath10k_htc_ep_id eid)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep;
	int i, n_reports;

	/* trailing partial entry is ignored (integer division below) */
	if (len % sizeof(*report))
		ath10k_warn(ar, "Uneven credit report len %d", len);

	n_reports = len / sizeof(*report);

	spin_lock_bh(&htc->tx_lock);
	for (i = 0; i < n_reports; i++, report++) {
		if (report->eid >= ATH10K_HTC_EP_COUNT)
			break;

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
			   report->eid, report->credits, ep->tx_credits);

		/* the callback may transmit and re-take tx_lock, so drop it
		 * around the call; credit totals can change in the gap
		 */
		if (ep->ep_ops.ep_tx_credits) {
			spin_unlock_bh(&htc->tx_lock);
			ep->ep_ops.ep_tx_credits(htc->ar);
			spin_lock_bh(&htc->tx_lock);
		}
	}
	spin_unlock_bh(&htc->tx_lock);
}
 280
 281static int
 282ath10k_htc_process_lookahead(struct ath10k_htc *htc,
 283			     const struct ath10k_htc_lookahead_report *report,
 284			     int len,
 285			     enum ath10k_htc_ep_id eid,
 286			     void *next_lookaheads,
 287			     int *next_lookaheads_len)
 288{
 289	struct ath10k *ar = htc->ar;
 290
 291	/* Invalid lookahead flags are actually transmitted by
 292	 * the target in the HTC control message.
 293	 * Since this will happen at every boot we silently ignore
 294	 * the lookahead in this case
 295	 */
 296	if (report->pre_valid != ((~report->post_valid) & 0xFF))
 297		return 0;
 298
 299	if (next_lookaheads && next_lookaheads_len) {
 300		ath10k_dbg(ar, ATH10K_DBG_HTC,
 301			   "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
 302			   report->pre_valid, report->post_valid);
 303
 304		/* look ahead bytes are valid, copy them over */
 305		memcpy((u8 *)next_lookaheads, report->lookahead, 4);
 306
 307		*next_lookaheads_len = 1;
 308	}
 309
 310	return 0;
 311}
 312
 313static int
 314ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
 315				    const struct ath10k_htc_lookahead_bundle *report,
 316				    int len,
 317				    enum ath10k_htc_ep_id eid,
 318				    void *next_lookaheads,
 319				    int *next_lookaheads_len)
 320{
 321	struct ath10k *ar = htc->ar;
 322	int bundle_cnt = len / sizeof(*report);
 323
 324	if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
 325		ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
 326			    bundle_cnt);
 327		return -EINVAL;
 328	}
 329
 330	if (next_lookaheads && next_lookaheads_len) {
 331		int i;
 332
 333		for (i = 0; i < bundle_cnt; i++) {
 334			memcpy(((u8 *)next_lookaheads) + 4 * i,
 335			       report->lookahead, 4);
 336			report++;
 337		}
 338
 339		*next_lookaheads_len = bundle_cnt;
 340	}
 341
 342	return 0;
 343}
 344
 345int ath10k_htc_process_trailer(struct ath10k_htc *htc,
 346			       u8 *buffer,
 347			       int length,
 348			       enum ath10k_htc_ep_id src_eid,
 349			       void *next_lookaheads,
 350			       int *next_lookaheads_len)
 351{
 352	struct ath10k_htc_lookahead_bundle *bundle;
 353	struct ath10k *ar = htc->ar;
 354	int status = 0;
 355	struct ath10k_htc_record *record;
 356	u8 *orig_buffer;
 357	int orig_length;
 358	size_t len;
 359
 360	orig_buffer = buffer;
 361	orig_length = length;
 362
 363	while (length > 0) {
 364		record = (struct ath10k_htc_record *)buffer;
 365
 366		if (length < sizeof(record->hdr)) {
 367			status = -EINVAL;
 368			break;
 369		}
 370
 371		if (record->hdr.len > length) {
 372			/* no room left in buffer for record */
 373			ath10k_warn(ar, "Invalid record length: %d\n",
 374				    record->hdr.len);
 375			status = -EINVAL;
 376			break;
 377		}
 378
 379		switch (record->hdr.id) {
 380		case ATH10K_HTC_RECORD_CREDITS:
 381			len = sizeof(struct ath10k_htc_credit_report);
 382			if (record->hdr.len < len) {
 383				ath10k_warn(ar, "Credit report too long\n");
 384				status = -EINVAL;
 385				break;
 386			}
 387			ath10k_htc_process_credit_report(htc,
 388							 record->credit_report,
 389							 record->hdr.len,
 390							 src_eid);
 391			break;
 392		case ATH10K_HTC_RECORD_LOOKAHEAD:
 393			len = sizeof(struct ath10k_htc_lookahead_report);
 394			if (record->hdr.len < len) {
 395				ath10k_warn(ar, "Lookahead report too long\n");
 396				status = -EINVAL;
 397				break;
 398			}
 399			status = ath10k_htc_process_lookahead(htc,
 400							      record->lookahead_report,
 401							      record->hdr.len,
 402							      src_eid,
 403							      next_lookaheads,
 404							      next_lookaheads_len);
 405			break;
 406		case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
 407			bundle = record->lookahead_bundle;
 408			status = ath10k_htc_process_lookahead_bundle(htc,
 409								     bundle,
 410								     record->hdr.len,
 411								     src_eid,
 412								     next_lookaheads,
 413								     next_lookaheads_len);
 414			break;
 415		default:
 416			ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
 417				    record->hdr.id, record->hdr.len);
 418			break;
 419		}
 420
 421		if (status)
 422			break;
 423
 424		/* multiple records may be present in a trailer */
 425		buffer += sizeof(record->hdr) + record->hdr.len;
 426		length -= sizeof(record->hdr) + record->hdr.len;
 427	}
 428
 429	if (status)
 430		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
 431				orig_buffer, orig_length);
 432
 433	return status;
 434}
 435EXPORT_SYMBOL(ath10k_htc_process_trailer);
 436
/* Process one received HTC frame: validate the header, strip and parse
 * any trailer records, then hand the payload to the endpoint's RX
 * handler. The skb is freed here unless ownership passes to
 * ep->ep_ops.ep_rx_complete.
 */
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	/* hdr stays valid after the pull: skb_pull() only advances
	 * skb->data, the header bytes remain in the buffer
	 */
	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	ep = &htc->endpoint[eid];
	if (ep->service_id == ATH10K_HTC_SVC_ID_UNUSED) {
		ath10k_warn(ar, "htc rx endpoint %d is not connected\n", eid);
		goto out;
	}

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			goto out;
		}

		/* the trailer occupies the last trailer_len bytes of the
		 * payload
		 */
		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid,
						    NULL, NULL);
		if (status)
			goto out;

		/* drop the trailer so the handler only sees the payload */
		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	/* kfree_skb(NULL) is a no-op, so this frees only un-delivered skbs */
	kfree_skb(skb);
}
EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
 528
/* RX handler for HTC endpoint 0 control messages.
 *
 * READY and CONNECT_SERVICE responses are copied into
 * htc->control_resp_buffer and signalled via htc->ctl_resp so the waiters
 * in ath10k_htc_wait_target()/ath10k_htc_connect_service() can proceed;
 * suspend-complete events are forwarded to the registered htc_ops
 * callback. The skb is always consumed.
 */
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

	switch (__le16_to_cpu(msg->hdr.message_id)) {
	case ATH10K_HTC_MSG_READY_ID:
	case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
		/* handle HTC control message */
		if (completion_done(&htc->ctl_resp)) {
			/* this is a fatal error, target should not be
			 * sending unsolicited messages on the ep 0
			 */
			ath10k_warn(ar, "HTC rx ctrl still processing\n");
			complete(&htc->ctl_resp);
			goto out;
		}

		/* truncate to the response buffer's capacity */
		htc->control_resp_len =
			min_t(int, skb->len,
			      ATH10K_HTC_MAX_CTRL_MSG_LEN);

		memcpy(htc->control_resp_buffer, skb->data,
		       htc->control_resp_len);

		complete(&htc->ctl_resp);
		break;
	case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
		htc->htc_ops.target_send_suspend_complete(ar);
		break;
	default:
		ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
		break;
	}

out:
	kfree_skb(skb);
}
 568
 569/***************/
 570/* Init/Deinit */
 571/***************/
 572
 573static const char *htc_service_name(enum ath10k_htc_svc_id id)
 574{
 575	switch (id) {
 576	case ATH10K_HTC_SVC_ID_RESERVED:
 577		return "Reserved";
 578	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
 579		return "Control";
 580	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
 581		return "WMI";
 582	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
 583		return "DATA BE";
 584	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
 585		return "DATA BK";
 586	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
 587		return "DATA VI";
 588	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
 589		return "DATA VO";
 590	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
 591		return "NMI Control";
 592	case ATH10K_HTC_SVC_ID_NMI_DATA:
 593		return "NMI Data";
 594	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
 595		return "HTT Data";
 596	case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
 597		return "HTT Data";
 598	case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
 599		return "HTT Data";
 600	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
 601		return "RAW";
 602	case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
 603		return "PKTLOG";
 604	}
 605
 606	return "Unknown";
 607}
 608
 609static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
 610{
 611	struct ath10k_htc_ep *ep;
 612	int i;
 613
 614	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
 615		ep = &htc->endpoint[i];
 616		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
 617		ep->max_ep_message_len = 0;
 618		ep->max_tx_queue_depth = 0;
 619		ep->eid = i;
 620		ep->htc = htc;
 621		ep->tx_credit_flow_enabled = true;
 622	}
 623}
 624
 625static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
 626					   u16 service_id)
 627{
 628	u8 allocation = 0;
 629
 630	/* The WMI control service is the only service with flow control.
 631	 * Let it have all transmit credits.
 632	 */
 633	if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
 634		allocation = htc->total_transmit_credits;
 635
 636	return allocation;
 637}
 638
/* Transmit an assembled HL TX bundle.
 *
 * Consumes credits for the whole bundle and hands @bundle_skb to the HIF
 * layer. The saved original skbs in @tx_save_head are then either
 * requeued for retransmission (on failure, with their HTC headers
 * stripped) or moved to the endpoint's tx_complete_head for the
 * TX-complete worker (on success). @bundle_skb is always consumed — sent
 * or freed.
 */
static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
				  struct sk_buff *bundle_skb,
				  struct sk_buff_head *tx_save_head)
{
	struct ath10k_hif_sg_item sg_item;
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int ret, cn = 0;
	unsigned int skb_len;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
	skb_len = bundle_skb->len;
	ret = ath10k_htc_consume_credit(ep, skb_len, true);

	if (!ret) {
		/* NOTE(review): sg_item.paddr is intentionally left unset —
		 * presumably the HL HIF path copies via vaddr; confirm.
		 */
		sg_item.transfer_id = ep->eid;
		sg_item.transfer_context = bundle_skb;
		sg_item.vaddr = bundle_skb->data;
		sg_item.len = bundle_skb->len;

		ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
		if (ret)
			ath10k_htc_release_credit(ep, skb_len);
	}

	if (ret)
		dev_kfree_skb_any(bundle_skb);

	/* redistribute the saved originals based on the send outcome */
	for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
		if (ret) {
			skb_pull(skb, sizeof(struct ath10k_htc_hdr));
			skb_queue_head(&ep->tx_req_head, skb);
		} else {
			skb_queue_tail(&ep->tx_complete_head, skb);
		}
	}

	if (!ret)
		queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "bundle tx status %d eid %d req count %d count %d len %d\n",
		   ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
	return ret;
}
 685
/* Send a single HL skb via the regular (non-bundled) path; on failure the
 * skb is put back at the head of the endpoint's pending-TX queue.
 */
static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
{
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	int ret;

	ret = ath10k_htc_send(htc, ep->eid, skb);

	if (ret)
		skb_queue_head(&ep->tx_req_head, skb);

	/* NOTE(review): on success, ownership of @skb has already passed to
	 * the TX-completion path; the skb->len read below assumes the
	 * completion has not freed it yet — TODO confirm.
	 */
	ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
		   ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
}
 700
/* Assemble queued HL frames from @ep->tx_req_head into large bundle
 * buffers and transmit them via ath10k_htc_send_bundle().
 *
 * Each frame is padded up to a whole number of credits and copied into
 * the bundle buffer with ATH10K_HTC_FLAG_SEND_BUNDLE set in the copied
 * header; the original skb is saved on a side queue so completions can
 * be delivered later. When a frame no longer fits, the current bundle is
 * flushed and a new buffer allocated.
 *
 * Returns 0 on success (including early stop for lack of credits), or a
 * negative errno on allocation/send failure.
 */
static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
{
	struct ath10k_htc *htc = ep->htc;
	struct sk_buff *bundle_skb, *skb;
	struct sk_buff_head tx_save_head;
	struct ath10k_htc_hdr *hdr;
	u8 *bundle_buf;
	int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	/* don't start bundling below the minimum credit threshold */
	if (ep->tx_credit_flow_enabled &&
	    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
		return 0;

	/* bundles_left tracks the remaining byte capacity of the current
	 * bundle buffer
	 */
	bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
	bundle_skb = dev_alloc_skb(bundles_left);

	if (!bundle_skb)
		return -ENOMEM;

	bundle_buf = bundle_skb->data;
	skb_queue_head_init(&tx_save_head);

	while (true) {
		skb = skb_dequeue(&ep->tx_req_head);
		if (!skb)
			break;

		credit_pad = 0;
		trans_len = skb->len + sizeof(*hdr);
		credit_remainder = trans_len % ep->tx_credit_size;

		/* pad each frame up to a whole number of credits */
		if (credit_remainder != 0) {
			credit_pad = ep->tx_credit_size - credit_remainder;
			trans_len += credit_pad;
		}

		/* Check (without consuming) that credits cover everything
		 * bundled so far plus this frame; the actual consumption
		 * happens in ath10k_htc_send_bundle().
		 */
		ret = ath10k_htc_consume_credit(ep,
						bundle_buf + trans_len - bundle_skb->data,
						false);
		if (ret) {
			skb_queue_head(&ep->tx_req_head, skb);
			break;
		}

		if (bundles_left < trans_len) {
			/* current buffer can't take this frame: flush it */
			bundle_skb->len = bundle_buf - bundle_skb->data;
			ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);

			if (ret) {
				skb_queue_head(&ep->tx_req_head, skb);
				return ret;
			}

			/* last pending frame — send it unbundled */
			if (skb_queue_len(&ep->tx_req_head) == 0) {
				ath10k_htc_send_one_skb(ep, skb);
				return ret;
			}

			if (ep->tx_credit_flow_enabled &&
			    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
				skb_queue_head(&ep->tx_req_head, skb);
				return 0;
			}

			bundles_left =
				ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
			bundle_skb = dev_alloc_skb(bundles_left);

			if (!bundle_skb) {
				skb_queue_head(&ep->tx_req_head, skb);
				return -ENOMEM;
			}
			bundle_buf = bundle_skb->data;
			skb_queue_head_init(&tx_save_head);
		}

		/* copy header+payload into the bundle; flag the *copy* as
		 * bundled and record the credit padding in pad_len, then
		 * park the original for later completion
		 */
		skb_push(skb, sizeof(struct ath10k_htc_hdr));
		ath10k_htc_prepare_tx_skb(ep, skb);

		memcpy(bundle_buf, skb->data, skb->len);
		hdr = (struct ath10k_htc_hdr *)bundle_buf;
		hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
		hdr->pad_len = __cpu_to_le16(credit_pad);
		bundle_buf += trans_len;
		bundles_left -= trans_len;
		skb_queue_tail(&tx_save_head, skb);
	}

	/* flush whatever remains in the final (possibly partial) bundle */
	if (bundle_buf != bundle_skb->data) {
		bundle_skb->len = bundle_buf - bundle_skb->data;
		ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
	} else {
		dev_kfree_skb_any(bundle_skb);
	}

	return ret;
}
 801
/* Worker: drain the pending HL TX queue of every bundle-enabled endpoint.
 * Endpoints with enough queued frames are sent as bundles; otherwise one
 * frame is dequeued and sent via the regular path.
 */
static void ath10k_htc_bundle_tx_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
		ep = &ar->htc.endpoint[i];

		if (!ep->bundle_tx)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
			   ep->eid, skb_queue_len(&ep->tx_req_head));

		if (skb_queue_len(&ep->tx_req_head) >=
		    ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
			ath10k_htc_send_bundle_skbs(ep);
		} else {
			skb = skb_dequeue(&ep->tx_req_head);

			if (!skb)
				continue;
			ath10k_htc_send_one_skb(ep, skb);
		}
	}
}
 830
/* Worker: deliver deferred TX completions for bundled frames that
 * ath10k_htc_send_bundle() parked on tx_complete_head. Only the HTT
 * endpoint (eid matching ar->htt.eid) is processed.
 */
static void ath10k_htc_tx_complete_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
	struct ath10k_htc_ep *ep;
	enum ath10k_htc_ep_id eid;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
		ep = &ar->htc.endpoint[i];
		eid = ep->eid;
		if (ep->bundle_tx && eid == ar->htt.eid) {
			ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count%d\n",
				   ep->eid, skb_queue_len(&ep->tx_complete_head));

			/* drain the queue; each skb is consumed by the
			 * notify call
			 */
			while (true) {
				skb = skb_dequeue(&ep->tx_complete_head);
				if (!skb)
					break;
				ath10k_htc_notify_tx_completion(ep, skb);
			}
		}
	}
}
 855
/* HL (high-latency bus) transmit entry point.
 *
 * Frames on bundle-enabled endpoints are queued and picked up
 * asynchronously by the bundle TX worker; everything else goes straight
 * through ath10k_htc_send(). Frames that would exceed one credit are
 * rejected with -ENOMEM.
 *
 * Returns 0 when sent or queued, a negative errno otherwise.
 */
int ath10k_htc_send_hl(struct ath10k_htc *htc,
		       enum ath10k_htc_ep_id eid,
		       struct sk_buff *skb)
{
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k *ar = htc->ar;

	if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
		ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
		   eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);

	if (ep->bundle_tx) {
		skb_queue_tail(&ep->tx_req_head, skb);
		queue_work(ar->workqueue, &ar->bundle_tx_work);
		return 0;
	} else {
		return ath10k_htc_send(htc, eid, skb);
	}
}
 879
 880void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
 881{
 882	if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
 883	    !ep->bundle_tx) {
 884		ep->bundle_tx = true;
 885		skb_queue_head_init(&ep->tx_req_head);
 886		skb_queue_head_init(&ep->tx_complete_head);
 887	}
 888}
 889
/* Stop HL TX processing: cancel the bundle and TX-complete workers and
 * drop all frames still pending on bundle-enabled endpoints.
 *
 * NOTE(review): only tx_req_head is purged here — presumably
 * tx_complete_head was drained by the tx_complete worker before it was
 * canceled; verify.
 */
void ath10k_htc_stop_hl(struct ath10k *ar)
{
	struct ath10k_htc_ep *ep;
	int i;

	cancel_work_sync(&ar->bundle_tx_work);
	cancel_work_sync(&ar->tx_complete_work);

	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
		ep = &ar->htc.endpoint[i];

		if (!ep->bundle_tx)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
			   ep->eid, skb_queue_len(&ep->tx_req_head));

		skb_queue_purge(&ep->tx_req_head);
	}
}
 910
/* Wait for the target's HTC READY message on endpoint 0 and consume its
 * parameters (credit count/size and, when present, the extended-ready
 * bundle size and alternate data credit size). Also initializes the HL
 * bundle TX work items.
 *
 * Returns 0 on success, -ETIMEDOUT if no response arrives even after the
 * polling workaround, or -ECOMM on a malformed/unexpected response.
 */
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	int i, status = 0;
	unsigned long time_left;
	struct ath10k_htc_msg *msg;
	u16 message_id;

	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (!time_left) {
		/* Workaround: In some cases the PCI HIF doesn't
		 * receive interrupt for the control response message
		 * even if the buffer was completed. It is suspected
		 * iomap writes unmasking PCI CE irqs aren't propagated
		 * properly in KVM PCI-passthrough sometimes.
		 */
		ath10k_warn(ar, "failed to receive control response completion, polling..\n");

		for (i = 0; i < CE_COUNT; i++)
			ath10k_hif_send_complete_check(htc->ar, i, 1);

		time_left =
		wait_for_completion_timeout(&htc->ctl_resp,
					    ATH10K_HTC_WAIT_TIMEOUT_HZ);

		if (!time_left)
			status = -ETIMEDOUT;
	}

	if (status < 0) {
		ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
		return status;
	}

	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
			   htc->control_resp_len);
		return -ECOMM;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id   = __le16_to_cpu(msg->hdr.message_id);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
		return -ECOMM;
	}

	/* only trust the firmware-reported credit count when hw_params says
	 * it is usable; otherwise fall back to a single credit
	 */
	if (ar->hw_params.use_fw_tx_credits)
		htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
	else
		htc->total_transmit_credits = 1;

	htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size:%d actual credits:%d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size,
		   msg->ready.credit_count);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		ath10k_err(ar, "Invalid credit size received\n");
		return -ECOMM;
	}

	/* The only way to determine if the ready message is an extended
	 * message is from the size.
	 */
	if (htc->control_resp_len >=
	    sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
		htc->alt_data_credit_size =
			__le16_to_cpu(msg->ready_ext.reserved) &
			ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
		htc->max_msgs_per_htc_bundle =
			min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
			      HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "Extended ready message RX bundle size %d alt size %d\n",
			   htc->max_msgs_per_htc_bundle,
			   htc->alt_data_credit_size);
	}

	INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
	INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);

	return 0;
}
1001
/* Enable or disable TX credit flow control on endpoint @eid. */
void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
				      enum ath10k_htc_ep_id eid,
				      bool enable)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid];

	ep->tx_credit_flow_enabled = enable;
}
1011
1012int ath10k_htc_connect_service(struct ath10k_htc *htc,
1013			       struct ath10k_htc_svc_conn_req *conn_req,
1014			       struct ath10k_htc_svc_conn_resp *conn_resp)
1015{
1016	struct ath10k *ar = htc->ar;
1017	struct ath10k_htc_msg *msg;
1018	struct ath10k_htc_conn_svc *req_msg;
1019	struct ath10k_htc_conn_svc_response resp_msg_dummy;
1020	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
1021	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
1022	struct ath10k_htc_ep *ep;
1023	struct sk_buff *skb;
1024	unsigned int max_msg_size = 0;
1025	int length, status;
1026	unsigned long time_left;
1027	bool disable_credit_flow_ctrl = false;
1028	u16 message_id, service_id, flags = 0;
1029	u8 tx_alloc = 0;
1030
1031	/* special case for HTC pseudo control service */
1032	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
1033		disable_credit_flow_ctrl = true;
1034		assigned_eid = ATH10K_HTC_EP_0;
1035		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
1036		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
1037		goto setup;
1038	}
1039
1040	tx_alloc = ath10k_htc_get_credit_allocation(htc,
1041						    conn_req->service_id);
1042	if (!tx_alloc)
1043		ath10k_dbg(ar, ATH10K_DBG_BOOT,
1044			   "boot htc service %s does not allocate target credits\n",
1045			   htc_service_name(conn_req->service_id));
1046
1047	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
1048	if (!skb) {
1049		ath10k_err(ar, "Failed to allocate HTC packet\n");
1050		return -ENOMEM;
1051	}
1052
1053	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
1054	skb_put(skb, length);
1055	memset(skb->data, 0, length);
1056
1057	msg = (struct ath10k_htc_msg *)skb->data;
1058	msg->hdr.message_id =
1059		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
1060
1061	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
1062
1063	/* Only enable credit flow control for WMI ctrl service */
1064	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
1065		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
1066		disable_credit_flow_ctrl = true;
1067	}
1068
1069	req_msg = &msg->connect_service;
1070	req_msg->flags = __cpu_to_le16(flags);
1071	req_msg->service_id = __cpu_to_le16(conn_req->service_id);
1072
1073	reinit_completion(&htc->ctl_resp);
1074
1075	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
1076	if (status) {
1077		kfree_skb(skb);
1078		return status;
1079	}
1080
1081	/* wait for response */
1082	time_left = wait_for_completion_timeout(&htc->ctl_resp,
1083						ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
1084	if (!time_left) {
1085		ath10k_err(ar, "Service connect timeout\n");
1086		return -ETIMEDOUT;
1087	}
1088
1089	/* we controlled the buffer creation, it's aligned */
1090	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
1091	resp_msg = &msg->connect_service_response;
1092	message_id = __le16_to_cpu(msg->hdr.message_id);
1093	service_id = __le16_to_cpu(resp_msg->service_id);
1094
1095	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
1096	    (htc->control_resp_len < sizeof(msg->hdr) +
1097	     sizeof(msg->connect_service_response))) {
1098		ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
1099		return -EPROTO;
1100	}
1101
1102	ath10k_dbg(ar, ATH10K_DBG_HTC,
1103		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
1104		   htc_service_name(service_id),
1105		   resp_msg->status, resp_msg->eid);
1106
1107	conn_resp->connect_resp_code = resp_msg->status;
1108
1109	/* check response status */
1110	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
1111		ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
1112			   htc_service_name(service_id),
1113			   resp_msg->status);
1114		return -EPROTO;
1115	}
1116
1117	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
1118	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
1119
1120setup:
1121
1122	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
1123		return -EPROTO;
1124
1125	if (max_msg_size == 0)
1126		return -EPROTO;
1127
1128	ep = &htc->endpoint[assigned_eid];
1129	ep->eid = assigned_eid;
1130
1131	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
1132		return -EPROTO;
1133
1134	/* return assigned endpoint to caller */
1135	conn_resp->eid = assigned_eid;
1136	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
1137
1138	/* setup the endpoint */
1139	ep->service_id = conn_req->service_id;
1140	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
1141	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
1142	ep->tx_credits = tx_alloc;
1143	ep->tx_credit_size = htc->target_credit_size;
1144
1145	if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
1146	    htc->alt_data_credit_size != 0)
1147		ep->tx_credit_size = htc->alt_data_credit_size;
1148
1149	/* copy all the callbacks */
1150	ep->ep_ops = conn_req->ep_ops;
1151
1152	status = ath10k_hif_map_service_to_pipe(htc->ar,
1153						ep->service_id,
1154						&ep->ul_pipe_id,
1155						&ep->dl_pipe_id);
1156	if (status) {
1157		ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
1158			   ep->service_id);
1159		return status;
1160	}
1161
1162	ath10k_dbg(ar, ATH10K_DBG_BOOT,
1163		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
1164		   htc_service_name(ep->service_id), ep->ul_pipe_id,
1165		   ep->dl_pipe_id, ep->eid);
1166
1167	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
1168		ep->tx_credit_flow_enabled = false;
1169		ath10k_dbg(ar, ATH10K_DBG_BOOT,
1170			   "boot htc service '%s' eid %d TX flow control disabled\n",
1171			   htc_service_name(ep->service_id), assigned_eid);
1172	}
1173
1174	return status;
1175}
1176
1177struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
1178{
1179	struct sk_buff *skb;
1180
1181	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
1182	if (!skb)
1183		return NULL;
1184
1185	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
1186
1187	/* FW/HTC requires 4-byte aligned streams */
1188	if (!IS_ALIGNED((unsigned long)skb->data, 4))
1189		ath10k_warn(ar, "Unaligned HTC tx skb\n");
1190
1191	return skb;
1192}
1193
1194static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
1195{
1196	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
1197	dev_kfree_skb_any(skb);
1198}
1199
1200static int ath10k_htc_pktlog_connect(struct ath10k *ar)
1201{
1202	struct ath10k_htc_svc_conn_resp conn_resp;
1203	struct ath10k_htc_svc_conn_req conn_req;
1204	int status;
1205
1206	memset(&conn_req, 0, sizeof(conn_req));
1207	memset(&conn_resp, 0, sizeof(conn_resp));
1208
1209	conn_req.ep_ops.ep_tx_complete = NULL;
1210	conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
1211	conn_req.ep_ops.ep_tx_credits = NULL;
1212
1213	/* connect to control service */
1214	conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
1215	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
1216	if (status) {
1217		ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
1218			    status);
1219		return status;
1220	}
1221
1222	return 0;
1223}
1224
1225static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
1226{
1227	u8 ul_pipe_id;
1228	u8 dl_pipe_id;
1229	int status;
1230
1231	status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
1232						&ul_pipe_id,
1233						&dl_pipe_id);
1234	if (status) {
1235		ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
1236			   ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
1237
1238		return false;
1239	}
1240
1241	return true;
1242}
1243
/* Send the HTC "setup complete" message to the target and, when the HIF
 * provides a pipe for it, connect the optional pktlog service.
 *
 * Returns 0 on success or a negative errno.  On send failure the control
 * skb was not consumed by the lower layer and is freed here.
 */
int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	if (ar->hif.bus == ATH10K_BUS_SDIO) {
		/* Extra setup params used by SDIO: enable RX bundling and
		 * advertise how many messages fit in one bundle.
		 */
		msg->setup_complete_ext.flags =
			__cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
		msg->setup_complete_ext.max_msgs_per_bundled_recv =
			htc->max_msgs_per_htc_bundle;
	}
	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		/* skb was not handed off to HIF - free it ourselves */
		kfree_skb(skb);
		return status;
	}

	if (ath10k_htc_pktlog_svc_supported(ar)) {
		status = ath10k_htc_pktlog_connect(ar);
		if (status) {
			ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
			return status;
		}
	}

	return 0;
}
1287
1288/* registered target arrival callback from the HIF layer */
1289int ath10k_htc_init(struct ath10k *ar)
1290{
1291	int status;
1292	struct ath10k_htc *htc = &ar->htc;
1293	struct ath10k_htc_svc_conn_req conn_req;
1294	struct ath10k_htc_svc_conn_resp conn_resp;
1295
1296	spin_lock_init(&htc->tx_lock);
1297
1298	ath10k_htc_reset_endpoint_states(htc);
1299
1300	htc->ar = ar;
1301
1302	/* setup our pseudo HTC control endpoint connection */
1303	memset(&conn_req, 0, sizeof(conn_req));
1304	memset(&conn_resp, 0, sizeof(conn_resp));
1305	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
1306	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
1307	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
1308	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
1309
1310	/* connect fake service */
1311	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
1312	if (status) {
1313		ath10k_err(ar, "could not connect to htc service (%d)\n",
1314			   status);
1315		return status;
1316	}
1317
1318	init_completion(&htc->ctl_resp);
1319
1320	return 0;
1321}
v5.9
   1// SPDX-License-Identifier: ISC
   2/*
   3 * Copyright (c) 2005-2011 Atheros Communications Inc.
   4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 
   5 */
   6
   7#include "core.h"
   8#include "hif.h"
   9#include "debug.h"
  10
  11/********/
  12/* Send */
  13/********/
  14
  15static void ath10k_htc_control_tx_complete(struct ath10k *ar,
  16					   struct sk_buff *skb)
  17{
  18	kfree_skb(skb);
  19}
  20
  21static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
  22{
  23	struct sk_buff *skb;
  24	struct ath10k_skb_cb *skb_cb;
  25
  26	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
  27	if (!skb)
  28		return NULL;
  29
  30	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
  31	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
  32
  33	skb_cb = ATH10K_SKB_CB(skb);
  34	memset(skb_cb, 0, sizeof(*skb_cb));
  35
  36	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
  37	return skb;
  38}
  39
  40static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
  41					     struct sk_buff *skb)
  42{
  43	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
  44
  45	if (htc->ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
  46		dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
  47	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
  48}
  49
/* Hand a transmitted skb back to the endpoint owner.
 *
 * The header pointer is captured before ath10k_htc_restore_tx_skb() pulls
 * the header off, because hdr->flags must still be checked afterwards.
 * Skbs without a registered TX handler, and bundle skbs, are freed here;
 * otherwise ownership passes to ep_tx_complete.
 */
void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
				     struct sk_buff *skb)
{
	struct ath10k *ar = ep->htc->ar;
	struct ath10k_htc_hdr *hdr;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
		   ep->eid, skb);

	/* grab the header while it is still at skb->data */
	hdr = (struct ath10k_htc_hdr *)skb->data;
	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	/* bundle members are completed as a unit, not individually */
	if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
		dev_kfree_skb_any(skb);
		return;
	}

	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
  76
  77static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
  78				      struct sk_buff *skb)
  79{
  80	struct ath10k_htc_hdr *hdr;
  81
  82	hdr = (struct ath10k_htc_hdr *)skb->data;
  83	memset(hdr, 0, sizeof(struct ath10k_htc_hdr));
  84
  85	hdr->eid = ep->eid;
  86	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
  87	hdr->flags = 0;
  88	if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
  89		hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
  90
  91	spin_lock_bh(&ep->htc->tx_lock);
  92	hdr->seq_no = ep->seq_no++;
  93	spin_unlock_bh(&ep->htc->tx_lock);
  94}
  95
  96static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
  97				     unsigned int len,
  98				     bool consume)
  99{
 100	struct ath10k_htc *htc = ep->htc;
 101	struct ath10k *ar = htc->ar;
 102	enum ath10k_htc_ep_id eid = ep->eid;
 103	int credits, ret = 0;
 104
 105	if (!ep->tx_credit_flow_enabled)
 106		return 0;
 107
 108	credits = DIV_ROUND_UP(len, ep->tx_credit_size);
 109	spin_lock_bh(&htc->tx_lock);
 110
 111	if (ep->tx_credits < credits) {
 112		ath10k_dbg(ar, ATH10K_DBG_HTC,
 113			   "htc insufficient credits ep %d required %d available %d consume %d\n",
 114			   eid, credits, ep->tx_credits, consume);
 115		ret = -EAGAIN;
 116		goto unlock;
 117	}
 118
 119	if (consume) {
 120		ep->tx_credits -= credits;
 121		ath10k_dbg(ar, ATH10K_DBG_HTC,
 122			   "htc ep %d consumed %d credits total %d\n",
 123			   eid, credits, ep->tx_credits);
 124	}
 125
 126unlock:
 127	spin_unlock_bh(&htc->tx_lock);
 128	return ret;
 129}
 130
 131static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
 132{
 133	struct ath10k_htc *htc = ep->htc;
 134	struct ath10k *ar = htc->ar;
 135	enum ath10k_htc_ep_id eid = ep->eid;
 136	int credits;
 137
 138	if (!ep->tx_credit_flow_enabled)
 139		return;
 140
 141	credits = DIV_ROUND_UP(len, ep->tx_credit_size);
 142	spin_lock_bh(&htc->tx_lock);
 143	ep->tx_credits += credits;
 144	ath10k_dbg(ar, ATH10K_DBG_HTC,
 145		   "htc ep %d reverted %d credits back total %d\n",
 146		   eid, credits, ep->tx_credits);
 147	spin_unlock_bh(&htc->tx_lock);
 148
 149	if (ep->ep_ops.ep_tx_credits)
 150		ep->ep_ops.ep_tx_credits(htc->ar);
 151}
 152
/* Transmit one skb on the given HTC endpoint.
 *
 * Pushes an HTC header, reserves TX credits (when credit flow control is
 * enabled for the endpoint), DMA-maps the buffer for non-HL device types
 * and hands it to the HIF layer.  On failure every step is unwound in
 * reverse order and the skb is returned to the caller unchanged.
 *
 * Returns 0 on success; -ECOMM when the device is wedged, -ENOENT for an
 * invalid endpoint id, -EAGAIN without credits, -EIO on mapping failure,
 * or the HIF error code.
 */
int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int ret;
	unsigned int skb_len;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	/* cache the length incl. header for credit accounting on unwind */
	skb_len = skb->len;
	ret = ath10k_htc_consume_credit(ep, skb_len, true);
	if (ret)
		goto err_pull;

	ath10k_htc_prepare_tx_skb(ep, skb);

	skb_cb->eid = eid;
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
		/* HL device types bypass DMA mapping entirely */
		skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
					       DMA_TO_DEVICE);
		ret = dma_mapping_error(dev, skb_cb->paddr);
		if (ret) {
			ret = -EIO;
			goto err_credits;
		}
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	ath10k_htc_release_credit(ep, skb_len);
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}
 214
 215void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
 216{
 217	struct ath10k_htc *htc = &ar->htc;
 218	struct ath10k_skb_cb *skb_cb;
 219	struct ath10k_htc_ep *ep;
 220
 221	if (WARN_ON_ONCE(!skb))
 222		return;
 223
 224	skb_cb = ATH10K_SKB_CB(skb);
 225	ep = &htc->endpoint[skb_cb->eid];
 226
 227	ath10k_htc_notify_tx_completion(ep, skb);
 228	/* the skb now belongs to the completion handler */
 229}
 230EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
 231
 232/***********/
 233/* Receive */
 234/***********/
 235
/* Apply a credit report received in an RX trailer: credit each listed
 * endpoint and invoke its ep_tx_credits callback.
 *
 * The tx_lock is dropped around the callback so external code never runs
 * with the lock held; report pointers stay valid across the drop since
 * they reference the RX buffer, not locked state.
 */
static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
				 const struct ath10k_htc_credit_report *report,
				 int len,
				 enum ath10k_htc_ep_id eid)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep;
	int i, n_reports;

	/* a partial trailing record is ignored, but flagged */
	if (len % sizeof(*report))
		ath10k_warn(ar, "Uneven credit report len %d", len);

	n_reports = len / sizeof(*report);

	spin_lock_bh(&htc->tx_lock);
	for (i = 0; i < n_reports; i++, report++) {
		if (report->eid >= ATH10K_HTC_EP_COUNT)
			break;

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
			   report->eid, report->credits, ep->tx_credits);

		if (ep->ep_ops.ep_tx_credits) {
			/* release the lock while calling into the owner */
			spin_unlock_bh(&htc->tx_lock);
			ep->ep_ops.ep_tx_credits(htc->ar);
			spin_lock_bh(&htc->tx_lock);
		}
	}
	spin_unlock_bh(&htc->tx_lock);
}
 270
 271static int
 272ath10k_htc_process_lookahead(struct ath10k_htc *htc,
 273			     const struct ath10k_htc_lookahead_report *report,
 274			     int len,
 275			     enum ath10k_htc_ep_id eid,
 276			     void *next_lookaheads,
 277			     int *next_lookaheads_len)
 278{
 279	struct ath10k *ar = htc->ar;
 280
 281	/* Invalid lookahead flags are actually transmitted by
 282	 * the target in the HTC control message.
 283	 * Since this will happen at every boot we silently ignore
 284	 * the lookahead in this case
 285	 */
 286	if (report->pre_valid != ((~report->post_valid) & 0xFF))
 287		return 0;
 288
 289	if (next_lookaheads && next_lookaheads_len) {
 290		ath10k_dbg(ar, ATH10K_DBG_HTC,
 291			   "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
 292			   report->pre_valid, report->post_valid);
 293
 294		/* look ahead bytes are valid, copy them over */
 295		memcpy((u8 *)next_lookaheads, report->lookahead, 4);
 296
 297		*next_lookaheads_len = 1;
 298	}
 299
 300	return 0;
 301}
 302
/* Copy a bundle of lookahead reports from an RX trailer into the caller's
 * buffer, 4 bytes per entry.
 *
 * Returns 0 on success or -EINVAL when the bundle count is zero or
 * exceeds max_msgs_per_htc_bundle.
 */
static int
ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
				    const struct ath10k_htc_lookahead_bundle *report,
				    int len,
				    enum ath10k_htc_ep_id eid,
				    void *next_lookaheads,
				    int *next_lookaheads_len)
{
	struct ath10k *ar = htc->ar;
	int bundle_cnt = len / sizeof(*report);

	if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
		ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
			    bundle_cnt);
		return -EINVAL;
	}

	if (next_lookaheads && next_lookaheads_len) {
		int i;

		/* pack the lookaheads back to back in the output buffer */
		for (i = 0; i < bundle_cnt; i++) {
			memcpy(((u8 *)next_lookaheads) + 4 * i,
			       report->lookahead, 4);
			report++;
		}

		*next_lookaheads_len = bundle_cnt;
	}

	return 0;
}
 334
/* Parse the record trailer found at the end of an RX frame.
 *
 * @buffer/@length delimit the trailer.  Records (credit reports,
 * lookahead reports, lookahead bundles) are processed in sequence;
 * unknown record ids are skipped with a warning.  On any malformed
 * record, parsing stops and the whole trailer is dumped at debug level.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_htc_process_trailer(struct ath10k_htc *htc,
			       u8 *buffer,
			       int length,
			       enum ath10k_htc_ep_id src_eid,
			       void *next_lookaheads,
			       int *next_lookaheads_len)
{
	struct ath10k_htc_lookahead_bundle *bundle;
	struct ath10k *ar = htc->ar;
	int status = 0;
	struct ath10k_htc_record *record;
	u8 *orig_buffer;
	int orig_length;
	size_t len;

	/* keep the original span for the error dump at the end */
	orig_buffer = buffer;
	orig_length = length;

	while (length > 0) {
		record = (struct ath10k_htc_record *)buffer;

		/* not even a record header left */
		if (length < sizeof(record->hdr)) {
			status = -EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			ath10k_warn(ar, "Invalid record length: %d\n",
				    record->hdr.len);
			status = -EINVAL;
			break;
		}

		switch (record->hdr.id) {
		case ATH10K_HTC_RECORD_CREDITS:
			len = sizeof(struct ath10k_htc_credit_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Credit report too long\n");
				status = -EINVAL;
				break;
			}
			ath10k_htc_process_credit_report(htc,
							 record->credit_report,
							 record->hdr.len,
							 src_eid);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD:
			len = sizeof(struct ath10k_htc_lookahead_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Lookahead report too long\n");
				status = -EINVAL;
				break;
			}
			status = ath10k_htc_process_lookahead(htc,
							      record->lookahead_report,
							      record->hdr.len,
							      src_eid,
							      next_lookaheads,
							      next_lookaheads_len);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
			bundle = record->lookahead_bundle;
			status = ath10k_htc_process_lookahead_bundle(htc,
								     bundle,
								     record->hdr.len,
								     src_eid,
								     next_lookaheads,
								     next_lookaheads_len);
			break;
		default:
			ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
				    record->hdr.id, record->hdr.len);
			break;
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	if (status)
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
				orig_buffer, orig_length);

	return status;
}
EXPORT_SYMBOL(ath10k_htc_process_trailer);
 426
/* Main HTC RX entry point.
 *
 * Validates the HTC header (endpoint id, payload length versus both the
 * protocol maximum and the actual skb length), processes a trailing record
 * block when the trailer flag is set, trims it off, and delivers the
 * remaining payload to the endpoint's ep_rx_complete handler.
 *
 * The skb is always consumed: freed here on error or zero-length payload,
 * otherwise ownership passes to the RX callback.
 */
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	/* hdr stays valid after the pull - only skb->data is advanced */
	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	ep = &htc->endpoint[eid];

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			goto out;
		}

		/* trailer occupies the last trailer_len bytes of payload */
		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid,
						    NULL, NULL);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
 514
/* RX handler for endpoint-0 control messages.
 *
 * Ready / connect-service responses are copied into control_resp_buffer
 * and signalled to the waiter blocked on htc->ctl_resp; suspend-complete
 * events are forwarded to the registered htc_ops callback; anything else
 * is dropped with a warning.  The skb is always consumed here.
 */
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

	switch (__le16_to_cpu(msg->hdr.message_id)) {
	case ATH10K_HTC_MSG_READY_ID:
	case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
		/* handle HTC control message */
		if (completion_done(&htc->ctl_resp)) {
			/* this is a fatal error, target should not be
			 * sending unsolicited messages on the ep 0
			 */
			ath10k_warn(ar, "HTC rx ctrl still processing\n");
			complete(&htc->ctl_resp);
			goto out;
		}

		/* clamp to the control buffer size before copying */
		htc->control_resp_len =
			min_t(int, skb->len,
			      ATH10K_HTC_MAX_CTRL_MSG_LEN);

		memcpy(htc->control_resp_buffer, skb->data,
		       htc->control_resp_len);

		complete(&htc->ctl_resp);
		break;
	case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
		htc->htc_ops.target_send_suspend_complete(ar);
		break;
	default:
		ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
		break;
	}

out:
	kfree_skb(skb);
}
 554
 555/***************/
 556/* Init/Deinit */
 557/***************/
 558
 559static const char *htc_service_name(enum ath10k_htc_svc_id id)
 560{
 561	switch (id) {
 562	case ATH10K_HTC_SVC_ID_RESERVED:
 563		return "Reserved";
 564	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
 565		return "Control";
 566	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
 567		return "WMI";
 568	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
 569		return "DATA BE";
 570	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
 571		return "DATA BK";
 572	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
 573		return "DATA VI";
 574	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
 575		return "DATA VO";
 576	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
 577		return "NMI Control";
 578	case ATH10K_HTC_SVC_ID_NMI_DATA:
 579		return "NMI Data";
 580	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
 581		return "HTT Data";
 582	case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
 583		return "HTT Data";
 584	case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
 585		return "HTT Data";
 586	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
 587		return "RAW";
 588	case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
 589		return "PKTLOG";
 590	}
 591
 592	return "Unknown";
 593}
 594
 595static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
 596{
 597	struct ath10k_htc_ep *ep;
 598	int i;
 599
 600	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
 601		ep = &htc->endpoint[i];
 602		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
 603		ep->max_ep_message_len = 0;
 604		ep->max_tx_queue_depth = 0;
 605		ep->eid = i;
 606		ep->htc = htc;
 607		ep->tx_credit_flow_enabled = true;
 608	}
 609}
 610
 611static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
 612					   u16 service_id)
 613{
 614	u8 allocation = 0;
 615
 616	/* The WMI control service is the only service with flow control.
 617	 * Let it have all transmit credits.
 618	 */
 619	if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
 620		allocation = htc->total_transmit_credits;
 621
 622	return allocation;
 623}
 624
 625static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
 626				  struct sk_buff *bundle_skb,
 627				  struct sk_buff_head *tx_save_head)
 628{
 629	struct ath10k_hif_sg_item sg_item;
 630	struct ath10k_htc *htc = ep->htc;
 631	struct ath10k *ar = htc->ar;
 632	struct sk_buff *skb;
 633	int ret, cn = 0;
 634	unsigned int skb_len;
 635
 636	ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
 637	skb_len = bundle_skb->len;
 638	ret = ath10k_htc_consume_credit(ep, skb_len, true);
 639
 640	if (!ret) {
 641		sg_item.transfer_id = ep->eid;
 642		sg_item.transfer_context = bundle_skb;
 643		sg_item.vaddr = bundle_skb->data;
 644		sg_item.len = bundle_skb->len;
 645
 646		ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
 647		if (ret)
 648			ath10k_htc_release_credit(ep, skb_len);
 649	}
 650
 651	if (ret)
 652		dev_kfree_skb_any(bundle_skb);
 653
 654	for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
 655		if (ret) {
 656			skb_pull(skb, sizeof(struct ath10k_htc_hdr));
 657			skb_queue_head(&ep->tx_req_head, skb);
 658		} else {
 659			skb_queue_tail(&ep->tx_complete_head, skb);
 660		}
 661	}
 662
 663	if (!ret)
 664		queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);
 665
 666	ath10k_dbg(ar, ATH10K_DBG_HTC,
 667		   "bundle tx status %d eid %d req count %d count %d len %d\n",
 668		   ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
 669	return ret;
 670}
 671
 672static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
 673{
 674	struct ath10k_htc *htc = ep->htc;
 675	struct ath10k *ar = htc->ar;
 676	int ret;
 677
 678	ret = ath10k_htc_send(htc, ep->eid, skb);
 679
 680	if (ret)
 681		skb_queue_head(&ep->tx_req_head, skb);
 682
 683	ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
 684		   ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
 685}
 686
/* Drain the endpoint's TX request queue, packing frames into
 * credit-aligned bundle buffers and flushing each buffer via
 * ath10k_htc_send_bundle() when it fills up.
 *
 * Each member frame is padded to a whole credit (pad_len recorded in its
 * HTC header) and flagged with ATH10K_HTC_FLAG_SEND_BUNDLE.  Frames are
 * kept on tx_save_head until their bundle's fate is known.  Returns 0 or
 * a negative errno; on any error the unsent skb is re-queued first.
 */
static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
{
	struct ath10k_htc *htc = ep->htc;
	struct sk_buff *bundle_skb, *skb;
	struct sk_buff_head tx_save_head;
	struct ath10k_htc_hdr *hdr;
	u8 *bundle_buf;
	int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	/* don't even start without a minimum credit reserve */
	if (ep->tx_credit_flow_enabled &&
	    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
		return 0;

	bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
	bundle_skb = dev_alloc_skb(bundles_left);

	if (!bundle_skb)
		return -ENOMEM;

	bundle_buf = bundle_skb->data;
	skb_queue_head_init(&tx_save_head);

	while (true) {
		skb = skb_dequeue(&ep->tx_req_head);
		if (!skb)
			break;

		credit_pad = 0;
		trans_len = skb->len + sizeof(*hdr);
		credit_remainder = trans_len % ep->tx_credit_size;

		if (credit_remainder != 0) {
			/* round each message up to a whole credit */
			credit_pad = ep->tx_credit_size - credit_remainder;
			trans_len += credit_pad;
		}

		/* availability check only (consume=false): the credits for
		 * the whole bundle are consumed when it is actually sent
		 */
		ret = ath10k_htc_consume_credit(ep,
						bundle_buf + trans_len - bundle_skb->data,
						false);
		if (ret) {
			skb_queue_head(&ep->tx_req_head, skb);
			break;
		}

		if (bundles_left < trans_len) {
			/* current bundle buffer is full - flush it */
			bundle_skb->len = bundle_buf - bundle_skb->data;
			ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);

			if (ret) {
				skb_queue_head(&ep->tx_req_head, skb);
				return ret;
			}

			if (skb_queue_len(&ep->tx_req_head) == 0) {
				/* last frame - cheaper to send it unbundled */
				ath10k_htc_send_one_skb(ep, skb);
				return ret;
			}

			if (ep->tx_credit_flow_enabled &&
			    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
				skb_queue_head(&ep->tx_req_head, skb);
				return 0;
			}

			/* start a fresh bundle buffer */
			bundles_left =
				ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
			bundle_skb = dev_alloc_skb(bundles_left);

			if (!bundle_skb) {
				skb_queue_head(&ep->tx_req_head, skb);
				return -ENOMEM;
			}
			bundle_buf = bundle_skb->data;
			skb_queue_head_init(&tx_save_head);
		}

		/* build the HTC header on the skb, then copy frame + header
		 * into the bundle and mark it as a bundle member
		 */
		skb_push(skb, sizeof(struct ath10k_htc_hdr));
		ath10k_htc_prepare_tx_skb(ep, skb);

		memcpy(bundle_buf, skb->data, skb->len);
		hdr = (struct ath10k_htc_hdr *)bundle_buf;
		hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
		hdr->pad_len = __cpu_to_le16(credit_pad);
		bundle_buf += trans_len;
		bundles_left -= trans_len;
		skb_queue_tail(&tx_save_head, skb);
	}

	if (bundle_buf != bundle_skb->data) {
		/* flush the final, partially filled bundle */
		bundle_skb->len = bundle_buf - bundle_skb->data;
		ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
	} else {
		dev_kfree_skb_any(bundle_skb);
	}

	return ret;
}
 787
 788static void ath10k_htc_bundle_tx_work(struct work_struct *work)
 789{
 790	struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
 791	struct ath10k_htc_ep *ep;
 792	struct sk_buff *skb;
 793	int i;
 794
 795	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
 796		ep = &ar->htc.endpoint[i];
 797
 798		if (!ep->bundle_tx)
 799			continue;
 800
 801		ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
 802			   ep->eid, skb_queue_len(&ep->tx_req_head));
 803
 804		if (skb_queue_len(&ep->tx_req_head) >=
 805		    ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
 806			ath10k_htc_send_bundle_skbs(ep);
 807		} else {
 808			skb = skb_dequeue(&ep->tx_req_head);
 809
 810			if (!skb)
 811				continue;
 812			ath10k_htc_send_one_skb(ep, skb);
 813		}
 814	}
 815}
 816
/* Worker draining the TX completion queues of bundle-capable endpoints.
 * Only the endpoint whose eid matches ar->htt.eid is serviced here; each
 * queued skb is handed to the regular per-skb completion path.
 */
static void ath10k_htc_tx_complete_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
	struct ath10k_htc_ep *ep;
	enum ath10k_htc_ep_id eid;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
		ep = &ar->htc.endpoint[i];
		eid = ep->eid;
		/* only the HTT endpoint uses the bundle completion queue */
		if (ep->bundle_tx && eid == ar->htt.eid) {
			ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count%d\n",
				   ep->eid, skb_queue_len(&ep->tx_complete_head));

			while (true) {
				skb = skb_dequeue(&ep->tx_complete_head);
				if (!skb)
					break;
				ath10k_htc_notify_tx_completion(ep, skb);
			}
		}
	}
}
 841
 842int ath10k_htc_send_hl(struct ath10k_htc *htc,
 843		       enum ath10k_htc_ep_id eid,
 844		       struct sk_buff *skb)
 845{
 846	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
 847	struct ath10k *ar = htc->ar;
 848
 849	if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
 850		ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
 851		return -ENOMEM;
 852	}
 853
 854	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
 855		   eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);
 856
 857	if (ep->bundle_tx) {
 858		skb_queue_tail(&ep->tx_req_head, skb);
 859		queue_work(ar->workqueue, &ar->bundle_tx_work);
 860		return 0;
 861	} else {
 862		return ath10k_htc_send(htc, eid, skb);
 863	}
 864}
 865
 866void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
 867{
 868	if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
 869	    !ep->bundle_tx) {
 870		ep->bundle_tx = true;
 871		skb_queue_head_init(&ep->tx_req_head);
 872		skb_queue_head_init(&ep->tx_complete_head);
 873	}
 874}
 875
 876void ath10k_htc_stop_hl(struct ath10k *ar)
 877{
 878	struct ath10k_htc_ep *ep;
 879	int i;
 880
 881	cancel_work_sync(&ar->bundle_tx_work);
 882	cancel_work_sync(&ar->tx_complete_work);
 883
 884	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
 885		ep = &ar->htc.endpoint[i];
 886
 887		if (!ep->bundle_tx)
 888			continue;
 889
 890		ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
 891			   ep->eid, skb_queue_len(&ep->tx_req_head));
 892
 893		skb_queue_purge(&ep->tx_req_head);
 894	}
 895}
 896
 897int ath10k_htc_wait_target(struct ath10k_htc *htc)
 898{
 899	struct ath10k *ar = htc->ar;
 900	int i, status = 0;
 901	unsigned long time_left;
 902	struct ath10k_htc_msg *msg;
 903	u16 message_id;
 904
 905	time_left = wait_for_completion_timeout(&htc->ctl_resp,
 906						ATH10K_HTC_WAIT_TIMEOUT_HZ);
 907	if (!time_left) {
 908		/* Workaround: In some cases the PCI HIF doesn't
 909		 * receive interrupt for the control response message
 910		 * even if the buffer was completed. It is suspected
 911		 * iomap writes unmasking PCI CE irqs aren't propagated
 912		 * properly in KVM PCI-passthrough sometimes.
 913		 */
 914		ath10k_warn(ar, "failed to receive control response completion, polling..\n");
 915
 916		for (i = 0; i < CE_COUNT; i++)
 917			ath10k_hif_send_complete_check(htc->ar, i, 1);
 918
 919		time_left =
 920		wait_for_completion_timeout(&htc->ctl_resp,
 921					    ATH10K_HTC_WAIT_TIMEOUT_HZ);
 922
 923		if (!time_left)
 924			status = -ETIMEDOUT;
 925	}
 926
 927	if (status < 0) {
 928		ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
 929		return status;
 930	}
 931
 932	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
 933		ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
 934			   htc->control_resp_len);
 935		return -ECOMM;
 936	}
 937
 938	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
 939	message_id   = __le16_to_cpu(msg->hdr.message_id);
 940
 941	if (message_id != ATH10K_HTC_MSG_READY_ID) {
 942		ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
 943		return -ECOMM;
 944	}
 945
 946	htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
 
 
 
 
 947	htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);
 948
 949	ath10k_dbg(ar, ATH10K_DBG_HTC,
 950		   "Target ready! transmit resources: %d size:%d\n",
 951		   htc->total_transmit_credits,
 952		   htc->target_credit_size);
 
 953
 954	if ((htc->total_transmit_credits == 0) ||
 955	    (htc->target_credit_size == 0)) {
 956		ath10k_err(ar, "Invalid credit size received\n");
 957		return -ECOMM;
 958	}
 959
 960	/* The only way to determine if the ready message is an extended
 961	 * message is from the size.
 962	 */
 963	if (htc->control_resp_len >=
 964	    sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
 965		htc->alt_data_credit_size =
 966			__le16_to_cpu(msg->ready_ext.reserved) &
 967			ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
 968		htc->max_msgs_per_htc_bundle =
 969			min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
 970			      HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
 971		ath10k_dbg(ar, ATH10K_DBG_HTC,
 972			   "Extended ready message RX bundle size %d alt size %d\n",
 973			   htc->max_msgs_per_htc_bundle,
 974			   htc->alt_data_credit_size);
 975	}
 976
 977	INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
 978	INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);
 979
 980	return 0;
 981}
 982
 983void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
 984				      enum ath10k_htc_ep_id eid,
 985				      bool enable)
 986{
 987	struct ath10k *ar = htc->ar;
 988	struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid];
 989
 990	ep->tx_credit_flow_enabled = enable;
 991}
 992
/* Connect an HTC service to the target and set up its endpoint.
 *
 * For the pseudo control service (ATH10K_HTC_SVC_ID_RSVD_CTRL) no message
 * exchange takes place: endpoint 0 is assigned locally. For every other
 * service a CONNECT_SERVICE control message is sent on endpoint 0 and the
 * target's response (in htc->control_resp_buffer) provides the assigned
 * endpoint id and maximum message size. The endpoint is then initialized
 * with credits, callbacks and its HIF pipe mapping.
 *
 * Returns 0 on success; -ENOMEM, -ETIMEDOUT, -EPROTO or a HIF mapping
 * error on failure.
 */
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	/* resp_msg points at the dummy until a real response is parsed, so
	 * the setup path below can always dereference it safely
	 */
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	unsigned long time_left;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err(ar, "Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	/* re-arm before sending so the response cannot be missed */
	reinit_completion(&htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (!time_left) {
		ath10k_err(ar, "Service connect timeout\n");
		return -ETIMEDOUT;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	/* validate both the message type and that the whole response fits
	 * in what was received
	 */
	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	/* target-provided values are untrusted; range-check them */
	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	/* refuse to connect a second service to an already-used endpoint */
	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;
	ep->tx_credit_size = htc->target_credit_size;

	/* HTT data uses the alternate credit size if the target gave one */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
	    htc->alt_data_credit_size != 0)
		ep->tx_credit_size = htc->alt_data_credit_size;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id);
	if (status) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
			   ep->service_id);
		return status;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}
1157
1158struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
1159{
1160	struct sk_buff *skb;
1161
1162	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
1163	if (!skb)
1164		return NULL;
1165
1166	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
1167
1168	/* FW/HTC requires 4-byte aligned streams */
1169	if (!IS_ALIGNED((unsigned long)skb->data, 4))
1170		ath10k_warn(ar, "Unaligned HTC tx skb\n");
1171
1172	return skb;
1173}
1174
/* RX handler for the HTT pktlog service: hand the payload to the tracing
 * subsystem and release the buffer.
 */
static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
1180
1181static int ath10k_htc_pktlog_connect(struct ath10k *ar)
1182{
1183	struct ath10k_htc_svc_conn_resp conn_resp;
1184	struct ath10k_htc_svc_conn_req conn_req;
1185	int status;
1186
1187	memset(&conn_req, 0, sizeof(conn_req));
1188	memset(&conn_resp, 0, sizeof(conn_resp));
1189
1190	conn_req.ep_ops.ep_tx_complete = NULL;
1191	conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
1192	conn_req.ep_ops.ep_tx_credits = NULL;
1193
1194	/* connect to control service */
1195	conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
1196	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
1197	if (status) {
1198		ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
1199			    status);
1200		return status;
1201	}
1202
1203	return 0;
1204}
1205
1206static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
1207{
1208	u8 ul_pipe_id;
1209	u8 dl_pipe_id;
1210	int status;
1211
1212	status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
1213						&ul_pipe_id,
1214						&dl_pipe_id);
1215	if (status) {
1216		ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
1217			   ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
1218
1219		return false;
1220	}
1221
1222	return true;
1223}
1224
1225int ath10k_htc_start(struct ath10k_htc *htc)
1226{
1227	struct ath10k *ar = htc->ar;
1228	struct sk_buff *skb;
1229	int status = 0;
1230	struct ath10k_htc_msg *msg;
1231
1232	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
1233	if (!skb)
1234		return -ENOMEM;
1235
1236	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
1237	memset(skb->data, 0, skb->len);
1238
1239	msg = (struct ath10k_htc_msg *)skb->data;
1240	msg->hdr.message_id =
1241		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
1242
1243	if (ar->hif.bus == ATH10K_BUS_SDIO) {
1244		/* Extra setup params used by SDIO */
1245		msg->setup_complete_ext.flags =
1246			__cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
1247		msg->setup_complete_ext.max_msgs_per_bundled_recv =
1248			htc->max_msgs_per_htc_bundle;
1249	}
1250	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
1251
1252	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
1253	if (status) {
1254		kfree_skb(skb);
1255		return status;
1256	}
1257
1258	if (ath10k_htc_pktlog_svc_supported(ar)) {
1259		status = ath10k_htc_pktlog_connect(ar);
1260		if (status) {
1261			ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
1262			return status;
1263		}
1264	}
1265
1266	return 0;
1267}
1268
1269/* registered target arrival callback from the HIF layer */
1270int ath10k_htc_init(struct ath10k *ar)
1271{
1272	int status;
1273	struct ath10k_htc *htc = &ar->htc;
1274	struct ath10k_htc_svc_conn_req conn_req;
1275	struct ath10k_htc_svc_conn_resp conn_resp;
1276
1277	spin_lock_init(&htc->tx_lock);
1278
1279	ath10k_htc_reset_endpoint_states(htc);
1280
1281	htc->ar = ar;
1282
1283	/* setup our pseudo HTC control endpoint connection */
1284	memset(&conn_req, 0, sizeof(conn_req));
1285	memset(&conn_resp, 0, sizeof(conn_resp));
1286	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
1287	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
1288	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
1289	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
1290
1291	/* connect fake service */
1292	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
1293	if (status) {
1294		ath10k_err(ar, "could not connect to htc service (%d)\n",
1295			   status);
1296		return status;
1297	}
1298
1299	init_completion(&htc->ctl_resp);
1300
1301	return 0;
1302}