// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
				      int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);
	switch (connection->state) {
	case GB_CONNECTION_STATE_ENABLED:
		break;
	case GB_CONNECTION_STATE_ENABLED_TX:
		if (gb_operation_is_incoming(operation))
			goto err_unlock;
		break;
	case GB_CONNECTION_STATE_DISCONNECTING:
		if (!gb_operation_is_core(operation))
			goto err_unlock;
		break;
	default:
		goto err_unlock;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	trace_gb_operation_get_active(operation);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;

err_unlock:
	spin_unlock_irqrestore(&connection->lock, flags);

	return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	trace_gb_operation_put_active(operation);

	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}
	spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message, the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not, the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead.  Similarly, the only valid result
 * value to set for an operation in the initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight.  It should be the first result value
		 * set after the initial -EBADR.  Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation.  Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value.  Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ; /* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result;	/* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}

int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);
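
/*
 * Illustrative sketch (not part of this driver): the sticky-result
 * rules above seen from a caller's point of view.  The calls are real,
 * the sequence is hypothetical.
 *
 *	// op->errno == -EBADR after creation ("never set")
 *	gb_operation_result_set(op, -EINPROGRESS); // request in flight
 *	gb_operation_result_set(op, -ETIMEDOUT);   // true: result recorded
 *	gb_operation_result_set(op, 0);            // false: too late, ignored
 *	gb_operation_result(op);                   // returns -ETIMEDOUT
 */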

/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
		    !gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
					connection->hd_cport_id,
					message,
					gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
		return;
	}
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler. The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this.  The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;
	int ret;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation)) {
		gb_operation_request_handle(operation);
	} else {
		ret = del_timer_sync(&operation->timer);
		if (!ret) {
			/* Cancel request message if scheduled by timeout. */
			if (gb_operation_result(operation) == -ETIMEDOUT)
				gb_message_cancel(operation->request);
		}

		operation->callback(operation);
	}

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}

static void gb_operation_timeout(struct timer_list *t)
{
	struct gb_operation *operation = from_timer(operation, t, timer);

	if (gb_operation_result_set(operation, -ETIMEDOUT)) {
		/*
		 * A stuck request message will be cancelled from the
		 * workqueue.
		 */
		queue_work(gb_operation_completion_wq, &operation->work);
	}
}

static void gb_operation_message_init(struct gb_host_device *hd,
				      struct gb_message *message,
				      u16 operation_id,
				      size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
	 * arriving data so there's no need to initialize the message header.
	 */
	if (type != GB_REQUEST_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent.  For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero.  It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header.  The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation.  The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
			   size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
			 message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message.  Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}

static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire.  Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}

bool gb_operation_response_alloc(struct gb_operation *operation,
				 size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated.  The errno will be set before sending.  All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
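
/*
 * Illustrative sketch (hypothetical protocol code, not part of this
 * driver): an incoming-request handler allocating and filling a
 * response.  The struct and handler name are made up; the returned
 * status is what core passes on to gb_operation_response_send().
 *
 *	static int example_request_handler(struct gb_operation *op)
 *	{
 *		struct example_response {
 *			__le32 value;
 *		} *resp;
 *
 *		if (!gb_operation_response_alloc(op, sizeof(*resp),
 *						 GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		resp = op->response->payload;
 *		resp->value = cpu_to_le32(42);
 *
 *		return 0;
 *	}
 */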

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data.  The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC.  In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header.  Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol.  So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
			   size_t request_size, size_t response_size,
			   unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}

		timer_setup(&operation->timer, gb_operation_timeout, 0);
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;  /* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection.  The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only.  Both of
 * these are allowed to be 0.  Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
			  u8 type, size_t request_size,
			  size_t response_size, unsigned long flags,
			  gfp_t gfp)
{
	struct gb_operation *operation;

	if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
		flags &= GB_OPERATION_FLAG_USER_MASK;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create(operation);

	return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);

struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
			 u8 type, size_t request_size,
			 size_t response_size, unsigned long flags,
			 gfp_t gfp)
{
	struct gb_operation *operation;

	flags |= GB_OPERATION_FLAG_CORE;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create_core(operation);

	return operation;
}

/* Do not export this function. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
			     u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	operation = gb_operation_create_common(connection, type,
					       request_size,
					       GB_REQUEST_TYPE_INVALID,
					       flags, GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);
	trace_gb_operation_create_incoming(operation);

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	trace_gb_operation_destroy(operation);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/**
 * gb_operation_request_send() - send an operation request message
 * @operation:	the operation to initiate
 * @callback:	the operation completion callback
 * @timeout:	operation timeout in milliseconds, or zero for no timeout
 * @gfp:	the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete. The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
			      gb_operation_callback callback,
			      unsigned int timeout,
			      gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (gb_connection_is_offloaded(connection))
		return -EBUSY;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id for unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation. It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	if (timeout) {
		operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
		add_timer(&operation->timer);
	}

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);
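
/*
 * Illustrative sketch (hypothetical driver code): an asynchronous
 * request using the API above.  The type, payload structs and the
 * callback are made up for the example.
 *
 *	static void example_callback(struct gb_operation *op)
 *	{
 *		if (gb_operation_result(op))
 *			return;	// failed, timed out or cancelled
 *		// On success the response payload is valid here:
 *		// struct example_resp *resp = op->response->payload;
 *	}
 *
 *	op = gb_operation_create(connection, EXAMPLE_TYPE_FOO,
 *				 sizeof(struct example_req),
 *				 sizeof(struct example_resp), GFP_KERNEL);
 *	if (!op)
 *		return -ENOMEM;
 *	// ...fill op->request->payload...
 *	ret = gb_operation_request_send(op, example_callback,
 *					1000 /\* ms *\/, GFP_KERNEL);
 *	if (ret)
 *		gb_operation_put(op);
 *	// otherwise drop the creator's reference once the callback
 *	// has run and the operation is no longer needed
 */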

/*
 * Send a synchronous operation.  This function is expected to
 * block, returning only when the response has arrived (or when an
 * error is detected).  The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
					   unsigned int timeout)
{
	int ret;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					timeout, GFP_KERNEL);
	if (ret)
		return ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request.  A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message.  Otherwise it
 * can simply supply the result errno; this function will allocate
 * the response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
				      int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
	    !gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
			  struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation.  If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error and the operation is not
	 * unidirectional, there's nothing more to do until the response
	 * arrives. If an error occurred attempting to send it, or if the
	 * operation is unidirectional, record the result of the operation
	 * and schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}

		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status || gb_operation_is_unidirectional(operation)) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
				   &operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				const struct gb_operation_msg_hdr *header,
				void *data, size_t size)
{
	struct gb_operation *operation;
	u16 operation_id;
	u8 type;
	int ret;

	operation_id = le16_to_cpu(header->operation_id);
	type = header->type;

	operation = gb_operation_create_incoming(connection, operation_id,
						 type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message.  Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
				const struct gb_operation_msg_hdr *header,
				void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	size_t message_size;
	u16 operation_id;
	int errno;

	operation_id = le16_to_cpu(header->operation_id);

	if (!operation_id) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: invalid response id 0 received\n",
				    connection->name);
		return;
	}

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: unexpected response id 0x%04x received\n",
				    connection->name, operation_id);
		return;
	}

	errno = gb_operation_status_map(header->result);
	message = operation->response;
	message_size = sizeof(*header) + message->payload_size;
	if (!errno && size > message_size) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: malformed response 0x%02x received (%zu > %zu)\n",
				    connection->name, header->type,
				    size, message_size);
		errno = -EMSGSIZE;
	} else if (!errno && size < message_size) {
		if (gb_operation_short_response_allowed(operation)) {
			message->payload_size = size - sizeof(*header);
		} else {
			dev_err_ratelimited(&connection->hd->dev,
					    "%s: short response 0x%02x received (%zu < %zu)\n",
					    connection->name, header->type,
					    size, message_size);
			errno = -EMSGSIZE;
		}
	}

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*header);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(message->buffer, data, size);

		trace_gb_message_recv_response(message);

		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}

/*
 * Handle data arriving on a connection.  As soon as we return, the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
			void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	struct device *dev = &connection->hd->dev;
	size_t msg_size;

	if (connection->state == GB_CONNECTION_STATE_DISABLED ||
	    gb_connection_is_offloaded(connection)) {
		dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
				     connection->name, size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err_ratelimited(dev, "%s: short message received\n",
				    connection->name);
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err_ratelimited(dev,
				    "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
				    connection->name,
				    le16_to_cpu(header.operation_id),
				    header.type, size, msg_size);
		return;		/* XXX Should still complete operation */
	}

	if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
		gb_connection_recv_response(connection, &header, data,
					    msg_size);
	} else {
		gb_connection_recv_request(connection, &header, data,
					   msg_size);
	}
}

/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (WARN_ON(gb_operation_is_incoming(operation)))
		return;

	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		queue_work(gb_operation_completion_wq, &operation->work);
	}
	trace_gb_message_cancel_outgoing(operation->request);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/*
 * Cancel an incoming operation synchronously. Called during connection tear
 * down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
	if (WARN_ON(!gb_operation_is_incoming(operation)))
		return;

	if (!gb_operation_is_unidirectional(operation)) {
		/*
		 * Make sure the request handler has submitted the response
		 * before cancelling it.
		 */
		flush_work(&operation->work);
		if (!gb_operation_result_set(operation, errno))
			gb_message_cancel(operation->response);
	}
	trace_gb_message_cancel_incoming(operation->response);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}

/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: size of @response
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation.  It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs.  @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size bytes will be copied into @response if the operation
 * is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
			      void *request, int request_size,
			      void *response, int response_size,
			      unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
			connection->name, operation->id, type, ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
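
/*
 * Illustrative sketch (hypothetical protocol code): a simple
 * synchronous call built on the helper above.  The operation type and
 * the request/response structs are made up; gb_operation_sync() is the
 * wrapper that supplies the default timeout.
 *
 *	struct example_read_req req = { .addr = cpu_to_le16(addr) };
 *	struct example_read_resp resp;
 *	int ret;
 *
 *	ret = gb_operation_sync(connection, EXAMPLE_TYPE_READ,
 *				&req, sizeof(req), &resp, sizeof(resp));
 *	if (ret)
 *		return ret;
 *	value = le32_to_cpu(resp.value);
 */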

/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection:		connection to use
 * @type:		type of operation to send
 * @request:		memory buffer to copy the request from
 * @request_size:	size of @request
 * @timeout:		send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that successful send of a unidirectional operation does not imply that
 * the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
					int type, void *request,
					int request_size,
					unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if (request_size && !request)
		return -EINVAL;

	operation = gb_operation_create_flags(connection, type,
					      request_size, 0,
					      GB_OPERATION_FLAG_UNIDIRECTIONAL,
					      GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: unidirectional operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
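
/*
 * Illustrative sketch (hypothetical): firing a one-way notification,
 * i.e. a request with no response message.  The type and payload are
 * made up; gb_operation_unidirectional() is the default-timeout
 * wrapper.
 *
 *	struct example_event ev = { .id = cpu_to_le16(event_id) };
 *
 *	ret = gb_operation_unidirectional(connection, EXAMPLE_TYPE_EVENT,
 *					  &ev, sizeof(ev));
 */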

int __init gb_operation_init(void)
{
	gb_message_cache = kmem_cache_create("gb_message_cache",
					     sizeof(struct gb_message), 0, 0,
					     NULL);
	if (!gb_message_cache)
		return -ENOMEM;

	gb_operation_cache = kmem_cache_create("gb_operation_cache",
					       sizeof(struct gb_operation), 0,
					       0, NULL);
	if (!gb_operation_cache)
		goto err_destroy_message_cache;

	gb_operation_completion_wq = alloc_workqueue("greybus_completion",
						     0, 0);
	if (!gb_operation_completion_wq)
		goto err_destroy_operation_cache;

	return 0;

err_destroy_operation_cache:
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
err_destroy_message_cache:
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;

	return -ENOMEM;
}

void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_completion_wq);
	gb_operation_completion_wq = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}
 607
 608	flags |= GB_OPERATION_FLAG_CORE;
 609
 610	operation = gb_operation_create_common(connection, type,
 611					       request_size, response_size,
 612					       flags, gfp);
 613	if (operation)
 614		trace_gb_operation_create_core(operation);
 615
 616	return operation;
 617}
 618	
 619	/* Note: gb_operation_create_core() above is deliberately not exported. */
 620	
 621size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
 622{
 623	struct gb_host_device *hd = connection->hd;
 624
 625	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
 626}
 627EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);
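
/*
 * A minimal usage sketch (hypothetical driver code; "len" is assumed
 * to be the caller's total transfer length): bound each transfer by
 * what one operation message can carry.
 *
 *	size_t max = gb_operation_get_payload_size_max(connection);
 *	size_t chunk = min_t(size_t, len, max);
 */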
 628
 629static struct gb_operation *
 630gb_operation_create_incoming(struct gb_connection *connection, u16 id,
 631			     u8 type, void *data, size_t size)
 632{
 633	struct gb_operation *operation;
 634	size_t request_size;
 635	unsigned long flags = GB_OPERATION_FLAG_INCOMING;
 636
 637	/* Caller has made sure we at least have a message header. */
 638	request_size = size - sizeof(struct gb_operation_msg_hdr);
 639
 640	if (!id)
 641		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;
 642
 643	operation = gb_operation_create_common(connection, type,
 644					       request_size,
 645					       GB_REQUEST_TYPE_INVALID, /* response_size is ignored for incoming */
 646					       flags, GFP_ATOMIC);
 647	if (!operation)
 648		return NULL;
 649
 650	operation->id = id;
 651	memcpy(operation->request->header, data, size);
 652	trace_gb_operation_create_incoming(operation);
 653
 654	return operation;
 655}
 656
 657/*
 658 * Get an additional reference on an operation.
 659 */
 660void gb_operation_get(struct gb_operation *operation)
 661{
 662	kref_get(&operation->kref);
 663}
 664EXPORT_SYMBOL_GPL(gb_operation_get);
 665
 666/*
 667 * Destroy a previously created operation.
 668 */
 669static void _gb_operation_destroy(struct kref *kref)
 670{
 671	struct gb_operation *operation;
 672
 673	operation = container_of(kref, struct gb_operation, kref);
 674
 675	trace_gb_operation_destroy(operation);
 676
 677	if (operation->response)
 678		gb_operation_message_free(operation->response);
 679	gb_operation_message_free(operation->request);
 680
 681	kmem_cache_free(gb_operation_cache, operation);
 682}
 683
 684/*
 685 * Drop a reference on an operation, and destroy it when the last
 686 * one is gone.
 687 */
 688void gb_operation_put(struct gb_operation *operation)
 689{
 690	if (WARN_ON(!operation))
 691		return;
 692
 693	kref_put(&operation->kref, _gb_operation_destroy);
 694}
 695EXPORT_SYMBOL_GPL(gb_operation_put);
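
/*
 * Reference-count sketch (assumed usage, mirroring the get/put pairs
 * in this file): every gb_operation_get() must be matched by a
 * gb_operation_put() once the holder is done with the operation.
 *
 *	gb_operation_get(operation);	// e.g. before an async hand-off
 *	...
 *	gb_operation_put(operation);	// in the completion path
 */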
 696
 697/* Tell the requester we're done */
 698static void gb_operation_sync_callback(struct gb_operation *operation)
 699{
 700	complete(&operation->completion);
 701}
 702
 703/**
 704 * gb_operation_request_send() - send an operation request message
 705 * @operation:	the operation to initiate
 706 * @callback:	the operation completion callback
 707 * @timeout:	operation timeout in milliseconds, or zero for no timeout
 708 * @gfp:	the memory flags to use for any allocations
 709 *
 710 * The caller has filled in any payload so the request message is ready to go.
 711 * The callback function supplied will be called when the response message has
 712 * arrived, a unidirectional request has been sent, or the operation is
 713 * cancelled, indicating that the operation is complete. The callback function
 714 * can fetch the result of the operation using gb_operation_result() if
 715 * desired.
 716 *
 717 * Return: 0 if the request was successfully queued in the host-driver queues,
 718 * or a negative errno.
 719 */
 720int gb_operation_request_send(struct gb_operation *operation,
 721			      gb_operation_callback callback,
 722			      unsigned int timeout,
 723			      gfp_t gfp)
 724{
 725	struct gb_connection *connection = operation->connection;
 726	struct gb_operation_msg_hdr *header;
 727	unsigned int cycle;
 728	int ret;
 729
 730	if (gb_connection_is_offloaded(connection))
 731		return -EBUSY;
 732
 733	if (!callback)
 734		return -EINVAL;
 735
 736	/*
 737	 * Record the callback function, which is executed in
 738	 * non-atomic (workqueue) context when the final result
 739	 * of an operation has been set.
 740	 */
 741	operation->callback = callback;
 742
 743	/*
 744	 * Assign the operation's id, and store it in the request header.
 745	 * Zero is a reserved operation id for unidirectional operations.
 746	 */
 747	if (gb_operation_is_unidirectional(operation)) {
 748		operation->id = 0;
 749	} else {
 750		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
 751		operation->id = (u16)(cycle % U16_MAX + 1);	/* 1..U16_MAX */
 752	}
 753
 754	header = operation->request->header;
 755	header->operation_id = cpu_to_le16(operation->id);
 756
 757	gb_operation_result_set(operation, -EINPROGRESS);
 758
 759	/*
 760	 * Get an extra reference on the operation. It'll be dropped when the
 761	 * operation completes.
 762	 */
 763	gb_operation_get(operation);
 764	ret = gb_operation_get_active(operation);
 765	if (ret)
 766		goto err_put;
 767
 768	ret = gb_message_send(operation->request, gfp);
 769	if (ret)
 770		goto err_put_active;
 771
 772	if (timeout) {
 773		operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
 774		add_timer(&operation->timer);
 775	}
 776
 777	return 0;
 778
 779err_put_active:
 780	gb_operation_put_active(operation);
 781err_put:
 782	gb_operation_put(operation);
 783
 784	return ret;
 785}
 786EXPORT_SYMBOL_GPL(gb_operation_request_send);
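
/*
 * Asynchronous usage sketch (hypothetical: EXAMPLE_TYPE, the example
 * payload structs and example_handle() are illustrative only):
 *
 *	static void example_callback(struct gb_operation *op)
 *	{
 *		if (!gb_operation_result(op))
 *			example_handle(op->response->payload);
 *
 *		gb_operation_put(op);	// drop the creator's reference
 *	}
 *
 *	op = gb_operation_create(connection, EXAMPLE_TYPE,
 *				 sizeof(struct example_req),
 *				 sizeof(struct example_resp), GFP_KERNEL);
 *	if (!op)
 *		return -ENOMEM;
 *
 *	memcpy(op->request->payload, &req, sizeof(req));
 *
 *	ret = gb_operation_request_send(op, example_callback, 0, GFP_KERNEL);
 *	if (ret)
 *		gb_operation_put(op);	// send never started; drop it now
 */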
 787
 788/*
 789 * Send a synchronous operation.  This function is expected to
 790	 * block, returning only when the response has arrived (or an
 791	 * error is detected).  The return value is the result of the
 792 * operation.
 793 */
 794int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
 795					   unsigned int timeout)
 796{
 797	int ret;
 798
 799	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
 800					timeout, GFP_KERNEL);
 801	if (ret)
 802		return ret;
 803
 804	ret = wait_for_completion_interruptible(&operation->completion);
 805	if (ret < 0) {
 806		/* Cancel the operation if interrupted */
 807		gb_operation_cancel(operation, -ECANCELED);
 808	}
 809
 810	return gb_operation_result(operation);
 811}
 812EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);
 813
 814/*
 815 * Send a response for an incoming operation request.  A non-zero
 816 * errno indicates a failed operation.
 817 *
 818 * If there is any response payload, the incoming request handler is
 819	 * responsible for allocating the response message.  Otherwise the
 820	 * handler can simply supply the result errno; this function will
 821 * allocate the response message if necessary.
 822 */
 823static int gb_operation_response_send(struct gb_operation *operation,
 824				      int errno)
 825{
 826	struct gb_connection *connection = operation->connection;
 827	int ret;
 828
 829	if (!operation->response &&
 830	    !gb_operation_is_unidirectional(operation)) {
 831		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
 832			return -ENOMEM;
 833	}
 834
 835	/* Record the result */
 836	if (!gb_operation_result_set(operation, errno)) {
 837		dev_err(&connection->hd->dev, "request result already set\n");
 838		return -EIO;	/* Shouldn't happen */
 839	}
 840
 841	/* Sender of request does not care about response. */
 842	if (gb_operation_is_unidirectional(operation))
 843		return 0;
 844
 845	/* Reference will be dropped when message has been sent. */
 846	gb_operation_get(operation);
 847	ret = gb_operation_get_active(operation);
 848	if (ret)
 849		goto err_put;
 850
 851	/* Fill in the response header and send it */
 852	operation->response->header->result = gb_operation_errno_map(errno);
 853
 854	ret = gb_message_send(operation->response, GFP_KERNEL);
 855	if (ret)
 856		goto err_put_active;
 857
 858	return 0;
 859
 860err_put_active:
 861	gb_operation_put_active(operation);
 862err_put:
 863	gb_operation_put(operation);
 864
 865	return ret;
 866}
 867
 868/*
 869 * This function is called when a message send request has completed.
 870 */
 871void greybus_message_sent(struct gb_host_device *hd,
 872			  struct gb_message *message, int status)
 873{
 874	struct gb_operation *operation = message->operation;
 875	struct gb_connection *connection = operation->connection;
 876
 877	/*
 878	 * If the message was a response, we just need to drop our
 879	 * reference to the operation.  If an error occurred, report
 880	 * it.
 881	 *
 882	 * For requests, if there's no error and the operation is not
 883	 * unidirectional, there's nothing more to do until the response
 884	 * arrives. If an error occurred attempting to send it, or if the
 885	 * operation is unidirectional, record the result of the operation and
 886	 * schedule its completion.
 887	 */
 888	if (message == operation->response) {
 889		if (status) {
 890			dev_err(&connection->hd->dev,
 891				"%s: error sending response 0x%02x: %d\n",
 892				connection->name, operation->type, status);
 893		}
 894
 895		gb_operation_put_active(operation);
 896		gb_operation_put(operation);
 897	} else if (status || gb_operation_is_unidirectional(operation)) {
 898		if (gb_operation_result_set(operation, status)) {
 899			queue_work(gb_operation_completion_wq,
 900				   &operation->work);
 901		}
 902	}
 903}
 904EXPORT_SYMBOL_GPL(greybus_message_sent);
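
/*
 * Host-driver sketch (assumed shape of a transport's TX-complete path;
 * example_tx_done() is hypothetical):
 *
 *	static void example_tx_done(struct gb_host_device *hd,
 *				    struct gb_message *message, int status)
 *	{
 *		greybus_message_sent(hd, message, status);	// 0 or -errno
 *	}
 *
 * After this call the core may drop the last reference to the
 * operation, so the transport should not touch the message again.
 */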
 905
 906/*
 907 * We've received data on a connection, and it doesn't look like a
 908 * response, so we assume it's a request.
 909 *
 910 * This is called in interrupt context, so just copy the incoming
 911 * data into the request buffer and handle the rest via workqueue.
 912 */
 913static void gb_connection_recv_request(struct gb_connection *connection,
 914				const struct gb_operation_msg_hdr *header,
 915				void *data, size_t size)
 916{
 917	struct gb_operation *operation;
 918	u16 operation_id;
 919	u8 type;
 920	int ret;
 921
 922	operation_id = le16_to_cpu(header->operation_id);
 923	type = header->type;
 924
 925	operation = gb_operation_create_incoming(connection, operation_id,
 926						 type, data, size);
 927	if (!operation) {
 928		dev_err(&connection->hd->dev,
 929			"%s: can't create incoming operation\n",
 930			connection->name);
 931		return;
 932	}
 933
 934	ret = gb_operation_get_active(operation);
 935	if (ret) {
 936		gb_operation_put(operation);
 937		return;
 938	}
 939	trace_gb_message_recv_request(operation->request);
 940
 941	/*
 942	 * The initial reference to the operation will be dropped when the
 943	 * request handler returns.
 944	 */
 945	if (gb_operation_result_set(operation, -EINPROGRESS))
 946		queue_work(connection->wq, &operation->work);
 947}
 948
 949/*
 950 * We've received data that appears to be an operation response
 951 * message.  Look up the operation, and record that we've received
 952 * its response.
 953 *
 954 * This is called in interrupt context, so just copy the incoming
 955 * data into the response buffer and handle the rest via workqueue.
 956 */
 957static void gb_connection_recv_response(struct gb_connection *connection,
 958				const struct gb_operation_msg_hdr *header,
 959				void *data, size_t size)
 960{
 961	struct gb_operation *operation;
 962	struct gb_message *message;
 963	size_t message_size;
 964	u16 operation_id;
 965	int errno;
 966
 967	operation_id = le16_to_cpu(header->operation_id);
 968
 969	if (!operation_id) {
 970		dev_err_ratelimited(&connection->hd->dev,
 971				    "%s: invalid response id 0 received\n",
 972				    connection->name);
 973		return;
 974	}
 975
 976	operation = gb_operation_find_outgoing(connection, operation_id);
 977	if (!operation) {
 978		dev_err_ratelimited(&connection->hd->dev,
 979				    "%s: unexpected response id 0x%04x received\n",
 980				    connection->name, operation_id);
 981		return;
 982	}
 983
 984	errno = gb_operation_status_map(header->result);
 985	message = operation->response;
 986	message_size = sizeof(*header) + message->payload_size;
 987	if (!errno && size > message_size) {
 988		dev_err_ratelimited(&connection->hd->dev,
 989				    "%s: malformed response 0x%02x received (%zu > %zu)\n",
 990				    connection->name, header->type,
 991				    size, message_size);
 992		errno = -EMSGSIZE;
 993	} else if (!errno && size < message_size) {
 994		if (gb_operation_short_response_allowed(operation)) {
 995			message->payload_size = size - sizeof(*header);
 996		} else {
 997			dev_err_ratelimited(&connection->hd->dev,
 998					    "%s: short response 0x%02x received (%zu < %zu)\n",
 999					    connection->name, header->type,
1000					    size, message_size);
1001			errno = -EMSGSIZE;
1002		}
1003	}
1004
1005	/* We must ignore the payload if a bad status is returned */
1006	if (errno)
1007		size = sizeof(*header);
1008
1009	/* The rest will be handled in work queue context */
1010	if (gb_operation_result_set(operation, errno)) {
1011		memcpy(message->buffer, data, size);
1012
1013		trace_gb_message_recv_response(message);
1014
1015		queue_work(gb_operation_completion_wq, &operation->work);
1016	}
1017
1018	gb_operation_put(operation);
1019}
1020
1021/*
1022	 * Handle data arriving on a connection.  As soon as we return, the
1023	 * supplied data buffer will be reused (so unless we do something
1024	 * with it, the data is effectively dropped).
1025 */
1026void gb_connection_recv(struct gb_connection *connection,
1027			void *data, size_t size)
1028{
1029	struct gb_operation_msg_hdr header;
1030	struct device *dev = &connection->hd->dev;
1031	size_t msg_size;
1032
1033	if (connection->state == GB_CONNECTION_STATE_DISABLED ||
1034	    gb_connection_is_offloaded(connection)) {
1035		dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
1036				     connection->name, size);
1037		return;
1038	}
1039
1040	if (size < sizeof(header)) {
1041		dev_err_ratelimited(dev, "%s: short message received\n",
1042				    connection->name);
1043		return;
1044	}
1045
1046	/* Use memcpy as data may be unaligned */
1047	memcpy(&header, data, sizeof(header));
1048	msg_size = le16_to_cpu(header.size);
1049	if (size < msg_size) {
1050		dev_err_ratelimited(dev,
1051				    "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
1052				    connection->name,
1053				    le16_to_cpu(header.operation_id),
1054				    header.type, size, msg_size);
1055		return;		/* XXX Should still complete operation */
1056	}
1057
1058	if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
1059		gb_connection_recv_response(connection, &header, data,
1060					    msg_size);
1061	} else {
1062		gb_connection_recv_request(connection, &header, data,
1063					   msg_size);
1064	}
1065}
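
/*
 * Host-driver sketch (assumed receive path; the lookup helper is
 * hypothetical): a transport hands each complete message it receives
 * to the core, which dispatches it above as a request or a response.
 *
 *	static void example_rx(struct gb_host_device *hd, u16 cport_id,
 *			       void *data, size_t size)
 *	{
 *		struct gb_connection *connection;
 *
 *		connection = example_lookup_connection(hd, cport_id);
 *		if (connection)
 *			gb_connection_recv(connection, data, size);
 *	}
 */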
1066
1067/*
1068 * Cancel an outgoing operation synchronously, and record the given error to
1069 * indicate why.
1070 */
1071void gb_operation_cancel(struct gb_operation *operation, int errno)
1072{
1073	if (WARN_ON(gb_operation_is_incoming(operation)))
1074		return;
1075
1076	if (gb_operation_result_set(operation, errno)) {
1077		gb_message_cancel(operation->request);
1078		queue_work(gb_operation_completion_wq, &operation->work);
1079	}
1080	trace_gb_message_cancel_outgoing(operation->request);
1081
1082	atomic_inc(&operation->waiters);
1083	wait_event(gb_operation_cancellation_queue,
1084		   !gb_operation_is_active(operation));
1085	atomic_dec(&operation->waiters);
1086}
1087EXPORT_SYMBOL_GPL(gb_operation_cancel);
1088
1089/*
1090 * Cancel an incoming operation synchronously. Called during connection tear
1091 * down.
1092 */
1093void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
1094{
1095	if (WARN_ON(!gb_operation_is_incoming(operation)))
1096		return;
1097
1098	if (!gb_operation_is_unidirectional(operation)) {
1099		/*
1100		 * Make sure the request handler has submitted the response
1101		 * before cancelling it.
1102		 */
1103		flush_work(&operation->work);
1104		if (!gb_operation_result_set(operation, errno))
1105			gb_message_cancel(operation->response);
1106	}
1107	trace_gb_message_cancel_incoming(operation->response);
1108
1109	atomic_inc(&operation->waiters);
1110	wait_event(gb_operation_cancellation_queue,
1111		   !gb_operation_is_active(operation));
1112	atomic_dec(&operation->waiters);
1113}
1114
1115/**
1116 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
1117 * @connection: the Greybus connection to send this to
1118 * @type: the type of operation to send
1119 * @request: pointer to a memory buffer to copy the request from
1120 * @request_size: size of @request
1121 * @response: pointer to a memory buffer to copy the response to
1122	 * @response_size: size of @response
1123 * @timeout: operation timeout in milliseconds
1124 *
1125 * This function implements a simple synchronous Greybus operation.  It sends
1126 * the provided operation request and waits (sleeps) until the corresponding
1127 * operation response message has been successfully received, or an error
1128 * occurs.  @request and @response are buffers to hold the request and response
1129 * data respectively, and if they are not NULL, their size must be specified in
1130 * @request_size and @response_size.
1131 *
1132 * If a response payload is to come back, and @response is not NULL,
1133	 * @response_size bytes will be copied into @response if the operation
1134 * is successful.
1135 *
1136 * If there is an error, the response buffer is left alone.
1137 */
1138int gb_operation_sync_timeout(struct gb_connection *connection, int type,
1139			      void *request, int request_size,
1140			      void *response, int response_size,
1141			      unsigned int timeout)
1142{
1143	struct gb_operation *operation;
1144	int ret;
1145
1146	if ((response_size && !response) ||
1147	    (request_size && !request))
1148		return -EINVAL;
1149
1150	operation = gb_operation_create(connection, type,
1151					request_size, response_size,
1152					GFP_KERNEL);
1153	if (!operation)
1154		return -ENOMEM;
1155
1156	if (request_size)
1157		memcpy(operation->request->payload, request, request_size);
1158
1159	ret = gb_operation_request_send_sync_timeout(operation, timeout);
1160	if (ret) {
1161		dev_err(&connection->hd->dev,
1162			"%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
1163			connection->name, operation->id, type, ret);
1164	} else {
1165		if (response_size) {
1166			memcpy(response, operation->response->payload,
1167			       response_size);
1168		}
1169	}
1170
1171	gb_operation_put(operation);
1172
1173	return ret;
1174}
1175EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
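
/*
 * Synchronous usage sketch (hypothetical operation type and payload
 * structs; the 1000 ms timeout is an arbitrary choice):
 *
 *	struct example_req req = { .id = cpu_to_le16(1) };
 *	struct example_resp resp;
 *	int ret;
 *
 *	ret = gb_operation_sync_timeout(connection, EXAMPLE_TYPE,
 *					&req, sizeof(req),
 *					&resp, sizeof(resp), 1000);
 *	if (ret)
 *		return ret;	// resp was left untouched
 */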
1176
1177/**
1178 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
1179 * @connection:		connection to use
1180 * @type:		type of operation to send
1181 * @request:		memory buffer to copy the request from
1182 * @request_size:	size of @request
1183 * @timeout:		send timeout in milliseconds
1184 *
1185 * Initiate a unidirectional operation by sending a request message and
1186 * waiting for it to be acknowledged as sent by the host device.
1187 *
1188 * Note that successful send of a unidirectional operation does not imply that
1189	 * the request has actually reached the remote end of the connection.
1190 */
1191int gb_operation_unidirectional_timeout(struct gb_connection *connection,
1192					int type, void *request,
1193					int request_size,
1194					unsigned int timeout)
1195{
1196	struct gb_operation *operation;
1197	int ret;
1198
1199	if (request_size && !request)
1200		return -EINVAL;
1201
1202	operation = gb_operation_create_flags(connection, type,
1203					      request_size, 0,
1204					      GB_OPERATION_FLAG_UNIDIRECTIONAL,
1205					      GFP_KERNEL);
1206	if (!operation)
1207		return -ENOMEM;
1208
1209	if (request_size)
1210		memcpy(operation->request->payload, request, request_size);
1211
1212	ret = gb_operation_request_send_sync_timeout(operation, timeout);
1213	if (ret) {
1214		dev_err(&connection->hd->dev,
1215			"%s: unidirectional operation of type 0x%02x failed: %d\n",
1216			connection->name, type, ret);
1217	}
1218
1219	gb_operation_put(operation);
1220
1221	return ret;
1222}
1223EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
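
/*
 * Unidirectional usage sketch (hypothetical type and payload): no
 * response message exists, so success only means the host device
 * accepted and sent the request.
 *
 *	ret = gb_operation_unidirectional_timeout(connection, EXAMPLE_EVENT,
 *						  &event, sizeof(event), 1000);
 */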
1224
1225int __init gb_operation_init(void)
1226{
1227	gb_message_cache = kmem_cache_create("gb_message_cache",
1228					     sizeof(struct gb_message), 0, 0,
1229					     NULL);
1230	if (!gb_message_cache)
1231		return -ENOMEM;
1232
1233	gb_operation_cache = kmem_cache_create("gb_operation_cache",
1234					       sizeof(struct gb_operation), 0,
1235					       0, NULL);
1236	if (!gb_operation_cache)
1237		goto err_destroy_message_cache;
1238
1239	gb_operation_completion_wq = alloc_workqueue("greybus_completion",
1240						     0, 0);
1241	if (!gb_operation_completion_wq)
1242		goto err_destroy_operation_cache;
1243
1244	return 0;
1245
1246err_destroy_operation_cache:
1247	kmem_cache_destroy(gb_operation_cache);
1248	gb_operation_cache = NULL;
1249err_destroy_message_cache:
1250	kmem_cache_destroy(gb_message_cache);
1251	gb_message_cache = NULL;
1252
1253	return -ENOMEM;
1254}
1255
1256void gb_operation_exit(void)
1257{
1258	destroy_workqueue(gb_operation_completion_wq);
1259	gb_operation_completion_wq = NULL;
1260	kmem_cache_destroy(gb_operation_cache);
1261	gb_operation_cache = NULL;
1262	kmem_cache_destroy(gb_message_cache);
1263	gb_message_cache = NULL;
1264}