// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
					int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);
	switch (connection->state) {
	case GB_CONNECTION_STATE_ENABLED:
		break;
	case GB_CONNECTION_STATE_ENABLED_TX:
		if (gb_operation_is_incoming(operation))
			goto err_unlock;
		break;
	case GB_CONNECTION_STATE_DISCONNECTING:
		if (!gb_operation_is_core(operation))
			goto err_unlock;
		break;
	default:
		goto err_unlock;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	trace_gb_operation_get_active(operation);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;

err_unlock:
	spin_unlock_irqrestore(&connection->lock, flags);

	return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	trace_gb_operation_put_active(operation);

	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}
	spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead.  Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight.  It should be the first result value
		 * set after the initial -EBADR.  Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation.  Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value.  Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ; /* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result;	/* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}

int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);
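
/*
 * Illustrative timeline of the errno state machine described above
 * (this comment is a sketch, not part of the original driver):
 *
 *	gb_operation_create...():	operation->errno == -EBADR
 *	gb_operation_request_send():	result_set(-EINPROGRESS)
 *	response/timeout/cancellation:	first result_set(<errno>) wins
 *	gb_operation_result():		returns that final, sticky value
 *
 * Any other transition (setting -EINPROGRESS twice, or setting -EBADR
 * explicitly) triggers a warning and records -EILSEQ instead.
 */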

/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
				!gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
					connection->hd_cport_id,
					message,
					gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
		return;
	}
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler. The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this.  The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;
	int ret;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation)) {
		gb_operation_request_handle(operation);
	} else {
		ret = del_timer_sync(&operation->timer);
		if (!ret) {
			/* Cancel request message if scheduled by timeout. */
			if (gb_operation_result(operation) == -ETIMEDOUT)
				gb_message_cancel(operation->request);
		}

		operation->callback(operation);
	}

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}

static void gb_operation_timeout(struct timer_list *t)
{
	struct gb_operation *operation = from_timer(operation, t, timer);

	if (gb_operation_result_set(operation, -ETIMEDOUT)) {
		/*
		 * A stuck request message will be cancelled from the
		 * workqueue.
		 */
		queue_work(gb_operation_completion_wq, &operation->work);
	}
}

static void gb_operation_message_init(struct gb_host_device *hd,
				struct gb_message *message, u16 operation_id,
				size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
	 * arriving data so there's no need to initialize the message header.
	 */
	if (type != GB_REQUEST_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent.  For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero.  It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header.  The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation.  The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
				size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
				message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message.  Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}
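
/*
 * Worked example of the layout above (a sketch, not part of the
 * original file): with the 8-byte struct gb_operation_msg_hdr and a
 * 4-byte payload, gb_operation_message_alloc() yields one 12-byte
 * buffer:
 *
 *	message->buffer:   | header (8 bytes) | payload (4 bytes) |
 *	message->header  == message->buffer
 *	message->payload == header + 1 (i.e. buffer + sizeof(*header))
 *	header->size     == cpu_to_le16(12)
 */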

static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire.  Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}

bool gb_operation_response_alloc(struct gb_operation *operation,
					size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated.  The errno will be set before sending.  All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
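
/*
 * Illustrative request-handler sketch (not from the original file; the
 * "example" names and payload struct are hypothetical).  A protocol's
 * incoming-request handler allocates its own response when it has
 * payload to return; the value it returns becomes the operation status:
 *
 *	static int example_request_handler(struct gb_operation *op)
 *	{
 *		struct example_response *resp;
 *
 *		if (!gb_operation_response_alloc(op, sizeof(*resp),
 *						 GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		resp = op->response->payload;
 *		resp->value = cpu_to_le32(42);
 *
 *		return 0;
 *	}
 */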

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data.  The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC.  In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header.  Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol.  So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
				size_t request_size, size_t response_size,
				unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}

		timer_setup(&operation->timer, gb_operation_timeout, 0);
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;  /* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection.  The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only.  Both of
 * these are allowed to be 0.  Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
				u8 type, size_t request_size,
				size_t response_size, unsigned long flags,
				gfp_t gfp)
{
	struct gb_operation *operation;

	if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
		flags &= GB_OPERATION_FLAG_USER_MASK;

	operation = gb_operation_create_common(connection, type,
						request_size, response_size,
						flags, gfp);
	if (operation)
		trace_gb_operation_create(operation);

	return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);
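
/*
 * Typical construction of an outgoing operation (a sketch, not from
 * the original file; the request struct and type constant are
 * hypothetical):
 *
 *	struct example_request *req;
 *	struct gb_operation *op;
 *
 *	op = gb_operation_create_flags(connection, EXAMPLE_TYPE_SET,
 *				       sizeof(*req), 0, 0, GFP_KERNEL);
 *	if (!op)
 *		return -ENOMEM;
 *
 *	req = op->request->payload;
 *	req->value = cpu_to_le32(1);
 *
 * The operation is then handed to gb_operation_request_send() or one
 * of the synchronous helpers below, and eventually released with
 * gb_operation_put().
 */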

struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
				u8 type, size_t request_size,
				size_t response_size, unsigned long flags,
				gfp_t gfp)
{
	struct gb_operation *operation;

	flags |= GB_OPERATION_FLAG_CORE;

	operation = gb_operation_create_common(connection, type,
						request_size, response_size,
						flags, gfp);
	if (operation)
		trace_gb_operation_create_core(operation);

	return operation;
}
/* Do not export this function. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
				u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	operation = gb_operation_create_common(connection, type,
						request_size,
						GB_REQUEST_TYPE_INVALID,
						flags, GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);
	trace_gb_operation_create_incoming(operation);

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	trace_gb_operation_destroy(operation);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/**
 * gb_operation_request_send() - send an operation request message
 * @operation:	the operation to initiate
 * @callback:	the operation completion callback
 * @timeout:	operation timeout in milliseconds, or zero for no timeout
 * @gfp:	the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete. The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
				gb_operation_callback callback,
				unsigned int timeout,
				gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (gb_connection_is_offloaded(connection))
		return -EBUSY;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id for unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation. It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	if (timeout) {
		operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
		add_timer(&operation->timer);
	}

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);
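
/*
 * Asynchronous-send sketch (not from the original file; names are
 * hypothetical).  The callback runs from the completion workqueue once
 * the final result has been set; in this sketch it also drops the
 * reference taken when the operation was created:
 *
 *	static void example_callback(struct gb_operation *op)
 *	{
 *		int status = gb_operation_result(op);
 *
 *		...handle status and/or op->response->payload...
 *
 *		gb_operation_put(op);
 *	}
 *
 *	ret = gb_operation_request_send(op, example_callback,
 *					1000, GFP_KERNEL);
 */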

/*
 * Send a synchronous operation.  This function is expected to
 * block, returning only when the response has arrived or an
 * error is detected.  The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
						unsigned int timeout)
{
	int ret;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					timeout, GFP_KERNEL);
	if (ret)
		return ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request.  A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message.  Otherwise it
 * can simply supply the result errno; this function will
 * allocate the response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
					int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
			!gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
					struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation.  If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error and the operation is not
	 * unidirectional, there's nothing more to do until the response
	 * arrives. If an error occurred attempting to send it, or if the
	 * operation is unidirectional, record the result of the operation and
	 * schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}

		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status || gb_operation_is_unidirectional(operation)) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
					&operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				const struct gb_operation_msg_hdr *header,
				void *data, size_t size)
{
	struct gb_operation *operation;
	u16 operation_id;
	u8 type;
	int ret;

	operation_id = le16_to_cpu(header->operation_id);
	type = header->type;

	operation = gb_operation_create_incoming(connection, operation_id,
						type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message.  Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
				const struct gb_operation_msg_hdr *header,
				void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	size_t message_size;
	u16 operation_id;
	int errno;

	operation_id = le16_to_cpu(header->operation_id);

	if (!operation_id) {
		dev_err_ratelimited(&connection->hd->dev,
				"%s: invalid response id 0 received\n",
				connection->name);
		return;
	}

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err_ratelimited(&connection->hd->dev,
				"%s: unexpected response id 0x%04x received\n",
				connection->name, operation_id);
		return;
	}

	errno = gb_operation_status_map(header->result);
	message = operation->response;
	message_size = sizeof(*header) + message->payload_size;
	if (!errno && size > message_size) {
		dev_err_ratelimited(&connection->hd->dev,
				"%s: malformed response 0x%02x received (%zu > %zu)\n",
				connection->name, header->type,
				size, message_size);
		errno = -EMSGSIZE;
	} else if (!errno && size < message_size) {
		if (gb_operation_short_response_allowed(operation)) {
			message->payload_size = size - sizeof(*header);
		} else {
			dev_err_ratelimited(&connection->hd->dev,
					"%s: short response 0x%02x received (%zu < %zu)\n",
					connection->name, header->type,
					size, message_size);
			errno = -EMSGSIZE;
		}
	}

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*header);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(message->buffer, data, size);

		trace_gb_message_recv_response(message);

		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}
/*
 * Handle data arriving on a connection.  As soon as we return, the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
				void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	struct device *dev = &connection->hd->dev;
	size_t msg_size;

	if (connection->state == GB_CONNECTION_STATE_DISABLED ||
			gb_connection_is_offloaded(connection)) {
		dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
				connection->name, size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err_ratelimited(dev, "%s: short message received\n",
				connection->name);
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err_ratelimited(dev,
				"%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
				connection->name,
				le16_to_cpu(header.operation_id),
				header.type, size, msg_size);
		return;		/* XXX Should still complete operation */
	}

	if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
		gb_connection_recv_response(connection, &header, data,
						msg_size);
	} else {
		gb_connection_recv_request(connection, &header, data,
						msg_size);
	}
}

/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (WARN_ON(gb_operation_is_incoming(operation)))
		return;

	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		queue_work(gb_operation_completion_wq, &operation->work);
	}
	trace_gb_message_cancel_outgoing(operation->request);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
			!gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/*
 * Cancel an incoming operation synchronously. Called during connection tear
 * down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
	if (WARN_ON(!gb_operation_is_incoming(operation)))
		return;

	if (!gb_operation_is_unidirectional(operation)) {
		/*
		 * Make sure the request handler has submitted the response
		 * before cancelling it.
		 */
		flush_work(&operation->work);
		if (!gb_operation_result_set(operation, errno))
			gb_message_cancel(operation->response);
	}
	trace_gb_message_cancel_incoming(operation->response);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
			!gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation.  It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs.  @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size bytes will be copied into @response if the operation
 * is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
				void *request, int request_size,
				void *response, int response_size,
				unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
			connection->name, operation->id, type, ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
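
/*
 * Illustrative synchronous call (a sketch; the type constant and
 * structs are hypothetical):
 *
 *	struct example_request req = { .value = cpu_to_le32(1) };
 *	struct example_response resp;
 *	int ret;
 *
 *	ret = gb_operation_sync_timeout(connection, EXAMPLE_TYPE_GET,
 *					&req, sizeof(req),
 *					&resp, sizeof(resp), 1000);
 *	if (ret)
 *		return ret;
 */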

/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection:		connection to use
 * @type:		type of operation to send
 * @request:		memory buffer to copy the request from
 * @request_size:	size of @request
 * @timeout:		send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that successful send of a unidirectional operation does not imply that
 * the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
				int type, void *request, int request_size,
				unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if (request_size && !request)
		return -EINVAL;

	operation = gb_operation_create_flags(connection, type,
					request_size, 0,
					GB_OPERATION_FLAG_UNIDIRECTIONAL,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: unidirectional operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
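
/*
 * Fire-and-forget sketch (hypothetical type constant and struct):
 *
 *	struct example_event ev = { .id = cpu_to_le32(7) };
 *
 *	ret = gb_operation_unidirectional_timeout(connection,
 *					EXAMPLE_TYPE_EVENT,
 *					&ev, sizeof(ev), 1000);
 *
 * Success here only means the host device accepted the message for
 * transmission; no response message will ever arrive.
 */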

int __init gb_operation_init(void)
{
	gb_message_cache = kmem_cache_create("gb_message_cache",
				sizeof(struct gb_message), 0, 0, NULL);
	if (!gb_message_cache)
		return -ENOMEM;

	gb_operation_cache = kmem_cache_create("gb_operation_cache",
				sizeof(struct gb_operation), 0, 0, NULL);
	if (!gb_operation_cache)
		goto err_destroy_message_cache;

	gb_operation_completion_wq = alloc_workqueue("greybus_completion",
				0, 0);
	if (!gb_operation_completion_wq)
		goto err_destroy_operation_cache;

	return 0;

err_destroy_operation_cache:
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
err_destroy_message_cache:
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;

	return -ENOMEM;
}

void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_completion_wq);
	gb_operation_completion_wq = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}