v3.15
  1/*
  2 * Copyright (C) ST-Ericsson AB 2010
  3 * Author:	Sjur Brendeland
  4 * License terms: GNU General Public License (GPL) version 2
  5 */
  6
  7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
  8
  9#include <linux/stddef.h>
 10#include <linux/spinlock.h>
 11#include <linux/slab.h>
 12#include <asm/unaligned.h>
 13#include <net/caif/caif_layer.h>
 14#include <net/caif/cfsrvl.h>
 15#include <net/caif/cfpkt.h>
 16
 17#define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
 18#define RFM_SEGMENTATION_BIT 0x01
 19#define RFM_HEAD_SIZE 7
 20
 21static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
 22static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
 23
 24struct cfrfml {
 25	struct cfsrvl serv;
 26	struct cfpkt *incomplete_frm;
 27	int fragment_size;
 28	u8  seghead[6];
 29	u16 pdu_size;
 30	/* Protects serialized processing of packets */
 31	spinlock_t sync;
 32};
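From RFM_SEGMENTATION_BIT, RFM_HEAD_SIZE (7), the 6-byte seghead buffer and the get_unaligned_le16(seghead + 4) read in cfrfml_receive() below, the per-fragment head appears to be laid out roughly as follows. This annotated struct is only an illustrative sketch, not a definition taken from the CAIF headers:

/* Illustrative sketch only -- not a struct from the CAIF headers.
 * Bytes 1..6 are what this file stores in seghead[6]. */
struct rfm_head_sketch {
	u8 ctrl;	/* bit 0 = RFM_SEGMENTATION_BIT: more fragments follow */
	u8 head[4];	/* treated as opaque; matched across fragments in rfm_append() */
	u8 pdu_size[2];	/* little-endian PDU size, read via get_unaligned_le16(seghead + 4) */
};			/* 1 + 4 + 2 = RFM_HEAD_SIZE bytes */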
 33
 34static void cfrfml_release(struct cflayer *layer)
 35{
 36	struct cfsrvl *srvl = container_of(layer, struct cfsrvl, layer);
 37	struct cfrfml *rfml = container_obj(&srvl->layer);
 38
 39	if (rfml->incomplete_frm)
 40		cfpkt_destroy(rfml->incomplete_frm);
 41
 42	kfree(srvl);
 43}
 44
 45struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
 46			      int mtu_size)
 47{
 48	int tmp;
 49	struct cfrfml *this = kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
 50
 51	if (!this)
 52		return NULL;
 53
 54	cfsrvl_init(&this->serv, channel_id, dev_info, false);
 55	this->serv.release = cfrfml_release;
 56	this->serv.layer.receive = cfrfml_receive;
 57	this->serv.layer.transmit = cfrfml_transmit;
 58
 59	/* Round down to closest multiple of 16 */
 60	tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
 61	tmp *= 16;
 62
 63	this->fragment_size = tmp;
 64	spin_lock_init(&this->sync);
 65	snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
 66		"rfm%d", channel_id);
 67
 68	return &this->serv.layer;
 69}
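A stand-alone worked example of the fragment-size rounding above; the MTU values are made up for illustration:

#include <stdio.h>

#define RFM_HEAD_SIZE 7

/* Same arithmetic as cfrfml_create(): round the usable MTU down to a
 * multiple of 16 after reserving the RFM head and the 6-byte copied head. */
static int rfm_fragment_size(int mtu_size)
{
	int tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
	return tmp * 16;
}

int main(void)
{
	printf("%d\n", rfm_fragment_size(4050)); /* prints 4032 */
	printf("%d\n", rfm_fragment_size(1500)); /* prints 1472 */
	return 0;
}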
 70
 71static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
 72				struct cfpkt *pkt, int *err)
 73{
 74	struct cfpkt *tmppkt;
 75	*err = -EPROTO;
 76	/* n-th but not last segment */
 77
 78	if (cfpkt_extr_head(pkt, seghead, 6) < 0)
 79		return NULL;
 80
 81	/* Verify correct header */
 82	if (memcmp(seghead, rfml->seghead, 6) != 0)
 83		return NULL;
 84
 85	tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
 86			rfml->pdu_size + RFM_HEAD_SIZE);
 87
 88	/* If cfpkt_append fails, input pkts are not freed */
 89	*err = -ENOMEM;
 90	if (tmppkt == NULL)
 91		return NULL;
 92
 93	*err = 0;
 94	return tmppkt;
 95}
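The error convention used by rfm_append() and its caller, summarized as a descriptive comment:

/*
 * rfm_append() pre-loads *err with the code for the next possible failure,
 * so each failure point can simply return NULL:
 *   -EPROTO  -- the 6-byte head cannot be read, or it does not match the
 *               head recorded from the first fragment;
 *   -ENOMEM  -- cfpkt_append() cannot build the merged packet.  In that
 *               case the input packets are not freed here; cfrfml_receive()
 *               destroys whichever of pkt, tmppkt and incomplete_frm it
 *               still holds in its out: path.
 */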
 96
 97static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
 98{
 99	u8 tmp;
100	bool segmented;
101	int err;
102	u8 seghead[6];
103	struct cfrfml *rfml;
104	struct cfpkt *tmppkt = NULL;
105
106	caif_assert(layr->up != NULL);
107	caif_assert(layr->receive != NULL);
108	rfml = container_obj(layr);
109	spin_lock(&rfml->sync);
110
111	err = -EPROTO;
112	if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
113		goto out;
114	segmented = tmp & RFM_SEGMENTATION_BIT;
115
116	if (segmented) {
117		if (rfml->incomplete_frm == NULL) {
118			/* Initial Segment */
119			if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
120				goto out;
121
122			rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
123
124			if (cfpkt_erroneous(pkt))
125				goto out;
126			rfml->incomplete_frm = pkt;
127			pkt = NULL;
128		} else {
129
130			tmppkt = rfm_append(rfml, seghead, pkt, &err);
131			if (tmppkt == NULL)
132				goto out;
133
134			if (cfpkt_erroneous(tmppkt))
135				goto out;
136
137			rfml->incomplete_frm = tmppkt;
138
139
140			if (cfpkt_erroneous(tmppkt))
141				goto out;
142		}
143		err = 0;
144		goto out;
145	}
146
147	if (rfml->incomplete_frm) {
148
149		/* Last Segment */
150		tmppkt = rfm_append(rfml, seghead, pkt, &err);
151		if (tmppkt == NULL)
152			goto out;
153
154		if (cfpkt_erroneous(tmppkt))
155			goto out;
156
157		rfml->incomplete_frm = NULL;
158		pkt = tmppkt;
159		tmppkt = NULL;
160
161		/* Verify that length is correct */
162		err = -EPROTO;
163		if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
164			goto out;
165	}
166
167	err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);
168
169out:
170
171	if (err != 0) {
172		if (tmppkt)
173			cfpkt_destroy(tmppkt);
174		if (pkt)
175			cfpkt_destroy(pkt);
176		if (rfml->incomplete_frm)
177			cfpkt_destroy(rfml->incomplete_frm);
178		rfml->incomplete_frm = NULL;
179
180		pr_info("Connection error %d triggered on RFM link\n", err);
181
182		/* Trigger connection error upon failure.*/
183		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
184					rfml->serv.dev_info.id);
185	}
186	spin_unlock(&rfml->sync);
187
188	if (unlikely(err == -EAGAIN))
189		/* It is not possible to recover after drop of a fragment */
190		err = -EIO;
191
192	return err;
193}
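The reassembly cases handled by cfrfml_receive() above, summarized as a descriptive comment:

/*
 * cfrfml_receive() reassembly cases:
 *
 *   segmentation bit set, incomplete_frm == NULL   -> first fragment:
 *       record the 6-byte head and pdu_size, park the packet in
 *       incomplete_frm;
 *   segmentation bit set, incomplete_frm != NULL   -> middle fragment:
 *       rfm_append() verifies the head and concatenates;
 *   segmentation bit clear, incomplete_frm != NULL -> last fragment:
 *       append, check the reassembled length against pdu_size, then hand
 *       the complete PDU to layer.up->receive();
 *   segmentation bit clear, incomplete_frm == NULL -> unsegmented PDU,
 *       delivered directly.
 *
 * Any error destroys the partial frame and signals
 * CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND to the layer above.
 */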
194
195
196static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
197{
198	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE);
199
200	/* Add info for MUX-layer to route the packet out. */
201	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
202
203	/*
204	 * To optimize alignment, we add up the size of CAIF header before
205	 * payload.
206	 */
207	cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
208	cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;
209
210	return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
211}
212
213static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
214{
215	int err;
216	u8 seg;
217	u8 head[6];
218	struct cfpkt *rearpkt = NULL;
219	struct cfpkt *frontpkt = pkt;
220	struct cfrfml *rfml = container_obj(layr);
221
222	caif_assert(layr->dn != NULL);
223	caif_assert(layr->dn->transmit != NULL);
224
225	if (!cfsrvl_ready(&rfml->serv, &err))
226		goto out;
227
228	err = -EPROTO;
229	if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
230		goto out;
231
232	err = 0;
233	if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
234		err = cfpkt_peek_head(pkt, head, 6);
235
236	if (err < 0)
237		goto out;
238
239	while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
240
241		seg = 1;
242		err = -EPROTO;
243
244		if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
245			goto out;
246		/*
247		 * On OOM error cfpkt_split returns NULL.
248		 *
249		 * NOTE: Segmented pdu is not correctly aligned.
250		 * This has negative performance impact.
251		 */
252
253		rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
254		if (rearpkt == NULL)
255			goto out;
256
257		err = cfrfml_transmit_segment(rfml, frontpkt);
258
259		if (err != 0) {
260			frontpkt = NULL;
261			goto out;
262		}
263
264		frontpkt = rearpkt;
265		rearpkt = NULL;
266
267		err = -ENOMEM;
268		if (frontpkt == NULL)
269			goto out;
270		err = -EPROTO;
271		if (cfpkt_add_head(frontpkt, head, 6) < 0)
272			goto out;
273
274	}
275
276	seg = 0;
277	err = -EPROTO;
278
279	if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
280		goto out;
281
282	err = cfrfml_transmit_segment(rfml, frontpkt);
283
284	frontpkt = NULL;
285out:
286
287	if (err != 0) {
288		pr_info("Connection error %d triggered on RFM link\n", err);
289		/* Trigger connection error upon failure.*/
290
291		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
292					rfml->serv.dev_info.id);
293
294		if (rearpkt)
295			cfpkt_destroy(rearpkt);
296
297		if (frontpkt)
298			cfpkt_destroy(frontpkt);
299	}
300
301	return err;
302}
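The fragment stream produced by cfrfml_transmit() above, sketched as a descriptive comment (fragment boundaries are illustrative):

/*
 * cfrfml_transmit() splits an oversized PDU roughly as follows:
 *
 *   [seg=1 | start of PDU, including its own 6-byte head]  <- cfpkt_split()
 *   [seg=1 | copy of the 6-byte head | next chunk        ]     at
 *   ...                                                        fragment_size
 *   [seg=0 | copy of the 6-byte head | remainder         ]  <- final fragment
 *
 * The 6 bytes peeked into head[] before the loop are re-inserted in front
 * of every later fragment so that rfm_append() on the receive side can
 * match them against the head recorded from the first fragment.
 */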
v3.1
  1/*
  2 * Copyright (C) ST-Ericsson AB 2010
  3 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
  4 * License terms: GNU General Public License (GPL) version 2
  5 */
  6
  7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
  8
  9#include <linux/stddef.h>
 10#include <linux/spinlock.h>
 11#include <linux/slab.h>
 12#include <asm/unaligned.h>
 13#include <net/caif/caif_layer.h>
 14#include <net/caif/cfsrvl.h>
 15#include <net/caif/cfpkt.h>
 16
 17#define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
 18#define RFM_SEGMENTATION_BIT 0x01
 19#define RFM_HEAD_SIZE 7
 20
 21static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
 22static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
 23
 24struct cfrfml {
 25	struct cfsrvl serv;
 26	struct cfpkt *incomplete_frm;
 27	int fragment_size;
 28	u8  seghead[6];
 29	u16 pdu_size;
 30	/* Protects serialized processing of packets */
 31	spinlock_t sync;
 32};
 33
 34static void cfrfml_release(struct cflayer *layer)
 35{
 36	struct cfsrvl *srvl = container_of(layer, struct cfsrvl, layer);
 37	struct cfrfml *rfml = container_obj(&srvl->layer);
 38
 39	if (rfml->incomplete_frm)
 40		cfpkt_destroy(rfml->incomplete_frm);
 41
 42	kfree(srvl);
 43}
 44
 45struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
 46					int mtu_size)
 47{
 48	int tmp;
 49	struct cfrfml *this =
 50		kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
 51
 52	if (!this) {
 53		pr_warn("Out of memory\n");
 54		return NULL;
 55	}
 56
 57	cfsrvl_init(&this->serv, channel_id, dev_info, false);
 58	this->serv.release = cfrfml_release;
 59	this->serv.layer.receive = cfrfml_receive;
 60	this->serv.layer.transmit = cfrfml_transmit;
 61
 62	/* Round down to closest multiple of 16 */
 63	tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
 64	tmp *= 16;
 65
 66	this->fragment_size = tmp;
 67	spin_lock_init(&this->sync);
 68	snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
 69		"rfm%d", channel_id);
 70
 71	return &this->serv.layer;
 72}
 73
 74static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
 75			struct cfpkt *pkt, int *err)
 76{
 77	struct cfpkt *tmppkt;
 78	*err = -EPROTO;
 79	/* n-th but not last segment */
 80
 81	if (cfpkt_extr_head(pkt, seghead, 6) < 0)
 82		return NULL;
 83
 84	/* Verify correct header */
 85	if (memcmp(seghead, rfml->seghead, 6) != 0)
 86		return NULL;
 87
 88	tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
 89			rfml->pdu_size + RFM_HEAD_SIZE);
 90
 91	/* If cfpkt_append fails, input pkts are not freed */
 92	*err = -ENOMEM;
 93	if (tmppkt == NULL)
 94		return NULL;
 95
 96	*err = 0;
 97	return tmppkt;
 98}
 99
100static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
101{
102	u8 tmp;
103	bool segmented;
104	int err;
105	u8 seghead[6];
106	struct cfrfml *rfml;
107	struct cfpkt *tmppkt = NULL;
108
109	caif_assert(layr->up != NULL);
110	caif_assert(layr->receive != NULL);
111	rfml = container_obj(layr);
112	spin_lock(&rfml->sync);
113
114	err = -EPROTO;
115	if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
116		goto out;
117	segmented = tmp & RFM_SEGMENTATION_BIT;
118
119	if (segmented) {
120		if (rfml->incomplete_frm == NULL) {
121			/* Initial Segment */
122			if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
123				goto out;
124
125			rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
126
127			if (cfpkt_erroneous(pkt))
128				goto out;
129			rfml->incomplete_frm = pkt;
130			pkt = NULL;
131		} else {
132
133			tmppkt = rfm_append(rfml, seghead, pkt, &err);
134			if (tmppkt == NULL)
135				goto out;
136
137			if (cfpkt_erroneous(tmppkt))
138				goto out;
139
140			rfml->incomplete_frm = tmppkt;
141
142
143			if (cfpkt_erroneous(tmppkt))
144				goto out;
145		}
146		err = 0;
147		goto out;
148	}
149
150	if (rfml->incomplete_frm) {
151
152		/* Last Segment */
153		tmppkt = rfm_append(rfml, seghead, pkt, &err);
154		if (tmppkt == NULL)
155			goto out;
156
157		if (cfpkt_erroneous(tmppkt))
158			goto out;
159
160		rfml->incomplete_frm = NULL;
161		pkt = tmppkt;
162		tmppkt = NULL;
163
164		/* Verify that length is correct */
165	err = -EPROTO;
166		if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
167			goto out;
168	}
169
170	err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);
171
172out:
173
174	if (err != 0) {
175		if (tmppkt)
176			cfpkt_destroy(tmppkt);
177		if (pkt)
178			cfpkt_destroy(pkt);
179		if (rfml->incomplete_frm)
180			cfpkt_destroy(rfml->incomplete_frm);
181		rfml->incomplete_frm = NULL;
182
183		pr_info("Connection error %d triggered on RFM link\n", err);
184
185		/* Trigger connection error upon failure.*/
186		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
187					rfml->serv.dev_info.id);
188	}
189	spin_unlock(&rfml->sync);
190	return err;
191}
192
193
194static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
195{
196	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
197
198	/* Add info for MUX-layer to route the packet out. */
199	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
200
201	/*
202	 * To optimize alignment, we add up the size of CAIF header before
203	 * payload.
204	 */
205	cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
206	cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;
207
208	return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
209}
210
211static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
212{
213	int err;
214	u8 seg;
215	u8 head[6];
216	struct cfpkt *rearpkt = NULL;
217	struct cfpkt *frontpkt = pkt;
218	struct cfrfml *rfml = container_obj(layr);
219
220	caif_assert(layr->dn != NULL);
221	caif_assert(layr->dn->transmit != NULL);
222
223	if (!cfsrvl_ready(&rfml->serv, &err))
224		return err;
225
226	err = -EPROTO;
227	if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
228		goto out;
229
230	err = 0;
231	if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
232		err = cfpkt_peek_head(pkt, head, 6);
233
234	if (err < 0)
235		goto out;
236
237	while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
238
239		seg = 1;
240		err = -EPROTO;
241
242		if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
243			goto out;
244		/*
245		 * On OOM error cfpkt_split returns NULL.
246		 *
247		 * NOTE: Segmented pdu is not correctly aligned.
248		 * This has negative performance impact.
249		 */
250
251		rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
252		if (rearpkt == NULL)
253			goto out;
254
255		err = cfrfml_transmit_segment(rfml, frontpkt);
256
257		if (err != 0)
258			goto out;
259		frontpkt = rearpkt;
260		rearpkt = NULL;
261
262		err = -ENOMEM;
263		if (frontpkt == NULL)
264			goto out;
265		err = -EPROTO;
266		if (cfpkt_add_head(frontpkt, head, 6) < 0)
267			goto out;
268
269	}
270
271	seg = 0;
272	err = -EPROTO;
273
274	if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
275		goto out;
276
277	err = cfrfml_transmit_segment(rfml, frontpkt);
278
279	frontpkt = NULL;
280out:
281
282	if (err != 0) {
283		pr_info("Connection error %d triggered on RFM link\n", err);
284		/* Trigger connection error upon failure.*/
285
286		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
287					rfml->serv.dev_info.id);
288
289		if (rearpkt)
290			cfpkt_destroy(rearpkt);
291
292		if (frontpkt && frontpkt != pkt) {
293
294			cfpkt_destroy(frontpkt);
295			/*
296			 * Socket layer will free the original packet,
297			 * but this packet may already be sent and
298			 * freed. So we have to return 0 in this case
299			 * to avoid socket layer to re-free this packet.
300			 * The return of shutdown indication will
301			 * cause connection to be invalidated anyhow.
302			 */
303			err = 0;
304		}
305	}
306
307	return err;
308}