// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************

    AudioScience HPI driver
    Copyright (C) 1997-2014  AudioScience Inc. <support@audioscience.com>


Extended Message Function With Response Caching

(C) Copyright AudioScience Inc. 2002
*****************************************************************************/
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpi_version.h"
#include "hpimsginit.h"
#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"

static const struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};
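
/*
 * The table above is populated by "hpipcida.h": each entry is expected to
 * carry, in its driver_data field, the HPI handler entry point for that
 * adapter family.  hpi_lookup_entry_point_function() below matches a card's
 * vendor/device/subsystem IDs against this table and casts driver_data back
 * to an hpi_handler_func pointer.
 */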

static struct hpios_spinlock msgx_lock;

static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
static int logging_enabled = 1;

static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{

	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor !=
			pci_info->pci_dev->vendor)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device !=
			pci_info->pci_dev->device)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->pci_dev->subsystem_vendor)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->pci_dev->subsystem_device)
			continue;

		/* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
		   asihpi_pci_tbl[i].driver_data); */
		return (hpi_handler_func *)asihpi_pci_tbl[i].driver_data;
	}

	return NULL;
}

static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
		&& hpi_entry_points[phm->adapter_index])
		hpi_entry_points[phm->adapter_index](phm, phr);
	else
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);
}

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);

static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif

/* Globals */
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

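/*
 * Response caching summary: adapter_prepare() pre-opens the adapter, the
 * mixer and every stream once, and stores the hardware's responses in the
 * rESP_HPI_*_OPEN arrays above.  Subsequent OPEN messages are answered from
 * this cache, while outstream_user_open[] and instream_user_open[] record
 * which owner (if any) currently holds each stream, so duplicate opens and
 * closes by a non-owner can be rejected.
 */
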
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
		HPI_DEBUG_LOG(WARNING,
			"suspicious adapter index %d in subsys message 0x%x.\n",
			phm->adapter_index, phm->function);

	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
		phr->u.s.data = HPI_VER;	/* return major.minor.release */
		break;
	case HPI_SUBSYS_OPEN:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* Initialize this module's internal state */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		/* Init subsys_findadapters response to no-adapters */
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		/* individual HPIs don't implement driver load */
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;

	case HPI_SUBSYS_GET_NUM_ADAPTERS:
	case HPI_SUBSYS_GET_ADAPTER:
		HPI_COMMON(phm, phr);
		break;

	case HPI_SUBSYS_CREATE_ADAPTER:
		HPIMSGX__init(phm, phr);
		break;

	default:
		/* Must explicitly handle every subsys message in this switch */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
			HPI_ERROR_INVALID_FUNC);
		break;
	}
}

static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_ADAPTER_OPEN:
		adapter_open(phm, phr);
		break;
	case HPI_ADAPTER_CLOSE:
		adapter_close(phm, phr);
		break;
	case HPI_ADAPTER_DELETE:
		HPIMSGX__cleanup(phm->adapter_index, h_owner);
		{
			struct hpi_message hm;
			struct hpi_response hr;
			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_CLOSE);
			hm.adapter_index = phm->adapter_index;
			hw_entry_point(&hm, &hr);
		}
		hw_entry_point(phm, phr);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
	switch (phm->function) {
	case HPI_MIXER_OPEN:
		mixer_open(phm, phr);
		break;
	case HPI_MIXER_CLOSE:
		mixer_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void outstream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_OSTREAM_OPEN:
		outstream_open(phm, phr, h_owner);
		break;
	case HPI_OSTREAM_CLOSE:
		outstream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void instream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_ISTREAM_OPEN:
		instream_open(phm, phr, h_owner);
		break;
	case HPI_ISTREAM_CLOSE:
		instream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
 * HPI_MessageEx so that functions in hpifunc.c compile.
 */
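/*
 * A minimal sketch of such a wrapper, assuming the driver keeps a single
 * kernel-side owner token (the name below is illustrative, not defined in
 * this file):
 *
 *	void hpi_send_recv(struct hpi_message *phm, struct hpi_response *phr)
 *	{
 *		hpi_send_recv_ex(phm, phr, HOWNER_KERNEL);
 *	}
 */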
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	if (logging_enabled)
		HPI_DEBUG_MESSAGE(DEBUG, phm);

	if (phm->type != HPI_TYPE_REQUEST) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;

	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}

	if (logging_enabled)
		HPI_DEBUG_RESPONSE(phr);

	if (phr->error >= HPI_ERROR_DSP_COMMUNICATION) {
		hpi_debug_level_set(HPI_DEBUG_LEVEL_ERROR);
		logging_enabled = 0;
	}
}
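
/*
 * Hypothetical caller, for illustration only ('owner' is a placeholder for
 * whatever handle the caller uses for ownership tracking, e.g. a struct
 * file pointer on the ioctl path):
 *
 *	struct hpi_message hm;
 *	struct hpi_response hr;
 *
 *	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
 *		HPI_SUBSYS_GET_VERSION);
 *	hpi_send_recv_ex(&hm, &hr, owner);
 *	version = hr.error ? 0 : hr.u.s.version;
 */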

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}

static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}

static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}

static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		instream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "instream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		outstream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "outstream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;

	/* Open the adapter and streams */
	u16 i;

	/* call to HPI_ADAPTER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	/* call to HPI_ADAPTER_GET_INFO */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;

	/* call to HPI_OSTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_ISTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_MIXER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return 0;
}

static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {

			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
				HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
	}
}

static u16 HPIMSGX__init(struct hpi_message *phm,
	/* HPI_SUBSYS_CREATE_ADAPTER structure with */
	/* resource list or NULL=find all */
	struct hpi_response *phr
	/* response from HPI_ADAPTER_GET_INFO */
	)
{
	hpi_handler_func *entry_point_func;
	struct hpi_response hr;

	/* Init response here so we can pass in previous adapter list */
	hpi_init_response(&hr, phm->object, phm->function,
		HPI_ERROR_INVALID_OBJ);

	entry_point_func =
		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);

	if (entry_point_func) {
		HPI_DEBUG_MESSAGE(DEBUG, phm);
		entry_point_func(phm, &hr);
	} else {
		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
		return phr->error;
	}
	if (hr.error == 0 && hr.u.s.adapter_index < HPI_MAX_ADAPTERS) {
		/* the adapter was created successfully;
		   save the mapping for future use */
		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
		/* prepare adapter (pre-open streams etc.) */
		HPI_DEBUG_LOG(DEBUG,
			"HPI_SUBSYS_CREATE_ADAPTER successful,"
			" preparing adapter\n");
		adapter_prepare(hr.u.s.adapter_index);
	}
	memcpy(phr, &hr, hr.size);
	return phr->error;
}

static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
	int i, adapter, adapter_limit;

	if (!h_owner)
		return;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		adapter = 0;
		adapter_limit = HPI_MAX_ADAPTERS;
	} else {
		adapter = adapter_index;
		adapter_limit = adapter + 1;
	}

	for (; adapter < adapter_limit; adapter++) {
		/* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			if (h_owner ==
				outstream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d ostream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				outstream_user_open[adapter][i].open_flag = 0;
				outstream_user_open[adapter][i].h_owner =
					NULL;
			}
			if (h_owner == instream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d istream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				instream_user_open[adapter][i].open_flag = 0;
				instream_user_open[adapter][i].h_owner = NULL;
			}
		}
	}
}