4 Copyright (c) Eicon Networks, 2002.
6 This source file is supplied for the use with
7 Eicon Networks range of DIVA Server Adapters.
9 Eicon File Revision : 2.1
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
18 implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
19 See the GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32 #include "pkmaint.h" /* pc_main.h, packed in os-dependent fashion */
36 extern ADAPTER * adapter[MAX_ADAPTER];
37 extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
38 void request (PISDN_ADAPTER, ENTITY *);
39 void pcm_req (PISDN_ADAPTER, ENTITY *);
40 /* --------------------------------------------------------------------------
42 -------------------------------------------------------------------------- */
44 static void Request##N(ENTITY *e) \
45 { if ( IoAdapters[N] ) (* IoAdapters[N]->DIRequest)(IoAdapters[N], e) ; }
/*
 * Per-adapter request entry points: Requests[i] forwards to
 * IoAdapters[i]->DIRequest when adapter i is initialized (see the
 * Request##N generator macro above).
 * NOTE(review): the array terminator is not visible in this listing.
 */
62 IDI_CALL Requests[MAX_ADAPTER] =
63 { &Request0, &Request1, &Request2, &Request3,
64 &Request4, &Request5, &Request6, &Request7,
65 &Request8, &Request9, &Request10, &Request11,
66 &Request12, &Request13, &Request14, &Request15
68 /*****************************************************************************/
70 This array should indicate all new services, that this version of XDI
71 is able to provide to his clients
/*
 * Feature mask advertising the extended XDI services this driver build
 * provides to its clients; copied out byte-wise by the
 * IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES handler in request() below.
 * The DMA-related bits are only compiled in with DIVA_IDI_RX_DMA.
 * NOTE(review): the matching #else/#endif and array terminator are
 * missing from this listing.
 */
73 static byte extended_xdi_features[DIVA_XDI_EXTENDED_FEATURES_MAX_SZ+1] = {
74 (DIVA_XDI_EXTENDED_FEATURES_VALID |
75 DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR |
76 DIVA_XDI_EXTENDED_FEATURE_CAPI_PRMS |
77 #if defined(DIVA_IDI_RX_DMA)
78 DIVA_XDI_EXTENDED_FEATURE_CMA |
79 DIVA_XDI_EXTENDED_FEATURE_RX_DMA |
81 DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC),
84 /*****************************************************************************/
/*
 * dump_xlog_buffer: replay the adapter's XLOG ring described by
 * 'xlogDesc' (buf/cnt/out) into the FTL debug log after a firmware
 * trap.  Each entry's head word holds the byte offset of the next
 * entry; the entry text starts at the following word.
 * NOTE(review): loop braces and parts of the corrupted-entry bail-out
 * are missing lines in this listing.
 */
86 dump_xlog_buffer (PISDN_ADAPTER IoAdapter, Xdesc *xlogDesc)
89 word *Xlog = xlogDesc->buf ;
90 word logCnt = xlogDesc->cnt ;
91 word logOut = xlogDesc->out / sizeof(*Xlog) ;
92 DBG_FTL(("%s: ************* XLOG recovery (%d) *************",
93 &IoAdapter->Name[0], (int)logCnt))
94 DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
95 for ( ; logCnt > 0 ; --logCnt )
/* a zero head word marks an unused/terminating entry */
97 if ( !READ_WORD(&Xlog[logOut]) )
/* the next-entry offset must lie beyond the current entry, else the
 * ring is corrupted and recovery stops */
103 if ( READ_WORD(&Xlog[logOut]) <= (logOut * sizeof(*Xlog)) )
107 DBG_FTL(("Possibly corrupted XLOG: %d entries left",
/* entry text length = next-entry offset minus current offset */
112 logLen = (dword)(READ_WORD(&Xlog[logOut]) - (logOut * sizeof(*Xlog))) ;
113 DBG_FTL_MXLOG(( (char *)&Xlog[logOut + 1], (dword)(logLen - 2) ))
114 logOut = (READ_WORD(&Xlog[logOut]) + 1) / sizeof(*Xlog) ;
116 DBG_FTL(("%s: ***************** end of XLOG *****************",
117 &IoAdapter->Name[0]))
119 /*****************************************************************************/
/*
 * ExceptionCauseTable: human-readable text for the trapped adapter
 * CPU's Cause-register exception code; indexed in dump_trap_frame()
 * via (READ_DWORD(&xcept->cr) & 0x0000007c) >> 2 (MIPS-style ExcCode).
 * NOTE(review): several entries and the array terminator are missing
 * from this listing.  "Reverd instruction" looks like a typo for
 * "Reserved instruction" -- it is a debug-log string only; confirm
 * against the full source before changing it.
 */
120 char *(ExceptionCauseTable[]) =
126 "Address error load",
127 "Address error store",
128 "Instruction load bus error",
129 "Data load/store bus error",
132 "Reverd instruction",
133 "Coprocessor unusable",
137 "Floating Point Exception",
156 dump_trap_frame (PISDN_ADAPTER IoAdapter, byte *exceptionFrame)
158 MP_XCPTC *xcept = (MP_XCPTC *)exceptionFrame ;
160 regs = &xcept->regs[0] ;
161 DBG_FTL(("%s: ***************** CPU TRAPPED *****************",
162 &IoAdapter->Name[0]))
163 DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
164 DBG_FTL(("Cause: %s",
165 ExceptionCauseTable[(READ_DWORD(&xcept->cr) & 0x0000007c) >> 2]))
166 DBG_FTL(("sr 0x%08x cr 0x%08x epc 0x%08x vaddr 0x%08x",
167 READ_DWORD(&xcept->sr), READ_DWORD(&xcept->cr),
168 READ_DWORD(&xcept->epc), READ_DWORD(&xcept->vaddr)))
169 DBG_FTL(("zero 0x%08x at 0x%08x v0 0x%08x v1 0x%08x",
170 READ_DWORD(®s[ 0]), READ_DWORD(®s[ 1]),
171 READ_DWORD(®s[ 2]), READ_DWORD(®s[ 3])))
172 DBG_FTL(("a0 0x%08x a1 0x%08x a2 0x%08x a3 0x%08x",
173 READ_DWORD(®s[ 4]), READ_DWORD(®s[ 5]),
174 READ_DWORD(®s[ 6]), READ_DWORD(®s[ 7])))
175 DBG_FTL(("t0 0x%08x t1 0x%08x t2 0x%08x t3 0x%08x",
176 READ_DWORD(®s[ 8]), READ_DWORD(®s[ 9]),
177 READ_DWORD(®s[10]), READ_DWORD(®s[11])))
178 DBG_FTL(("t4 0x%08x t5 0x%08x t6 0x%08x t7 0x%08x",
179 READ_DWORD(®s[12]), READ_DWORD(®s[13]),
180 READ_DWORD(®s[14]), READ_DWORD(®s[15])))
181 DBG_FTL(("s0 0x%08x s1 0x%08x s2 0x%08x s3 0x%08x",
182 READ_DWORD(®s[16]), READ_DWORD(®s[17]),
183 READ_DWORD(®s[18]), READ_DWORD(®s[19])))
184 DBG_FTL(("s4 0x%08x s5 0x%08x s6 0x%08x s7 0x%08x",
185 READ_DWORD(®s[20]), READ_DWORD(®s[21]),
186 READ_DWORD(®s[22]), READ_DWORD(®s[23])))
187 DBG_FTL(("t8 0x%08x t9 0x%08x k0 0x%08x k1 0x%08x",
188 READ_DWORD(®s[24]), READ_DWORD(®s[25]),
189 READ_DWORD(®s[26]), READ_DWORD(®s[27])))
190 DBG_FTL(("gp 0x%08x sp 0x%08x s8 0x%08x ra 0x%08x",
191 READ_DWORD(®s[28]), READ_DWORD(®s[29]),
192 READ_DWORD(®s[30]), READ_DWORD(®s[31])))
193 DBG_FTL(("md 0x%08x|%08x resvd 0x%08x class 0x%08x",
194 READ_DWORD(&xcept->mdhi), READ_DWORD(&xcept->mdlo),
195 READ_DWORD(&xcept->reseverd), READ_DWORD(&xcept->xclass)))
197 /* --------------------------------------------------------------------------
198 Real XDI Request function
199 -------------------------------------------------------------------------- */
/*
 * request: real XDI request entry point for one adapter.
 *
 * e->Req == 0 marks a synchronous "special function" call: 'e' is then
 * really an IDI_SYNC_REQ and is dispatched on its request code.
 * Otherwise the entity is validated, entered into the adapter entity
 * table if needed (ASSIGN), queued on the adapter request queue and
 * the request soft-ISR (DPC) is scheduled to process it.
 *
 * NOTE(review): this listing has gaps -- the switch header, braces,
 * break/return statements are missing lines; comments below follow
 * the visible control flow.
 */
200 void request(PISDN_ADAPTER IoAdapter, ENTITY * e)
203 diva_os_spin_lock_magic_t irql;
/*
205 * if the Req field in the entity structure is 0,
206 * we treat this request as a special function call
*/
210 IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e ;
213 #if defined(DIVA_IDI_RX_DMA)
/* Allocate or free one RX-DMA map descriptor under the data lock. */
214 case IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION: {
215 diva_xdi_dma_descriptor_operation_t* pI = \
216 &syncReq->xdi_dma_descriptor_operation.info;
/* No DMA map on this adapter: report failure via descriptor_number. */
217 if (!IoAdapter->dma_map) {
219 pI->descriptor_number = -1;
222 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "dma_op");
223 if (pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC) {
224 pI->descriptor_number = diva_alloc_dma_map_entry (\
225 (struct _diva_dma_map_entry*)IoAdapter->dma_map);
226 if (pI->descriptor_number >= 0) {
229 DBG_TRC(("A(%d) dma_alloc(%d)",
230 IoAdapter->ANum, pI->descriptor_number))
/* Return host address and magic cookie of the new descriptor. */
231 diva_get_dma_map_entry (\
232 (struct _diva_dma_map_entry*)IoAdapter->dma_map,
233 pI->descriptor_number,
234 &local_addr, &dma_magic);
235 pI->descriptor_address = local_addr;
236 pI->descriptor_magic = dma_magic;
241 } else if ((pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE) &&
242 (pI->descriptor_number >= 0)) {
243 DBG_TRC(("A(%d) dma_free(%d)", IoAdapter->ANum, pI->descriptor_number))
244 diva_free_dma_map_entry((struct _diva_dma_map_entry*)IoAdapter->dma_map,
245 pI->descriptor_number);
246 pI->descriptor_number = -1;
/* unknown descriptor operation: fail the request */
249 pI->descriptor_number = -1;
252 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "dma_op");
/* Report logical adapter number and controller index to the client. */
255 case IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER: {
256 diva_xdi_get_logical_adapter_number_s_t *pI = \
257 &syncReq->xdi_logical_adapter_number.info;
258 pI->logical_adapter_number = IoAdapter->ANum;
259 pI->controller = IoAdapter->ControllerNumber;
/* Copy CAPI configuration flags, bounded by the caller's length. */
261 case IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS: {
262 diva_xdi_get_capi_parameters_t prms, *pI = &syncReq->xdi_capi_prms.info;
263 memset (&prms, 0x00, sizeof(prms));
264 prms.structure_length = MIN(sizeof(prms), pI->structure_length);
265 memset (pI, 0x00, pI->structure_length);
266 prms.flag_dynamic_l1_down = (IoAdapter->capi_cfg.cfg_1 & \
267 DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? 1 : 0;
268 prms.group_optimization_enabled = (IoAdapter->capi_cfg.cfg_1 & \
269 DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON) ? 1 : 0;
270 memcpy (pI, &prms, prms.structure_length);
/* Report the adapter's SDRAM base address register. */
272 case IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR:
273 syncReq->xdi_sdram_bar.info.bar = IoAdapter->sdram_bar;
/* Copy the extended-feature mask; bit 31 of the returned length
 * signals the caller's buffer was too small for all features. */
275 case IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES: {
277 diva_xdi_get_extended_xdi_features_t* pI =\
278 &syncReq->xdi_extended_features.info;
279 pI->buffer_length_in_bytes &= ~0x80000000;
280 if (pI->buffer_length_in_bytes && pI->features) {
281 memset (pI->features, 0x00, pI->buffer_length_in_bytes);
283 for (i = 0; ((pI->features) && (i < pI->buffer_length_in_bytes) &&
284 (i < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ)); i++) {
285 pI->features[i] = extended_xdi_features[i];
287 if ((pI->buffer_length_in_bytes < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ) ||
289 pI->buffer_length_in_bytes =\
290 (0x80000000 | DIVA_XDI_EXTENDED_FEATURES_MAX_SZ);
/* Hand out (or refuse) the adapter's istream interface. */
293 case IDI_SYNC_REQ_XDI_GET_STREAM:
295 diva_xdi_provide_istream_info (&IoAdapter->a,
296 &syncReq->xdi_stream_info.info);
298 syncReq->xdi_stream_info.info.provided_service = 0;
/* Return the adapter name (empty string when unavailable). */
301 case IDI_SYNC_REQ_GET_NAME:
304 strcpy (&syncReq->GetName.name[0], IoAdapter->Name) ;
305 DBG_TRC(("xdi: Adapter %d / Name '%s'",
306 IoAdapter->ANum, IoAdapter->Name))
309 syncReq->GetName.name[0] = '\0' ;
/* Return the adapter serial number (0 when unavailable). */
311 case IDI_SYNC_REQ_GET_SERIAL:
314 syncReq->GetSerial.serial = IoAdapter->serialNo ;
315 DBG_TRC(("xdi: Adapter %d / SerialNo %ld",
316 IoAdapter->ANum, IoAdapter->serialNo))
319 syncReq->GetSerial.serial = 0 ;
/* XLOG retrieval is forwarded to the maintenance handler pcm_req(). */
321 case IDI_SYNC_REQ_GET_XLOG:
324 pcm_req (IoAdapter, e) ;
/* Return the adapter feature word (0 when unavailable). */
329 case IDI_SYNC_REQ_GET_FEATURES:
332 syncReq->GetFeatures.features =
333 (unsigned short)IoAdapter->features ;
336 syncReq->GetFeatures.features = 0 ;
338 case IDI_SYNC_REQ_PORTDRV_HOOK:
341 DBG_TRC(("Xdi:IDI_SYNC_REQ_PORTDRV_HOOK - ignored"))
/* unknown synchronous request code */
348 DBG_FTL(("xdi: unknown Req 0 / Rc %d !", e->Rc))
/* ---- normal (asynchronous) request path from here on ---- */
352 DBG_TRC(("xdi: Id 0x%x / Req 0x%x / Rc 0x%x", e->Id, e->Req, e->Rc))
355 DBG_FTL(("xdi: uninitialized Adapter used - ignore request"))
358 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
/* Id with a zero entity field -> ASSIGN: claim a free e_tbl slot. */
362 if ( !(e->Id &0x1f) )
364 if ( IoAdapter->e_count >= IoAdapter->e_max )
366 DBG_FTL(("xdi: all Ids in use (max=%d) --> Req ignored",
368 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
/* linear scan for the first free slot (slot 0 is reserved) */
374 for ( i = 1 ; IoAdapter->e_tbl[i].e ; ++i ) ;
375 IoAdapter->e_tbl[i].e = e ;
376 IoAdapter->e_count++ ;
/*
386 * if the entity is still busy, ignore the request call
*/
388 if ( e->More & XBUSY )
390 DBG_FTL(("xdi: Id 0x%x busy --> Req 0x%x ignored", e->Id, e->Req))
/* a stuck-busy entity may mean trapped firmware: capture state once */
391 if ( !IoAdapter->trapped && IoAdapter->trapFnc )
393 IoAdapter->trapFnc (IoAdapter) ;
/*
395 First trap, also notify user if supported
*/
397 if (IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
398 (*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
401 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
/*
405 * initialize transmit status variables
*/
/*
412 * queue this entity in the adapter request queue
*/
414 IoAdapter->e_tbl[i].next = 0 ;
415 if ( IoAdapter->head )
417 IoAdapter->e_tbl[IoAdapter->tail].next = i ;
418 IoAdapter->tail = i ;
/* queue was empty: entity becomes both head and tail */
422 IoAdapter->head = i ;
423 IoAdapter->tail = i ;
/*
426 * queue the DPC to process the request
*/
428 diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
429 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
431 /* ---------------------------------------------------------------------
433 --------------------------------------------------------------------- */
/*
 * DIDpcRoutine: request/interrupt DPC (soft-ISR) for one adapter.
 * The in_dpc atomic guards against re-entry: only the caller that
 * increments it to 1 runs the service loop, and it re-runs while
 * further schedules arrived in the meantime.  It also drives the
 * polled XLOG retrieval state machine (pcm_pending) for cards with
 * indirect (I/O-mapped) addressing; see pcm_req().
 * NOTE(review): several brace/do-loop lines are missing in this
 * listing.
 */
434 void DIDpcRoutine (struct _diva_os_soft_isr* psoft_isr, void* Context) {
435 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)Context ;
436 ADAPTER* a = &IoAdapter->a ;
437 diva_os_atomic_t* pin_dpc = &IoAdapter->in_dpc;
438 if (diva_os_atomic_increment (pin_dpc) == 1) {
/* service and acknowledge the adapter interrupt while asserted */
440 if ( IoAdapter->tst_irq (a) )
442 if ( !IoAdapter->Unavailable )
444 IoAdapter->clr_irq (a) ;
447 } while (diva_os_atomic_decrement (pin_dpc) > 0);
448 /* ----------------------------------------------------------------
449 Look for XLOG request (cards with indirect addressing)
450 ---------------------------------------------------------------- */
451 if (IoAdapter->pcm_pending) {
452 struct pc_maint *pcm;
453 diva_os_spin_lock_magic_t OldIrql ;
454 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
457 pcm = (struct pc_maint *)IoAdapter->pcm_data;
458 switch (IoAdapter->pcm_pending) {
459 case 1: /* ask card for XLOG */
460 a->ram_out (a, &IoAdapter->pcm->rc, 0) ;
461 a->ram_out (a, &IoAdapter->pcm->req, pcm->req) ;
462 IoAdapter->pcm_pending = 2;
464 case 2: /* Try to get XLOG from the card */
465 if ((int)(a->ram_in (a, &IoAdapter->pcm->rc))) {
466 a->ram_in_buffer (a, IoAdapter->pcm, pcm, sizeof(*pcm)) ;
467 IoAdapter->pcm_pending = 3;
/* state 3 is consumed by pcm_req() on the requesting thread */
470 case 3: /* let XDI recovery XLOG */
473 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
477 /* ---------------------------------------------------------------- */
480 /* --------------------------------------------------------------------------
482 -------------------------------------------------------------------------- */
/*
 * pcm_req: issue a maintenance (struct pc_maint, e.g. XLOG) request to
 * the card and busy-wait for the answer, which arrives in e->Ind.
 *
 * For I/O-mapped cards (CARD_MAE) shared-RAM access is not atomic, so
 * the transfer is delegated to the DPC via the pcm_pending state
 * machine (1 = posted, 2 = waiting, 3 = result copied) and polled
 * here.  Memory-mapped cards are accessed directly.  The poll budget
 * is raised from 250 to 3000 iterations once the adapter has trapped.
 * NOTE(review): loop braces, sleep calls and some spin-lock call
 * arguments are missing lines in this listing.
 */
484 pcm_req (PISDN_ADAPTER IoAdapter, ENTITY *e)
486 diva_os_spin_lock_magic_t OldIrql ;
488 ADAPTER *a = &IoAdapter->a ;
489 struct pc_maint *pcm = (struct pc_maint *)&e->Ind ;
/*
491 * special handling of I/O based card interface
492 * the memory access isn't an atomic operation !
*/
494 if ( IoAdapter->Properties.Card == CARD_MAE )
/* hand the request to the DPC: state 1 = request posted */
496 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
499 IoAdapter->pcm_data = (unsigned long)pcm;
500 IoAdapter->pcm_pending = 1;
501 diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
502 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
/* poll until the DPC reaches state 3 (result copied) or timeout */
505 for ( rc = 0, i = (IoAdapter->trapped ? 3000 : 250) ; !rc && (i > 0) ; --i )
508 if (IoAdapter->pcm_pending == 3) {
509 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
512 IoAdapter->pcm_pending = 0;
513 IoAdapter->pcm_data = 0;
514 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
/* not finished yet: kick the DPC again and keep polling */
519 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
522 diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
523 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
/* timed out: reset the state machine */
527 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
530 IoAdapter->pcm_pending = 0;
531 IoAdapter->pcm_data = 0;
532 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
/*
538 * memory based shared ram is accessible from different
539 * processors without disturbing concurrent processes.
*/
541 a->ram_out (a, &IoAdapter->pcm->rc, 0) ;
542 a->ram_out (a, &IoAdapter->pcm->req, pcm->req) ;
/* poll the return-code field until the card answers or timeout */
543 for ( i = (IoAdapter->trapped ? 3000 : 250) ; --i > 0 ; )
546 rc = (int)(a->ram_in (a, &IoAdapter->pcm->rc)) ;
549 a->ram_in_buffer (a, IoAdapter->pcm, pcm, sizeof(*pcm)) ;
/* no answer: assume the card trapped and capture its state once */
554 if ( IoAdapter->trapFnc )
556 int trapped = IoAdapter->trapped;
557 IoAdapter->trapFnc (IoAdapter) ;
/*
559 First trap, also notify user if supported
*/
561 if (!trapped && IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
562 (*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
566 /*------------------------------------------------------------------*/
567 /* ram access functions for memory mapped cards */
568 /*------------------------------------------------------------------*/
/*
 * mem_in: read one byte from memory-mapped adapter shared RAM.
 * 'addr' is a card-relative offset, not a host pointer.
 * NOTE(review): local declarations, return statement and braces are
 * missing lines in this listing.
 */
569 byte mem_in (ADAPTER *a, void *addr)
574 Base = (volatile byte *)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
575 val = *(Base + (unsigned long)addr);
576 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
/*
 * mem_inw: read one 16-bit word from memory-mapped adapter shared RAM
 * at card-relative offset 'addr' (READ_WORD handles alignment).
 * NOTE(review): local declarations, return statement and braces are
 * missing lines in this listing.
 */
579 word mem_inw (ADAPTER *a, void *addr)
584 Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
585 val = READ_WORD((Base + (unsigned long)addr));
586 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
/*
 * mem_in_dw: read 'dwords' 32-bit words from adapter shared RAM at
 * offset 'addr' into 'data'.
 * NOTE(review): the loop header/footer is missing from this listing;
 * presumably it iterates 'dwords' times advancing 'addr' -- verify
 * against the full source.
 */
589 void mem_in_dw (ADAPTER *a, void *addr, dword* data, int dwords)
591 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
593 *data++ = READ_DWORD((Base + (unsigned long)addr));
596 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
598 void mem_in_buffer (ADAPTER *a, void *addr, void *buffer, word length)
600 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
601 memcpy (buffer, (void *)(Base + (unsigned long)addr), length);
602 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
604 void mem_look_ahead (ADAPTER *a, PBUFFER *RBuffer, ENTITY *e)
606 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io ;
607 IoAdapter->RBuffer.length = mem_inw (a, &RBuffer->length) ;
608 mem_in_buffer (a, RBuffer->P, IoAdapter->RBuffer.P,
609 IoAdapter->RBuffer.length) ;
610 e->RBuffer = (DBUFFER *)&IoAdapter->RBuffer ;
612 void mem_out (ADAPTER *a, void *addr, byte data)
614 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
615 *(Base + (unsigned long)addr) = data ;
616 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
618 void mem_outw (ADAPTER *a, void *addr, word data)
620 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
621 WRITE_WORD((Base + (unsigned long)addr), data);
622 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
/*
 * mem_out_dw: write 'dwords' 32-bit words from 'data' into adapter
 * shared RAM at offset 'addr'.
 * NOTE(review): the loop header/footer is missing from this listing;
 * presumably it iterates 'dwords' times advancing 'addr' -- verify
 * against the full source.
 */
624 void mem_out_dw (ADAPTER *a, void *addr, const dword* data, int dwords)
626 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
628 WRITE_DWORD((Base + (unsigned long)addr), *data);
632 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
634 void mem_out_buffer (ADAPTER *a, void *addr, void *buffer, word length)
636 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
637 memcpy ((void *)(Base + (unsigned long)addr), buffer, length) ;
638 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
640 void mem_inc (ADAPTER *a, void *addr)
642 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
643 byte x = *(Base + (unsigned long)addr);
644 *(Base + (unsigned long)addr) = x + 1 ;
645 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
647 /*------------------------------------------------------------------*/
648 /* ram access functions for io-mapped cards */
649 /*------------------------------------------------------------------*/
/*
 * io_in: read one byte from an I/O-mapped card: select the card-side
 * address via the address register at Port+4, then read the data
 * register.
 * NOTE(review): the data-register read and return statement are
 * missing lines in this listing.
 */
650 byte io_in(ADAPTER * a, void * adr)
653 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
654 outppw(Port + 4, (word)(unsigned long)adr);
656 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/*
 * io_inw: read one 16-bit word from an I/O-mapped card: select the
 * card-side address via the address register at Port+4, then read the
 * data register.
 * NOTE(review): the data-register read and return statement are
 * missing lines in this listing.
 */
659 word io_inw(ADAPTER * a, void * adr)
662 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
663 outppw(Port + 4, (word)(unsigned long)adr);
665 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/*
 * io_in_buffer: read 'len' bytes from an I/O-mapped card into
 * 'buffer'.  An odd start address is first handled with a single byte
 * transfer (that branch's read lines are missing from this listing),
 * then the remainder is read as word transfers; 'len+1' rounds an odd
 * length up to a full word.
 */
668 void io_in_buffer(ADAPTER * a, void * adr, void * buffer, word len)
670 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
671 byte* P = (byte*)buffer;
/* odd address: transfer one byte first so bulk reads are word aligned */
673 outppw(Port+4, (word)(unsigned long)adr);
676 adr = ((byte *) adr) + 1;
679 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/* bulk transfer of the (now word-aligned) remainder */
683 outppw(Port+4, (word)(unsigned long)adr);
684 inppw_buffer (Port, P, len+1);
685 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
687 void io_look_ahead(ADAPTER * a, PBUFFER * RBuffer, ENTITY * e)
689 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
690 outppw(Port+4, (word)(unsigned long)RBuffer);
691 ((PISDN_ADAPTER)a->io)->RBuffer.length = inppw(Port);
692 inppw_buffer (Port, ((PISDN_ADAPTER)a->io)->RBuffer.P, ((PISDN_ADAPTER)a->io)->RBuffer.length + 1);
693 e->RBuffer = (DBUFFER *) &(((PISDN_ADAPTER)a->io)->RBuffer);
694 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/*
 * io_out: write one byte to an I/O-mapped card: select the card-side
 * address via the address register at Port+4, then write the data
 * register.
 * NOTE(review): the data-register write is a missing line in this
 * listing.
 */
696 void io_out(ADAPTER * a, void * adr, byte data)
698 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
699 outppw(Port+4, (word)(unsigned long)adr);
701 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/*
 * io_outw: write one 16-bit word to an I/O-mapped card: select the
 * card-side address via the address register at Port+4, then write the
 * data register.
 * NOTE(review): the data-register write is a missing line in this
 * listing.
 */
703 void io_outw(ADAPTER * a, void * adr, word data)
705 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
706 outppw(Port+4, (word)(unsigned long)adr);
708 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/*
 * io_out_buffer: write 'len' bytes from 'buffer' to an I/O-mapped
 * card.  An odd start address is first handled with a single byte
 * transfer (that branch's write lines are missing from this listing),
 * then the remainder goes out as word transfers; 'len+1' rounds an
 * odd length up to a full word.
 */
710 void io_out_buffer(ADAPTER * a, void * adr, void * buffer, word len)
712 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
713 byte* P = (byte*)buffer;
/* odd address: transfer one byte first so bulk writes are word aligned */
715 outppw(Port+4, (word)(unsigned long)adr);
718 adr = ((byte *) adr) + 1;
721 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/* bulk transfer of the (now word-aligned) remainder */
725 outppw(Port+4, (word)(unsigned long)adr);
726 outppw_buffer (Port, P, len+1);
727 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/*
 * io_inc: read-modify-write increment of one byte on an I/O-mapped
 * card: select the address, read the byte (read line missing from this
 * listing), re-select the address, write back the incremented value
 * (write line missing as well).
 */
729 void io_inc(ADAPTER * a, void * adr)
732 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
733 outppw(Port+4, (word)(unsigned long)adr);
/* the data-register read auto-advances the address, so re-select */
735 outppw(Port+4, (word)(unsigned long)adr);
737 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
739 /*------------------------------------------------------------------*/
740 /* OS specific functions related to queuing of entities */
741 /*------------------------------------------------------------------*/
742 void free_entity(ADAPTER * a, byte e_no)
744 PISDN_ADAPTER IoAdapter;
745 diva_os_spin_lock_magic_t irql;
746 IoAdapter = (PISDN_ADAPTER) a->io;
747 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_free");
748 IoAdapter->e_tbl[e_no].e = NULL;
749 IoAdapter->e_count--;
750 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_free");
752 void assign_queue(ADAPTER * a, byte e_no, word ref)
754 PISDN_ADAPTER IoAdapter;
755 diva_os_spin_lock_magic_t irql;
756 IoAdapter = (PISDN_ADAPTER) a->io;
757 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_assign");
758 IoAdapter->e_tbl[e_no].assign_ref = ref;
759 IoAdapter->e_tbl[e_no].next = (byte)IoAdapter->assign;
760 IoAdapter->assign = e_no;
761 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_assign");
/*
 * get_assign: walk the adapter's assign list for the slot whose
 * assign_ref matches 'ref'; the loop leaves e_no == 0 when no match
 * is found.
 * NOTE(review): the e_no declaration, remaining spin-lock call
 * arguments and the return statement are missing lines in this
 * listing.
 */
763 byte get_assign(ADAPTER * a, word ref)
765 PISDN_ADAPTER IoAdapter;
766 diva_os_spin_lock_magic_t irql;
768 IoAdapter = (PISDN_ADAPTER) a->io;
769 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
772 for(e_no = (byte)IoAdapter->assign;
773 e_no && IoAdapter->e_tbl[e_no].assign_ref!=ref;
774 e_no = IoAdapter->e_tbl[e_no].next);
775 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
780 void req_queue(ADAPTER * a, byte e_no)
782 PISDN_ADAPTER IoAdapter;
783 diva_os_spin_lock_magic_t irql;
784 IoAdapter = (PISDN_ADAPTER) a->io;
785 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_q");
786 IoAdapter->e_tbl[e_no].next = 0;
787 if(IoAdapter->head) {
788 IoAdapter->e_tbl[IoAdapter->tail].next = e_no;
789 IoAdapter->tail = e_no;
792 IoAdapter->head = e_no;
793 IoAdapter->tail = e_no;
795 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_q");
797 byte look_req(ADAPTER * a)
799 PISDN_ADAPTER IoAdapter;
800 IoAdapter = (PISDN_ADAPTER) a->io;
801 return ((byte)IoAdapter->head) ;
803 void next_req(ADAPTER * a)
805 PISDN_ADAPTER IoAdapter;
806 diva_os_spin_lock_magic_t irql;
807 IoAdapter = (PISDN_ADAPTER) a->io;
808 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_next");
809 IoAdapter->head = IoAdapter->e_tbl[IoAdapter->head].next;
810 if(!IoAdapter->head) IoAdapter->tail = 0;
811 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_next");
813 /*------------------------------------------------------------------*/
814 /* memory map functions */
815 /*------------------------------------------------------------------*/
816 ENTITY * entity_ptr(ADAPTER * a, byte e_no)
818 PISDN_ADAPTER IoAdapter;
819 IoAdapter = (PISDN_ADAPTER) a->io;
820 return (IoAdapter->e_tbl[e_no].e);
822 void * PTR_X(ADAPTER * a, ENTITY * e)
824 return ((void *) e->X);
826 void * PTR_R(ADAPTER * a, ENTITY * e)
828 return ((void *) e->R);
/*
 * PTR_P: shared-RAM/host abstraction hook for parameter buffers.
 * NOTE(review): the body (original lines 831-833) is missing from this
 * listing; by analogy with PTR_X/PTR_R above it presumably returns 'P'
 * unchanged for memory-mapped cards -- verify against the full source.
 */
830 void * PTR_P(ADAPTER * a, ENTITY * e, void * P)
834 void CALLBACK(ADAPTER * a, ENTITY * e)
836 if ( e && e->callback )
839 /* --------------------------------------------------------------------------
840 routines for aligned reading and writing on RISC
841 -------------------------------------------------------------------------- */
/*
 * outp_words_from_buffer: write the byte buffer 'P' ('len' bytes) to
 * the 16-bit port 'adr' using aligned word transfers (RISC-safe); the
 * loop below covers the even-sized part (len & 0xfffe).
 * NOTE(review): the loop body and the odd-trailing-byte handling are
 * missing lines in this listing.
 */
842 void outp_words_from_buffer (word* adr, byte* P, word len)
846 while (i < (len & 0xfffe)) {
852 void inp_words_to_buffer (word* adr, byte* P, word len)
856 while (i < (len & 0xfffe)) {
859 P[i++] = (byte)(w>>8);