4 Copyright (c) Eicon Networks, 2002.
6 This source file is supplied for the use with
7 Eicon Networks range of DIVA Server Adapters.
9 Eicon File Revision : 2.1
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
18 implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
19 See the GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32 #include "pkmaint.h" /* pc_main.h, packed in os-dependent fashion */
36 extern ADAPTER * adapter[MAX_ADAPTER];
37 extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
38 void request (PISDN_ADAPTER, ENTITY *);
39 void pcm_req (PISDN_ADAPTER, ENTITY *);
40 /* --------------------------------------------------------------------------
42 -------------------------------------------------------------------------- */
44 static void Request##N(ENTITY *e) \
45 { if ( IoAdapters[N] ) (* IoAdapters[N]->DIRequest)(IoAdapters[N], e) ; }
62 IDI_CALL Requests[MAX_ADAPTER] =
63 { &Request0, &Request1, &Request2, &Request3,
64 &Request4, &Request5, &Request6, &Request7,
65 &Request8, &Request9, &Request10, &Request11,
66 &Request12, &Request13, &Request14, &Request15
68 /*****************************************************************************/
70 This array should indicate all new services, that this version of XDI
71 is able to provide to his clients
73 static byte extended_xdi_features[DIVA_XDI_EXTENDED_FEATURES_MAX_SZ+1] = {
74 (DIVA_XDI_EXTENDED_FEATURES_VALID |
75 DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR |
76 DIVA_XDI_EXTENDED_FEATURE_CAPI_PRMS |
77 #if defined(DIVA_IDI_RX_DMA)
78 DIVA_XDI_EXTENDED_FEATURE_CMA |
79 DIVA_XDI_EXTENDED_FEATURE_RX_DMA |
80 DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA |
82 DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC),
85 /*****************************************************************************/
87 dump_xlog_buffer (PISDN_ADAPTER IoAdapter, Xdesc *xlogDesc)
90 word *Xlog = xlogDesc->buf ;
91 word logCnt = xlogDesc->cnt ;
92 word logOut = xlogDesc->out / sizeof(*Xlog) ;
93 DBG_FTL(("%s: ************* XLOG recovery (%d) *************",
94 &IoAdapter->Name[0], (int)logCnt))
95 DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
96 for ( ; logCnt > 0 ; --logCnt )
98 if ( !READ_WORD(&Xlog[logOut]) )
104 if ( READ_WORD(&Xlog[logOut]) <= (logOut * sizeof(*Xlog)) )
108 DBG_FTL(("Possibly corrupted XLOG: %d entries left",
113 logLen = (dword)(READ_WORD(&Xlog[logOut]) - (logOut * sizeof(*Xlog))) ;
114 DBG_FTL_MXLOG(( (char *)&Xlog[logOut + 1], (dword)(logLen - 2) ))
115 logOut = (READ_WORD(&Xlog[logOut]) + 1) / sizeof(*Xlog) ;
117 DBG_FTL(("%s: ***************** end of XLOG *****************",
118 &IoAdapter->Name[0]))
120 /*****************************************************************************/
/* Human-readable names for the 32 MIPS exception codes, indexed by the
   Cause register ExcCode field ((cr & 0x7c) >> 2) in dump_trap_frame().
   NOTE(review): most entries were lost in the corrupted source and are
   reconstructed from the MIPS Cause ExcCode assignments — confirm against
   the original table. Entry 10 fixed: "Reverd" -> "Reserved". */
char *(ExceptionCauseTable[]) =
{
	"Interrupt",
	"TLB mod /IBOUND",
	"TLB load /DBOUND",
	"TLB store",
	"Address error load",
	"Address error store",
	"Instruction load bus error",
	"Data load/store bus error",
	"Syscall",
	"Breakpoint",
	"Reserved instruction",
	"Coprocessor unusable",
	"Overflow",
	"TRAP",
	"VCEI",
	"Floating Point Exception",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved",
	"WATCH",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved",
	"VCED"
};
157 dump_trap_frame (PISDN_ADAPTER IoAdapter, byte *exceptionFrame)
159 MP_XCPTC *xcept = (MP_XCPTC *)exceptionFrame ;
161 regs = &xcept->regs[0] ;
162 DBG_FTL(("%s: ***************** CPU TRAPPED *****************",
163 &IoAdapter->Name[0]))
164 DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
165 DBG_FTL(("Cause: %s",
166 ExceptionCauseTable[(READ_DWORD(&xcept->cr) & 0x0000007c) >> 2]))
167 DBG_FTL(("sr 0x%08x cr 0x%08x epc 0x%08x vaddr 0x%08x",
168 READ_DWORD(&xcept->sr), READ_DWORD(&xcept->cr),
169 READ_DWORD(&xcept->epc), READ_DWORD(&xcept->vaddr)))
170 DBG_FTL(("zero 0x%08x at 0x%08x v0 0x%08x v1 0x%08x",
171 READ_DWORD(®s[ 0]), READ_DWORD(®s[ 1]),
172 READ_DWORD(®s[ 2]), READ_DWORD(®s[ 3])))
173 DBG_FTL(("a0 0x%08x a1 0x%08x a2 0x%08x a3 0x%08x",
174 READ_DWORD(®s[ 4]), READ_DWORD(®s[ 5]),
175 READ_DWORD(®s[ 6]), READ_DWORD(®s[ 7])))
176 DBG_FTL(("t0 0x%08x t1 0x%08x t2 0x%08x t3 0x%08x",
177 READ_DWORD(®s[ 8]), READ_DWORD(®s[ 9]),
178 READ_DWORD(®s[10]), READ_DWORD(®s[11])))
179 DBG_FTL(("t4 0x%08x t5 0x%08x t6 0x%08x t7 0x%08x",
180 READ_DWORD(®s[12]), READ_DWORD(®s[13]),
181 READ_DWORD(®s[14]), READ_DWORD(®s[15])))
182 DBG_FTL(("s0 0x%08x s1 0x%08x s2 0x%08x s3 0x%08x",
183 READ_DWORD(®s[16]), READ_DWORD(®s[17]),
184 READ_DWORD(®s[18]), READ_DWORD(®s[19])))
185 DBG_FTL(("s4 0x%08x s5 0x%08x s6 0x%08x s7 0x%08x",
186 READ_DWORD(®s[20]), READ_DWORD(®s[21]),
187 READ_DWORD(®s[22]), READ_DWORD(®s[23])))
188 DBG_FTL(("t8 0x%08x t9 0x%08x k0 0x%08x k1 0x%08x",
189 READ_DWORD(®s[24]), READ_DWORD(®s[25]),
190 READ_DWORD(®s[26]), READ_DWORD(®s[27])))
191 DBG_FTL(("gp 0x%08x sp 0x%08x s8 0x%08x ra 0x%08x",
192 READ_DWORD(®s[28]), READ_DWORD(®s[29]),
193 READ_DWORD(®s[30]), READ_DWORD(®s[31])))
194 DBG_FTL(("md 0x%08x|%08x resvd 0x%08x class 0x%08x",
195 READ_DWORD(&xcept->mdhi), READ_DWORD(&xcept->mdlo),
196 READ_DWORD(&xcept->reseverd), READ_DWORD(&xcept->xclass)))
198 /* --------------------------------------------------------------------------
199 Real XDI Request function
200 -------------------------------------------------------------------------- */
201 void request(PISDN_ADAPTER IoAdapter, ENTITY * e)
204 diva_os_spin_lock_magic_t irql;
206 * if the Req field in the entity structure is 0,
207 * we treat this request as a special function call
211 IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e ;
214 #if defined(DIVA_IDI_RX_DMA)
215 case IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION: {
216 diva_xdi_dma_descriptor_operation_t* pI = \
217 &syncReq->xdi_dma_descriptor_operation.info;
218 if (!IoAdapter->dma_map) {
220 pI->descriptor_number = -1;
223 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "dma_op");
224 if (pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC) {
225 pI->descriptor_number = diva_alloc_dma_map_entry (\
226 (struct _diva_dma_map_entry*)IoAdapter->dma_map);
227 if (pI->descriptor_number >= 0) {
231 DBG_TRC(("A(%d) dma_alloc(%d)",
232 IoAdapter->ANum, pI->descriptor_number))
234 diva_get_dma_map_entry (\
235 (struct _diva_dma_map_entry*)IoAdapter->dma_map,
236 pI->descriptor_number,
237 &local_addr, &dma_magic);
238 pI->descriptor_address = local_addr;
239 pI->descriptor_magic = dma_magic;
244 } else if ((pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE) &&
245 (pI->descriptor_number >= 0)) {
247 DBG_TRC(("A(%d) dma_free(%d)", IoAdapter->ANum, pI->descriptor_number))
249 diva_free_dma_map_entry((struct _diva_dma_map_entry*)IoAdapter->dma_map,
250 pI->descriptor_number);
251 pI->descriptor_number = -1;
254 pI->descriptor_number = -1;
257 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "dma_op");
260 case IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER: {
261 diva_xdi_get_logical_adapter_number_s_t *pI = \
262 &syncReq->xdi_logical_adapter_number.info;
263 pI->logical_adapter_number = IoAdapter->ANum;
264 pI->controller = IoAdapter->ControllerNumber;
265 pI->total_controllers = IoAdapter->Properties.Adapters;
267 case IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS: {
268 diva_xdi_get_capi_parameters_t prms, *pI = &syncReq->xdi_capi_prms.info;
269 memset (&prms, 0x00, sizeof(prms));
270 prms.structure_length = MIN(sizeof(prms), pI->structure_length);
271 memset (pI, 0x00, pI->structure_length);
272 prms.flag_dynamic_l1_down = (IoAdapter->capi_cfg.cfg_1 & \
273 DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? 1 : 0;
274 prms.group_optimization_enabled = (IoAdapter->capi_cfg.cfg_1 & \
275 DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON) ? 1 : 0;
276 memcpy (pI, &prms, prms.structure_length);
278 case IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR:
279 syncReq->xdi_sdram_bar.info.bar = IoAdapter->sdram_bar;
281 case IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES: {
283 diva_xdi_get_extended_xdi_features_t* pI =\
284 &syncReq->xdi_extended_features.info;
285 pI->buffer_length_in_bytes &= ~0x80000000;
286 if (pI->buffer_length_in_bytes && pI->features) {
287 memset (pI->features, 0x00, pI->buffer_length_in_bytes);
289 for (i = 0; ((pI->features) && (i < pI->buffer_length_in_bytes) &&
290 (i < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ)); i++) {
291 pI->features[i] = extended_xdi_features[i];
293 if ((pI->buffer_length_in_bytes < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ) ||
295 pI->buffer_length_in_bytes =\
296 (0x80000000 | DIVA_XDI_EXTENDED_FEATURES_MAX_SZ);
299 case IDI_SYNC_REQ_XDI_GET_STREAM:
301 diva_xdi_provide_istream_info (&IoAdapter->a,
302 &syncReq->xdi_stream_info.info);
304 syncReq->xdi_stream_info.info.provided_service = 0;
307 case IDI_SYNC_REQ_GET_NAME:
310 strcpy (&syncReq->GetName.name[0], IoAdapter->Name) ;
311 DBG_TRC(("xdi: Adapter %d / Name '%s'",
312 IoAdapter->ANum, IoAdapter->Name))
315 syncReq->GetName.name[0] = '\0' ;
317 case IDI_SYNC_REQ_GET_SERIAL:
320 syncReq->GetSerial.serial = IoAdapter->serialNo ;
321 DBG_TRC(("xdi: Adapter %d / SerialNo %ld",
322 IoAdapter->ANum, IoAdapter->serialNo))
325 syncReq->GetSerial.serial = 0 ;
327 case IDI_SYNC_REQ_GET_CARDTYPE:
330 syncReq->GetCardType.cardtype = IoAdapter->cardType ;
331 DBG_TRC(("xdi: Adapter %d / CardType %ld",
332 IoAdapter->ANum, IoAdapter->cardType))
335 syncReq->GetCardType.cardtype = 0 ;
337 case IDI_SYNC_REQ_GET_XLOG:
340 pcm_req (IoAdapter, e) ;
345 case IDI_SYNC_REQ_GET_DBG_XLOG:
348 pcm_req (IoAdapter, e) ;
353 case IDI_SYNC_REQ_GET_FEATURES:
356 syncReq->GetFeatures.features =
357 (unsigned short)IoAdapter->features ;
360 syncReq->GetFeatures.features = 0 ;
362 case IDI_SYNC_REQ_PORTDRV_HOOK:
365 DBG_TRC(("Xdi:IDI_SYNC_REQ_PORTDRV_HOOK - ignored"))
373 DBG_FTL(("xdi: unknown Req 0 / Rc %d !", e->Rc))
378 DBG_TRC(("xdi: Id 0x%x / Req 0x%x / Rc 0x%x", e->Id, e->Req, e->Rc))
381 DBG_FTL(("xdi: uninitialized Adapter used - ignore request"))
384 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
388 if ( !(e->Id &0x1f) )
390 if ( IoAdapter->e_count >= IoAdapter->e_max )
392 DBG_FTL(("xdi: all Ids in use (max=%d) --> Req ignored",
394 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
400 for ( i = 1 ; IoAdapter->e_tbl[i].e ; ++i ) ;
401 IoAdapter->e_tbl[i].e = e ;
402 IoAdapter->e_count++ ;
412 * if the entity is still busy, ignore the request call
414 if ( e->More & XBUSY )
416 DBG_FTL(("xdi: Id 0x%x busy --> Req 0x%x ignored", e->Id, e->Req))
417 if ( !IoAdapter->trapped && IoAdapter->trapFnc )
419 IoAdapter->trapFnc (IoAdapter) ;
421 Firs trap, also notify user if supported
423 if (IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
424 (*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
427 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
431 * initialize transmit status variables
438 * queue this entity in the adapter request queue
440 IoAdapter->e_tbl[i].next = 0 ;
441 if ( IoAdapter->head )
443 IoAdapter->e_tbl[IoAdapter->tail].next = i ;
444 IoAdapter->tail = i ;
448 IoAdapter->head = i ;
449 IoAdapter->tail = i ;
452 * queue the DPC to process the request
454 diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
455 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
457 /* ---------------------------------------------------------------------
459 --------------------------------------------------------------------- */
460 void DIDpcRoutine (struct _diva_os_soft_isr* psoft_isr, void* Context) {
461 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)Context ;
462 ADAPTER* a = &IoAdapter->a ;
463 diva_os_atomic_t* pin_dpc = &IoAdapter->in_dpc;
464 if (diva_os_atomic_increment (pin_dpc) == 1) {
466 if ( IoAdapter->tst_irq (a) )
468 if ( !IoAdapter->Unavailable )
470 IoAdapter->clr_irq (a) ;
473 } while (diva_os_atomic_decrement (pin_dpc) > 0);
474 /* ----------------------------------------------------------------
475 Look for XLOG request (cards with indirect addressing)
476 ---------------------------------------------------------------- */
477 if (IoAdapter->pcm_pending) {
478 struct pc_maint *pcm;
479 diva_os_spin_lock_magic_t OldIrql ;
480 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
483 pcm = (struct pc_maint *)IoAdapter->pcm_data;
484 switch (IoAdapter->pcm_pending) {
485 case 1: /* ask card for XLOG */
486 a->ram_out (a, &IoAdapter->pcm->rc, 0) ;
487 a->ram_out (a, &IoAdapter->pcm->req, pcm->req) ;
488 IoAdapter->pcm_pending = 2;
490 case 2: /* Try to get XLOG from the card */
491 if ((int)(a->ram_in (a, &IoAdapter->pcm->rc))) {
492 a->ram_in_buffer (a, IoAdapter->pcm, pcm, sizeof(*pcm)) ;
493 IoAdapter->pcm_pending = 3;
496 case 3: /* let XDI recovery XLOG */
499 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
503 /* ---------------------------------------------------------------- */
506 /* --------------------------------------------------------------------------
508 -------------------------------------------------------------------------- */
510 pcm_req (PISDN_ADAPTER IoAdapter, ENTITY *e)
512 diva_os_spin_lock_magic_t OldIrql ;
514 ADAPTER *a = &IoAdapter->a ;
515 struct pc_maint *pcm = (struct pc_maint *)&e->Ind ;
517 * special handling of I/O based card interface
518 * the memory access isn't an atomic operation !
520 if ( IoAdapter->Properties.Card == CARD_MAE )
522 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
525 IoAdapter->pcm_data = (void *)pcm;
526 IoAdapter->pcm_pending = 1;
527 diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
528 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
531 for ( rc = 0, i = (IoAdapter->trapped ? 3000 : 250) ; !rc && (i > 0) ; --i )
534 if (IoAdapter->pcm_pending == 3) {
535 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
538 IoAdapter->pcm_pending = 0;
539 IoAdapter->pcm_data = NULL ;
540 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
545 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
548 diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
549 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
553 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
556 IoAdapter->pcm_pending = 0;
557 IoAdapter->pcm_data = NULL ;
558 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
564 * memory based shared ram is accessible from different
565 * processors without disturbing concurrent processes.
567 a->ram_out (a, &IoAdapter->pcm->rc, 0) ;
568 a->ram_out (a, &IoAdapter->pcm->req, pcm->req) ;
569 for ( i = (IoAdapter->trapped ? 3000 : 250) ; --i > 0 ; )
572 rc = (int)(a->ram_in (a, &IoAdapter->pcm->rc)) ;
575 a->ram_in_buffer (a, IoAdapter->pcm, pcm, sizeof(*pcm)) ;
580 if ( IoAdapter->trapFnc )
582 int trapped = IoAdapter->trapped;
583 IoAdapter->trapFnc (IoAdapter) ;
585 Firs trap, also notify user if supported
587 if (!trapped && IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
588 (*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
592 /*------------------------------------------------------------------*/
593 /* ram access functions for memory mapped cards */
594 /*------------------------------------------------------------------*/
595 byte mem_in (ADAPTER *a, void *addr)
600 Base = (volatile byte *)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
601 val = *(Base + (unsigned long)addr);
602 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
605 word mem_inw (ADAPTER *a, void *addr)
610 Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
611 val = READ_WORD((Base + (unsigned long)addr));
612 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
615 void mem_in_dw (ADAPTER *a, void *addr, dword* data, int dwords)
617 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
619 *data++ = READ_DWORD((Base + (unsigned long)addr));
622 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
624 void mem_in_buffer (ADAPTER *a, void *addr, void *buffer, word length)
626 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
627 memcpy (buffer, (void *)(Base + (unsigned long)addr), length);
628 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
630 void mem_look_ahead (ADAPTER *a, PBUFFER *RBuffer, ENTITY *e)
632 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io ;
633 IoAdapter->RBuffer.length = mem_inw (a, &RBuffer->length) ;
634 mem_in_buffer (a, RBuffer->P, IoAdapter->RBuffer.P,
635 IoAdapter->RBuffer.length) ;
636 e->RBuffer = (DBUFFER *)&IoAdapter->RBuffer ;
638 void mem_out (ADAPTER *a, void *addr, byte data)
640 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
641 *(Base + (unsigned long)addr) = data ;
642 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
644 void mem_outw (ADAPTER *a, void *addr, word data)
646 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
647 WRITE_WORD((Base + (unsigned long)addr), data);
648 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
650 void mem_out_dw (ADAPTER *a, void *addr, const dword* data, int dwords)
652 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
654 WRITE_DWORD((Base + (unsigned long)addr), *data);
658 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
660 void mem_out_buffer (ADAPTER *a, void *addr, void *buffer, word length)
662 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
663 memcpy ((void *)(Base + (unsigned long)addr), buffer, length) ;
664 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
666 void mem_inc (ADAPTER *a, void *addr)
668 volatile byte* Base = (volatile byte*)DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
669 byte x = *(Base + (unsigned long)addr);
670 *(Base + (unsigned long)addr) = x + 1 ;
671 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
673 /*------------------------------------------------------------------*/
674 /* ram access functions for io-mapped cards */
675 /*------------------------------------------------------------------*/
676 byte io_in(ADAPTER * a, void * adr)
679 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
680 outppw(Port + 4, (word)(unsigned long)adr);
682 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
685 word io_inw(ADAPTER * a, void * adr)
688 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
689 outppw(Port + 4, (word)(unsigned long)adr);
691 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
694 void io_in_buffer(ADAPTER * a, void * adr, void * buffer, word len)
696 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
697 byte* P = (byte*)buffer;
699 outppw(Port+4, (word)(unsigned long)adr);
702 adr = ((byte *) adr) + 1;
705 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
709 outppw(Port+4, (word)(unsigned long)adr);
710 inppw_buffer (Port, P, len+1);
711 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
713 void io_look_ahead(ADAPTER * a, PBUFFER * RBuffer, ENTITY * e)
715 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
716 outppw(Port+4, (word)(unsigned long)RBuffer);
717 ((PISDN_ADAPTER)a->io)->RBuffer.length = inppw(Port);
718 inppw_buffer (Port, ((PISDN_ADAPTER)a->io)->RBuffer.P, ((PISDN_ADAPTER)a->io)->RBuffer.length + 1);
719 e->RBuffer = (DBUFFER *) &(((PISDN_ADAPTER)a->io)->RBuffer);
720 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
722 void io_out(ADAPTER * a, void * adr, byte data)
724 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
725 outppw(Port+4, (word)(unsigned long)adr);
727 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
729 void io_outw(ADAPTER * a, void * adr, word data)
731 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
732 outppw(Port+4, (word)(unsigned long)adr);
734 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
736 void io_out_buffer(ADAPTER * a, void * adr, void * buffer, word len)
738 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
739 byte* P = (byte*)buffer;
741 outppw(Port+4, (word)(unsigned long)adr);
744 adr = ((byte *) adr) + 1;
747 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
751 outppw(Port+4, (word)(unsigned long)adr);
752 outppw_buffer (Port, P, len+1);
753 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
755 void io_inc(ADAPTER * a, void * adr)
758 byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
759 outppw(Port+4, (word)(unsigned long)adr);
761 outppw(Port+4, (word)(unsigned long)adr);
763 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
765 /*------------------------------------------------------------------*/
766 /* OS specific functions related to queuing of entities */
767 /*------------------------------------------------------------------*/
768 void free_entity(ADAPTER * a, byte e_no)
770 PISDN_ADAPTER IoAdapter;
771 diva_os_spin_lock_magic_t irql;
772 IoAdapter = (PISDN_ADAPTER) a->io;
773 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_free");
774 IoAdapter->e_tbl[e_no].e = NULL;
775 IoAdapter->e_count--;
776 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_free");
778 void assign_queue(ADAPTER * a, byte e_no, word ref)
780 PISDN_ADAPTER IoAdapter;
781 diva_os_spin_lock_magic_t irql;
782 IoAdapter = (PISDN_ADAPTER) a->io;
783 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_assign");
784 IoAdapter->e_tbl[e_no].assign_ref = ref;
785 IoAdapter->e_tbl[e_no].next = (byte)IoAdapter->assign;
786 IoAdapter->assign = e_no;
787 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_assign");
789 byte get_assign(ADAPTER * a, word ref)
791 PISDN_ADAPTER IoAdapter;
792 diva_os_spin_lock_magic_t irql;
794 IoAdapter = (PISDN_ADAPTER) a->io;
795 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
798 for(e_no = (byte)IoAdapter->assign;
799 e_no && IoAdapter->e_tbl[e_no].assign_ref!=ref;
800 e_no = IoAdapter->e_tbl[e_no].next);
801 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
806 void req_queue(ADAPTER * a, byte e_no)
808 PISDN_ADAPTER IoAdapter;
809 diva_os_spin_lock_magic_t irql;
810 IoAdapter = (PISDN_ADAPTER) a->io;
811 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_q");
812 IoAdapter->e_tbl[e_no].next = 0;
813 if(IoAdapter->head) {
814 IoAdapter->e_tbl[IoAdapter->tail].next = e_no;
815 IoAdapter->tail = e_no;
818 IoAdapter->head = e_no;
819 IoAdapter->tail = e_no;
821 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_q");
823 byte look_req(ADAPTER * a)
825 PISDN_ADAPTER IoAdapter;
826 IoAdapter = (PISDN_ADAPTER) a->io;
827 return ((byte)IoAdapter->head) ;
829 void next_req(ADAPTER * a)
831 PISDN_ADAPTER IoAdapter;
832 diva_os_spin_lock_magic_t irql;
833 IoAdapter = (PISDN_ADAPTER) a->io;
834 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_next");
835 IoAdapter->head = IoAdapter->e_tbl[IoAdapter->head].next;
836 if(!IoAdapter->head) IoAdapter->tail = 0;
837 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_next");
839 /*------------------------------------------------------------------*/
840 /* memory map functions */
841 /*------------------------------------------------------------------*/
842 ENTITY * entity_ptr(ADAPTER * a, byte e_no)
844 PISDN_ADAPTER IoAdapter;
845 IoAdapter = (PISDN_ADAPTER) a->io;
846 return (IoAdapter->e_tbl[e_no].e);
848 void * PTR_X(ADAPTER * a, ENTITY * e)
850 return ((void *) e->X);
852 void * PTR_R(ADAPTER * a, ENTITY * e)
854 return ((void *) e->R);
856 void * PTR_P(ADAPTER * a, ENTITY * e, void * P)
860 void CALLBACK(ADAPTER * a, ENTITY * e)
862 if ( e && e->callback )
865 /* --------------------------------------------------------------------------
866 routines for aligned reading and writing on RISC
867 -------------------------------------------------------------------------- */
868 void outp_words_from_buffer (word* adr, byte* P, dword len)
872 while (i < (len & 0xfffffffe)) {
878 void inp_words_to_buffer (word* adr, byte* P, dword len)
882 while (i < (len & 0xfffffffe)) {
885 P[i++] = (byte)(w>>8);