4 Copyright (c) Eicon Networks, 2002.
6 This source file is supplied for the use with
7 Eicon Networks range of DIVA Server Adapters.
9 Eicon File Revision : 2.1
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
18 implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
19 See the GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32 #include "pkmaint.h" /* pc_main.h, packed in os-dependent fashion */
36 extern ADAPTER * adapter[MAX_ADAPTER];
37 extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
38 void request (PISDN_ADAPTER, ENTITY *);
39 void pcm_req (PISDN_ADAPTER, ENTITY *);
40 /* --------------------------------------------------------------------------
42 -------------------------------------------------------------------------- */
/*
 * Macro body fragment: expands to a per-adapter trampoline Request<N>()
 * that forwards an ENTITY request to IoAdapters[N]->DIRequest, and is a
 * no-op while adapter slot N is not (yet) initialised.
 * NOTE(review): the #define line introducing this macro is not visible in
 * this chunk of the file.
 */
44 static void Request##N(ENTITY *e) \
45 { if ( IoAdapters[N] ) (* IoAdapters[N]->DIRequest)(IoAdapters[N], e) ; }
/*
 * Jump table mapping adapter number -> Request<N> trampoline; IDI clients
 * call through Requests[i] instead of touching IoAdapters[] directly.
 * NOTE(review): the closing brace of this initialiser is not visible in
 * this chunk.
 */
62 IDI_CALL Requests[MAX_ADAPTER] =
63 { &Request0, &Request1, &Request2, &Request3,
64 &Request4, &Request5, &Request6, &Request7,
65 &Request8, &Request9, &Request10, &Request11,
66 &Request12, &Request13, &Request14, &Request15
68 /*****************************************************************************/
70 This array should indicate all new services, that this version of XDI
71 is able to provide to its clients
/*
 * Feature bitmask advertised to IDI clients via
 * IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES (see request() below).
 * The RX-DMA related bits are only advertised when the driver is built
 * with DIVA_IDI_RX_DMA.
 * NOTE(review): the #endif matching the #if below and the initialiser's
 * terminator are not visible in this chunk.
 */
73 static byte extended_xdi_features[DIVA_XDI_EXTENDED_FEATURES_MAX_SZ+1] = {
74 (DIVA_XDI_EXTENDED_FEATURES_VALID |
75 DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR |
76 DIVA_XDI_EXTENDED_FEATURE_CAPI_PRMS |
77 #if defined(DIVA_IDI_RX_DMA)
78 DIVA_XDI_EXTENDED_FEATURE_CMA |
79 DIVA_XDI_EXTENDED_FEATURE_RX_DMA |
80 DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA |
82 DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC),
/*
 * dump_xlog_buffer - recover and log the adapter's XLOG trace buffer.
 *
 * Walks the word-array described by xlogDesc (buf/cnt/out) entry by entry
 * and emits each record through DBG_FTL_MXLOG, bracketed by recovery
 * start/end banners.  A zero length word ends the walk; a length word
 * that does not point past the current offset indicates corruption.
 *
 * IoAdapter - adapter the buffer belongs to (used for the log banner)
 * xlogDesc  - descriptor of the XLOG ring: base, entry count, out offset
 */
87 dump_xlog_buffer (PISDN_ADAPTER IoAdapter, Xdesc *xlogDesc)
90 word *Xlog = xlogDesc->buf ;
91 word logCnt = xlogDesc->cnt ;
92 word logOut = xlogDesc->out / sizeof(*Xlog) ;
93 DBG_FTL(("%s: ************* XLOG recovery (%d) *************",
94 &IoAdapter->Name[0], (int)logCnt))
95 DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
/* each XLOG record starts with a word holding the end offset of the
 * record; 0 terminates the buffer */
96 for ( ; logCnt > 0 ; --logCnt )
98 if ( !GET_WORD(&Xlog[logOut]) )
/* a record whose end offset is not beyond its own start is corrupt */
104 if ( GET_WORD(&Xlog[logOut]) <= (logOut * sizeof(*Xlog)) )
108 DBG_FTL(("Possibly corrupted XLOG: %d entries left",
113 logLen = (dword)(GET_WORD(&Xlog[logOut]) - (logOut * sizeof(*Xlog))) ;
114 DBG_FTL_MXLOG(( (char *)&Xlog[logOut + 1], (dword)(logLen - 2) ))
/* advance to the next record (offset is in bytes, Xlog[] in words) */
115 logOut = (GET_WORD(&Xlog[logOut]) + 1) / sizeof(*Xlog) ;
117 DBG_FTL(("%s: ***************** end of XLOG *****************",
118 &IoAdapter->Name[0]))
120 /*****************************************************************************/
/*
 * Human-readable names for the MIPS exception cause codes; indexed by
 * bits 6..2 of the trapped CPU's cause register in dump_trap_frame().
 * NOTE(review): several table entries and the initialiser's terminator
 * are not visible in this chunk.
 */
121 char *(ExceptionCauseTable[]) =
127 "Address error load",
128 "Address error store",
129 "Instruction load bus error",
130 "Data load/store bus error",
/* NOTE(review): "Reverd" looks like a typo for "Reserved" -- confirm
 * before changing this diagnostic string */
133 "Reverd instruction",
134 "Coprocessor unusable",
138 "Floating Point Exception",
157 dump_trap_frame (PISDN_ADAPTER IoAdapter, byte __iomem *exceptionFrame)
159 MP_XCPTC __iomem *xcept = (MP_XCPTC __iomem *)exceptionFrame ;
161 regs = &xcept->regs[0] ;
162 DBG_FTL(("%s: ***************** CPU TRAPPED *****************",
163 &IoAdapter->Name[0]))
164 DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
165 DBG_FTL(("Cause: %s",
166 ExceptionCauseTable[(READ_DWORD(&xcept->cr) & 0x0000007c) >> 2]))
167 DBG_FTL(("sr 0x%08x cr 0x%08x epc 0x%08x vaddr 0x%08x",
168 READ_DWORD(&xcept->sr), READ_DWORD(&xcept->cr),
169 READ_DWORD(&xcept->epc), READ_DWORD(&xcept->vaddr)))
170 DBG_FTL(("zero 0x%08x at 0x%08x v0 0x%08x v1 0x%08x",
171 READ_DWORD(®s[ 0]), READ_DWORD(®s[ 1]),
172 READ_DWORD(®s[ 2]), READ_DWORD(®s[ 3])))
173 DBG_FTL(("a0 0x%08x a1 0x%08x a2 0x%08x a3 0x%08x",
174 READ_DWORD(®s[ 4]), READ_DWORD(®s[ 5]),
175 READ_DWORD(®s[ 6]), READ_DWORD(®s[ 7])))
176 DBG_FTL(("t0 0x%08x t1 0x%08x t2 0x%08x t3 0x%08x",
177 READ_DWORD(®s[ 8]), READ_DWORD(®s[ 9]),
178 READ_DWORD(®s[10]), READ_DWORD(®s[11])))
179 DBG_FTL(("t4 0x%08x t5 0x%08x t6 0x%08x t7 0x%08x",
180 READ_DWORD(®s[12]), READ_DWORD(®s[13]),
181 READ_DWORD(®s[14]), READ_DWORD(®s[15])))
182 DBG_FTL(("s0 0x%08x s1 0x%08x s2 0x%08x s3 0x%08x",
183 READ_DWORD(®s[16]), READ_DWORD(®s[17]),
184 READ_DWORD(®s[18]), READ_DWORD(®s[19])))
185 DBG_FTL(("s4 0x%08x s5 0x%08x s6 0x%08x s7 0x%08x",
186 READ_DWORD(®s[20]), READ_DWORD(®s[21]),
187 READ_DWORD(®s[22]), READ_DWORD(®s[23])))
188 DBG_FTL(("t8 0x%08x t9 0x%08x k0 0x%08x k1 0x%08x",
189 READ_DWORD(®s[24]), READ_DWORD(®s[25]),
190 READ_DWORD(®s[26]), READ_DWORD(®s[27])))
191 DBG_FTL(("gp 0x%08x sp 0x%08x s8 0x%08x ra 0x%08x",
192 READ_DWORD(®s[28]), READ_DWORD(®s[29]),
193 READ_DWORD(®s[30]), READ_DWORD(®s[31])))
194 DBG_FTL(("md 0x%08x|%08x resvd 0x%08x class 0x%08x",
195 READ_DWORD(&xcept->mdhi), READ_DWORD(&xcept->mdlo),
196 READ_DWORD(&xcept->reseverd), READ_DWORD(&xcept->xclass)))
198 /* --------------------------------------------------------------------------
199 Real XDI Request function
200 -------------------------------------------------------------------------- */
/*
 * request - XDI request entry point for one adapter.
 *
 * If e->Req is zero the ENTITY actually carries an IDI_SYNC_REQ and is
 * handled synchronously right here (DMA descriptor alloc/free, adapter
 * identification/parameter queries, XLOG retrieval via pcm_req(), ...).
 * Otherwise this is a normal asynchronous request: on ASSIGN (low five
 * Id bits zero) a free e_tbl slot is claimed, then the entity is linked
 * into the adapter's request queue and the request DPC is scheduled.
 * All queue/table manipulation happens under data_spin_lock.
 * NOTE(review): several lines of this function (braces, the switch
 * header, break statements) are not visible in this chunk.
 */
201 void request(PISDN_ADAPTER IoAdapter, ENTITY * e)
204 diva_os_spin_lock_magic_t irql;
206 * if the Req field in the entity structure is 0,
207 * we treat this request as a special function call
211 IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e ;
/* --- synchronous (Req == 0) function-call dispatch --- */
214 #if defined(DIVA_IDI_RX_DMA)
/* allocate or free one RX-DMA map descriptor under data_spin_lock;
 * descriptor_number == -1 reports failure / no DMA support */
215 case IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION: {
216 diva_xdi_dma_descriptor_operation_t* pI = \
217 &syncReq->xdi_dma_descriptor_operation.info;
218 if (!IoAdapter->dma_map) {
220 pI->descriptor_number = -1;
223 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "dma_op");
224 if (pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC) {
225 pI->descriptor_number = diva_alloc_dma_map_entry (\
226 (struct _diva_dma_map_entry*)IoAdapter->dma_map);
227 if (pI->descriptor_number >= 0) {
231 DBG_TRC(("A(%d) dma_alloc(%d)",
232 IoAdapter->ANum, pI->descriptor_number))
234 diva_get_dma_map_entry (\
235 (struct _diva_dma_map_entry*)IoAdapter->dma_map,
236 pI->descriptor_number,
237 &local_addr, &dma_magic);
238 pI->descriptor_address = local_addr;
239 pI->descriptor_magic = dma_magic;
244 } else if ((pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE) &&
245 (pI->descriptor_number >= 0)) {
247 DBG_TRC(("A(%d) dma_free(%d)", IoAdapter->ANum, pI->descriptor_number))
249 diva_free_dma_map_entry((struct _diva_dma_map_entry*)IoAdapter->dma_map,
250 pI->descriptor_number);
251 pI->descriptor_number = -1;
254 pI->descriptor_number = -1;
257 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "dma_op");
/* report logical adapter number / controller position */
260 case IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER: {
261 diva_xdi_get_logical_adapter_number_s_t *pI = \
262 &syncReq->xdi_logical_adapter_number.info;
263 pI->logical_adapter_number = IoAdapter->ANum;
264 pI->controller = IoAdapter->ControllerNumber;
265 pI->total_controllers = IoAdapter->Properties.Adapters;
/* copy the CAPI configuration flags, bounded by the caller's
 * structure_length so older clients keep working */
267 case IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS: {
268 diva_xdi_get_capi_parameters_t prms, *pI = &syncReq->xdi_capi_prms.info;
269 memset (&prms, 0x00, sizeof(prms));
270 prms.structure_length = MIN(sizeof(prms), pI->structure_length);
271 memset (pI, 0x00, pI->structure_length);
272 prms.flag_dynamic_l1_down = (IoAdapter->capi_cfg.cfg_1 & \
273 DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? 1 : 0;
274 prms.group_optimization_enabled = (IoAdapter->capi_cfg.cfg_1 & \
275 DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON) ? 1 : 0;
276 memcpy (pI, &prms, prms.structure_length);
278 case IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR:
279 syncReq->xdi_sdram_bar.info.bar = IoAdapter->sdram_bar;
/* copy extended_xdi_features[] into the caller's buffer; the top bit of
 * buffer_length_in_bytes signals truncation to the caller */
281 case IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES: {
283 diva_xdi_get_extended_xdi_features_t* pI =\
284 &syncReq->xdi_extended_features.info;
285 pI->buffer_length_in_bytes &= ~0x80000000;
286 if (pI->buffer_length_in_bytes && pI->features) {
287 memset (pI->features, 0x00, pI->buffer_length_in_bytes);
289 for (i = 0; ((pI->features) && (i < pI->buffer_length_in_bytes) &&
290 (i < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ)); i++) {
291 pI->features[i] = extended_xdi_features[i];
293 if ((pI->buffer_length_in_bytes < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ) ||
295 pI->buffer_length_in_bytes =\
296 (0x80000000 | DIVA_XDI_EXTENDED_FEATURES_MAX_SZ);
299 case IDI_SYNC_REQ_XDI_GET_STREAM:
301 diva_xdi_provide_istream_info (&IoAdapter->a,
302 &syncReq->xdi_stream_info.info);
304 syncReq->xdi_stream_info.info.provided_service = 0;
/* simple identification queries; each falls back to a neutral value
 * when the adapter is not available */
307 case IDI_SYNC_REQ_GET_NAME:
310 strcpy (&syncReq->GetName.name[0], IoAdapter->Name) ;
311 DBG_TRC(("xdi: Adapter %d / Name '%s'",
312 IoAdapter->ANum, IoAdapter->Name))
315 syncReq->GetName.name[0] = '\0' ;
317 case IDI_SYNC_REQ_GET_SERIAL:
320 syncReq->GetSerial.serial = IoAdapter->serialNo ;
321 DBG_TRC(("xdi: Adapter %d / SerialNo %ld",
322 IoAdapter->ANum, IoAdapter->serialNo))
325 syncReq->GetSerial.serial = 0 ;
327 case IDI_SYNC_REQ_GET_CARDTYPE:
330 syncReq->GetCardType.cardtype = IoAdapter->cardType ;
331 DBG_TRC(("xdi: Adapter %d / CardType %ld",
332 IoAdapter->ANum, IoAdapter->cardType))
335 syncReq->GetCardType.cardtype = 0 ;
/* XLOG queries are forwarded to the maintenance interface */
337 case IDI_SYNC_REQ_GET_XLOG:
340 pcm_req (IoAdapter, e) ;
345 case IDI_SYNC_REQ_GET_DBG_XLOG:
348 pcm_req (IoAdapter, e) ;
353 case IDI_SYNC_REQ_GET_FEATURES:
356 syncReq->GetFeatures.features =
357 (unsigned short)IoAdapter->features ;
360 syncReq->GetFeatures.features = 0 ;
362 case IDI_SYNC_REQ_PORTDRV_HOOK:
365 DBG_TRC(("Xdi:IDI_SYNC_REQ_PORTDRV_HOOK - ignored"))
373 DBG_FTL(("xdi: unknown Req 0 / Rc %d !", e->Rc))
/* --- normal asynchronous request path --- */
378 DBG_TRC(("xdi: Id 0x%x / Req 0x%x / Rc 0x%x", e->Id, e->Req, e->Rc))
381 DBG_FTL(("xdi: uninitialized Adapter used - ignore request"))
384 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
/* ASSIGN request (no Id yet): claim a free slot in e_tbl */
388 if ( !(e->Id &0x1f) )
390 if ( IoAdapter->e_count >= IoAdapter->e_max )
392 DBG_FTL(("xdi: all Ids in use (max=%d) --> Req ignored",
394 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
/* linear scan for the first free table entry (slot 0 is reserved) */
400 for ( i = 1 ; IoAdapter->e_tbl[i].e ; ++i ) ;
401 IoAdapter->e_tbl[i].e = e ;
402 IoAdapter->e_count++ ;
412 * if the entity is still busy, ignore the request call
414 if ( e->More & XBUSY )
416 DBG_FTL(("xdi: Id 0x%x busy --> Req 0x%x ignored", e->Id, e->Req))
/* a busy entity may mean the card CPU died: run the trap check once */
417 if ( !IoAdapter->trapped && IoAdapter->trapFnc )
419 IoAdapter->trapFnc (IoAdapter) ;
421 First trap, also notify user if supported
423 if (IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
424 (*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
427 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
431 * initialize transmit status variables
438 * queue this entity in the adapter request queue
440 IoAdapter->e_tbl[i].next = 0 ;
441 if ( IoAdapter->head )
443 IoAdapter->e_tbl[IoAdapter->tail].next = i ;
444 IoAdapter->tail = i ;
/* queue was empty: this entity becomes head and tail */
448 IoAdapter->head = i ;
449 IoAdapter->tail = i ;
452 * queue the DPC to process the request
454 diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
455 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
457 /* ---------------------------------------------------------------------
459 --------------------------------------------------------------------- */
/*
 * DIDpcRoutine - deferred procedure for one adapter's soft ISR.
 *
 * Drains pending card interrupts (tst_irq / clr_irq) and then advances
 * the XLOG-retrieval state machine for cards with indirect (I/O based)
 * addressing, driven by IoAdapter->pcm_pending:
 *   1 - post the XLOG request to the card,
 *   2 - poll for the card's answer and copy it out,
 *   3 - answer complete, waiting for pcm_req() to consume it.
 *
 * psoft_isr - soft ISR handle (unused here; Context carries the adapter)
 * Context   - the PISDN_ADAPTER this DPC serves
 */
460 void DIDpcRoutine (struct _diva_os_soft_isr* psoft_isr, void* Context) {
461 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)Context ;
462 ADAPTER* a = &IoAdapter->a ;
463 diva_os_atomic_t* pin_dpc = &IoAdapter->in_dpc;
/* in_dpc acts as a re-entrancy guard: only the first entry runs the
 * loop; late arrivals just bump the counter and the loop re-runs */
464 if (diva_os_atomic_increment (pin_dpc) == 1) {
466 if ( IoAdapter->tst_irq (a) )
468 if ( !IoAdapter->Unavailable )
470 IoAdapter->clr_irq (a) ;
473 } while (diva_os_atomic_decrement (pin_dpc) > 0);
474 /* ----------------------------------------------------------------
475 Look for XLOG request (cards with indirect addressing)
476 ---------------------------------------------------------------- */
477 if (IoAdapter->pcm_pending) {
478 struct pc_maint *pcm;
479 diva_os_spin_lock_magic_t OldIrql ;
480 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
483 pcm = (struct pc_maint *)IoAdapter->pcm_data;
484 switch (IoAdapter->pcm_pending) {
485 case 1: /* ask card for XLOG */
486 a->ram_out (a, &IoAdapter->pcm->rc, 0) ;
487 a->ram_out (a, &IoAdapter->pcm->req, pcm->req) ;
488 IoAdapter->pcm_pending = 2;
490 case 2: /* Try to get XLOG from the card */
491 if ((int)(a->ram_in (a, &IoAdapter->pcm->rc))) {
492 a->ram_in_buffer (a, IoAdapter->pcm, pcm, sizeof(*pcm)) ;
493 IoAdapter->pcm_pending = 3;
496 case 3: /* let XDI recovery XLOG */
499 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
503 /* ---------------------------------------------------------------- */
506 /* --------------------------------------------------------------------------
508 -------------------------------------------------------------------------- */
/*
 * pcm_req - run one maintenance (pc_maint / XLOG) request against the
 * card.
 *
 * For I/O-mapped cards (CARD_MAE) shared-memory access is not atomic, so
 * the request is handed to the DPC via pcm_pending/pcm_data and this
 * function polls (up to 3000 iterations when the card already trapped,
 * 250 otherwise) until the DPC reports state 3 (answer copied).  For
 * memory-mapped cards the request/response handshake is done directly
 * through a->ram_out/ram_in.  If no answer arrives, the trap check
 * (trapFnc) runs and, on a first trap, the OS notification callback is
 * invoked.
 * NOTE(review): several lines (braces, spin-lock argument lines, sleep
 * calls inside the poll loops) are not visible in this chunk.
 */
510 pcm_req (PISDN_ADAPTER IoAdapter, ENTITY *e)
512 diva_os_spin_lock_magic_t OldIrql ;
514 ADAPTER *a = &IoAdapter->a ;
515 struct pc_maint *pcm = (struct pc_maint *)&e->Ind ;
517 * special handling of I/O based card interface
518 * the memory access isn't an atomic operation !
520 if ( IoAdapter->Properties.Card == CARD_MAE )
522 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
525 IoAdapter->pcm_data = (void *)pcm;
526 IoAdapter->pcm_pending = 1;
527 diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
528 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
531 for ( rc = 0, i = (IoAdapter->trapped ? 3000 : 250) ; !rc && (i > 0) ; --i )
534 if (IoAdapter->pcm_pending == 3) {
535 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
538 IoAdapter->pcm_pending = 0;
539 IoAdapter->pcm_data = NULL ;
540 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
545 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
548 diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
549 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
553 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
556 IoAdapter->pcm_pending = 0;
557 IoAdapter->pcm_data = NULL ;
558 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
564 * memory based shared ram is accessible from different
565 * processors without disturbing concurrent processes.
567 a->ram_out (a, &IoAdapter->pcm->rc, 0) ;
568 a->ram_out (a, &IoAdapter->pcm->req, pcm->req) ;
569 for ( i = (IoAdapter->trapped ? 3000 : 250) ; --i > 0 ; )
572 rc = (int)(a->ram_in (a, &IoAdapter->pcm->rc)) ;
575 a->ram_in_buffer (a, IoAdapter->pcm, pcm, sizeof(*pcm)) ;
/* no answer from the card: check whether its CPU trapped */
580 if ( IoAdapter->trapFnc )
582 int trapped = IoAdapter->trapped;
583 IoAdapter->trapFnc (IoAdapter) ;
585 First trap, also notify user if supported
587 if (!trapped && IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
588 (*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
592 /*------------------------------------------------------------------*/
593 /* ram access functions for memory mapped cards */
594 /*------------------------------------------------------------------*/
/* read one byte from card RAM at offset addr */
595 byte mem_in (ADAPTER *a, void *addr)
598 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
599 val = READ_BYTE(Base + (unsigned long)addr);
600 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
/* read one 16-bit word from card RAM at offset addr */
603 word mem_inw (ADAPTER *a, void *addr)
606 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
607 val = READ_WORD((Base + (unsigned long)addr));
608 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
/* read 'dwords' 32-bit words from card RAM into data[] */
611 void mem_in_dw (ADAPTER *a, void *addr, dword* data, int dwords)
613 volatile byte __iomem * Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
615 *data++ = READ_DWORD((Base + (unsigned long)addr));
618 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
/* bulk copy 'length' bytes of card RAM into buffer */
620 void mem_in_buffer (ADAPTER *a, void *addr, void *buffer, word length)
622 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
623 memcpy_fromio(buffer, (Base + (unsigned long)addr), length);
624 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
/* prefetch the card's receive buffer into IoAdapter->RBuffer and point
 * the entity's RBuffer at the local copy */
626 void mem_look_ahead (ADAPTER *a, PBUFFER *RBuffer, ENTITY *e)
628 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io ;
629 IoAdapter->RBuffer.length = mem_inw (a, &RBuffer->length) ;
630 mem_in_buffer (a, RBuffer->P, IoAdapter->RBuffer.P,
631 IoAdapter->RBuffer.length) ;
632 e->RBuffer = (DBUFFER *)&IoAdapter->RBuffer ;
/* write one byte to card RAM at offset addr */
634 void mem_out (ADAPTER *a, void *addr, byte data)
636 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
637 WRITE_BYTE(Base + (unsigned long)addr, data);
638 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
/* write one 16-bit word to card RAM at offset addr */
640 void mem_outw (ADAPTER *a, void *addr, word data)
642 volatile byte __iomem * Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
643 WRITE_WORD((Base + (unsigned long)addr), data);
644 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
/* write 'dwords' 32-bit words from data[] to card RAM */
646 void mem_out_dw (ADAPTER *a, void *addr, const dword* data, int dwords)
648 volatile byte __iomem * Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
650 WRITE_DWORD((Base + (unsigned long)addr), *data);
654 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
/* bulk copy 'length' bytes from buffer into card RAM */
656 void mem_out_buffer (ADAPTER *a, void *addr, void *buffer, word length)
658 volatile byte __iomem * Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
659 memcpy_toio((Base + (unsigned long)addr), buffer, length) ;
660 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
/* read-modify-write increment of one byte in card RAM (not atomic) */
662 void mem_inc (ADAPTER *a, void *addr)
664 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
665 byte x = READ_BYTE(Base + (unsigned long)addr);
666 WRITE_BYTE(Base + (unsigned long)addr, x + 1);
667 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
669 /*------------------------------------------------------------------*/
670 /* ram access functions for io-mapped cards */
671 /*------------------------------------------------------------------*/
/* io-mapped access pattern: write the card-RAM address to the address
 * register at Port+4, then transfer data through the data register.
 * Each helper below mirrors the corresponding mem_* function. */
/* read one byte from card RAM at offset adr */
672 byte io_in(ADAPTER * a, void * adr)
675 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
676 outppw(Port + 4, (word)(unsigned long)adr);
678 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/* read one 16-bit word from card RAM at offset adr */
681 word io_inw(ADAPTER * a, void * adr)
684 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
685 outppw(Port + 4, (word)(unsigned long)adr);
687 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/* bulk read of len bytes; an odd start address is handled by reading the
 * first byte separately before the word-sized burst */
690 void io_in_buffer(ADAPTER * a, void * adr, void * buffer, word len)
692 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
693 byte* P = (byte*)buffer;
695 outppw(Port+4, (word)(unsigned long)adr);
698 adr = ((byte *) adr) + 1;
701 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
705 outppw(Port+4, (word)(unsigned long)adr);
706 inppw_buffer (Port, P, len+1);
707 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/* prefetch the card's receive buffer into the adapter-local RBuffer */
709 void io_look_ahead(ADAPTER * a, PBUFFER * RBuffer, ENTITY * e)
711 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
712 outppw(Port+4, (word)(unsigned long)RBuffer);
713 ((PISDN_ADAPTER)a->io)->RBuffer.length = inppw(Port);
714 inppw_buffer (Port, ((PISDN_ADAPTER)a->io)->RBuffer.P, ((PISDN_ADAPTER)a->io)->RBuffer.length + 1);
715 e->RBuffer = (DBUFFER *) &(((PISDN_ADAPTER)a->io)->RBuffer);
716 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/* write one byte to card RAM at offset adr */
718 void io_out(ADAPTER * a, void * adr, byte data)
720 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
721 outppw(Port+4, (word)(unsigned long)adr);
723 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/* write one 16-bit word to card RAM at offset adr */
725 void io_outw(ADAPTER * a, void * adr, word data)
727 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
728 outppw(Port+4, (word)(unsigned long)adr);
730 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/* bulk write of len bytes; odd start address handled like io_in_buffer */
732 void io_out_buffer(ADAPTER * a, void * adr, void * buffer, word len)
734 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
735 byte* P = (byte*)buffer;
737 outppw(Port+4, (word)(unsigned long)adr);
740 adr = ((byte *) adr) + 1;
743 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
747 outppw(Port+4, (word)(unsigned long)adr);
748 outppw_buffer (Port, P, len+1);
749 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
/* read-modify-write increment of one byte in card RAM (not atomic) */
751 void io_inc(ADAPTER * a, void * adr)
754 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
755 outppw(Port+4, (word)(unsigned long)adr);
757 outppw(Port+4, (word)(unsigned long)adr);
759 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
761 /*------------------------------------------------------------------*/
762 /* OS specific functions related to queuing of entities */
763 /*------------------------------------------------------------------*/
/* release e_tbl slot e_no and drop the live-entity count; called when an
 * entity is removed.  All e_tbl/queue state is guarded by data_spin_lock */
764 void free_entity(ADAPTER * a, byte e_no)
766 PISDN_ADAPTER IoAdapter;
767 diva_os_spin_lock_magic_t irql;
768 IoAdapter = (PISDN_ADAPTER) a->io;
769 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_free");
770 IoAdapter->e_tbl[e_no].e = NULL;
771 IoAdapter->e_count--;
772 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_free");
/* push slot e_no onto the singly linked assign list, keyed by ref, so a
 * later get_assign(ref) can find it */
774 void assign_queue(ADAPTER * a, byte e_no, word ref)
776 PISDN_ADAPTER IoAdapter;
777 diva_os_spin_lock_magic_t irql;
778 IoAdapter = (PISDN_ADAPTER) a->io;
779 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_assign");
780 IoAdapter->e_tbl[e_no].assign_ref = ref;
781 IoAdapter->e_tbl[e_no].next = (byte)IoAdapter->assign;
782 IoAdapter->assign = e_no;
783 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_assign");
/* walk the assign list for the slot registered under ref; returns the
 * slot number, or 0 when no entry matches */
785 byte get_assign(ADAPTER * a, word ref)
787 PISDN_ADAPTER IoAdapter;
788 diva_os_spin_lock_magic_t irql;
790 IoAdapter = (PISDN_ADAPTER) a->io;
791 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
794 for(e_no = (byte)IoAdapter->assign;
795 e_no && IoAdapter->e_tbl[e_no].assign_ref!=ref;
796 e_no = IoAdapter->e_tbl[e_no].next);
797 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
/* append slot e_no to the adapter request queue (head/tail/next links) */
802 void req_queue(ADAPTER * a, byte e_no)
804 PISDN_ADAPTER IoAdapter;
805 diva_os_spin_lock_magic_t irql;
806 IoAdapter = (PISDN_ADAPTER) a->io;
807 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_q");
808 IoAdapter->e_tbl[e_no].next = 0;
809 if(IoAdapter->head) {
810 IoAdapter->e_tbl[IoAdapter->tail].next = e_no;
811 IoAdapter->tail = e_no;
814 IoAdapter->head = e_no;
815 IoAdapter->tail = e_no;
817 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_q");
/* peek the request queue head without removing it (0 = queue empty) */
819 byte look_req(ADAPTER * a)
821 PISDN_ADAPTER IoAdapter;
822 IoAdapter = (PISDN_ADAPTER) a->io;
823 return ((byte)IoAdapter->head) ;
/* pop the current head of the request queue; clears tail when the queue
 * becomes empty */
825 void next_req(ADAPTER * a)
827 PISDN_ADAPTER IoAdapter;
828 diva_os_spin_lock_magic_t irql;
829 IoAdapter = (PISDN_ADAPTER) a->io;
830 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_next");
831 IoAdapter->head = IoAdapter->e_tbl[IoAdapter->head].next;
832 if(!IoAdapter->head) IoAdapter->tail = 0;
833 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_next");
835 /*------------------------------------------------------------------*/
836 /* memory map functions */
837 /*------------------------------------------------------------------*/
/* map an e_tbl slot number back to its ENTITY pointer */
838 ENTITY * entity_ptr(ADAPTER * a, byte e_no)
840 PISDN_ADAPTER IoAdapter;
841 IoAdapter = (PISDN_ADAPTER) a->io;
842 return (IoAdapter->e_tbl[e_no].e);
/* transmit (X) descriptor array of the entity */
844 void * PTR_X(ADAPTER * a, ENTITY * e)
846 return ((void *) e->X);
/* receive (R) descriptor array of the entity */
848 void * PTR_R(ADAPTER * a, ENTITY * e)
850 return ((void *) e->R);
/* resolve a buffer pointer P for this entity (body not visible here) */
852 void * PTR_P(ADAPTER * a, ENTITY * e, void * P)
/* invoke the entity's completion callback, if one is registered */
856 void CALLBACK(ADAPTER * a, ENTITY * e)
858 if ( e && e->callback )
861 /* --------------------------------------------------------------------------
862 routines for aligned reading and writing on RISC
863 -------------------------------------------------------------------------- */
/* write the byte buffer P to the word-wide register adr; the loop below
 * handles the even-length prefix word by word (a trailing odd byte is
 * handled after it -- that code is not visible in this chunk) */
864 void outp_words_from_buffer (word __iomem * adr, byte* P, dword len)
868 while (i < (len & 0xfffffffe)) {
874 void inp_words_to_buffer (word __iomem * adr, byte* P, dword len)
878 while (i < (len & 0xfffffffe)) {
881 P[i++] = (byte)(w>>8);