2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 * 2001 Ben Collins <bcollins@debian.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
29 * . DMA mmap for iso receive
30 * . Config ROM generation
32 * Things implemented, but still in test phase:
34 * . Async Stream Packets Transmit (Receive done via Iso interface)
36 * Things not implemented:
37 * . DMA error recovery
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41 * added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
47 * Adam J Richter <adam@yggdrasil.com>
48 * . Use of pci_class to find device
50 * Emilie Chung <emilie.chung@axis.com>
51 * . Tip on Async Request Filter
53 * Pascal Drolet <pascal.drolet@informission.ca>
54 * . Various tips for optimization and functionnalities
56 * Robert Ficklin <rficklin@westengineering.com>
57 * . Loop in irq_handler
59 * James Goodwin <jamesg@Filanet.com>
60 * . Various tips on initialization, self-id reception, etc.
62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63 * . Apple PowerBook detection
65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66 * . Reset the board properly before leaving + misc cleanups
68 * Leon van Stuivenberg <leonvs@iae.nl>
71 * Ben Collins <bcollins@debian.org>
72 * . Working big-endian support
73 * . Updated to 2.4.x module scheme (PCI aswell)
74 * . Config ROM generation
76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
77 * . Reworked code for initiating bus resets
78 * (long, short, with or without hold-off)
80 * Nandu Santhi <contactnandu@users.sourceforge.net>
81 * . Added support for nVidia nForce2 onboard Firewire chipset
85 #include <linux/config.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/slab.h>
89 #include <linux/interrupt.h>
90 #include <linux/wait.h>
91 #include <linux/errno.h>
92 #include <linux/module.h>
93 #include <linux/moduleparam.h>
94 #include <linux/pci.h>
96 #include <linux/poll.h>
97 #include <linux/irq.h>
98 #include <asm/byteorder.h>
99 #include <asm/atomic.h>
100 #include <asm/uaccess.h>
101 #include <linux/delay.h>
102 #include <linux/spinlock.h>
104 #include <asm/pgtable.h>
105 #include <asm/page.h>
106 #include <linux/sched.h>
107 #include <linux/types.h>
108 #include <linux/vmalloc.h>
109 #include <linux/init.h>
111 #ifdef CONFIG_PPC_PMAC
112 #include <asm/machdep.h>
113 #include <asm/pmac_feature.h>
114 #include <asm/prom.h>
115 #include <asm/pci-bridge.h>
119 #include "ieee1394.h"
120 #include "ieee1394_types.h"
124 #include "ieee1394_core.h"
125 #include "highlevel.h"
126 #include "ohci1394.h"
128 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
129 #define OHCI1394_DEBUG
136 #ifdef OHCI1394_DEBUG
137 #define DBGMSG(fmt, args...) \
138 printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
140 #define DBGMSG(fmt, args...)
143 #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
144 #define OHCI_DMA_ALLOC(fmt, args...) \
145 HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
146 ++global_outstanding_dmas, ## args)
147 #define OHCI_DMA_FREE(fmt, args...) \
148 HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
149 --global_outstanding_dmas, ## args)
150 static int global_outstanding_dmas = 0;
152 #define OHCI_DMA_ALLOC(fmt, args...)
153 #define OHCI_DMA_FREE(fmt, args...)
156 /* print general (card independent) information */
157 #define PRINT_G(level, fmt, args...) \
158 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
160 /* print card specific information */
161 #define PRINT(level, fmt, args...) \
162 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
164 static char version[] __devinitdata =
165 "$Rev: 1203 $ Ben Collins <bcollins@debian.org>";
167 /* Module Parameters */
168 static int phys_dma = 1;
169 module_param(phys_dma, int, 0644);
170 MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
172 static void dma_trm_tasklet(unsigned long data);
173 static void dma_trm_reset(struct dma_trm_ctx *d);
175 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
176 enum context_type type, int ctx, int num_desc,
177 int buf_size, int split_buf_size, int context_base);
178 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
179 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
181 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
182 enum context_type type, int ctx, int num_desc,
185 static void ohci1394_pci_remove(struct pci_dev *pdev);
187 #ifndef __LITTLE_ENDIAN
188 static unsigned hdr_sizes[] =
190 3, /* TCODE_WRITEQ */
191 4, /* TCODE_WRITEB */
192 3, /* TCODE_WRITE_RESPONSE */
196 3, /* TCODE_READQ_RESPONSE */
197 4, /* TCODE_READB_RESPONSE */
198 1, /* TCODE_CYCLE_START (???) */
199 4, /* TCODE_LOCK_REQUEST */
200 2, /* TCODE_ISO_DATA */
201 4, /* TCODE_LOCK_RESPONSE */
205 static inline void packet_swab(quadlet_t *data, int tcode)
207 size_t size = hdr_sizes[tcode];
209 if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
213 data[size] = swab32(data[size]);
216 /* Don't waste cycles on same sex byte swaps */
217 #define packet_swab(w,x)
218 #endif /* !LITTLE_ENDIAN */
220 /***********************************
221 * IEEE-1394 functionality section *
222 ***********************************/
224 static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
230 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
232 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
234 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
235 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
241 r = reg_read(ohci, OHCI1394_PhyControl);
243 if (i >= OHCI_LOOP_COUNT)
244 PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
245 r, r & 0x80000000, i);
247 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
249 return (r & 0x00ff0000) >> 16;
252 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
258 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
260 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
262 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
263 r = reg_read(ohci, OHCI1394_PhyControl);
264 if (!(r & 0x00004000))
270 if (i == OHCI_LOOP_COUNT)
271 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
272 r, r & 0x00004000, i);
274 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
279 /* Or's our value into the current value */
280 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
284 old = get_phy_reg (ohci, addr);
286 set_phy_reg (ohci, addr, old);
291 static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
292 int phyid, int isroot)
294 quadlet_t *q = ohci->selfid_buf_cpu;
295 quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
299 /* Check status of self-id reception */
301 if (ohci->selfid_swap)
302 q0 = le32_to_cpu(q[0]);
306 if ((self_id_count & 0x80000000) ||
307 ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
309 "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
310 self_id_count, q0, ohci->self_id_errors);
312 /* Tip by James Goodwin <jamesg@Filanet.com>:
313 * We had an error, generate another bus reset in response. */
314 if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
315 set_phy_reg_mask (ohci, 1, 0x40);
316 ohci->self_id_errors++;
319 "Too many errors on SelfID error reception, giving up!");
324 /* SelfID Ok, reset error counter. */
325 ohci->self_id_errors = 0;
327 size = ((self_id_count & 0x00001FFC) >> 2) - 1;
331 if (ohci->selfid_swap) {
332 q0 = le32_to_cpu(q[0]);
333 q1 = le32_to_cpu(q[1]);
340 DBGMSG ("SelfID packet 0x%x received", q0);
341 hpsb_selfid_received(host, cpu_to_be32(q0));
342 if (((q0 & 0x3f000000) >> 24) == phyid)
343 DBGMSG ("SelfID for this node is 0x%08x", q0);
346 "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
352 DBGMSG("SelfID complete");
357 static void ohci_soft_reset(struct ti_ohci *ohci) {
360 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
362 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
363 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
367 DBGMSG ("Soft reset finished");
371 /* Generate the dma receive prgs and start the context */
372 static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
374 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
377 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
379 for (i=0; i<d->num_desc; i++) {
382 c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
386 d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);
388 /* End of descriptor list? */
389 if (i + 1 < d->num_desc) {
390 d->prg_cpu[i]->branchAddress =
391 cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
393 d->prg_cpu[i]->branchAddress =
394 cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
397 d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
398 d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
404 if (d->type == DMA_CTX_ISO) {
405 /* Clear contextControl */
406 reg_write(ohci, d->ctrlClear, 0xffffffff);
408 /* Set bufferFill, isochHeader, multichannel for IR context */
409 reg_write(ohci, d->ctrlSet, 0xd0000000);
411 /* Set the context match register to match on all tags */
412 reg_write(ohci, d->ctxtMatch, 0xf0000000);
414 /* Clear the multi channel mask high and low registers */
415 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
416 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
418 /* Set up isoRecvIntMask to generate interrupts */
419 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
422 /* Tell the controller where the first AR program is */
423 reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
426 reg_write(ohci, d->ctrlSet, 0x00008000);
428 DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
431 /* Initialize the dma transmit context */
432 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
434 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
436 /* Stop the context */
437 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
441 d->free_prgs = d->num_desc;
442 d->branchAddrPtr = NULL;
443 INIT_LIST_HEAD(&d->fifo_list);
444 INIT_LIST_HEAD(&d->pending_list);
446 if (d->type == DMA_CTX_ISO) {
447 /* enable interrupts */
448 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
451 DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
454 /* Count the number of available iso contexts */
455 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
460 reg_write(ohci, reg, 0xffffffff);
461 tmp = reg_read(ohci, reg);
463 DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
465 /* Count the number of contexts */
466 for (i=0; i<32; i++) {
473 /* Global initialization */
474 static void ohci_initialize(struct ti_ohci *ohci)
480 spin_lock_init(&ohci->phy_reg_lock);
481 spin_lock_init(&ohci->event_lock);
483 /* Put some defaults to these undefined bus options */
484 buf = reg_read(ohci, OHCI1394_BusOptions);
485 buf |= 0xE0000000; /* Enable IRMC, CMC and ISC */
486 buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
487 buf &= ~0x18000000; /* Disable PMC and BMC */
488 reg_write(ohci, OHCI1394_BusOptions, buf);
490 /* Set the bus number */
491 reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
493 /* Enable posted writes */
494 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);
496 /* Clear link control register */
497 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
499 /* Enable cycle timer and cycle master and set the IRM
500 * contender bit in our self ID packets. */
501 reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_CycleTimerEnable |
502 OHCI1394_LinkControl_CycleMaster);
503 set_phy_reg_mask(ohci, 4, 0xc0);
505 /* Set up self-id dma buffer */
506 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
508 /* enable self-id and phys */
509 reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
510 OHCI1394_LinkControl_RcvPhyPkt);
512 /* Set the Config ROM mapping register */
513 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
515 /* Now get our max packet size */
516 ohci->max_packet_size =
517 1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
519 /* Don't accept phy packets into AR request context */
520 reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
522 /* Clear the interrupt mask */
523 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
524 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
526 /* Clear the interrupt mask */
527 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
528 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
530 /* Initialize AR dma */
531 initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
532 initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
534 /* Initialize AT dma */
535 initialize_dma_trm_ctx(&ohci->at_req_context);
536 initialize_dma_trm_ctx(&ohci->at_resp_context);
539 * Accept AT requests from all nodes. This probably
540 * will have to be controlled from the subsystem
541 * on a per node basis.
543 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);
545 /* Specify AT retries */
546 reg_write(ohci, OHCI1394_ATRetries,
547 OHCI1394_MAX_AT_REQ_RETRIES |
548 (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
549 (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
551 /* We don't want hardware swapping */
552 reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
554 /* Enable interrupts */
555 reg_write(ohci, OHCI1394_IntMaskSet,
556 OHCI1394_unrecoverableError |
557 OHCI1394_masterIntEnable |
559 OHCI1394_selfIDComplete |
562 OHCI1394_respTxComplete |
563 OHCI1394_reqTxComplete |
566 OHCI1394_cycleInconsistent);
569 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
571 buf = reg_read(ohci, OHCI1394_Version);
573 sprintf (irq_buf, "%d", ohci->dev->irq);
575 sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
577 PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
578 "MMIO=[%lx-%lx] Max Packet=[%d]",
579 ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
580 ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
581 pci_resource_start(ohci->dev, 0),
582 pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
583 ohci->max_packet_size);
585 /* Check all of our ports to make sure that if anything is
586 * connected, we enable that port. */
587 num_ports = get_phy_reg(ohci, 2) & 0xf;
588 for (i = 0; i < num_ports; i++) {
591 set_phy_reg(ohci, 7, i);
592 status = get_phy_reg(ohci, 8);
595 set_phy_reg(ohci, 8, status & ~1);
598 /* Serial EEPROM Sanity check. */
599 if ((ohci->max_packet_size < 512) ||
600 (ohci->max_packet_size > 4096)) {
601 /* Serial EEPROM contents are suspect, set a sane max packet
602 * size and print the raw contents for bug reports if verbose
603 * debug is enabled. */
604 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
608 PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
609 "attempting to setting max_packet_size to 512 bytes");
610 reg_write(ohci, OHCI1394_BusOptions,
611 (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
612 ohci->max_packet_size = 512;
613 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
614 PRINT(KERN_DEBUG, " EEPROM Present: %d",
615 (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
616 reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);
620 (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
623 for (i = 0; i < 0x20; i++) {
624 reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
625 PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
626 (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
633 * Insert a packet in the DMA fifo and generate the DMA prg
634 * FIXME: rewrite the program in order to accept packets crossing
636 * check also that a single dma descriptor doesn't cross a
639 static void insert_packet(struct ti_ohci *ohci,
640 struct dma_trm_ctx *d, struct hpsb_packet *packet)
643 int idx = d->prg_ind;
645 DBGMSG("Inserting packet for node " NODE_BUS_FMT
646 ", tlabel=%d, tcode=0x%x, speed=%d",
647 NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
648 packet->tcode, packet->speed_code);
650 d->prg_cpu[idx]->begin.address = 0;
651 d->prg_cpu[idx]->begin.branchAddress = 0;
653 if (d->type == DMA_CTX_ASYNC_RESP) {
655 * For response packets, we need to put a timeout value in
656 * the 16 lower bits of the status... let's try 1 sec timeout
658 cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
659 d->prg_cpu[idx]->begin.status = cpu_to_le32(
660 (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
661 ((cycleTimer&0x01fff000)>>12));
663 DBGMSG("cycleTimer: %08x timeStamp: %08x",
664 cycleTimer, d->prg_cpu[idx]->begin.status);
666 d->prg_cpu[idx]->begin.status = 0;
668 if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {
670 if (packet->type == hpsb_raw) {
671 d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
672 d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
673 d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
675 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
676 (packet->header[0] & 0xFFFF);
678 if (packet->tcode == TCODE_ISO_DATA) {
679 /* Sending an async stream packet */
680 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
682 /* Sending a normal async request or response */
683 d->prg_cpu[idx]->data[1] =
684 (packet->header[1] & 0xFFFF) |
685 (packet->header[0] & 0xFFFF0000);
686 d->prg_cpu[idx]->data[2] = packet->header[2];
687 d->prg_cpu[idx]->data[3] = packet->header[3];
689 packet_swab(d->prg_cpu[idx]->data, packet->tcode);
692 if (packet->data_size) { /* block transmit */
693 if (packet->tcode == TCODE_STREAM_DATA){
694 d->prg_cpu[idx]->begin.control =
695 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
696 DMA_CTL_IMMEDIATE | 0x8);
698 d->prg_cpu[idx]->begin.control =
699 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
700 DMA_CTL_IMMEDIATE | 0x10);
702 d->prg_cpu[idx]->end.control =
703 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
708 * Check that the packet data buffer
709 * does not cross a page boundary.
711 * XXX Fix this some day. eth1394 seems to trigger
712 * it, but ignoring it doesn't seem to cause a
716 if (cross_bound((unsigned long)packet->data,
717 packet->data_size)>0) {
718 /* FIXME: do something about it */
720 "%s: packet data addr: %p size %Zd bytes "
721 "cross page boundary", __FUNCTION__,
722 packet->data, packet->data_size);
725 d->prg_cpu[idx]->end.address = cpu_to_le32(
726 pci_map_single(ohci->dev, packet->data,
729 OHCI_DMA_ALLOC("single, block transmit packet");
731 d->prg_cpu[idx]->end.branchAddress = 0;
732 d->prg_cpu[idx]->end.status = 0;
733 if (d->branchAddrPtr)
734 *(d->branchAddrPtr) =
735 cpu_to_le32(d->prg_bus[idx] | 0x3);
737 &(d->prg_cpu[idx]->end.branchAddress);
738 } else { /* quadlet transmit */
739 if (packet->type == hpsb_raw)
740 d->prg_cpu[idx]->begin.control =
741 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
745 (packet->header_size + 4));
747 d->prg_cpu[idx]->begin.control =
748 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
752 packet->header_size);
754 if (d->branchAddrPtr)
755 *(d->branchAddrPtr) =
756 cpu_to_le32(d->prg_bus[idx] | 0x2);
758 &(d->prg_cpu[idx]->begin.branchAddress);
761 } else { /* iso packet */
762 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
763 (packet->header[0] & 0xFFFF);
764 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
765 packet_swab(d->prg_cpu[idx]->data, packet->tcode);
767 d->prg_cpu[idx]->begin.control =
768 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
769 DMA_CTL_IMMEDIATE | 0x8);
770 d->prg_cpu[idx]->end.control =
771 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
776 d->prg_cpu[idx]->end.address = cpu_to_le32(
777 pci_map_single(ohci->dev, packet->data,
778 packet->data_size, PCI_DMA_TODEVICE));
779 OHCI_DMA_ALLOC("single, iso transmit packet");
781 d->prg_cpu[idx]->end.branchAddress = 0;
782 d->prg_cpu[idx]->end.status = 0;
783 DBGMSG("Iso xmit context info: header[%08x %08x]\n"
784 " begin=%08x %08x %08x %08x\n"
785 " %08x %08x %08x %08x\n"
786 " end =%08x %08x %08x %08x",
787 d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
788 d->prg_cpu[idx]->begin.control,
789 d->prg_cpu[idx]->begin.address,
790 d->prg_cpu[idx]->begin.branchAddress,
791 d->prg_cpu[idx]->begin.status,
792 d->prg_cpu[idx]->data[0],
793 d->prg_cpu[idx]->data[1],
794 d->prg_cpu[idx]->data[2],
795 d->prg_cpu[idx]->data[3],
796 d->prg_cpu[idx]->end.control,
797 d->prg_cpu[idx]->end.address,
798 d->prg_cpu[idx]->end.branchAddress,
799 d->prg_cpu[idx]->end.status);
800 if (d->branchAddrPtr)
801 *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
802 d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
806 /* queue the packet in the appropriate context queue */
807 list_add_tail(&packet->driver_list, &d->fifo_list);
808 d->prg_ind = (d->prg_ind + 1) % d->num_desc;
812 * This function fills the FIFO with the (eventual) pending packets
813 * and runs or wakes up the DMA prg if necessary.
815 * The function MUST be called with the d->lock held.
817 static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
819 struct hpsb_packet *packet, *ptmp;
820 int idx = d->prg_ind;
823 /* insert the packets into the dma fifo */
824 list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
828 /* For the first packet only */
830 z = (packet->data_size) ? 3 : 2;
832 /* Insert the packet */
833 list_del_init(&packet->driver_list);
834 insert_packet(ohci, d, packet);
837 /* Nothing must have been done, either no free_prgs or no packets */
841 /* Is the context running ? (should be unless it is
842 the first packet to be sent in this context) */
843 if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
844 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
846 DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
847 reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);
849 /* Check that the node id is valid, and not 63 */
850 if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
851 PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
853 reg_write(ohci, d->ctrlSet, 0x8000);
855 /* Wake up the dma context if necessary */
856 if (!(reg_read(ohci, d->ctrlSet) & 0x400))
857 DBGMSG("Waking transmit DMA ctx=%d",d->ctx);
859 /* do this always, to avoid race condition */
860 reg_write(ohci, d->ctrlSet, 0x1000);
866 /* Transmission of an async or iso packet */
867 static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
869 struct ti_ohci *ohci = host->hostdata;
870 struct dma_trm_ctx *d;
873 if (packet->data_size > ohci->max_packet_size) {
875 "Transmit packet size %Zd is too big",
880 /* Decide whether we have an iso, a request, or a response packet */
881 if (packet->type == hpsb_raw)
882 d = &ohci->at_req_context;
883 else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
884 /* The legacy IT DMA context is initialized on first
885 * use. However, the alloc cannot be run from
886 * interrupt context, so we bail out if that is the
887 * case. I don't see anyone sending ISO packets from
888 * interrupt context anyway... */
890 if (ohci->it_legacy_context.ohci == NULL) {
891 if (in_interrupt()) {
893 "legacy IT context cannot be initialized during interrupt");
897 if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
898 DMA_CTX_ISO, 0, IT_NUM_DESC,
899 OHCI1394_IsoXmitContextBase) < 0) {
901 "error initializing legacy IT context");
905 initialize_dma_trm_ctx(&ohci->it_legacy_context);
908 d = &ohci->it_legacy_context;
909 } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
910 d = &ohci->at_resp_context;
912 d = &ohci->at_req_context;
914 spin_lock_irqsave(&d->lock,flags);
916 list_add_tail(&packet->driver_list, &d->pending_list);
918 dma_trm_flush(ohci, d);
920 spin_unlock_irqrestore(&d->lock,flags);
925 static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
927 struct ti_ohci *ohci = host->hostdata;
936 phy_reg = get_phy_reg(ohci, 5);
938 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
941 phy_reg = get_phy_reg(ohci, 1);
943 set_phy_reg(ohci, 1, phy_reg); /* set IBR */
945 case SHORT_RESET_NO_FORCE_ROOT:
946 phy_reg = get_phy_reg(ohci, 1);
947 if (phy_reg & 0x80) {
949 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
952 phy_reg = get_phy_reg(ohci, 5);
954 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
956 case LONG_RESET_NO_FORCE_ROOT:
957 phy_reg = get_phy_reg(ohci, 1);
960 set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
962 case SHORT_RESET_FORCE_ROOT:
963 phy_reg = get_phy_reg(ohci, 1);
964 if (!(phy_reg & 0x80)) {
966 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
969 phy_reg = get_phy_reg(ohci, 5);
971 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
973 case LONG_RESET_FORCE_ROOT:
974 phy_reg = get_phy_reg(ohci, 1);
976 set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
983 case GET_CYCLE_COUNTER:
984 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
987 case SET_CYCLE_COUNTER:
988 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
992 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
995 case ACT_CYCLE_MASTER:
997 /* check if we are root and other nodes are present */
998 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
999 if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1001 * enable cycleTimer, cycleMaster
1003 DBGMSG("Cycle master enabled");
1004 reg_write(ohci, OHCI1394_LinkControlSet,
1005 OHCI1394_LinkControl_CycleTimerEnable |
1006 OHCI1394_LinkControl_CycleMaster);
1009 /* disable cycleTimer, cycleMaster, cycleSource */
1010 reg_write(ohci, OHCI1394_LinkControlClear,
1011 OHCI1394_LinkControl_CycleTimerEnable |
1012 OHCI1394_LinkControl_CycleMaster |
1013 OHCI1394_LinkControl_CycleSource);
1017 case CANCEL_REQUESTS:
1018 DBGMSG("Cancel request received");
1019 dma_trm_reset(&ohci->at_req_context);
1020 dma_trm_reset(&ohci->at_resp_context);
1023 case ISO_LISTEN_CHANNEL:
1027 if (arg<0 || arg>63) {
1029 "%s: IS0 listen channel %d is out of range",
1034 /* activate the legacy IR context */
1035 if (ohci->ir_legacy_context.ohci == NULL) {
1036 if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
1037 DMA_CTX_ISO, 0, IR_NUM_DESC,
1038 IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
1039 OHCI1394_IsoRcvContextBase) < 0) {
1040 PRINT(KERN_ERR, "%s: failed to allocate an IR context",
1044 ohci->ir_legacy_channels = 0;
1045 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1047 DBGMSG("ISO receive legacy context activated");
1050 mask = (u64)0x1<<arg;
1052 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1054 if (ohci->ISO_channel_usage & mask) {
1056 "%s: IS0 listen channel %d is already used",
1058 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1062 ohci->ISO_channel_usage |= mask;
1063 ohci->ir_legacy_channels |= mask;
1066 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1069 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1072 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1073 DBGMSG("Listening enabled on channel %d", arg);
1076 case ISO_UNLISTEN_CHANNEL:
1080 if (arg<0 || arg>63) {
1082 "%s: IS0 unlisten channel %d is out of range",
1087 mask = (u64)0x1<<arg;
1089 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1091 if (!(ohci->ISO_channel_usage & mask)) {
1093 "%s: IS0 unlisten channel %d is not used",
1095 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1099 ohci->ISO_channel_usage &= ~mask;
1100 ohci->ir_legacy_channels &= ~mask;
1103 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1106 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1109 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1110 DBGMSG("Listening disabled on channel %d", arg);
1112 if (ohci->ir_legacy_channels == 0) {
1113 stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1114 free_dma_rcv_ctx(&ohci->ir_legacy_context);
1115 DBGMSG("ISO receive legacy context deactivated");
1120 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1127 /***********************************
1128 * rawiso ISO reception *
1129 ***********************************/
1132 We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1133 buffer is split into "blocks" (regions described by one DMA
1134 descriptor). Each block must be one page or less in size, and
1135 must not cross a page boundary.
1137 There is one little wrinkle with buffer-fill mode: a packet that
1138 starts in the final block may wrap around into the first block. But
1139 the user API expects all packets to be contiguous. Our solution is
1140 to keep the very last page of the DMA buffer in reserve - if a
1141 packet spans the gap, we copy its tail into this page.
1144 struct ohci_iso_recv {
1145 struct ti_ohci *ohci;
1147 struct ohci1394_iso_tasklet task;
1150 enum { BUFFER_FILL_MODE = 0,
1151 PACKET_PER_BUFFER_MODE = 1 } dma_mode;
1153 /* memory and PCI mapping for the DMA descriptors */
1154 struct dma_prog_region prog;
1155 struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */
1157 /* how many DMA blocks fit in the buffer */
1158 unsigned int nblocks;
1160 /* stride of DMA blocks */
1161 unsigned int buf_stride;
1163 /* number of blocks to batch between interrupts */
1164 int block_irq_interval;
1166 /* block that DMA will finish next */
1169 /* (buffer-fill only) block that the reader will release next */
1172 /* (buffer-fill only) bytes of buffer the reader has released,
1173 less than one block */
1176 /* (buffer-fill only) buffer offset at which the next packet will appear */
1179 /* OHCI DMA context control registers */
1180 u32 ContextControlSet;
1181 u32 ContextControlClear;
1186 static void ohci_iso_recv_task(unsigned long data);
1187 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1188 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1189 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1190 static void ohci_iso_recv_program(struct hpsb_iso *iso);
1192 static int ohci_iso_recv_init(struct hpsb_iso *iso)
1194 struct ti_ohci *ohci = iso->host->hostdata;
1195 struct ohci_iso_recv *recv;
1199 recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
1203 iso->hostdata = recv;
1205 recv->task_active = 0;
1206 dma_prog_region_init(&recv->prog);
1209 /* use buffer-fill mode, unless irq_interval is 1
1210 (note: multichannel requires buffer-fill) */
1212 if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
1213 iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
1214 recv->dma_mode = PACKET_PER_BUFFER_MODE;
1216 recv->dma_mode = BUFFER_FILL_MODE;
1219 /* set nblocks, buf_stride, block_irq_interval */
1221 if (recv->dma_mode == BUFFER_FILL_MODE) {
1222 recv->buf_stride = PAGE_SIZE;
1224 /* one block per page of data in the DMA buffer, minus the final guard page */
1225 recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1226 if (recv->nblocks < 3) {
1227 DBGMSG("ohci_iso_recv_init: DMA buffer too small");
1231 /* iso->irq_interval is in packets - translate that to blocks */
1232 if (iso->irq_interval == 1)
1233 recv->block_irq_interval = 1;
1235 recv->block_irq_interval = iso->irq_interval *
1236 ((recv->nblocks+1)/iso->buf_packets);
1237 if (recv->block_irq_interval*4 > recv->nblocks)
1238 recv->block_irq_interval = recv->nblocks/4;
1239 if (recv->block_irq_interval < 1)
1240 recv->block_irq_interval = 1;
1243 int max_packet_size;
1245 recv->nblocks = iso->buf_packets;
1246 recv->block_irq_interval = iso->irq_interval;
1247 if (recv->block_irq_interval * 4 > iso->buf_packets)
1248 recv->block_irq_interval = iso->buf_packets / 4;
1249 if (recv->block_irq_interval < 1)
1250 recv->block_irq_interval = 1;
1252 /* choose a buffer stride */
1253 /* must be a power of 2, and <= PAGE_SIZE */
1255 max_packet_size = iso->buf_size / iso->buf_packets;
1257 for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1258 recv->buf_stride *= 2);
1260 if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1261 recv->buf_stride > PAGE_SIZE) {
1262 /* this shouldn't happen, but anyway... */
1263 DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
1268 recv->block_reader = 0;
1269 recv->released_bytes = 0;
1270 recv->block_dma = 0;
1271 recv->dma_offset = 0;
1273 /* size of DMA program = one descriptor per block */
1274 if (dma_prog_region_alloc(&recv->prog,
1275 sizeof(struct dma_cmd) * recv->nblocks,
1279 recv->block = (struct dma_cmd*) recv->prog.kvirt;
1281 ohci1394_init_iso_tasklet(&recv->task,
1282 iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1284 ohci_iso_recv_task, (unsigned long) iso);
1286 if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
1289 recv->task_active = 1;
1291 /* recv context registers are spaced 32 bytes apart */
1292 ctx = recv->task.context;
1293 recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1294 recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1295 recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1296 recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1298 if (iso->channel == -1) {
1299 /* clear multi-channel selection mask */
1300 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1301 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1304 /* write the DMA program */
1305 ohci_iso_recv_program(iso);
1307 DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1308 " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1309 recv->dma_mode == BUFFER_FILL_MODE ?
1310 "buffer-fill" : "packet-per-buffer",
1311 iso->buf_size/PAGE_SIZE, iso->buf_size,
1312 recv->nblocks, recv->buf_stride, recv->block_irq_interval);
1317 ohci_iso_recv_shutdown(iso);
1321 static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1323 struct ohci_iso_recv *recv = iso->hostdata;
1325 /* disable interrupts */
1326 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1329 ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1332 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1334 struct ohci_iso_recv *recv = iso->hostdata;
1336 if (recv->task_active) {
1337 ohci_iso_recv_stop(iso);
1338 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1339 recv->task_active = 0;
1342 dma_prog_region_free(&recv->prog);
1344 iso->hostdata = NULL;
1347 /* set up a "gapped" ring buffer DMA program */
1348 static void ohci_iso_recv_program(struct hpsb_iso *iso)
1350 struct ohci_iso_recv *recv = iso->hostdata;
1353 /* address of 'branch' field in previous DMA descriptor */
1354 u32 *prev_branch = NULL;
1356 for (blk = 0; blk < recv->nblocks; blk++) {
1359 /* the DMA descriptor */
1360 struct dma_cmd *cmd = &recv->block[blk];
1362 /* offset of the DMA descriptor relative to the DMA prog buffer */
1363 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1365 /* offset of this packet's data within the DMA buffer */
1366 unsigned long buf_offset = blk * recv->buf_stride;
1368 if (recv->dma_mode == BUFFER_FILL_MODE) {
1369 control = 2 << 28; /* INPUT_MORE */
1371 control = 3 << 28; /* INPUT_LAST */
1374 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1376 /* interrupt on last block, and at intervals */
1377 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1378 control |= 3 << 20; /* want interrupt */
1381 control |= 3 << 18; /* enable branch to address */
1382 control |= recv->buf_stride;
1384 cmd->control = cpu_to_le32(control);
1385 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1386 cmd->branchAddress = 0; /* filled in on next loop */
1387 cmd->status = cpu_to_le32(recv->buf_stride);
1389 /* link the previous descriptor to this one */
1391 *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1394 prev_branch = &cmd->branchAddress;
1397 /* the final descriptor's branch address and Z should be left at 0 */
1400 /* listen or unlisten to a specific channel (multi-channel mode only) */
1401 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1403 struct ohci_iso_recv *recv = iso->hostdata;
1407 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1410 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1414 reg_write(recv->ohci, reg, (1 << i));
1416 /* issue a dummy read to force all PCI writes to be posted immediately */
1418 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1421 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1423 struct ohci_iso_recv *recv = iso->hostdata;
1426 for (i = 0; i < 64; i++) {
1427 if (mask & (1ULL << i)) {
1429 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1431 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1434 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1436 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1440 /* issue a dummy read to force all PCI writes to be posted immediately */
1442 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1445 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
1447 struct ohci_iso_recv *recv = iso->hostdata;
1448 struct ti_ohci *ohci = recv->ohci;
1449 u32 command, contextMatch;
1451 reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
1454 /* always keep ISO headers */
1455 command = (1 << 30);
1457 if (recv->dma_mode == BUFFER_FILL_MODE)
1458 command |= (1 << 31);
1460 reg_write(recv->ohci, recv->ContextControlSet, command);
1462 /* match on specified tags */
1463 contextMatch = tag_mask << 28;
1465 if (iso->channel == -1) {
1466 /* enable multichannel reception */
1467 reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
1469 /* listen on channel */
1470 contextMatch |= iso->channel;
1476 /* enable cycleMatch */
1477 reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
1479 /* set starting cycle */
1482 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1483 just snarf them from the current time */
1484 seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1486 /* advance one second to give some extra time for DMA to start */
1489 cycle |= (seconds & 3) << 13;
1491 contextMatch |= cycle << 12;
1495 /* set sync flag on first DMA descriptor */
1496 struct dma_cmd *cmd = &recv->block[recv->block_dma];
1497 cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
1499 /* match sync field */
1500 contextMatch |= (sync&0xf)<<8;
1503 reg_write(recv->ohci, recv->ContextMatch, contextMatch);
1505 /* address of first descriptor block */
1506 command = dma_prog_region_offset_to_bus(&recv->prog,
1507 recv->block_dma * sizeof(struct dma_cmd));
1508 command |= 1; /* Z=1 */
1510 reg_write(recv->ohci, recv->CommandPtr, command);
1512 /* enable interrupts */
1513 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
1518 reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
1520 /* issue a dummy read of the cycle timer register to force
1521 all PCI writes to be posted immediately */
1523 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1526 if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
1528 "Error starting IR DMA (ContextControl 0x%08x)\n",
1529 reg_read(recv->ohci, recv->ContextControlSet));
1536 static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1538 /* re-use the DMA descriptor for the block */
1539 /* by linking the previous descriptor to it */
1542 int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1544 struct dma_cmd *next = &recv->block[next_i];
1545 struct dma_cmd *prev = &recv->block[prev_i];
1547 /* 'next' becomes the new end of the DMA chain,
1548 so disable branch and enable interrupt */
1549 next->branchAddress = 0;
1550 next->control |= cpu_to_le32(3 << 20);
1551 next->status = cpu_to_le32(recv->buf_stride);
1553 /* link prev to next */
1554 prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1555 sizeof(struct dma_cmd) * next_i)
1558 /* disable interrupt on previous DMA descriptor, except at intervals */
1559 if ((prev_i % recv->block_irq_interval) == 0) {
1560 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1562 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1566 /* wake up DMA in case it fell asleep */
1567 reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1570 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1571 struct hpsb_iso_packet_info *info)
1575 /* release the memory where the packet was */
1578 /* add the wasted space for padding to 4 bytes */
1580 len += 4 - (len % 4);
1582 /* add 8 bytes for the OHCI DMA data format overhead */
1585 recv->released_bytes += len;
1587 /* have we released enough memory for one block? */
1588 while (recv->released_bytes > recv->buf_stride) {
1589 ohci_iso_recv_release_block(recv, recv->block_reader);
1590 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1591 recv->released_bytes -= recv->buf_stride;
1595 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1597 struct ohci_iso_recv *recv = iso->hostdata;
1598 if (recv->dma_mode == BUFFER_FILL_MODE) {
1599 ohci_iso_recv_bufferfill_release(recv, info);
1601 ohci_iso_recv_release_block(recv, info - iso->infos);
1605 /* parse all packets from blocks that have been fully received */
1606 static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1610 struct ti_ohci *ohci = recv->ohci;
1613 /* we expect the next parsable packet to begin at recv->dma_offset */
1614 /* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
1616 unsigned int offset;
1617 unsigned short len, cycle;
1618 unsigned char channel, tag, sy;
1620 unsigned char *p = iso->data_buf.kvirt;
1622 unsigned int this_block = recv->dma_offset/recv->buf_stride;
1624 /* don't loop indefinitely */
1625 if (runaway++ > 100000) {
1626 atomic_inc(&iso->overflows);
1628 "IR DMA error - Runaway during buffer parsing!\n");
1632 /* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
1633 if (this_block == recv->block_dma)
1638 /* parse data length, tag, channel, and sy */
1640 /* note: we keep our own local copies of 'len' and 'offset'
1641 so the user can't mess with them by poking in the mmap area */
1643 len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
1647 "IR DMA error - bogus 'len' value %u\n", len);
1650 channel = p[recv->dma_offset+1] & 0x3F;
1651 tag = p[recv->dma_offset+1] >> 6;
1652 sy = p[recv->dma_offset+0] & 0xF;
1654 /* advance to data payload */
1655 recv->dma_offset += 4;
1657 /* check for wrap-around */
1658 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1659 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1662 /* dma_offset now points to the first byte of the data payload */
1663 offset = recv->dma_offset;
1665 /* advance to xferStatus/timeStamp */
1666 recv->dma_offset += len;
1668 /* payload is padded to 4 bytes */
1670 recv->dma_offset += 4 - (len%4);
1673 /* check for wrap-around */
1674 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1675 /* uh oh, the packet data wraps from the last
1676 to the first DMA block - make the packet
1677 contiguous by copying its "tail" into the
1680 int guard_off = recv->buf_stride*recv->nblocks;
1681 int tail_len = len - (guard_off - offset);
1683 if (tail_len > 0 && tail_len < recv->buf_stride) {
1684 memcpy(iso->data_buf.kvirt + guard_off,
1685 iso->data_buf.kvirt,
1689 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1692 /* parse timestamp */
1693 cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
1696 /* advance to next packet */
1697 recv->dma_offset += 4;
1699 /* check for wrap-around */
1700 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1701 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1704 hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
1711 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1714 struct ti_ohci *ohci = recv->ohci;
1716 /* loop over all blocks */
1717 for (loop = 0; loop < recv->nblocks; loop++) {
1719 /* check block_dma to see if it's done */
1720 struct dma_cmd *im = &recv->block[recv->block_dma];
1722 /* check the DMA descriptor for new writes to xferStatus */
1723 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1725 /* rescount is the number of bytes *remaining to be written* in the block */
1726 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1728 unsigned char event = xferstatus & 0x1F;
1731 /* nothing has happened to this block yet */
1735 if (event != 0x11) {
1736 atomic_inc(&iso->overflows);
1738 "IR DMA error - OHCI error code 0x%02x\n", event);
1741 if (rescount != 0) {
1742 /* the card is still writing to this block;
1743 we can't touch it until it's done */
1747 /* OK, the block is finished... */
1749 /* sync our view of the block */
1750 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1752 /* reset the DMA descriptor */
1753 im->status = recv->buf_stride;
1755 /* advance block_dma */
1756 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1758 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1759 atomic_inc(&iso->overflows);
1760 DBGMSG("ISO reception overflow - "
1761 "ran out of DMA blocks");
1765 /* parse any packets that have arrived */
1766 ohci_iso_recv_bufferfill_parse(iso, recv);
1769 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1773 struct ti_ohci *ohci = recv->ohci;
1775 /* loop over the entire buffer */
1776 for (count = 0; count < recv->nblocks; count++) {
1779 /* pointer to the DMA descriptor */
1780 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1782 /* check the DMA descriptor for new writes to xferStatus */
1783 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1784 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1786 unsigned char event = xferstatus & 0x1F;
1789 /* this packet hasn't come in yet; we are done for now */
1793 if (event == 0x11) {
1794 /* packet received successfully! */
1796 /* rescount is the number of bytes *remaining* in the packet buffer,
1797 after the packet was written */
1798 packet_len = recv->buf_stride - rescount;
1800 } else if (event == 0x02) {
1801 PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1803 PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1806 /* sync our view of the buffer */
1807 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1809 /* record the per-packet info */
1811 /* iso header is 8 bytes ahead of the data payload */
1814 unsigned int offset;
1815 unsigned short cycle;
1816 unsigned char channel, tag, sy;
1818 offset = iso->pkt_dma * recv->buf_stride;
1819 hdr = iso->data_buf.kvirt + offset;
1821 /* skip iso header */
1825 cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1826 channel = hdr[5] & 0x3F;
1830 hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
1833 /* reset the DMA descriptor */
1834 il->status = recv->buf_stride;
1837 recv->block_dma = iso->pkt_dma;
1845 static void ohci_iso_recv_task(unsigned long data)
1847 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1848 struct ohci_iso_recv *recv = iso->hostdata;
1850 if (recv->dma_mode == BUFFER_FILL_MODE)
1851 ohci_iso_recv_bufferfill_task(iso, recv);
1853 ohci_iso_recv_packetperbuf_task(iso, recv);
1856 /***********************************
1857 * rawiso ISO transmission *
1858 ***********************************/
1860 struct ohci_iso_xmit {
1861 struct ti_ohci *ohci;
1862 struct dma_prog_region prog;
1863 struct ohci1394_iso_tasklet task;
1866 u32 ContextControlSet;
1867 u32 ContextControlClear;
1871 /* transmission DMA program:
1872 one OUTPUT_MORE_IMMEDIATE for the IT header
1873 one OUTPUT_LAST for the buffer data */
1875 struct iso_xmit_cmd {
1876 struct dma_cmd output_more_immediate;
1879 struct dma_cmd output_last;
1882 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1883 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1884 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1885 static void ohci_iso_xmit_task(unsigned long data);
1887 static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1889 struct ohci_iso_xmit *xmit;
1890 unsigned int prog_size;
1894 xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
1898 iso->hostdata = xmit;
1899 xmit->ohci = iso->host->hostdata;
1900 xmit->task_active = 0;
1902 dma_prog_region_init(&xmit->prog);
1904 prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1906 if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1909 ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1910 ohci_iso_xmit_task, (unsigned long) iso);
1912 if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0)
1915 xmit->task_active = 1;
1917 /* xmit context registers are spaced 16 bytes apart */
1918 ctx = xmit->task.context;
1919 xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1920 xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1921 xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1926 ohci_iso_xmit_shutdown(iso);
1930 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1932 struct ohci_iso_xmit *xmit = iso->hostdata;
1933 struct ti_ohci *ohci = xmit->ohci;
1935 /* disable interrupts */
1936 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1939 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1940 /* XXX the DMA context will lock up if you try to send too much data! */
1942 "you probably exceeded the OHCI card's bandwidth limit - "
1943 "reload the module and reduce xmit bandwidth");
1947 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1949 struct ohci_iso_xmit *xmit = iso->hostdata;
1951 if (xmit->task_active) {
1952 ohci_iso_xmit_stop(iso);
1953 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1954 xmit->task_active = 0;
1957 dma_prog_region_free(&xmit->prog);
1959 iso->hostdata = NULL;
1962 static void ohci_iso_xmit_task(unsigned long data)
1964 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1965 struct ohci_iso_xmit *xmit = iso->hostdata;
1966 struct ti_ohci *ohci = xmit->ohci;
1970 /* check the whole buffer if necessary, starting at pkt_dma */
1971 for (count = 0; count < iso->buf_packets; count++) {
1974 /* DMA descriptor */
1975 struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
1977 /* check for new writes to xferStatus */
1978 u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
1979 u8 event = xferstatus & 0x1F;
1982 /* packet hasn't been sent yet; we are done for now */
1988 "IT DMA error - OHCI error code 0x%02x\n", event);
1990 /* at least one packet went out, so wake up the writer */
1994 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
1996 /* tell the subsystem the packet has gone out */
1997 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
1999 /* reset the DMA descriptor for next time */
2000 cmd->output_last.status = 0;
2007 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
2009 struct ohci_iso_xmit *xmit = iso->hostdata;
2010 struct ti_ohci *ohci = xmit->ohci;
2013 struct iso_xmit_cmd *next, *prev;
2015 unsigned int offset;
2017 unsigned char tag, sy;
2019 /* check that the packet doesn't cross a page boundary
2020 (we could allow this if we added OUTPUT_MORE descriptor support) */
2021 if (cross_bound(info->offset, info->len)) {
2023 "rawiso xmit: packet %u crosses a page boundary",
2028 offset = info->offset;
2033 /* sync up the card's view of the buffer */
2034 dma_region_sync_for_device(&iso->data_buf, offset, len);
2036 /* append first_packet to the DMA chain */
2037 /* by linking the previous descriptor to it */
2038 /* (next will become the new end of the DMA chain) */
2040 next_i = iso->first_packet;
2041 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
2043 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
2044 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
2046 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
2047 memset(next, 0, sizeof(struct iso_xmit_cmd));
2048 next->output_more_immediate.control = cpu_to_le32(0x02000008);
2050 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
2052 /* tcode = 0xA, and sy */
2053 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
2055 /* tag and channel number */
2056 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
2058 /* transmission speed */
2059 next->iso_hdr[2] = iso->speed & 0x7;
2062 next->iso_hdr[6] = len & 0xFF;
2063 next->iso_hdr[7] = len >> 8;
2065 /* set up the OUTPUT_LAST */
2066 next->output_last.control = cpu_to_le32(1 << 28);
2067 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
2068 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
2069 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
2070 next->output_last.control |= cpu_to_le32(len);
2072 /* payload bus address */
2073 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
2075 /* leave branchAddress at zero for now */
2077 /* re-write the previous DMA descriptor to chain to this one */
2079 /* set prev branch address to point to next (Z=3) */
2080 prev->output_last.branchAddress = cpu_to_le32(
2081 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
2083 /* disable interrupt, unless required by the IRQ interval */
2084 if (prev_i % iso->irq_interval) {
2085 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
2087 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
2092 /* wake DMA in case it is sleeping */
2093 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
2095 /* issue a dummy read of the cycle timer to force all PCI
2096 writes to be posted immediately */
2098 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
2103 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2105 struct ohci_iso_xmit *xmit = iso->hostdata;
2106 struct ti_ohci *ohci = xmit->ohci;
2108 /* clear out the control register */
2109 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2112 /* address and length of first descriptor block (Z=3) */
2113 reg_write(xmit->ohci, xmit->CommandPtr,
2114 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2118 u32 start = cycle & 0x1FFF;
2120 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2121 just snarf them from the current time */
2122 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2124 /* advance one second to give some extra time for DMA to start */
2127 start |= (seconds & 3) << 13;
2129 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2132 /* enable interrupts */
2133 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2136 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2139 /* wait 100 usec to give the card time to go active */
2142 /* check the RUN bit */
2143 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2144 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2145 reg_read(xmit->ohci, xmit->ContextControlSet));
2152 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2157 return ohci_iso_xmit_init(iso);
2159 return ohci_iso_xmit_start(iso, arg);
2161 ohci_iso_xmit_stop(iso);
2164 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2166 ohci_iso_xmit_shutdown(iso);
2170 return ohci_iso_recv_init(iso);
2172 int *args = (int*) arg;
2173 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2176 ohci_iso_recv_stop(iso);
2179 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2182 ohci_iso_recv_task((unsigned long) iso);
2185 ohci_iso_recv_shutdown(iso);
2187 case RECV_LISTEN_CHANNEL:
2188 ohci_iso_recv_change_channel(iso, arg, 1);
2190 case RECV_UNLISTEN_CHANNEL:
2191 ohci_iso_recv_change_channel(iso, arg, 0);
2193 case RECV_SET_CHANNEL_MASK:
2194 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2198 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2205 /***************************************
2206 * IEEE-1394 functionality section END *
2207 ***************************************/
2210 /********************************************************
2211 * Global stuff (interrupt handler, init/shutdown code) *
2212 ********************************************************/
2214 static void dma_trm_reset(struct dma_trm_ctx *d)
2216 unsigned long flags;
2217 LIST_HEAD(packet_list);
2218 struct ti_ohci *ohci = d->ohci;
2219 struct hpsb_packet *packet, *ptmp;
2221 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2223 /* Lock the context, reset it and release it. Move the packets
2224 * that were pending in the context to packet_list and free
2225 * them after releasing the lock. */
2227 spin_lock_irqsave(&d->lock, flags);
2229 list_splice(&d->fifo_list, &packet_list);
2230 list_splice(&d->pending_list, &packet_list);
2231 INIT_LIST_HEAD(&d->fifo_list);
2232 INIT_LIST_HEAD(&d->pending_list);
2234 d->branchAddrPtr = NULL;
2235 d->sent_ind = d->prg_ind;
2236 d->free_prgs = d->num_desc;
2238 spin_unlock_irqrestore(&d->lock, flags);
2240 if (list_empty(&packet_list))
2243 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2245 /* Now process subsystem callbacks for the packets from this
2247 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2248 list_del_init(&packet->driver_list);
2249 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2253 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2257 struct ohci1394_iso_tasklet *t;
2260 spin_lock(&ohci->iso_tasklet_list_lock);
2262 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2263 mask = 1 << t->context;
2265 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2266 tasklet_schedule(&t->tasklet);
2267 else if (rx_event & mask)
2268 tasklet_schedule(&t->tasklet);
2271 spin_unlock(&ohci->iso_tasklet_list_lock);
2275 static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
2276 struct pt_regs *regs_are_unused)
2278 quadlet_t event, node_id;
2279 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2280 struct hpsb_host *host = ohci->host;
2281 int phyid = -1, isroot = 0;
2282 unsigned long flags;
2284 /* Read and clear the interrupt event register. Don't clear
2285 * the busReset event, though. This is done when we get the
2286 * selfIDComplete interrupt. */
2287 spin_lock_irqsave(&ohci->event_lock, flags);
2288 event = reg_read(ohci, OHCI1394_IntEventClear);
2289 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2290 spin_unlock_irqrestore(&ohci->event_lock, flags);
2295 /* If event is ~(u32)0 cardbus card was ejected. In this case
2296 * we just return, and clean up in the ohci1394_pci_remove
2298 if (event == ~(u32) 0) {
2299 DBGMSG("Device removed.");
2303 DBGMSG("IntEvent: %08x", event);
2305 if (event & OHCI1394_unrecoverableError) {
2307 PRINT(KERN_ERR, "Unrecoverable error!");
2309 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2310 PRINT(KERN_ERR, "Async Req Tx Context died: "
2311 "ctrl[%08x] cmdptr[%08x]",
2312 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2313 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2315 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2316 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2317 "ctrl[%08x] cmdptr[%08x]",
2318 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2319 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2321 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2322 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2323 "ctrl[%08x] cmdptr[%08x]",
2324 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2325 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2327 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2328 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2329 "ctrl[%08x] cmdptr[%08x]",
2330 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2331 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2333 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2334 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2335 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2336 "ctrl[%08x] cmdptr[%08x]", ctx,
2337 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2338 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2341 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2342 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2343 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2344 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2345 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2346 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2347 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2350 event &= ~OHCI1394_unrecoverableError;
2353 if (event & OHCI1394_cycleInconsistent) {
2354 /* We subscribe to the cycleInconsistent event only to
2355 * clear the corresponding event bit... otherwise,
2356 * isochronous cycleMatch DMA won't work. */
2357 DBGMSG("OHCI1394_cycleInconsistent");
2358 event &= ~OHCI1394_cycleInconsistent;
2361 if (event & OHCI1394_busReset) {
2362 /* The busReset event bit can't be cleared during the
2363 * selfID phase, so we disable busReset interrupts, to
2364 * avoid burying the cpu in interrupt requests. */
2365 spin_lock_irqsave(&ohci->event_lock, flags);
2366 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2368 if (ohci->check_busreset) {
2373 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2374 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2376 spin_unlock_irqrestore(&ohci->event_lock, flags);
2378 spin_lock_irqsave(&ohci->event_lock, flags);
2380 /* The loop counter check is to prevent the driver
2381 * from remaining in this state forever. For the
2382 * initial bus reset, the loop continues for ever
2383 * and the system hangs, until some device is plugged-in
2384 * or out manually into a port! The forced reset seems
2385 * to solve this problem. This mainly effects nForce2. */
2386 if (loop_count > 10000) {
2387 ohci_devctl(host, RESET_BUS, LONG_RESET);
2388 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2395 spin_unlock_irqrestore(&ohci->event_lock, flags);
2396 if (!host->in_bus_reset) {
2397 DBGMSG("irq_handler: Bus reset requested");
2399 /* Subsystem call */
2400 hpsb_bus_reset(ohci->host);
2402 event &= ~OHCI1394_busReset;
2405 if (event & OHCI1394_reqTxComplete) {
2406 struct dma_trm_ctx *d = &ohci->at_req_context;
2407 DBGMSG("Got reqTxComplete interrupt "
2408 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2409 if (reg_read(ohci, d->ctrlSet) & 0x800)
2410 ohci1394_stop_context(ohci, d->ctrlClear,
2413 dma_trm_tasklet((unsigned long)d);
2414 //tasklet_schedule(&d->task);
2415 event &= ~OHCI1394_reqTxComplete;
2417 if (event & OHCI1394_respTxComplete) {
2418 struct dma_trm_ctx *d = &ohci->at_resp_context;
2419 DBGMSG("Got respTxComplete interrupt "
2420 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2421 if (reg_read(ohci, d->ctrlSet) & 0x800)
2422 ohci1394_stop_context(ohci, d->ctrlClear,
2425 tasklet_schedule(&d->task);
2426 event &= ~OHCI1394_respTxComplete;
2428 if (event & OHCI1394_RQPkt) {
2429 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2430 DBGMSG("Got RQPkt interrupt status=0x%08X",
2431 reg_read(ohci, d->ctrlSet));
2432 if (reg_read(ohci, d->ctrlSet) & 0x800)
2433 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2435 tasklet_schedule(&d->task);
2436 event &= ~OHCI1394_RQPkt;
2438 if (event & OHCI1394_RSPkt) {
2439 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2440 DBGMSG("Got RSPkt interrupt status=0x%08X",
2441 reg_read(ohci, d->ctrlSet));
2442 if (reg_read(ohci, d->ctrlSet) & 0x800)
2443 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2445 tasklet_schedule(&d->task);
2446 event &= ~OHCI1394_RSPkt;
2448 if (event & OHCI1394_isochRx) {
2451 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2452 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2453 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2454 event &= ~OHCI1394_isochRx;
2456 if (event & OHCI1394_isochTx) {
2459 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2460 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2461 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2462 event &= ~OHCI1394_isochTx;
2464 if (event & OHCI1394_selfIDComplete) {
2465 if (host->in_bus_reset) {
2466 node_id = reg_read(ohci, OHCI1394_NodeID);
2468 if (!(node_id & 0x80000000)) {
2470 "SelfID received, but NodeID invalid "
2471 "(probably new bus reset occurred): %08X",
2473 goto selfid_not_valid;
2476 phyid = node_id & 0x0000003f;
2477 isroot = (node_id & 0x40000000) != 0;
2479 DBGMSG("SelfID interrupt received "
2480 "(phyid %d, %s)", phyid,
2481 (isroot ? "root" : "not root"));
2483 handle_selfid(ohci, host, phyid, isroot);
2485 /* Clear the bus reset event and re-enable the
2486 * busReset interrupt. */
2487 spin_lock_irqsave(&ohci->event_lock, flags);
2488 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2489 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2490 spin_unlock_irqrestore(&ohci->event_lock, flags);
2492 /* Accept Physical requests from all nodes. */
2493 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
2494 reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
2496 /* Turn on phys dma reception.
2498 * TODO: Enable some sort of filtering management.
2501 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
2502 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
2503 reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
2505 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
2506 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
2509 DBGMSG("PhyReqFilter=%08x%08x",
2510 reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
2511 reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
2513 hpsb_selfid_complete(host, phyid, isroot);
2516 "SelfID received outside of bus reset sequence");
2519 event &= ~OHCI1394_selfIDComplete;
2522 /* Make sure we handle everything, just in case we accidentally
2523 * enabled an interrupt that we didn't write a handler for. */
2525 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2531 /* Put the buffer back into the dma context */
2532 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2534 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2535 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2537 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2538 d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2539 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2540 d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2542 /* wake up the dma context if necessary */
2543 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2545 "Waking dma ctx=%d ... processing is probably too slow",
2549 /* do this always, to avoid race condition */
2550 reg_write(ohci, d->ctrlSet, 0x1000);
2553 #define cond_le32_to_cpu(data, noswap) \
2554 (noswap ? data : le32_to_cpu(data))
2556 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2557 -1, 0, -1, 0, -1, -1, 16, -1};
2560 * Determine the length of a packet in the buffer
2561 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2563 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2564 int offset, unsigned char tcode, int noswap)
2568 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2569 length = TCODE_SIZE[tcode];
2571 if (offset + 12 >= d->buf_size) {
2572 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2573 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2575 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2579 } else if (d->type == DMA_CTX_ISO) {
2580 /* Assumption: buffer fill mode with header/trailer */
2581 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2584 if (length > 0 && length % 4)
2585 length += 4 - (length % 4);
2590 /* Tasklet that processes dma receive buffers */
2591 static void dma_rcv_tasklet (unsigned long data)
2593 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2594 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2595 unsigned int split_left, idx, offset, rescount;
2596 unsigned char tcode;
2597 int length, bytes_left, ack;
2598 unsigned long flags;
2603 spin_lock_irqsave(&d->lock, flags);
2606 offset = d->buf_offset;
2607 buf_ptr = d->buf_cpu[idx] + offset/4;
2609 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2610 bytes_left = d->buf_size - rescount - offset;
2612 while (bytes_left > 0) {
2613 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2615 /* packet_length() will return < 4 for an error */
2616 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2618 if (length < 4) { /* something is wrong */
2619 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2620 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2622 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2623 spin_unlock_irqrestore(&d->lock, flags);
2627 /* The first case is where we have a packet that crosses
2628 * over more than one descriptor. The next case is where
2629 * it's all in the first descriptor. */
2630 if ((offset + length) > d->buf_size) {
2631 DBGMSG("Split packet rcv'd");
2632 if (length > d->split_buf_size) {
2633 ohci1394_stop_context(ohci, d->ctrlClear,
2634 "Split packet size exceeded");
2636 d->buf_offset = offset;
2637 spin_unlock_irqrestore(&d->lock, flags);
2641 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2643 /* Other part of packet not written yet.
2644 * this should never happen I think
2645 * anyway we'll get it on the next call. */
2647 "Got only half a packet!");
2649 d->buf_offset = offset;
2650 spin_unlock_irqrestore(&d->lock, flags);
2654 split_left = length;
2655 split_ptr = (char *)d->spb;
2656 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2657 split_left -= d->buf_size-offset;
2658 split_ptr += d->buf_size-offset;
2659 insert_dma_buffer(d, idx);
2660 idx = (idx+1) % d->num_desc;
2661 buf_ptr = d->buf_cpu[idx];
2664 while (split_left >= d->buf_size) {
2665 memcpy(split_ptr,buf_ptr,d->buf_size);
2666 split_ptr += d->buf_size;
2667 split_left -= d->buf_size;
2668 insert_dma_buffer(d, idx);
2669 idx = (idx+1) % d->num_desc;
2670 buf_ptr = d->buf_cpu[idx];
2673 if (split_left > 0) {
2674 memcpy(split_ptr, buf_ptr, split_left);
2675 offset = split_left;
2676 buf_ptr += offset/4;
2679 DBGMSG("Single packet rcv'd");
2680 memcpy(d->spb, buf_ptr, length);
2682 buf_ptr += length/4;
2683 if (offset==d->buf_size) {
2684 insert_dma_buffer(d, idx);
2685 idx = (idx+1) % d->num_desc;
2686 buf_ptr = d->buf_cpu[idx];
2691 /* We get one phy packet to the async descriptor for each
2692 * bus reset. We always ignore it. */
2693 if (tcode != OHCI1394_TCODE_PHY) {
2694 if (!ohci->no_swap_incoming)
2695 packet_swab(d->spb, tcode);
2696 DBGMSG("Packet received from node"
2697 " %d ack=0x%02X spd=%d tcode=0x%X"
2698 " length=%d ctx=%d tlabel=%d",
2699 (d->spb[1]>>16)&0x3f,
2700 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2701 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2702 tcode, length, d->ctx,
2703 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>10)&0x3f);
2705 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2708 hpsb_packet_received(ohci->host, d->spb,
2711 #ifdef OHCI1394_DEBUG
2713 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2717 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2719 bytes_left = d->buf_size - rescount - offset;
2724 d->buf_offset = offset;
2726 spin_unlock_irqrestore(&d->lock, flags);
2729 /* Bottom half that processes sent packets */
2730 static void dma_trm_tasklet (unsigned long data)
2732 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2733 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2734 struct hpsb_packet *packet, *ptmp;
2735 unsigned long flags;
2739 spin_lock_irqsave(&d->lock, flags);
2741 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2742 datasize = packet->data_size;
2743 if (datasize && packet->type != hpsb_raw)
2744 status = le32_to_cpu(
2745 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2747 status = le32_to_cpu(
2748 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2751 /* this packet hasn't been sent yet*/
2754 #ifdef OHCI1394_DEBUG
2756 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2757 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2758 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2759 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2760 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2761 status&0x1f, (status>>5)&0x3,
2762 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2765 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2766 "0x%02X ack=0x%X spd=%d dataLength=%d ctx=%d",
2767 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2768 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2769 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2770 status&0x1f, (status>>5)&0x3,
2771 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2774 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2775 "0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
2776 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2778 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2780 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2782 status&0x1f, (status>>5)&0x3,
2783 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2787 if (status & 0x10) {
2790 switch (status & 0x1f) {
2791 case EVT_NO_STATUS: /* that should never happen */
2792 case EVT_RESERVED_A: /* that should never happen */
2793 case EVT_LONG_PACKET: /* that should never happen */
2794 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2795 ack = ACKX_SEND_ERROR;
2797 case EVT_MISSING_ACK:
2801 ack = ACKX_SEND_ERROR;
2803 case EVT_OVERRUN: /* that should never happen */
2804 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2805 ack = ACKX_SEND_ERROR;
2807 case EVT_DESCRIPTOR_READ:
2809 case EVT_DATA_WRITE:
2810 ack = ACKX_SEND_ERROR;
2812 case EVT_BUS_RESET: /* that should never happen */
2813 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2814 ack = ACKX_SEND_ERROR;
2820 ack = ACKX_SEND_ERROR;
2822 case EVT_RESERVED_B: /* that should never happen */
2823 case EVT_RESERVED_C: /* that should never happen */
2824 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2825 ack = ACKX_SEND_ERROR;
2829 ack = ACKX_SEND_ERROR;
2832 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2833 ack = ACKX_SEND_ERROR;
2838 list_del_init(&packet->driver_list);
2839 hpsb_packet_sent(ohci->host, packet, ack);
2842 pci_unmap_single(ohci->dev,
2843 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2844 datasize, PCI_DMA_TODEVICE);
2845 OHCI_DMA_FREE("single Xmit data packet");
2848 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2852 dma_trm_flush(ohci, d);
2854 spin_unlock_irqrestore(&d->lock, flags);
2857 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2860 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2862 if (d->type == DMA_CTX_ISO) {
2863 /* disable interrupts */
2864 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2865 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2867 tasklet_kill(&d->task);
2873 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2876 struct ti_ohci *ohci = d->ohci;
2881 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2884 for (i=0; i<d->num_desc; i++)
2885 if (d->buf_cpu[i] && d->buf_bus[i]) {
2886 pci_free_consistent(
2887 ohci->dev, d->buf_size,
2888 d->buf_cpu[i], d->buf_bus[i]);
2889 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2895 for (i=0; i<d->num_desc; i++)
2896 if (d->prg_cpu[i] && d->prg_bus[i]) {
2897 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2898 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2900 pci_pool_destroy(d->prg_pool);
2901 OHCI_DMA_FREE("dma_rcv prg pool");
2905 if (d->spb) kfree(d->spb);
2907 /* Mark this context as freed. */
2912 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2913 enum context_type type, int ctx, int num_desc,
2914 int buf_size, int split_buf_size, int context_base)
2922 d->num_desc = num_desc;
2923 d->buf_size = buf_size;
2924 d->split_buf_size = split_buf_size;
2930 d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_KERNEL);
2931 d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
2933 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2934 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2935 free_dma_rcv_ctx(d);
2938 memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2939 memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2941 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
2943 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
2945 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2946 PRINT(KERN_ERR, "Failed to allocate dma prg");
2947 free_dma_rcv_ctx(d);
2950 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2951 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2953 d->spb = kmalloc(d->split_buf_size, GFP_KERNEL);
2955 if (d->spb == NULL) {
2956 PRINT(KERN_ERR, "Failed to allocate split buffer");
2957 free_dma_rcv_ctx(d);
2961 d->prg_pool = pci_pool_create("ohci1394 rcv prg", ohci->dev,
2962 sizeof(struct dma_cmd), 4, 0);
2963 OHCI_DMA_ALLOC("dma_rcv prg pool");
2965 for (i=0; i<d->num_desc; i++) {
2966 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2969 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
2971 if (d->buf_cpu[i] != NULL) {
2972 memset(d->buf_cpu[i], 0, d->buf_size);
2975 "Failed to allocate dma buffer");
2976 free_dma_rcv_ctx(d);
2980 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
2981 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
2983 if (d->prg_cpu[i] != NULL) {
2984 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2987 "Failed to allocate dma prg");
2988 free_dma_rcv_ctx(d);
2993 spin_lock_init(&d->lock);
2995 if (type == DMA_CTX_ISO) {
2996 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
2997 OHCI_ISO_MULTICHANNEL_RECEIVE,
2998 dma_rcv_tasklet, (unsigned long) d);
2999 if (ohci1394_register_iso_tasklet(ohci,
3000 &ohci->ir_legacy_tasklet) < 0) {
3001 PRINT(KERN_ERR, "No IR DMA context available");
3002 free_dma_rcv_ctx(d);
3006 /* the IR context can be assigned to any DMA context
3007 * by ohci1394_register_iso_tasklet */
3008 d->ctx = ohci->ir_legacy_tasklet.context;
3009 d->ctrlSet = OHCI1394_IsoRcvContextControlSet + 32*d->ctx;
3010 d->ctrlClear = OHCI1394_IsoRcvContextControlClear + 32*d->ctx;
3011 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
3012 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
3014 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3015 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3016 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3018 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3024 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3027 struct ti_ohci *ohci = d->ohci;
3032 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3035 for (i=0; i<d->num_desc; i++)
3036 if (d->prg_cpu[i] && d->prg_bus[i]) {
3037 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3038 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3040 pci_pool_destroy(d->prg_pool);
3041 OHCI_DMA_FREE("dma_trm prg pool");
3046 /* Mark this context as freed. */
3051 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3052 enum context_type type, int ctx, int num_desc,
3060 d->num_desc = num_desc;
3065 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
3067 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3069 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3070 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3071 free_dma_trm_ctx(d);
3074 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3075 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3077 d->prg_pool = pci_pool_create("ohci1394 trm prg", ohci->dev,
3078 sizeof(struct at_dma_prg), 4, 0);
3079 OHCI_DMA_ALLOC("dma_rcv prg pool");
3081 for (i = 0; i < d->num_desc; i++) {
3082 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3083 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3085 if (d->prg_cpu[i] != NULL) {
3086 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3089 "Failed to allocate at dma prg");
3090 free_dma_trm_ctx(d);
3095 spin_lock_init(&d->lock);
3097 /* initialize tasklet */
3098 if (type == DMA_CTX_ISO) {
3099 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3100 dma_trm_tasklet, (unsigned long) d);
3101 if (ohci1394_register_iso_tasklet(ohci,
3102 &ohci->it_legacy_tasklet) < 0) {
3103 PRINT(KERN_ERR, "No IT DMA context available");
3104 free_dma_trm_ctx(d);
3108 /* IT can be assigned to any context by register_iso_tasklet */
3109 d->ctx = ohci->it_legacy_tasklet.context;
3110 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3111 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3112 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3114 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3115 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3116 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3117 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3123 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3125 struct ti_ohci *ohci = host->hostdata;
3127 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3128 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3130 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
3134 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3135 quadlet_t data, quadlet_t compare)
3137 struct ti_ohci *ohci = host->hostdata;
3140 reg_write(ohci, OHCI1394_CSRData, data);
3141 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3142 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3144 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3145 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3151 return reg_read(ohci, OHCI1394_CSRData);
3154 static struct hpsb_host_driver ohci1394_driver = {
3155 .owner = THIS_MODULE,
3156 .name = OHCI1394_DRIVER_NAME,
3157 .set_hw_config_rom = ohci_set_hw_config_rom,
3158 .transmit_packet = ohci_transmit,
3159 .devctl = ohci_devctl,
3160 .isoctl = ohci_isoctl,
3161 .hw_csr_reg = ohci_hw_csr_reg,
3166 /***********************************
3167 * PCI Driver Interface functions *
3168 ***********************************/
3170 #define FAIL(err, fmt, args...) \
3172 PRINT_G(KERN_ERR, fmt , ## args); \
3173 ohci1394_pci_remove(dev); \
3177 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3178 const struct pci_device_id *ent)
3180 static int version_printed = 0;
3182 struct hpsb_host *host;
3183 struct ti_ohci *ohci; /* shortcut to currently handled device */
3184 unsigned long ohci_base;
3186 if (version_printed++ == 0)
3187 PRINT_G(KERN_INFO, "%s", version);
3189 if (pci_enable_device(dev))
3190 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3191 pci_set_master(dev);
3193 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3194 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3196 ohci = host->hostdata;
3199 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3201 pci_set_drvdata(dev, ohci);
3203 /* We don't want hardware swapping */
3204 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3206 /* Some oddball Apple controllers do not order the selfid
3207 * properly, so we make up for it here. */
3208 #ifndef __LITTLE_ENDIAN
3209 /* XXX: Need a better way to check this. I'm wondering if we can
3210 * read the values of the OHCI1394_PCI_HCI_Control and the
3211 * noByteSwapData registers to see if they were not cleared to
3212 * zero. Should this work? Obviously it's not defined what these
3213 * registers will read when they aren't supported. Bleh! */
3214 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3215 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3216 ohci->no_swap_incoming = 1;
3217 ohci->selfid_swap = 0;
3219 ohci->selfid_swap = 1;
3223 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3224 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3227 /* These chipsets require a bit of extra care when checking after
3229 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3230 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3231 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3232 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3233 ohci->check_busreset = 1;
3235 /* We hardwire the MMIO length, since some CardBus adaptors
3236 * fail to report the right length. Anyway, the ohci spec
3237 * clearly says it's 2kb, so this shouldn't be a problem. */
3238 ohci_base = pci_resource_start(dev, 0);
3239 if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
3240 PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
3241 pci_resource_len(dev, 0));
3243 /* Seems PCMCIA handles this internally. Not sure why. Seems
3244 * pretty bogus to force a driver to special case this. */
3246 if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
3247 FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
3248 ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
3250 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3252 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3253 if (ohci->registers == NULL)
3254 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3255 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3256 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3258 /* csr_config rom allocation */
3259 ohci->csr_config_rom_cpu =
3260 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3261 &ohci->csr_config_rom_bus);
3262 OHCI_DMA_ALLOC("consistent csr_config_rom");
3263 if (ohci->csr_config_rom_cpu == NULL)
3264 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3265 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3267 /* self-id dma buffer allocation */
3268 ohci->selfid_buf_cpu =
3269 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3270 &ohci->selfid_buf_bus);
3271 OHCI_DMA_ALLOC("consistent selfid_buf");
3273 if (ohci->selfid_buf_cpu == NULL)
3274 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3275 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3277 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3278 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3279 "8Kb boundary... may cause problems on some CXD3222 chip",
3280 ohci->selfid_buf_cpu);
3282 /* No self-id errors at startup */
3283 ohci->self_id_errors = 0;
3285 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3286 /* AR DMA request context allocation */
3287 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3288 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3289 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3290 OHCI1394_AsReqRcvContextBase) < 0)
3291 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3293 /* AR DMA response context allocation */
3294 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3295 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3296 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3297 OHCI1394_AsRspRcvContextBase) < 0)
3298 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3300 /* AT DMA request context */
3301 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3302 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3303 OHCI1394_AsReqTrContextBase) < 0)
3304 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3306 /* AT DMA response context */
3307 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3308 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3309 OHCI1394_AsRspTrContextBase) < 0)
3310 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3312 /* Start off with a soft reset, to clear everything to a sane
3314 ohci_soft_reset(ohci);
3316 /* Now enable LPS, which we need in order to start accessing
3317 * most of the registers. In fact, on some cards (ALI M5251),
3318 * accessing registers in the SClk domain without LPS enabled
3319 * will lock up the machine. Wait 50msec to make sure we have
3320 * full link enabled. */
3321 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3323 /* Disable and clear interrupts */
3324 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3325 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3329 /* Determine the number of available IR and IT contexts. */
3330 ohci->nb_iso_rcv_ctx =
3331 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3332 DBGMSG("%d iso receive contexts available",
3333 ohci->nb_iso_rcv_ctx);
3335 ohci->nb_iso_xmit_ctx =
3336 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3337 DBGMSG("%d iso transmit contexts available",
3338 ohci->nb_iso_xmit_ctx);
3340 /* Set the usage bits for non-existent contexts so they can't
3342 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3343 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
3345 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3346 spin_lock_init(&ohci->iso_tasklet_list_lock);
3347 ohci->ISO_channel_usage = 0;
3348 spin_lock_init(&ohci->IR_channel_lock);
3350 /* the IR DMA context is allocated on-demand; mark it inactive */
3351 ohci->ir_legacy_context.ohci = NULL;
3353 /* same for the IT DMA context */
3354 ohci->it_legacy_context.ohci = NULL;
3356 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
3357 OHCI1394_DRIVER_NAME, ohci))
3358 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3360 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3361 ohci_initialize(ohci);
3363 /* Set certain csr values */
3364 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3365 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3366 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3367 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3368 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3370 /* Tell the highlevel this host is ready */
3371 if (hpsb_add_host(host))
3372 FAIL(-ENOMEM, "Failed to register host with highlevel");
3374 ohci->init_state = OHCI_INIT_DONE;
3380 static void ohci1394_pci_remove(struct pci_dev *pdev)
3382 struct ti_ohci *ohci;
3385 ohci = pci_get_drvdata(pdev);
3389 dev = get_device(&ohci->host->device);
3391 switch (ohci->init_state) {
3392 case OHCI_INIT_DONE:
3393 hpsb_remove_host(ohci->host);
3395 /* Clear out BUS Options */
3396 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3397 reg_write(ohci, OHCI1394_BusOptions,
3398 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3400 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3402 case OHCI_INIT_HAVE_IRQ:
3403 /* Clear interrupt registers */
3404 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3405 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3406 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3407 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3408 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3409 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3411 /* Disable IRM Contender */
3412 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3414 /* Clear link control register */
3415 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3417 /* Let all other nodes know to ignore us */
3418 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3420 /* Soft reset before we start - this disables
3421 * interrupts and clears linkEnable and LPS. */
3422 ohci_soft_reset(ohci);
3423 free_irq(ohci->dev->irq, ohci);
3425 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3426 /* The ohci_soft_reset() stops all DMA contexts, so we
3427 * dont need to do this. */
3429 free_dma_rcv_ctx(&ohci->ar_req_context);
3430 free_dma_rcv_ctx(&ohci->ar_resp_context);
3433 free_dma_trm_ctx(&ohci->at_req_context);
3434 free_dma_trm_ctx(&ohci->at_resp_context);
3437 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3440 free_dma_trm_ctx(&ohci->it_legacy_context);
3442 case OHCI_INIT_HAVE_SELFID_BUFFER:
3443 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3444 ohci->selfid_buf_cpu,
3445 ohci->selfid_buf_bus);
3446 OHCI_DMA_FREE("consistent selfid_buf");
3448 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3449 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3450 ohci->csr_config_rom_cpu,
3451 ohci->csr_config_rom_bus);
3452 OHCI_DMA_FREE("consistent csr_config_rom");
3454 case OHCI_INIT_HAVE_IOMAPPING:
3455 iounmap(ohci->registers);
3457 case OHCI_INIT_HAVE_MEM_REGION:
3459 release_mem_region(pci_resource_start(ohci->dev, 0),
3460 OHCI1394_REGISTER_SIZE);
3463 #ifdef CONFIG_PPC_PMAC
3464 /* On UniNorth, power down the cable and turn off the chip
3465 * clock when the module is removed to save power on
3466 * laptops. Turning it back ON is done by the arch code when
3467 * pci_enable_device() is called */
3469 struct device_node* of_node;
3471 of_node = pci_device_to_OF_node(ohci->dev);
3473 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3474 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3477 #endif /* CONFIG_PPC_PMAC */
3479 case OHCI_INIT_ALLOC_HOST:
3480 pci_set_drvdata(ohci->dev, NULL);
3488 static int ohci1394_pci_resume (struct pci_dev *pdev)
3490 #ifdef CONFIG_PMAC_PBOOK
3492 struct device_node *of_node;
3494 /* Re-enable 1394 */
3495 of_node = pci_device_to_OF_node (pdev);
3497 pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
3501 pci_enable_device(pdev);
3507 static int ohci1394_pci_suspend (struct pci_dev *pdev, u32 state)
3509 #ifdef CONFIG_PMAC_PBOOK
3511 struct device_node *of_node;
3514 of_node = pci_device_to_OF_node (pdev);
3516 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3524 #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
3526 static struct pci_device_id ohci1394_pci_tbl[] = {
3528 .class = PCI_CLASS_FIREWIRE_OHCI,
3529 .class_mask = PCI_ANY_ID,
3530 .vendor = PCI_ANY_ID,
3531 .device = PCI_ANY_ID,
3532 .subvendor = PCI_ANY_ID,
3533 .subdevice = PCI_ANY_ID,
3538 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3540 static struct pci_driver ohci1394_pci_driver = {
3541 .name = OHCI1394_DRIVER_NAME,
3542 .id_table = ohci1394_pci_tbl,
3543 .probe = ohci1394_pci_probe,
3544 .remove = ohci1394_pci_remove,
3545 .resume = ohci1394_pci_resume,
3546 .suspend = ohci1394_pci_suspend,
3551 /***********************************
3552 * OHCI1394 Video Interface *
3553 ***********************************/
3555 /* essentially the only purpose of this code is to allow another
3556 module to hook into ohci's interrupt handler */
3558 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3562 /* stop the channel program if it's still running */
3563 reg_write(ohci, reg, 0x8000);
3565 /* Wait until it effectively stops */
3566 while (reg_read(ohci, reg) & 0x400) {
3570 "Runaway loop while stopping context: %s...", msg ? msg : "");
3577 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3581 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3582 void (*func)(unsigned long), unsigned long data)
3584 tasklet_init(&tasklet->tasklet, func, data);
3585 tasklet->type = type;
3586 /* We init the tasklet->link field, so we can list_del() it
3587 * without worrying whether it was added to the list or not. */
3588 INIT_LIST_HEAD(&tasklet->link);
3591 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3592 struct ohci1394_iso_tasklet *tasklet)
3594 unsigned long flags, *usage;
3595 int n, i, r = -EBUSY;
3597 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3598 n = ohci->nb_iso_xmit_ctx;
3599 usage = &ohci->it_ctx_usage;
3602 n = ohci->nb_iso_rcv_ctx;
3603 usage = &ohci->ir_ctx_usage;
3605 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3606 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3607 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3613 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3615 for (i = 0; i < n; i++)
3616 if (!test_and_set_bit(i, usage)) {
3617 tasklet->context = i;
3618 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3623 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3628 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3629 struct ohci1394_iso_tasklet *tasklet)
3631 unsigned long flags;
3633 tasklet_kill(&tasklet->tasklet);
3635 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3637 if (tasklet->type == OHCI_ISO_TRANSMIT)
3638 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3640 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3642 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3643 clear_bit(0, &ohci->ir_multichannel_used);
3647 list_del(&tasklet->link);
3649 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3652 EXPORT_SYMBOL(ohci1394_stop_context);
3653 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3654 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3655 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3658 /***********************************
3659 * General module initialization *
3660 ***********************************/
3662 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3663 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3664 MODULE_LICENSE("GPL");
3666 static void __exit ohci1394_cleanup (void)
3668 pci_unregister_driver(&ohci1394_pci_driver);
3671 static int __init ohci1394_init(void)
3673 return pci_module_init(&ohci1394_pci_driver);
3676 module_init(ohci1394_init);
3677 module_exit(ohci1394_cleanup);