2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 * 2001 Ben Collins <bcollins@debian.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
29 * . DMA mmap for iso receive
30 * . Config ROM generation
32 * Things implemented, but still in test phase:
34 * . Async Stream Packets Transmit (Receive done via Iso interface)
36 * Things not implemented:
37 * . DMA error recovery
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41 * added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
47 * Adam J Richter <adam@yggdrasil.com>
48 * . Use of pci_class to find device
50 * Emilie Chung <emilie.chung@axis.com>
51 * . Tip on Async Request Filter
53 * Pascal Drolet <pascal.drolet@informission.ca>
54 * . Various tips for optimization and functionalities
56 * Robert Ficklin <rficklin@westengineering.com>
57 * . Loop in irq_handler
59 * James Goodwin <jamesg@Filanet.com>
60 * . Various tips on initialization, self-id reception, etc.
62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63 * . Apple PowerBook detection
65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66 * . Reset the board properly before leaving + misc cleanups
68 * Leon van Stuivenberg <leonvs@iae.nl>
71 * Ben Collins <bcollins@debian.org>
72 * . Working big-endian support
73 * . Updated to 2.4.x module scheme (PCI as well)
74 * . Config ROM generation
76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
77 * . Reworked code for initiating bus resets
78 * (long, short, with or without hold-off)
80 * Nandu Santhi <contactnandu@users.sourceforge.net>
81 * . Added support for nVidia nForce2 onboard Firewire chipset
85 #include <linux/config.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/slab.h>
89 #include <linux/interrupt.h>
90 #include <linux/wait.h>
91 #include <linux/errno.h>
92 #include <linux/module.h>
93 #include <linux/moduleparam.h>
94 #include <linux/pci.h>
96 #include <linux/poll.h>
97 #include <asm/byteorder.h>
98 #include <asm/atomic.h>
99 #include <asm/uaccess.h>
100 #include <linux/delay.h>
101 #include <linux/spinlock.h>
103 #include <asm/pgtable.h>
104 #include <asm/page.h>
106 #include <linux/sched.h>
107 #include <linux/types.h>
108 #include <linux/vmalloc.h>
109 #include <linux/init.h>
111 #ifdef CONFIG_PPC_PMAC
112 #include <asm/machdep.h>
113 #include <asm/pmac_feature.h>
114 #include <asm/prom.h>
115 #include <asm/pci-bridge.h>
119 #include "ieee1394.h"
120 #include "ieee1394_types.h"
124 #include "ieee1394_core.h"
125 #include "highlevel.h"
126 #include "ohci1394.h"
128 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
129 #define OHCI1394_DEBUG
136 #ifdef OHCI1394_DEBUG
137 #define DBGMSG(fmt, args...) \
138 printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
140 #define DBGMSG(fmt, args...)
143 #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
144 #define OHCI_DMA_ALLOC(fmt, args...) \
145 HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
146 ++global_outstanding_dmas, ## args)
147 #define OHCI_DMA_FREE(fmt, args...) \
148 HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
149 --global_outstanding_dmas, ## args)
150 static int global_outstanding_dmas = 0;
152 #define OHCI_DMA_ALLOC(fmt, args...)
153 #define OHCI_DMA_FREE(fmt, args...)
156 /* print general (card independent) information */
157 #define PRINT_G(level, fmt, args...) \
158 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
160 /* print card specific information */
161 #define PRINT(level, fmt, args...) \
162 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
164 static char version[] __devinitdata =
165 "$Rev: 1250 $ Ben Collins <bcollins@debian.org>";
167 /* Module Parameters */
168 static int phys_dma = 1;
169 module_param(phys_dma, int, 0644);
170 MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
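/* Editorial note (sketch of the parameter's effect, based on how the driver
 * uses it): with phys_dma enabled the OHCI physical request filters are
 * opened after bus resets, so remote nodes can read/write host memory
 * directly via the physical response unit. It can be disabled at load time:
 *     modprobe ohci1394 phys_dma=0
 */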
172 static void dma_trm_tasklet(unsigned long data);
173 static void dma_trm_reset(struct dma_trm_ctx *d);
175 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
176 enum context_type type, int ctx, int num_desc,
177 int buf_size, int split_buf_size, int context_base);
178 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
179 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
181 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
182 enum context_type type, int ctx, int num_desc,
185 static void ohci1394_pci_remove(struct pci_dev *pdev);
187 #ifndef __LITTLE_ENDIAN
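/* Quadlet count of the packet header for each transaction code (indexed by
 * tcode); packet_swab() below uses it to convert only the header quadlets
 * between host and bus byte order on big-endian machines. */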
188 static unsigned hdr_sizes[] =
190 3, /* TCODE_WRITEQ */
191 4, /* TCODE_WRITEB */
192 3, /* TCODE_WRITE_RESPONSE */
193 0, /* reserved */
194 3, /* TCODE_READQ */
195 4, /* TCODE_READB */
196 3, /* TCODE_READQ_RESPONSE */
197 4, /* TCODE_READB_RESPONSE */
198 1, /* TCODE_CYCLE_START (???) */
199 4, /* TCODE_LOCK_REQUEST */
200 2, /* TCODE_ISO_DATA */
201 4, /* TCODE_LOCK_RESPONSE */
205 static inline void packet_swab(quadlet_t *data, int tcode)
207 size_t size = hdr_sizes[tcode];
209 if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
210 return;
212 while (size--)
213 data[size] = swab32(data[size]);
216 /* Don't waste cycles on byte swaps when host and bus byte order already match */
217 #define packet_swab(w,x)
218 #endif /* !LITTLE_ENDIAN */
220 /***********************************
221 * IEEE-1394 functionality section *
222 ***********************************/
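/* PhyControl register layout used by get_phy_reg()/set_phy_reg() below
 * (per the OHCI 1.1 spec): bits 0-7 wrData, bits 8-11 regAddr,
 * bit 14 wrReg, bit 15 rdReg, bits 16-23 rdData, bit 31 rdDone. */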
224 static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
230 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
232 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
234 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
235 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
241 r = reg_read(ohci, OHCI1394_PhyControl);
243 if (i >= OHCI_LOOP_COUNT)
244 PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
245 r, r & 0x80000000, i);
247 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
249 return (r & 0x00ff0000) >> 16;
252 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
258 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
260 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
262 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
263 r = reg_read(ohci, OHCI1394_PhyControl);
264 if (!(r & 0x00004000))
270 if (i == OHCI_LOOP_COUNT)
271 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
272 r, r & 0x00004000, i);
274 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
279 /* ORs the given bits into the current value of the PHY register */
280 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
284 old = get_phy_reg (ohci, addr);
285 old |= data;
286 set_phy_reg (ohci, addr, old);
291 static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
292 int phyid, int isroot)
294 quadlet_t *q = ohci->selfid_buf_cpu;
295 quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
299 /* Check status of self-id reception */
301 if (ohci->selfid_swap)
302 q0 = le32_to_cpu(q[0]);
306 if ((self_id_count & 0x80000000) ||
307 ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
309 "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
310 self_id_count, q0, ohci->self_id_errors);
312 /* Tip by James Goodwin <jamesg@Filanet.com>:
313 * We had an error, generate another bus reset in response. */
314 if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
315 set_phy_reg_mask (ohci, 1, 0x40);
316 ohci->self_id_errors++;
319 "Too many errors on SelfID error reception, giving up!");
324 /* SelfID Ok, reset error counter. */
325 ohci->self_id_errors = 0;
327 size = ((self_id_count & 0x00001FFC) >> 2) - 1;
331 if (ohci->selfid_swap) {
332 q0 = le32_to_cpu(q[0]);
333 q1 = le32_to_cpu(q[1]);
340 DBGMSG ("SelfID packet 0x%x received", q0);
341 hpsb_selfid_received(host, cpu_to_be32(q0));
342 if (((q0 & 0x3f000000) >> 24) == phyid)
343 DBGMSG ("SelfID for this node is 0x%08x", q0);
346 "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
352 DBGMSG("SelfID complete");
357 static void ohci_soft_reset(struct ti_ohci *ohci) {
360 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
362 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
363 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
367 DBGMSG ("Soft reset finished");
371 /* Generate the dma receive prgs and start the context */
372 static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
374 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
377 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
379 for (i=0; i<d->num_desc; i++) {
382 c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
386 d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);
388 /* End of descriptor list? */
389 if (i + 1 < d->num_desc) {
390 d->prg_cpu[i]->branchAddress =
391 cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
393 d->prg_cpu[i]->branchAddress =
394 cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
397 d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
398 d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
404 if (d->type == DMA_CTX_ISO) {
405 /* Clear contextControl */
406 reg_write(ohci, d->ctrlClear, 0xffffffff);
408 /* Set bufferFill, isochHeader, multichannel for IR context */
409 reg_write(ohci, d->ctrlSet, 0xd0000000);
411 /* Set the context match register to match on all tags */
412 reg_write(ohci, d->ctxtMatch, 0xf0000000);
414 /* Clear the multi channel mask high and low registers */
415 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
416 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
418 /* Set up isoRecvIntMask to generate interrupts */
419 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
422 /* Tell the controller where the first AR program is */
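/* The low nibble of cmdPtr is the Z value, i.e. the number of descriptors
 * in the first descriptor block - 1 here, since each INPUT_MORE descriptor
 * of this program stands alone. */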
423 reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
426 reg_write(ohci, d->ctrlSet, 0x00008000);
428 DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
431 /* Initialize the dma transmit context */
432 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
434 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
436 /* Stop the context */
437 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
441 d->free_prgs = d->num_desc;
442 d->branchAddrPtr = NULL;
443 INIT_LIST_HEAD(&d->fifo_list);
444 INIT_LIST_HEAD(&d->pending_list);
446 if (d->type == DMA_CTX_ISO) {
447 /* enable interrupts */
448 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
451 DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
454 /* Count the number of available iso contexts */
455 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
460 reg_write(ohci, reg, 0xffffffff);
461 tmp = reg_read(ohci, reg);
463 DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
465 /* Count the number of contexts */
466 for (i=0; i<32; i++) {
473 /* Global initialization */
474 static void ohci_initialize(struct ti_ohci *ohci)
480 spin_lock_init(&ohci->phy_reg_lock);
482 /* Put some defaults to these undefined bus options */
483 buf = reg_read(ohci, OHCI1394_BusOptions);
484 buf |= 0x60000000; /* Enable CMC and ISC */
485 if (!hpsb_disable_irm)
486 buf |= 0x80000000; /* Enable IRMC */
487 buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
488 buf &= ~0x18000000; /* Disable PMC and BMC */
489 reg_write(ohci, OHCI1394_BusOptions, buf);
491 /* Set the bus number */
492 reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
494 /* Enable posted writes */
495 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);
497 /* Clear link control register */
498 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
500 /* Enable cycle timer and cycle master and set the IRM
501 * contender bit in our self ID packets if appropriate. */
502 reg_write(ohci, OHCI1394_LinkControlSet,
503 OHCI1394_LinkControl_CycleTimerEnable |
504 OHCI1394_LinkControl_CycleMaster);
505 set_phy_reg_mask(ohci, 4, PHY_04_LCTRL |
506 (hpsb_disable_irm ? 0 : PHY_04_CONTENDER));
508 /* Set up self-id dma buffer */
509 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
511 /* enable self-id and phys */
512 reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
513 OHCI1394_LinkControl_RcvPhyPkt);
515 /* Set the Config ROM mapping register */
516 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
518 /* Now get our max packet size */
519 ohci->max_packet_size =
520 1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
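/* max_packet_size comes from the max_rec field of BusOptions (bits 15:12):
 * the payload limit is 2^(max_rec + 1) bytes, e.g. max_rec = 9 -> 1024 bytes. */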
522 /* Don't accept phy packets into AR request context */
523 reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
525 /* Clear the interrupt mask */
526 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
527 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
529 /* Clear the interrupt mask */
530 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
531 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
533 /* Initialize AR dma */
534 initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
535 initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
537 /* Initialize AT dma */
538 initialize_dma_trm_ctx(&ohci->at_req_context);
539 initialize_dma_trm_ctx(&ohci->at_resp_context);
541 /* Initialize IR Legacy DMA channel mask */
542 ohci->ir_legacy_channels = 0;
545 * Accept AR requests (incoming async requests) from all nodes. This probably
546 * will have to be controlled from the subsystem
547 * on a per node basis.
549 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);
551 /* Specify AT retries */
552 reg_write(ohci, OHCI1394_ATRetries,
553 OHCI1394_MAX_AT_REQ_RETRIES |
554 (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
555 (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
557 /* We don't want hardware swapping */
558 reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
560 /* Enable interrupts */
561 reg_write(ohci, OHCI1394_IntMaskSet,
562 OHCI1394_unrecoverableError |
563 OHCI1394_masterIntEnable |
565 OHCI1394_selfIDComplete |
568 OHCI1394_respTxComplete |
569 OHCI1394_reqTxComplete |
572 OHCI1394_cycleInconsistent);
575 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
577 buf = reg_read(ohci, OHCI1394_Version);
579 sprintf (irq_buf, "%d", ohci->dev->irq);
581 sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
583 PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
584 "MMIO=[%lx-%lx] Max Packet=[%d]",
585 ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
586 ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
587 pci_resource_start(ohci->dev, 0),
588 pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
589 ohci->max_packet_size);
591 /* Check all of our ports to make sure that if anything is
592 * connected, we enable that port. */
593 num_ports = get_phy_reg(ohci, 2) & 0xf;
594 for (i = 0; i < num_ports; i++) {
597 set_phy_reg(ohci, 7, i);
598 status = get_phy_reg(ohci, 8);
601 set_phy_reg(ohci, 8, status & ~1);
604 /* Serial EEPROM Sanity check. */
605 if ((ohci->max_packet_size < 512) ||
606 (ohci->max_packet_size > 4096)) {
607 /* Serial EEPROM contents are suspect, set a sane max packet
608 * size and print the raw contents for bug reports if verbose
609 * debug is enabled. */
610 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
614 PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
615 "attempting to setting max_packet_size to 512 bytes");
616 reg_write(ohci, OHCI1394_BusOptions,
617 (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
618 ohci->max_packet_size = 512;
619 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
620 PRINT(KERN_DEBUG, " EEPROM Present: %d",
621 (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
622 reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);
626 (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
629 for (i = 0; i < 0x20; i++) {
630 reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
631 PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
632 (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
639 * Insert a packet in the DMA fifo and generate the DMA prg
640 * FIXME: rewrite the program in order to accept packets crossing
641 * page boundaries.
642 * check also that a single dma descriptor doesn't cross a
643 * page boundary.
645 static void insert_packet(struct ti_ohci *ohci,
646 struct dma_trm_ctx *d, struct hpsb_packet *packet)
649 int idx = d->prg_ind;
651 DBGMSG("Inserting packet for node " NODE_BUS_FMT
652 ", tlabel=%d, tcode=0x%x, speed=%d",
653 NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
654 packet->tcode, packet->speed_code);
656 d->prg_cpu[idx]->begin.address = 0;
657 d->prg_cpu[idx]->begin.branchAddress = 0;
659 if (d->type == DMA_CTX_ASYNC_RESP) {
661 * For response packets, we need to put a timeout value in
662 * the 16 lower bits of the status... let's try 1 sec timeout
664 cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
665 d->prg_cpu[idx]->begin.status = cpu_to_le32(
666 (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
667 ((cycleTimer&0x01fff000)>>12));
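/* The low 16 bits of status form the timeStamp: three bits of cycleSeconds
 * (current value + 1, mod 8) and thirteen bits of cycleCount, so the
 * response expires roughly one second from now. */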
669 DBGMSG("cycleTimer: %08x timeStamp: %08x",
670 cycleTimer, d->prg_cpu[idx]->begin.status);
672 d->prg_cpu[idx]->begin.status = 0;
674 if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {
676 if (packet->type == hpsb_raw) {
677 d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
678 d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
679 d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
681 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
682 (packet->header[0] & 0xFFFF);
684 if (packet->tcode == TCODE_ISO_DATA) {
685 /* Sending an async stream packet */
686 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
688 /* Sending a normal async request or response */
689 d->prg_cpu[idx]->data[1] =
690 (packet->header[1] & 0xFFFF) |
691 (packet->header[0] & 0xFFFF0000);
692 d->prg_cpu[idx]->data[2] = packet->header[2];
693 d->prg_cpu[idx]->data[3] = packet->header[3];
695 packet_swab(d->prg_cpu[idx]->data, packet->tcode);
698 if (packet->data_size) { /* block transmit */
699 if (packet->tcode == TCODE_STREAM_DATA){
700 d->prg_cpu[idx]->begin.control =
701 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
702 DMA_CTL_IMMEDIATE | 0x8);
704 d->prg_cpu[idx]->begin.control =
705 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
706 DMA_CTL_IMMEDIATE | 0x10);
708 d->prg_cpu[idx]->end.control =
709 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
714 * Check that the packet data buffer
715 * does not cross a page boundary.
717 * XXX Fix this some day. eth1394 seems to trigger
718 * it, but ignoring it doesn't seem to cause a
722 if (cross_bound((unsigned long)packet->data,
723 packet->data_size)>0) {
724 /* FIXME: do something about it */
726 "%s: packet data addr: %p size %Zd bytes "
727 "cross page boundary", __FUNCTION__,
728 packet->data, packet->data_size);
731 d->prg_cpu[idx]->end.address = cpu_to_le32(
732 pci_map_single(ohci->dev, packet->data,
735 OHCI_DMA_ALLOC("single, block transmit packet");
737 d->prg_cpu[idx]->end.branchAddress = 0;
738 d->prg_cpu[idx]->end.status = 0;
739 if (d->branchAddrPtr)
740 *(d->branchAddrPtr) =
741 cpu_to_le32(d->prg_bus[idx] | 0x3);
743 &(d->prg_cpu[idx]->end.branchAddress);
744 } else { /* quadlet transmit */
745 if (packet->type == hpsb_raw)
746 d->prg_cpu[idx]->begin.control =
747 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
751 (packet->header_size + 4));
753 d->prg_cpu[idx]->begin.control =
754 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
758 packet->header_size);
760 if (d->branchAddrPtr)
761 *(d->branchAddrPtr) =
762 cpu_to_le32(d->prg_bus[idx] | 0x2);
764 &(d->prg_cpu[idx]->begin.branchAddress);
767 } else { /* iso packet */
768 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
769 (packet->header[0] & 0xFFFF);
770 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
771 packet_swab(d->prg_cpu[idx]->data, packet->tcode);
773 d->prg_cpu[idx]->begin.control =
774 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
775 DMA_CTL_IMMEDIATE | 0x8);
776 d->prg_cpu[idx]->end.control =
777 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
782 d->prg_cpu[idx]->end.address = cpu_to_le32(
783 pci_map_single(ohci->dev, packet->data,
784 packet->data_size, PCI_DMA_TODEVICE));
785 OHCI_DMA_ALLOC("single, iso transmit packet");
787 d->prg_cpu[idx]->end.branchAddress = 0;
788 d->prg_cpu[idx]->end.status = 0;
789 DBGMSG("Iso xmit context info: header[%08x %08x]\n"
790 " begin=%08x %08x %08x %08x\n"
791 " %08x %08x %08x %08x\n"
792 " end =%08x %08x %08x %08x",
793 d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
794 d->prg_cpu[idx]->begin.control,
795 d->prg_cpu[idx]->begin.address,
796 d->prg_cpu[idx]->begin.branchAddress,
797 d->prg_cpu[idx]->begin.status,
798 d->prg_cpu[idx]->data[0],
799 d->prg_cpu[idx]->data[1],
800 d->prg_cpu[idx]->data[2],
801 d->prg_cpu[idx]->data[3],
802 d->prg_cpu[idx]->end.control,
803 d->prg_cpu[idx]->end.address,
804 d->prg_cpu[idx]->end.branchAddress,
805 d->prg_cpu[idx]->end.status);
806 if (d->branchAddrPtr)
807 *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
808 d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
812 /* queue the packet in the appropriate context queue */
813 list_add_tail(&packet->driver_list, &d->fifo_list);
814 d->prg_ind = (d->prg_ind + 1) % d->num_desc;
818 * This function fills the FIFO with the (eventual) pending packets
819 * and runs or wakes up the DMA prg if necessary.
821 * The function MUST be called with the d->lock held.
823 static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
825 struct hpsb_packet *packet, *ptmp;
826 int idx = d->prg_ind;
829 /* insert the packets into the dma fifo */
830 list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
834 /* For the first packet only */
836 z = (packet->data_size) ? 3 : 2;
838 /* Insert the packet */
839 list_del_init(&packet->driver_list);
840 insert_packet(ohci, d, packet);
843 /* Nothing was inserted (no free_prgs or no pending packets): nothing to start */
847 /* Is the context running ? (should be unless it is
848 the first packet to be sent in this context) */
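/* ContextControl bits used below (OHCI 1.1): run = bit 15 (0x8000),
 * wake = bit 12 (0x1000), active = bit 10 (0x400). */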
849 if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
850 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
852 DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
853 reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);
855 /* Check that the node id is valid, and not 63 */
856 if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
857 PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
859 reg_write(ohci, d->ctrlSet, 0x8000);
861 /* Wake up the dma context if necessary */
862 if (!(reg_read(ohci, d->ctrlSet) & 0x400))
863 DBGMSG("Waking transmit DMA ctx=%d",d->ctx);
865 /* do this always, to avoid race condition */
866 reg_write(ohci, d->ctrlSet, 0x1000);
872 /* Transmission of an async or iso packet */
873 static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
875 struct ti_ohci *ohci = host->hostdata;
876 struct dma_trm_ctx *d;
879 if (packet->data_size > ohci->max_packet_size) {
881 "Transmit packet size %Zd is too big",
886 /* Decide whether we have an iso, a request, or a response packet */
887 if (packet->type == hpsb_raw)
888 d = &ohci->at_req_context;
889 else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
890 /* The legacy IT DMA context is initialized on first
891 * use. However, the alloc cannot be run from
892 * interrupt context, so we bail out if that is the
893 * case. I don't see anyone sending ISO packets from
894 * interrupt context anyway... */
896 if (ohci->it_legacy_context.ohci == NULL) {
897 if (in_interrupt()) {
899 "legacy IT context cannot be initialized during interrupt");
903 if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
904 DMA_CTX_ISO, 0, IT_NUM_DESC,
905 OHCI1394_IsoXmitContextBase) < 0) {
907 "error initializing legacy IT context");
911 initialize_dma_trm_ctx(&ohci->it_legacy_context);
914 d = &ohci->it_legacy_context;
915 } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
916 d = &ohci->at_resp_context;
918 d = &ohci->at_req_context;
920 spin_lock_irqsave(&d->lock,flags);
922 list_add_tail(&packet->driver_list, &d->pending_list);
924 dma_trm_flush(ohci, d);
926 spin_unlock_irqrestore(&d->lock,flags);
931 static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
933 struct ti_ohci *ohci = host->hostdata;
942 phy_reg = get_phy_reg(ohci, 5);
944 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
947 phy_reg = get_phy_reg(ohci, 1);
949 set_phy_reg(ohci, 1, phy_reg); /* set IBR */
951 case SHORT_RESET_NO_FORCE_ROOT:
952 phy_reg = get_phy_reg(ohci, 1);
953 if (phy_reg & 0x80) {
955 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
958 phy_reg = get_phy_reg(ohci, 5);
960 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
962 case LONG_RESET_NO_FORCE_ROOT:
963 phy_reg = get_phy_reg(ohci, 1);
966 set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
968 case SHORT_RESET_FORCE_ROOT:
969 phy_reg = get_phy_reg(ohci, 1);
970 if (!(phy_reg & 0x80)) {
972 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
975 phy_reg = get_phy_reg(ohci, 5);
977 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
979 case LONG_RESET_FORCE_ROOT:
980 phy_reg = get_phy_reg(ohci, 1);
982 set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
989 case GET_CYCLE_COUNTER:
990 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
993 case SET_CYCLE_COUNTER:
994 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
998 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
1001 case ACT_CYCLE_MASTER:
1003 /* check if we are root and other nodes are present */
1004 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
1005 if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1007 * enable cycleTimer, cycleMaster
1009 DBGMSG("Cycle master enabled");
1010 reg_write(ohci, OHCI1394_LinkControlSet,
1011 OHCI1394_LinkControl_CycleTimerEnable |
1012 OHCI1394_LinkControl_CycleMaster);
1015 /* disable cycleTimer, cycleMaster, cycleSource */
1016 reg_write(ohci, OHCI1394_LinkControlClear,
1017 OHCI1394_LinkControl_CycleTimerEnable |
1018 OHCI1394_LinkControl_CycleMaster |
1019 OHCI1394_LinkControl_CycleSource);
1023 case CANCEL_REQUESTS:
1024 DBGMSG("Cancel request received");
1025 dma_trm_reset(&ohci->at_req_context);
1026 dma_trm_reset(&ohci->at_resp_context);
1029 case ISO_LISTEN_CHANNEL:
1032 struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
1033 int ir_legacy_active;
1035 if (arg<0 || arg>63) {
1037 "%s: IS0 listen channel %d is out of range",
1042 mask = (u64)0x1<<arg;
1044 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1046 if (ohci->ISO_channel_usage & mask) {
1048 "%s: IS0 listen channel %d is already used",
1050 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1054 ir_legacy_active = ohci->ir_legacy_channels;
1056 ohci->ISO_channel_usage |= mask;
1057 ohci->ir_legacy_channels |= mask;
1059 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1061 if (!ir_legacy_active) {
1062 if (ohci1394_register_iso_tasklet(ohci,
1063 &ohci->ir_legacy_tasklet) < 0) {
1064 PRINT(KERN_ERR, "No IR DMA context available");
1068 /* the IR context can be assigned to any DMA context
1069 * by ohci1394_register_iso_tasklet */
1070 d->ctx = ohci->ir_legacy_tasklet.context;
1071 d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
1073 d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
1075 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
1076 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
1078 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1080 PRINT(KERN_ERR, "IR legacy activated");
1083 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1086 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1089 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1092 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1093 DBGMSG("Listening enabled on channel %d", arg);
1096 case ISO_UNLISTEN_CHANNEL:
1100 if (arg<0 || arg>63) {
1102 "%s: IS0 unlisten channel %d is out of range",
1107 mask = (u64)0x1<<arg;
1109 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1111 if (!(ohci->ISO_channel_usage & mask)) {
1113 "%s: IS0 unlisten channel %d is not used",
1115 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1119 ohci->ISO_channel_usage &= ~mask;
1120 ohci->ir_legacy_channels &= ~mask;
1123 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1126 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1129 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1130 DBGMSG("Listening disabled on channel %d", arg);
1132 if (ohci->ir_legacy_channels == 0) {
1133 stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1134 DBGMSG("ISO legacy receive context stopped");
1140 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1147 /***********************************
1148 * rawiso ISO reception *
1149 ***********************************/
1152 We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1153 buffer is split into "blocks" (regions described by one DMA
1154 descriptor). Each block must be one page or less in size, and
1155 must not cross a page boundary.
1157 There is one little wrinkle with buffer-fill mode: a packet that
1158 starts in the final block may wrap around into the first block. But
1159 the user API expects all packets to be contiguous. Our solution is
1160 to keep the very last page of the DMA buffer in reserve - if a
1161 packet spans the gap, we copy its tail into this page.
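  As a concrete example (assuming 4 KB pages and a 64 KB DMA buffer):
  nblocks = 15, blocks 0-14 each cover one page, and page 15 is held back
  as the guard page. A packet that begins near the end of block 14 and
  wraps into block 0 has its tail memcpy'd to offset 15*PAGE_SIZE, so
  user space still sees one contiguous packet.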
1164 struct ohci_iso_recv {
1165 struct ti_ohci *ohci;
1167 struct ohci1394_iso_tasklet task;
1170 enum { BUFFER_FILL_MODE = 0,
1171 PACKET_PER_BUFFER_MODE = 1 } dma_mode;
1173 /* memory and PCI mapping for the DMA descriptors */
1174 struct dma_prog_region prog;
1175 struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */
1177 /* how many DMA blocks fit in the buffer */
1178 unsigned int nblocks;
1180 /* stride of DMA blocks */
1181 unsigned int buf_stride;
1183 /* number of blocks to batch between interrupts */
1184 int block_irq_interval;
1186 /* block that DMA will finish next */
1189 /* (buffer-fill only) block that the reader will release next */
1192 /* (buffer-fill only) bytes of buffer the reader has released,
1193 less than one block */
1196 /* (buffer-fill only) buffer offset at which the next packet will appear */
1199 /* OHCI DMA context control registers */
1200 u32 ContextControlSet;
1201 u32 ContextControlClear;
1206 static void ohci_iso_recv_task(unsigned long data);
1207 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1208 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1209 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1210 static void ohci_iso_recv_program(struct hpsb_iso *iso);
1212 static int ohci_iso_recv_init(struct hpsb_iso *iso)
1214 struct ti_ohci *ohci = iso->host->hostdata;
1215 struct ohci_iso_recv *recv;
1219 recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
1223 iso->hostdata = recv;
1225 recv->task_active = 0;
1226 dma_prog_region_init(&recv->prog);
1229 /* use buffer-fill mode, unless irq_interval is 1
1230 (note: multichannel requires buffer-fill) */
1232 if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
1233 iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
1234 recv->dma_mode = PACKET_PER_BUFFER_MODE;
1236 recv->dma_mode = BUFFER_FILL_MODE;
1239 /* set nblocks, buf_stride, block_irq_interval */
1241 if (recv->dma_mode == BUFFER_FILL_MODE) {
1242 recv->buf_stride = PAGE_SIZE;
1244 /* one block per page of data in the DMA buffer, minus the final guard page */
1245 recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1246 if (recv->nblocks < 3) {
1247 DBGMSG("ohci_iso_recv_init: DMA buffer too small");
1251 /* iso->irq_interval is in packets - translate that to blocks */
1252 if (iso->irq_interval == 1)
1253 recv->block_irq_interval = 1;
1255 recv->block_irq_interval = iso->irq_interval *
1256 ((recv->nblocks+1)/iso->buf_packets);
1257 if (recv->block_irq_interval*4 > recv->nblocks)
1258 recv->block_irq_interval = recv->nblocks/4;
1259 if (recv->block_irq_interval < 1)
1260 recv->block_irq_interval = 1;
1263 int max_packet_size;
1265 recv->nblocks = iso->buf_packets;
1266 recv->block_irq_interval = iso->irq_interval;
1267 if (recv->block_irq_interval * 4 > iso->buf_packets)
1268 recv->block_irq_interval = iso->buf_packets / 4;
1269 if (recv->block_irq_interval < 1)
1270 recv->block_irq_interval = 1;
1272 /* choose a buffer stride */
1273 /* must be a power of 2, and <= PAGE_SIZE */
1275 max_packet_size = iso->buf_size / iso->buf_packets;
1277 for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1278 recv->buf_stride *= 2);
1280 if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1281 recv->buf_stride > PAGE_SIZE) {
1282 /* this shouldn't happen, but anyway... */
1283 DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
1288 recv->block_reader = 0;
1289 recv->released_bytes = 0;
1290 recv->block_dma = 0;
1291 recv->dma_offset = 0;
1293 /* size of DMA program = one descriptor per block */
1294 if (dma_prog_region_alloc(&recv->prog,
1295 sizeof(struct dma_cmd) * recv->nblocks,
1299 recv->block = (struct dma_cmd*) recv->prog.kvirt;
1301 ohci1394_init_iso_tasklet(&recv->task,
1302 iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1304 ohci_iso_recv_task, (unsigned long) iso);
1306 if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
1311 recv->task_active = 1;
1313 /* recv context registers are spaced 32 bytes apart */
1314 ctx = recv->task.context;
1315 recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1316 recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1317 recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1318 recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1320 if (iso->channel == -1) {
1321 /* clear multi-channel selection mask */
1322 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1323 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1326 /* write the DMA program */
1327 ohci_iso_recv_program(iso);
1329 DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1330 " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1331 recv->dma_mode == BUFFER_FILL_MODE ?
1332 "buffer-fill" : "packet-per-buffer",
1333 iso->buf_size/PAGE_SIZE, iso->buf_size,
1334 recv->nblocks, recv->buf_stride, recv->block_irq_interval);
1339 ohci_iso_recv_shutdown(iso);
1343 static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1345 struct ohci_iso_recv *recv = iso->hostdata;
1347 /* disable interrupts */
1348 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1351 ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1354 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1356 struct ohci_iso_recv *recv = iso->hostdata;
1358 if (recv->task_active) {
1359 ohci_iso_recv_stop(iso);
1360 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1361 recv->task_active = 0;
1364 dma_prog_region_free(&recv->prog);
1366 iso->hostdata = NULL;
1369 /* set up a "gapped" ring buffer DMA program */
1370 static void ohci_iso_recv_program(struct hpsb_iso *iso)
1372 struct ohci_iso_recv *recv = iso->hostdata;
1375 /* address of 'branch' field in previous DMA descriptor */
1376 u32 *prev_branch = NULL;
1378 for (blk = 0; blk < recv->nblocks; blk++) {
1381 /* the DMA descriptor */
1382 struct dma_cmd *cmd = &recv->block[blk];
1384 /* offset of the DMA descriptor relative to the DMA prog buffer */
1385 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1387 /* offset of this packet's data within the DMA buffer */
1388 unsigned long buf_offset = blk * recv->buf_stride;
1390 if (recv->dma_mode == BUFFER_FILL_MODE) {
1391 control = 2 << 28; /* INPUT_MORE */
1393 control = 3 << 28; /* INPUT_LAST */
1396 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1398 /* interrupt on last block, and at intervals */
1399 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1400 control |= 3 << 20; /* want interrupt */
1403 control |= 3 << 18; /* enable branch to address */
1404 control |= recv->buf_stride;
1406 cmd->control = cpu_to_le32(control);
1407 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1408 cmd->branchAddress = 0; /* filled in on next loop */
1409 cmd->status = cpu_to_le32(recv->buf_stride);
1411 /* link the previous descriptor to this one */
1413 *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1416 prev_branch = &cmd->branchAddress;
1419 /* the final descriptor's branch address and Z should be left at 0 */
1422 /* listen or unlisten to a specific channel (multi-channel mode only) */
1423 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1425 struct ohci_iso_recv *recv = iso->hostdata;
1429 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1432 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1436 reg_write(recv->ohci, reg, (1 << i));
1438 /* issue a dummy read to force all PCI writes to be posted immediately */
1440 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1443 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1445 struct ohci_iso_recv *recv = iso->hostdata;
1448 for (i = 0; i < 64; i++) {
1449 if (mask & (1ULL << i)) {
1451 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1453 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1456 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1458 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1462 /* issue a dummy read to force all PCI writes to be posted immediately */
1464 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1467 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
1469 struct ohci_iso_recv *recv = iso->hostdata;
1470 struct ti_ohci *ohci = recv->ohci;
1471 u32 command, contextMatch;
1473 reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
1476 /* always keep ISO headers */
1477 command = (1 << 30);
1479 if (recv->dma_mode == BUFFER_FILL_MODE)
1480 command |= (1 << 31);
1482 reg_write(recv->ohci, recv->ContextControlSet, command);
1484 /* match on specified tags */
1485 contextMatch = tag_mask << 28;
1487 if (iso->channel == -1) {
1488 /* enable multichannel reception */
1489 reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
1491 /* listen on channel */
1492 contextMatch |= iso->channel;
1498 /* enable cycleMatch */
1499 reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
1501 /* set starting cycle */
1504 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1505 just snarf them from the current time */
1506 seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1508 /* advance one second to give some extra time for DMA to start */
1511 cycle |= (seconds & 3) << 13;
1513 contextMatch |= cycle << 12;
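/* cycleMatch occupies bits 26:12 of ContextMatch: the two low bits of
 * cycleSeconds followed by the 13-bit cycleCount. */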
1517 /* set sync flag on first DMA descriptor */
1518 struct dma_cmd *cmd = &recv->block[recv->block_dma];
1519 cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
1521 /* match sync field */
1522 contextMatch |= (sync&0xf)<<8;
1525 reg_write(recv->ohci, recv->ContextMatch, contextMatch);
1527 /* address of first descriptor block */
1528 command = dma_prog_region_offset_to_bus(&recv->prog,
1529 recv->block_dma * sizeof(struct dma_cmd));
1530 command |= 1; /* Z=1 */
1532 reg_write(recv->ohci, recv->CommandPtr, command);
1534 /* enable interrupts */
1535 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
1540 reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
1542 /* issue a dummy read of the cycle timer register to force
1543 all PCI writes to be posted immediately */
1545 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1548 if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
1550 "Error starting IR DMA (ContextControl 0x%08x)\n",
1551 reg_read(recv->ohci, recv->ContextControlSet));
1558 static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1560 /* re-use the DMA descriptor for the block */
1561 /* by linking the previous descriptor to it */
1564 int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1566 struct dma_cmd *next = &recv->block[next_i];
1567 struct dma_cmd *prev = &recv->block[prev_i];
1569 /* 'next' becomes the new end of the DMA chain,
1570 so disable branch and enable interrupt */
1571 next->branchAddress = 0;
1572 next->control |= cpu_to_le32(3 << 20);
1573 next->status = cpu_to_le32(recv->buf_stride);
1575 /* link prev to next */
1576 prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1577 sizeof(struct dma_cmd) * next_i)
1580 /* disable interrupt on previous DMA descriptor, except at intervals */
1581 if ((prev_i % recv->block_irq_interval) == 0) {
1582 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1584 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1588 /* wake up DMA in case it fell asleep */
1589 reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1592 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1593 struct hpsb_iso_packet_info *info)
1597 /* release the memory where the packet was */
1600 /* add the wasted space for padding to 4 bytes */
1601 if (len % 4)
1602 len += 4 - (len % 4);
1604 /* add 8 bytes for the OHCI DMA data format overhead */
1607 recv->released_bytes += len;
1609 /* have we released enough memory for one block? */
1610 while (recv->released_bytes > recv->buf_stride) {
1611 ohci_iso_recv_release_block(recv, recv->block_reader);
1612 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1613 recv->released_bytes -= recv->buf_stride;
1617 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1619 struct ohci_iso_recv *recv = iso->hostdata;
1620 if (recv->dma_mode == BUFFER_FILL_MODE) {
1621 ohci_iso_recv_bufferfill_release(recv, info);
1623 ohci_iso_recv_release_block(recv, info - iso->infos);
1627 /* parse all packets from blocks that have been fully received */
1628 static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1632 struct ti_ohci *ohci = recv->ohci;
1635 /* we expect the next parsable packet to begin at recv->dma_offset */
1636 /* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
1638 unsigned int offset;
1639 unsigned short len, cycle;
1640 unsigned char channel, tag, sy;
1642 unsigned char *p = iso->data_buf.kvirt;
1644 unsigned int this_block = recv->dma_offset/recv->buf_stride;
1646 /* don't loop indefinitely */
1647 if (runaway++ > 100000) {
1648 atomic_inc(&iso->overflows);
1650 "IR DMA error - Runaway during buffer parsing!\n");
1654 /* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
1655 if (this_block == recv->block_dma)
1660 /* parse data length, tag, channel, and sy */
1662 /* note: we keep our own local copies of 'len' and 'offset'
1663 so the user can't mess with them by poking in the mmap area */
1665 len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
1669 "IR DMA error - bogus 'len' value %u\n", len);
1672 channel = p[recv->dma_offset+1] & 0x3F;
1673 tag = p[recv->dma_offset+1] >> 6;
1674 sy = p[recv->dma_offset+0] & 0xF;
1676 /* advance to data payload */
1677 recv->dma_offset += 4;
1679 /* check for wrap-around */
1680 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1681 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1684 /* dma_offset now points to the first byte of the data payload */
1685 offset = recv->dma_offset;
1687 /* advance to xferStatus/timeStamp */
1688 recv->dma_offset += len;
1690 /* payload is padded to 4 bytes */
1691 if (len % 4)
1692 recv->dma_offset += 4 - (len%4);
1695 /* check for wrap-around */
1696 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1697 /* uh oh, the packet data wraps from the last
1698 to the first DMA block - make the packet
1699 contiguous by copying its "tail" into the
1702 int guard_off = recv->buf_stride*recv->nblocks;
1703 int tail_len = len - (guard_off - offset);
1705 if (tail_len > 0 && tail_len < recv->buf_stride) {
1706 memcpy(iso->data_buf.kvirt + guard_off,
1707 iso->data_buf.kvirt,
1711 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1714 /* parse timestamp */
1715 cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
1718 /* advance to next packet */
1719 recv->dma_offset += 4;
1721 /* check for wrap-around */
1722 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1723 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1726 hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
1733 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1736 struct ti_ohci *ohci = recv->ohci;
1738 /* loop over all blocks */
1739 for (loop = 0; loop < recv->nblocks; loop++) {
1741 /* check block_dma to see if it's done */
1742 struct dma_cmd *im = &recv->block[recv->block_dma];
1744 /* check the DMA descriptor for new writes to xferStatus */
1745 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1747 /* rescount is the number of bytes *remaining to be written* in the block */
1748 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1750 unsigned char event = xferstatus & 0x1F;
1753 /* nothing has happened to this block yet */
1757 if (event != 0x11) {
1758 atomic_inc(&iso->overflows);
1760 "IR DMA error - OHCI error code 0x%02x\n", event);
1763 if (rescount != 0) {
1764 /* the card is still writing to this block;
1765 we can't touch it until it's done */
1769 /* OK, the block is finished... */
1771 /* sync our view of the block */
1772 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1774 /* reset the DMA descriptor */
1775 im->status = recv->buf_stride;
1777 /* advance block_dma */
1778 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1780 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1781 atomic_inc(&iso->overflows);
1782 DBGMSG("ISO reception overflow - "
1783 "ran out of DMA blocks");
1787 /* parse any packets that have arrived */
1788 ohci_iso_recv_bufferfill_parse(iso, recv);
1791 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1795 struct ti_ohci *ohci = recv->ohci;
1797 /* loop over the entire buffer */
1798 for (count = 0; count < recv->nblocks; count++) {
1801 /* pointer to the DMA descriptor */
1802 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1804 /* check the DMA descriptor for new writes to xferStatus */
1805 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1806 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1808 unsigned char event = xferstatus & 0x1F;
1811 /* this packet hasn't come in yet; we are done for now */
1815 if (event == 0x11) {
1816 /* packet received successfully! */
1818 /* rescount is the number of bytes *remaining* in the packet buffer,
1819 after the packet was written */
1820 packet_len = recv->buf_stride - rescount;
1822 } else if (event == 0x02) {
1823 PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1825 PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1828 /* sync our view of the buffer */
1829 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1831 /* record the per-packet info */
1833 /* iso header is 8 bytes ahead of the data payload */
1836 unsigned int offset;
1837 unsigned short cycle;
1838 unsigned char channel, tag, sy;
1840 offset = iso->pkt_dma * recv->buf_stride;
1841 hdr = iso->data_buf.kvirt + offset;
1843 /* skip iso header */
1847 cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1848 channel = hdr[5] & 0x3F;
1849 tag = hdr[5] >> 6;
1850 sy = hdr[4] & 0xF;
1852 hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
1855 /* reset the DMA descriptor */
1856 il->status = recv->buf_stride;
1859 recv->block_dma = iso->pkt_dma;
1867 static void ohci_iso_recv_task(unsigned long data)
1869 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1870 struct ohci_iso_recv *recv = iso->hostdata;
1872 if (recv->dma_mode == BUFFER_FILL_MODE)
1873 ohci_iso_recv_bufferfill_task(iso, recv);
1875 ohci_iso_recv_packetperbuf_task(iso, recv);
1878 /***********************************
1879 * rawiso ISO transmission *
1880 ***********************************/
1882 struct ohci_iso_xmit {
1883 struct ti_ohci *ohci;
1884 struct dma_prog_region prog;
1885 struct ohci1394_iso_tasklet task;
1888 u32 ContextControlSet;
1889 u32 ContextControlClear;
1893 /* transmission DMA program:
1894 one OUTPUT_MORE_IMMEDIATE for the IT header
1895 one OUTPUT_LAST for the buffer data */
1897 struct iso_xmit_cmd {
1898 struct dma_cmd output_more_immediate;
1901 struct dma_cmd output_last;
1904 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1905 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1906 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1907 static void ohci_iso_xmit_task(unsigned long data);
1909 static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1911 struct ohci_iso_xmit *xmit;
1912 unsigned int prog_size;
1916 xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
1920 iso->hostdata = xmit;
1921 xmit->ohci = iso->host->hostdata;
1922 xmit->task_active = 0;
1924 dma_prog_region_init(&xmit->prog);
1926 prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1928 if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1931 ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1932 ohci_iso_xmit_task, (unsigned long) iso);
1934 if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
1939 xmit->task_active = 1;
1941 /* xmit context registers are spaced 16 bytes apart */
1942 ctx = xmit->task.context;
1943 xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1944 xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1945 xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1950 ohci_iso_xmit_shutdown(iso);
1954 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1956 struct ohci_iso_xmit *xmit = iso->hostdata;
1957 struct ti_ohci *ohci = xmit->ohci;
1959 /* disable interrupts */
1960 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1963 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1964 /* XXX the DMA context will lock up if you try to send too much data! */
1966 "you probably exceeded the OHCI card's bandwidth limit - "
1967 "reload the module and reduce xmit bandwidth");
1971 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1973 struct ohci_iso_xmit *xmit = iso->hostdata;
1975 if (xmit->task_active) {
1976 ohci_iso_xmit_stop(iso);
1977 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1978 xmit->task_active = 0;
1981 dma_prog_region_free(&xmit->prog);
1983 iso->hostdata = NULL;
1986 static void ohci_iso_xmit_task(unsigned long data)
1988 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1989 struct ohci_iso_xmit *xmit = iso->hostdata;
1990 struct ti_ohci *ohci = xmit->ohci;
1994 /* check the whole buffer if necessary, starting at pkt_dma */
1995 for (count = 0; count < iso->buf_packets; count++) {
1998 /* DMA descriptor */
1999 struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
2001 /* check for new writes to xferStatus */
2002 u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
2003 u8 event = xferstatus & 0x1F;
2006 /* packet hasn't been sent yet; we are done for now */
2012 "IT DMA error - OHCI error code 0x%02x\n", event);
2014 /* at least one packet went out, so wake up the writer */
2018 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
2020 /* tell the subsystem the packet has gone out */
2021 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
2023 /* reset the DMA descriptor for next time */
2024 cmd->output_last.status = 0;
2031 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
2033 struct ohci_iso_xmit *xmit = iso->hostdata;
2034 struct ti_ohci *ohci = xmit->ohci;
2037 struct iso_xmit_cmd *next, *prev;
2039 unsigned int offset;
2041 unsigned char tag, sy;
2043 /* check that the packet doesn't cross a page boundary
2044 (we could allow this if we added OUTPUT_MORE descriptor support) */
2045 if (cross_bound(info->offset, info->len)) {
2047 "rawiso xmit: packet %u crosses a page boundary",
2052 offset = info->offset;
2057 /* sync up the card's view of the buffer */
2058 dma_region_sync_for_device(&iso->data_buf, offset, len);
2060 /* append first_packet to the DMA chain */
2061 /* by linking the previous descriptor to it */
2062 /* (next will become the new end of the DMA chain) */
2064 next_i = iso->first_packet;
2065 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
2067 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
2068 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
2070 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
2071 memset(next, 0, sizeof(struct iso_xmit_cmd));
2072 next->output_more_immediate.control = cpu_to_le32(0x02000008);
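/* 0x02000008 = OUTPUT_MORE descriptor with key = immediate and reqCount = 8:
 * the two-quadlet IT header below travels inside the descriptor itself
 * rather than in the data buffer. */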
2074 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
2076 /* tcode = 0xA, and sy */
2077 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
2079 /* tag and channel number */
2080 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
2082 /* transmission speed */
2083 next->iso_hdr[2] = iso->speed & 0x7;
2086 next->iso_hdr[6] = len & 0xFF;
2087 next->iso_hdr[7] = len >> 8;
2089 /* set up the OUTPUT_LAST */
2090 next->output_last.control = cpu_to_le32(1 << 28);
2091 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
2092 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
2093 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
2094 next->output_last.control |= cpu_to_le32(len);
2096 /* payload bus address */
2097 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
2099 /* leave branchAddress at zero for now */
2101 /* re-write the previous DMA descriptor to chain to this one */
2103 /* set prev branch address to point to next (Z=3) */
2104 prev->output_last.branchAddress = cpu_to_le32(
2105 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
2107 /* disable interrupt, unless required by the IRQ interval */
2108 if (prev_i % iso->irq_interval) {
2109 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
2111 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
2116 /* wake DMA in case it is sleeping */
2117 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
2119 /* issue a dummy read of the cycle timer to force all PCI
2120 writes to be posted immediately */
2122 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
2127 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2129 struct ohci_iso_xmit *xmit = iso->hostdata;
2130 struct ti_ohci *ohci = xmit->ohci;
2132 /* clear out the control register */
2133 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2136 /* address and length of first descriptor block (Z=3) */
2137 reg_write(xmit->ohci, xmit->CommandPtr,
2138 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2142 u32 start = cycle & 0x1FFF;
2144 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2145 just snarf them from the current time */
2146 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2148 /* advance one second to give some extra time for DMA to start */
2151 start |= (seconds & 3) << 13;
2153 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2156 /* enable interrupts */
2157 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2160 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2163 /* wait 100 usec to give the card time to go active */
2166 /* check the RUN bit */
2167 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2168 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2169 reg_read(xmit->ohci, xmit->ContextControlSet));
2176 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2181 return ohci_iso_xmit_init(iso);
2183 return ohci_iso_xmit_start(iso, arg);
2185 ohci_iso_xmit_stop(iso);
2188 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2190 ohci_iso_xmit_shutdown(iso);
2194 return ohci_iso_recv_init(iso);
2196 int *args = (int*) arg;
2197 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2200 ohci_iso_recv_stop(iso);
2203 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2206 ohci_iso_recv_task((unsigned long) iso);
2209 ohci_iso_recv_shutdown(iso);
2211 case RECV_LISTEN_CHANNEL:
2212 ohci_iso_recv_change_channel(iso, arg, 1);
2214 case RECV_UNLISTEN_CHANNEL:
2215 ohci_iso_recv_change_channel(iso, arg, 0);
2217 case RECV_SET_CHANNEL_MASK:
2218 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2222 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2229 /***************************************
2230 * IEEE-1394 functionality section END *
2231 ***************************************/
2234 /********************************************************
2235 * Global stuff (interrupt handler, init/shutdown code) *
2236 ********************************************************/
2238 static void dma_trm_reset(struct dma_trm_ctx *d)
2240 unsigned long flags;
2241 LIST_HEAD(packet_list);
2242 struct ti_ohci *ohci = d->ohci;
2243 struct hpsb_packet *packet, *ptmp;
2245 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2247 /* Lock the context, reset it and release it. Move the packets
2248 * that were pending in the context to packet_list and free
2249 * them after releasing the lock. */
2251 spin_lock_irqsave(&d->lock, flags);
2253 list_splice(&d->fifo_list, &packet_list);
2254 list_splice(&d->pending_list, &packet_list);
2255 INIT_LIST_HEAD(&d->fifo_list);
2256 INIT_LIST_HEAD(&d->pending_list);
2258 d->branchAddrPtr = NULL;
2259 d->sent_ind = d->prg_ind;
2260 d->free_prgs = d->num_desc;
2262 spin_unlock_irqrestore(&d->lock, flags);
2264 if (list_empty(&packet_list))
2267 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2269 /* Now process subsystem callbacks for the packets from this
2271 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2272 list_del_init(&packet->driver_list);
2273 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
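/*
 * Note (illustrative): the splice-under-lock pattern above is
 * equivalent to list_splice_init() -- pending packets are moved onto a
 * private list while d->lock is held, and the hpsb_packet_sent()
 * callbacks run only after the lock is dropped, presumably so the
 * subsystem can safely re-enter the driver from the callback.
 */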
2277 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2281 struct ohci1394_iso_tasklet *t;
2284 spin_lock(&ohci->iso_tasklet_list_lock);
2286 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2287 mask = 1 << t->context;
2289 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2290 tasklet_schedule(&t->tasklet);
2291 else if (rx_event & mask)
2292 tasklet_schedule(&t->tasklet);
2295 spin_unlock(&ohci->iso_tasklet_list_lock);
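/*
 * Example (illustrative): rx_event/tx_event are per-context bitmasks
 * read from the Iso*IntEvent registers.  If IsoRecvIntEvent reads
 * 0x00000005, contexts 0 and 2 have pending work, so the tasklets
 * registered for those contexts get scheduled here.
 */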
2299 static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
2300 struct pt_regs *regs_are_unused)
2302 quadlet_t event, node_id;
2303 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2304 struct hpsb_host *host = ohci->host;
2305 int phyid = -1, isroot = 0;
2306 unsigned long flags;
2308 /* Read and clear the interrupt event register. Don't clear
2309 * the busReset event, though. This is done when we get the
2310 * selfIDComplete interrupt. */
2311 spin_lock_irqsave(&ohci->event_lock, flags);
2312 event = reg_read(ohci, OHCI1394_IntEventClear);
2313 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2314 spin_unlock_irqrestore(&ohci->event_lock, flags);
2319 /* If event is ~(u32)0, the CardBus card was ejected. In this case
2320 * we just return, and clean up in the ohci1394_pci_remove
2322 if (event == ~(u32) 0) {
2323 DBGMSG("Device removed.");
2327 DBGMSG("IntEvent: %08x", event);
2329 if (event & OHCI1394_unrecoverableError) {
2331 PRINT(KERN_ERR, "Unrecoverable error!");
2333 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2334 PRINT(KERN_ERR, "Async Req Tx Context died: "
2335 "ctrl[%08x] cmdptr[%08x]",
2336 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2337 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2339 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2340 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2341 "ctrl[%08x] cmdptr[%08x]",
2342 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2343 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2345 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2346 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2347 "ctrl[%08x] cmdptr[%08x]",
2348 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2349 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2351 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2352 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2353 "ctrl[%08x] cmdptr[%08x]",
2354 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2355 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2357 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2358 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2359 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2360 "ctrl[%08x] cmdptr[%08x]", ctx,
2361 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2362 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2365 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2366 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2367 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2368 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2369 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2370 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2371 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2374 event &= ~OHCI1394_unrecoverableError;
2377 if (event & OHCI1394_cycleInconsistent) {
2378 /* We subscribe to the cycleInconsistent event only to
2379 * clear the corresponding event bit... otherwise,
2380 * isochronous cycleMatch DMA won't work. */
2381 DBGMSG("OHCI1394_cycleInconsistent");
2382 event &= ~OHCI1394_cycleInconsistent;
2385 if (event & OHCI1394_busReset) {
2386 /* The busReset event bit can't be cleared during the
2387 * selfID phase, so we disable busReset interrupts, to
2388 * avoid burying the CPU in interrupt requests. */
2389 spin_lock_irqsave(&ohci->event_lock, flags);
2390 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2392 if (ohci->check_busreset) {
2397 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2398 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2400 spin_unlock_irqrestore(&ohci->event_lock, flags);
2402 spin_lock_irqsave(&ohci->event_lock, flags);
2404 /* The loop counter check is to prevent the driver
2405 * from remaining in this state forever. For the
2406 * initial bus reset, the loop can continue forever
2407 * and the system hangs until some device is manually
2408 * plugged into or out of a port! The forced reset seems
2409 * to solve this problem. This mainly affects nForce2. */
2410 if (loop_count > 10000) {
2411 ohci_devctl(host, RESET_BUS, LONG_RESET);
2412 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2419 spin_unlock_irqrestore(&ohci->event_lock, flags);
2420 if (!host->in_bus_reset) {
2421 DBGMSG("irq_handler: Bus reset requested");
2423 /* Subsystem call */
2424 hpsb_bus_reset(ohci->host);
2426 event &= ~OHCI1394_busReset;
2429 if (event & OHCI1394_reqTxComplete) {
2430 struct dma_trm_ctx *d = &ohci->at_req_context;
2431 DBGMSG("Got reqTxComplete interrupt "
2432 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2433 if (reg_read(ohci, d->ctrlSet) & 0x800)
2434 ohci1394_stop_context(ohci, d->ctrlClear,
2437 dma_trm_tasklet((unsigned long)d);
2438 //tasklet_schedule(&d->task);
2439 event &= ~OHCI1394_reqTxComplete;
2441 if (event & OHCI1394_respTxComplete) {
2442 struct dma_trm_ctx *d = &ohci->at_resp_context;
2443 DBGMSG("Got respTxComplete interrupt "
2444 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2445 if (reg_read(ohci, d->ctrlSet) & 0x800)
2446 ohci1394_stop_context(ohci, d->ctrlClear,
2449 tasklet_schedule(&d->task);
2450 event &= ~OHCI1394_respTxComplete;
2452 if (event & OHCI1394_RQPkt) {
2453 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2454 DBGMSG("Got RQPkt interrupt status=0x%08X",
2455 reg_read(ohci, d->ctrlSet));
2456 if (reg_read(ohci, d->ctrlSet) & 0x800)
2457 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2459 tasklet_schedule(&d->task);
2460 event &= ~OHCI1394_RQPkt;
2462 if (event & OHCI1394_RSPkt) {
2463 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2464 DBGMSG("Got RSPkt interrupt status=0x%08X",
2465 reg_read(ohci, d->ctrlSet));
2466 if (reg_read(ohci, d->ctrlSet) & 0x800)
2467 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2469 tasklet_schedule(&d->task);
2470 event &= ~OHCI1394_RSPkt;
2472 if (event & OHCI1394_isochRx) {
2475 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2476 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2477 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2478 event &= ~OHCI1394_isochRx;
2480 if (event & OHCI1394_isochTx) {
2483 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2484 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2485 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2486 event &= ~OHCI1394_isochTx;
2488 if (event & OHCI1394_selfIDComplete) {
2489 if (host->in_bus_reset) {
2490 node_id = reg_read(ohci, OHCI1394_NodeID);
2492 if (!(node_id & 0x80000000)) {
2494 "SelfID received, but NodeID invalid "
2495 "(probably new bus reset occurred): %08X",
2497 goto selfid_not_valid;
2500 phyid = node_id & 0x0000003f;
2501 isroot = (node_id & 0x40000000) != 0;
2503 DBGMSG("SelfID interrupt received "
2504 "(phyid %d, %s)", phyid,
2505 (isroot ? "root" : "not root"));
2507 handle_selfid(ohci, host, phyid, isroot);
2509 /* Clear the bus reset event and re-enable the
2510 * busReset interrupt. */
2511 spin_lock_irqsave(&ohci->event_lock, flags);
2512 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2513 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2514 spin_unlock_irqrestore(&ohci->event_lock, flags);
2516 /* Accept Physical requests from all nodes. */
2517 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
2518 reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
2520 /* Turn on phys dma reception.
2522 * TODO: Enable some sort of filtering management.
2525 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
2526 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
2527 reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
2529 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
2530 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
2533 DBGMSG("PhyReqFilter=%08x%08x",
2534 reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
2535 reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
2537 hpsb_selfid_complete(host, phyid, isroot);
2540 "SelfID received outside of bus reset sequence");
2543 event &= ~OHCI1394_selfIDComplete;
2546 /* Make sure we handle everything, just in case we accidentally
2547 * enabled an interrupt that we didn't write a handler for. */
2549 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
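/*
 * Note (illustrative): each handled condition clears its bit from
 * 'event' (event &= ~FLAG), so anything still set at this point is an
 * interrupt source that was enabled without a handler and gets
 * reported by this message.
 */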
2555 /* Put the buffer back into the dma context */
2556 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2558 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2559 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2561 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2562 d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2563 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2564 d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2566 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2567 * context program descriptors before it sees the wakeup bit set. */
2570 /* wake up the dma context if necessary */
2571 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2573 "Waking dma ctx=%d ... processing is probably too slow",
2577 /* do this always, to avoid race condition */
2578 reg_write(ohci, d->ctrlSet, 0x1000);
2581 #define cond_le32_to_cpu(data, noswap) \
2582 (noswap ? data : le32_to_cpu(data))
2584 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2585 -1, 0, -1, 0, -1, -1, 16, -1};
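/*
 * Note (illustrative): TCODE_SIZE gives, per transaction code, the
 * fixed number of bytes such a packet occupies in the AR buffer.  A 0
 * entry means a block packet whose size must be taken from the
 * data_length field instead (handled in packet_length() below), and -1
 * marks tcodes that should never show up in an async receive context.
 */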
2588 * Determine the length of a packet in the buffer
2589 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2591 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2592 int offset, unsigned char tcode, int noswap)
2596 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2597 length = TCODE_SIZE[tcode];
2599 if (offset + 12 >= d->buf_size) {
2600 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2601 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2603 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2607 } else if (d->type == DMA_CTX_ISO) {
2608 /* Assumption: buffer fill mode with header/trailer */
2609 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2612 if (length > 0 && length % 4)
2613 length += 4 - (length % 4);
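/*
 * Worked example (illustrative): a 17-byte packet gives 17 % 4 == 1,
 * so 4 - 1 == 3 padding bytes are added and the caller advances by 20
 * bytes (5 quadlets) -- packets always occupy a whole number of
 * quadlets in the receive buffer.
 */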
2618 /* Tasklet that processes dma receive buffers */
2619 static void dma_rcv_tasklet (unsigned long data)
2621 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2622 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2623 unsigned int split_left, idx, offset, rescount;
2624 unsigned char tcode;
2625 int length, bytes_left, ack;
2626 unsigned long flags;
2631 spin_lock_irqsave(&d->lock, flags);
2634 offset = d->buf_offset;
2635 buf_ptr = d->buf_cpu[idx] + offset/4;
2637 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2638 bytes_left = d->buf_size - rescount - offset;
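/*
 * Note (illustrative): 'rescount' is the residual byte count the
 * controller writes back into the descriptor status (bytes of the
 * buffer still unused), so e.g. with buf_size == 16384, rescount ==
 * 12000 and offset == 0 there are 4384 bytes of freshly received
 * packet data to parse in the loop below.
 */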
2640 while (bytes_left > 0) {
2641 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2643 /* packet_length() will return < 4 for an error */
2644 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2646 if (length < 4) { /* something is wrong */
2647 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2648 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2650 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2651 spin_unlock_irqrestore(&d->lock, flags);
2655 /* The first case is where we have a packet that crosses
2656 * over more than one descriptor. The next case is where
2657 * it's all in the first descriptor. */
2658 if ((offset + length) > d->buf_size) {
2659 DBGMSG("Split packet rcv'd");
2660 if (length > d->split_buf_size) {
2661 ohci1394_stop_context(ohci, d->ctrlClear,
2662 "Split packet size exceeded");
2664 d->buf_offset = offset;
2665 spin_unlock_irqrestore(&d->lock, flags);
2669 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2671 /* Other part of packet not written yet.
2672 * This should never happen; in any case
2673 * we'll pick it up on the next call. */
2675 "Got only half a packet!");
2677 d->buf_offset = offset;
2678 spin_unlock_irqrestore(&d->lock, flags);
2682 split_left = length;
2683 split_ptr = (char *)d->spb;
2684 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2685 split_left -= d->buf_size-offset;
2686 split_ptr += d->buf_size-offset;
2687 insert_dma_buffer(d, idx);
2688 idx = (idx+1) % d->num_desc;
2689 buf_ptr = d->buf_cpu[idx];
2692 while (split_left >= d->buf_size) {
2693 memcpy(split_ptr,buf_ptr,d->buf_size);
2694 split_ptr += d->buf_size;
2695 split_left -= d->buf_size;
2696 insert_dma_buffer(d, idx);
2697 idx = (idx+1) % d->num_desc;
2698 buf_ptr = d->buf_cpu[idx];
2701 if (split_left > 0) {
2702 memcpy(split_ptr, buf_ptr, split_left);
2703 offset = split_left;
2704 buf_ptr += offset/4;
2707 DBGMSG("Single packet rcv'd");
2708 memcpy(d->spb, buf_ptr, length);
2710 buf_ptr += length/4;
2711 if (offset==d->buf_size) {
2712 insert_dma_buffer(d, idx);
2713 idx = (idx+1) % d->num_desc;
2714 buf_ptr = d->buf_cpu[idx];
2719 /* We get one phy packet to the async descriptor for each
2720 * bus reset. We always ignore it. */
2721 if (tcode != OHCI1394_TCODE_PHY) {
2722 if (!ohci->no_swap_incoming)
2723 packet_swab(d->spb, tcode);
2724 DBGMSG("Packet received from node"
2725 " %d ack=0x%02X spd=%d tcode=0x%X"
2726 " length=%d ctx=%d tlabel=%d",
2727 (d->spb[1]>>16)&0x3f,
2728 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2729 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2730 tcode, length, d->ctx,
2731 (cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f);
2733 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2736 hpsb_packet_received(ohci->host, d->spb,
2739 #ifdef OHCI1394_DEBUG
2741 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2745 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2747 bytes_left = d->buf_size - rescount - offset;
2752 d->buf_offset = offset;
2754 spin_unlock_irqrestore(&d->lock, flags);
2757 /* Bottom half that processes sent packets */
2758 static void dma_trm_tasklet (unsigned long data)
2760 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2761 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2762 struct hpsb_packet *packet, *ptmp;
2763 unsigned long flags;
2767 spin_lock_irqsave(&d->lock, flags);
2769 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2770 datasize = packet->data_size;
2771 if (datasize && packet->type != hpsb_raw)
2772 status = le32_to_cpu(
2773 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2775 status = le32_to_cpu(
2776 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2779 /* This packet hasn't been sent yet */
2782 #ifdef OHCI1394_DEBUG
2784 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2785 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2786 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2787 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2788 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2789 status&0x1f, (status>>5)&0x3,
2790 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2793 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2794 "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2795 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2796 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2797 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2798 status&0x1f, (status>>5)&0x3,
2799 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2802 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2803 "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2804 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2806 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2808 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2810 status&0x1f, (status>>5)&0x3,
2811 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2815 if (status & 0x10) {
2818 switch (status & 0x1f) {
2819 case EVT_NO_STATUS: /* that should never happen */
2820 case EVT_RESERVED_A: /* that should never happen */
2821 case EVT_LONG_PACKET: /* that should never happen */
2822 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2823 ack = ACKX_SEND_ERROR;
2825 case EVT_MISSING_ACK:
2829 ack = ACKX_SEND_ERROR;
2831 case EVT_OVERRUN: /* that should never happen */
2832 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2833 ack = ACKX_SEND_ERROR;
2835 case EVT_DESCRIPTOR_READ:
2837 case EVT_DATA_WRITE:
2838 ack = ACKX_SEND_ERROR;
2840 case EVT_BUS_RESET: /* that should never happen */
2841 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2842 ack = ACKX_SEND_ERROR;
2848 ack = ACKX_SEND_ERROR;
2850 case EVT_RESERVED_B: /* that should never happen */
2851 case EVT_RESERVED_C: /* that should never happen */
2852 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2853 ack = ACKX_SEND_ERROR;
2857 ack = ACKX_SEND_ERROR;
2860 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2861 ack = ACKX_SEND_ERROR;
2866 list_del_init(&packet->driver_list);
2867 hpsb_packet_sent(ohci->host, packet, ack);
2870 pci_unmap_single(ohci->dev,
2871 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2872 datasize, PCI_DMA_TODEVICE);
2873 OHCI_DMA_FREE("single Xmit data packet");
2876 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2880 dma_trm_flush(ohci, d);
2882 spin_unlock_irqrestore(&d->lock, flags);
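/*
 * Note (illustrative): the low five status bits decoded above follow
 * the OHCI event-code encoding -- when bit 4 (0x10) is set the value
 * is a real IEEE-1394 ack code (e.g. 0x11 == ack_complete), otherwise
 * it is an evt_* error which the switch maps onto an ACKX_* value for
 * hpsb_packet_sent().
 */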
2885 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2888 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2890 if (d->type == DMA_CTX_ISO) {
2891 /* disable interrupts */
2892 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2893 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2895 tasklet_kill(&d->task);
2901 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2904 struct ti_ohci *ohci = d->ohci;
2909 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2912 for (i=0; i<d->num_desc; i++)
2913 if (d->buf_cpu[i] && d->buf_bus[i]) {
2914 pci_free_consistent(
2915 ohci->dev, d->buf_size,
2916 d->buf_cpu[i], d->buf_bus[i]);
2917 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2923 for (i=0; i<d->num_desc; i++)
2924 if (d->prg_cpu[i] && d->prg_bus[i]) {
2925 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2926 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2928 pci_pool_destroy(d->prg_pool);
2929 OHCI_DMA_FREE("dma_rcv prg pool");
2935 /* Mark this context as freed. */
2940 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2941 enum context_type type, int ctx, int num_desc,
2942 int buf_size, int split_buf_size, int context_base)
2945 static int num_allocs;
2946 static char pool_name[20];
2952 d->num_desc = num_desc;
2953 d->buf_size = buf_size;
2954 d->split_buf_size = split_buf_size;
2960 d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_ATOMIC);
2961 d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2963 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2964 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2965 free_dma_rcv_ctx(d);
2968 memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2969 memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2971 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
2973 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2975 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2976 PRINT(KERN_ERR, "Failed to allocate dma prg");
2977 free_dma_rcv_ctx(d);
2980 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2981 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2983 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2985 if (d->spb == NULL) {
2986 PRINT(KERN_ERR, "Failed to allocate split buffer");
2987 free_dma_rcv_ctx(d);
2991 len = sprintf(pool_name, "ohci1394_rcv_prg");
2992 sprintf(pool_name+len, "%d", num_allocs);
2993 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2994 sizeof(struct dma_cmd), 4, 0);
2995 if(d->prg_pool == NULL)
2997 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2998 free_dma_rcv_ctx(d);
3003 OHCI_DMA_ALLOC("dma_rcv prg pool");
3005 for (i=0; i<d->num_desc; i++) {
3006 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3009 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3011 if (d->buf_cpu[i] != NULL) {
3012 memset(d->buf_cpu[i], 0, d->buf_size);
3015 "Failed to allocate dma buffer");
3016 free_dma_rcv_ctx(d);
3020 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3021 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3023 if (d->prg_cpu[i] != NULL) {
3024 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3027 "Failed to allocate dma prg");
3028 free_dma_rcv_ctx(d);
3033 spin_lock_init(&d->lock);
3035 if (type == DMA_CTX_ISO) {
3036 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3037 OHCI_ISO_MULTICHANNEL_RECEIVE,
3038 dma_rcv_tasklet, (unsigned long) d);
3040 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3041 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3042 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3044 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3050 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3053 struct ti_ohci *ohci = d->ohci;
3058 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3061 for (i=0; i<d->num_desc; i++)
3062 if (d->prg_cpu[i] && d->prg_bus[i]) {
3063 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3064 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3066 pci_pool_destroy(d->prg_pool);
3067 OHCI_DMA_FREE("dma_trm prg pool");
3072 /* Mark this context as freed. */
3077 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3078 enum context_type type, int ctx, int num_desc,
3082 static char pool_name[20];
3083 static int num_allocs=0;
3088 d->num_desc = num_desc;
3093 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
3095 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3097 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3098 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3099 free_dma_trm_ctx(d);
3102 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3103 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3105 len = sprintf(pool_name, "ohci1394_trm_prg");
3106 sprintf(pool_name+len, "%d", num_allocs);
3107 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3108 sizeof(struct at_dma_prg), 4, 0);
3109 if (d->prg_pool == NULL) {
3110 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3111 free_dma_trm_ctx(d);
3116 OHCI_DMA_ALLOC("dma_trm prg pool");
3118 for (i = 0; i < d->num_desc; i++) {
3119 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3120 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3122 if (d->prg_cpu[i] != NULL) {
3123 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3126 "Failed to allocate at dma prg");
3127 free_dma_trm_ctx(d);
3132 spin_lock_init(&d->lock);
3134 /* initialize tasklet */
3135 if (type == DMA_CTX_ISO) {
3136 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3137 dma_trm_tasklet, (unsigned long) d);
3138 if (ohci1394_register_iso_tasklet(ohci,
3139 &ohci->it_legacy_tasklet) < 0) {
3140 PRINT(KERN_ERR, "No IT DMA context available");
3141 free_dma_trm_ctx(d);
3145 /* IT can be assigned to any context by register_iso_tasklet */
3146 d->ctx = ohci->it_legacy_tasklet.context;
3147 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3148 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3149 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3151 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3152 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3153 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3154 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3160 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3162 struct ti_ohci *ohci = host->hostdata;
3164 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3165 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3167 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
3171 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3172 quadlet_t data, quadlet_t compare)
3174 struct ti_ohci *ohci = host->hostdata;
3177 reg_write(ohci, OHCI1394_CSRData, data);
3178 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3179 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3181 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3182 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3188 return reg_read(ohci, OHCI1394_CSRData);
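/*
 * Usage sketch (assumed caller, not taken from this file): the CSR
 * core could issue a lock request on BANDWIDTH_AVAILABLE roughly as
 *
 *     old = ohci_hw_csr_reg(host, 1, new_value, expected_old);
 *
 * The write to OHCI1394_CSRControl above selects one of the four
 * serial-bus CSRs (reg & 0x3) and starts the hardware compare-swap;
 * the loop then polls the csrDone bit (bit 31), and the pre-swap value
 * is read back from OHCI1394_CSRData.
 */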
3191 static struct hpsb_host_driver ohci1394_driver = {
3192 .owner = THIS_MODULE,
3193 .name = OHCI1394_DRIVER_NAME,
3194 .set_hw_config_rom = ohci_set_hw_config_rom,
3195 .transmit_packet = ohci_transmit,
3196 .devctl = ohci_devctl,
3197 .isoctl = ohci_isoctl,
3198 .hw_csr_reg = ohci_hw_csr_reg,
3203 /***********************************
3204 * PCI Driver Interface functions *
3205 ***********************************/
3207 #define FAIL(err, fmt, args...) \
3209 PRINT_G(KERN_ERR, fmt , ## args); \
3210 ohci1394_pci_remove(dev); \
3214 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3215 const struct pci_device_id *ent)
3217 static int version_printed = 0;
3219 struct hpsb_host *host;
3220 struct ti_ohci *ohci; /* shortcut to currently handled device */
3221 unsigned long ohci_base;
3223 if (version_printed++ == 0)
3224 PRINT_G(KERN_INFO, "%s", version);
3226 if (pci_enable_device(dev))
3227 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3228 pci_set_master(dev);
3230 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3231 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3233 ohci = host->hostdata;
3236 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3238 pci_set_drvdata(dev, ohci);
3240 /* We don't want hardware swapping */
3241 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3243 /* Some oddball Apple controllers do not order the selfid
3244 * properly, so we make up for it here. */
3245 #ifndef __LITTLE_ENDIAN
3246 /* XXX: Need a better way to check this. I'm wondering if we can
3247 * read the values of the OHCI1394_PCI_HCI_Control and the
3248 * noByteSwapData registers to see if they were not cleared to
3249 * zero. Should this work? Obviously it's not defined what these
3250 * registers will read when they aren't supported. Bleh! */
3251 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3252 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3253 ohci->no_swap_incoming = 1;
3254 ohci->selfid_swap = 0;
3256 ohci->selfid_swap = 1;
3260 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3261 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3264 /* These chipsets require a bit of extra care when checking after
3266 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3267 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3268 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3269 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3270 ohci->check_busreset = 1;
3272 /* We hardwire the MMIO length, since some CardBus adapters
3273 * fail to report the right length. Anyway, the OHCI spec
3274 * clearly says it's 2 KB, so this shouldn't be a problem. */
3275 ohci_base = pci_resource_start(dev, 0);
3276 if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
3277 PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
3278 pci_resource_len(dev, 0));
3280 /* Seems PCMCIA handles this internally. Not sure why. Seems
3281 * pretty bogus to force a driver to special case this. */
3283 if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
3284 FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
3285 ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
3287 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3289 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3290 if (ohci->registers == NULL)
3291 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3292 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3293 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3295 /* csr_config rom allocation */
3296 ohci->csr_config_rom_cpu =
3297 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3298 &ohci->csr_config_rom_bus);
3299 OHCI_DMA_ALLOC("consistent csr_config_rom");
3300 if (ohci->csr_config_rom_cpu == NULL)
3301 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3302 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3304 /* self-id dma buffer allocation */
3305 ohci->selfid_buf_cpu =
3306 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3307 &ohci->selfid_buf_bus);
3308 OHCI_DMA_ALLOC("consistent selfid_buf");
3310 if (ohci->selfid_buf_cpu == NULL)
3311 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3312 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3314 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3315 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3316 "8Kb boundary... may cause problems on some CXD3222 chip",
3317 ohci->selfid_buf_cpu);
3319 /* No self-id errors at startup */
3320 ohci->self_id_errors = 0;
3322 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3323 /* AR DMA request context allocation */
3324 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3325 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3326 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3327 OHCI1394_AsReqRcvContextBase) < 0)
3328 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3330 /* AR DMA response context allocation */
3331 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3332 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3333 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3334 OHCI1394_AsRspRcvContextBase) < 0)
3335 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3337 /* AT DMA request context */
3338 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3339 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3340 OHCI1394_AsReqTrContextBase) < 0)
3341 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3343 /* AT DMA response context */
3344 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3345 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3346 OHCI1394_AsRspTrContextBase) < 0)
3347 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3349 /* Start off with a soft reset, to clear everything to a sane
3351 ohci_soft_reset(ohci);
3353 /* Now enable LPS, which we need in order to start accessing
3354 * most of the registers. In fact, on some cards (ALI M5251),
3355 * accessing registers in the SClk domain without LPS enabled
3356 * will lock up the machine. Wait 50msec to make sure the
3357 * link is fully enabled. */
3358 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3360 /* Disable and clear interrupts */
3361 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3362 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3366 /* Determine the number of available IR and IT contexts. */
3367 ohci->nb_iso_rcv_ctx =
3368 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3369 DBGMSG("%d iso receive contexts available",
3370 ohci->nb_iso_rcv_ctx);
3372 ohci->nb_iso_xmit_ctx =
3373 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3374 DBGMSG("%d iso transmit contexts available",
3375 ohci->nb_iso_xmit_ctx);
3377 /* Set the usage bits for non-existent contexts so they can't
3379 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3380 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
3382 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3383 spin_lock_init(&ohci->iso_tasklet_list_lock);
3384 ohci->ISO_channel_usage = 0;
3385 spin_lock_init(&ohci->IR_channel_lock);
3387 /* Allocate the IR DMA context right here so we don't have
3388 * to do it in the interrupt path - note that this doesn't
3389 * waste much memory and avoids the juggling required to
3390 * allocate it in the IRQ path. */
3391 if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
3392 DMA_CTX_ISO, 0, IR_NUM_DESC,
3393 IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
3394 OHCI1394_IsoRcvContextBase) < 0) {
3395 FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
3398 /* We hopefully don't have to pre-allocate IT DMA like we did
3399 * for IR DMA above. Allocate it on-demand and mark inactive. */
3400 ohci->it_legacy_context.ohci = NULL;
3401 spin_lock_init(&ohci->event_lock);
3404 * interrupts are disabled, all right, but... due to SA_SHIRQ we
3405 * might get called anyway. We'll see no event, of course, but
3406 * we need to get to that "no event", so enough should be initialized
3409 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
3410 OHCI1394_DRIVER_NAME, ohci))
3411 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3413 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3414 ohci_initialize(ohci);
3416 /* Set certain csr values */
3417 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3418 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3419 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3420 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3421 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3423 /* Tell the highlevel this host is ready */
3424 if (hpsb_add_host(host))
3425 FAIL(-ENOMEM, "Failed to register host with highlevel");
3427 ohci->init_state = OHCI_INIT_DONE;
3433 static void ohci1394_pci_remove(struct pci_dev *pdev)
3435 struct ti_ohci *ohci;
3438 ohci = pci_get_drvdata(pdev);
3442 dev = get_device(&ohci->host->device);
3444 switch (ohci->init_state) {
3445 case OHCI_INIT_DONE:
3446 hpsb_remove_host(ohci->host);
3448 /* Clear out BUS Options */
3449 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3450 reg_write(ohci, OHCI1394_BusOptions,
3451 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3453 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3455 case OHCI_INIT_HAVE_IRQ:
3456 /* Clear interrupt registers */
3457 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3458 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3459 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3460 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3461 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3462 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3464 /* Disable IRM Contender */
3465 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3467 /* Clear link control register */
3468 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3470 /* Let all other nodes know to ignore us */
3471 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3473 /* Soft reset before we start - this disables
3474 * interrupts and clears linkEnable and LPS. */
3475 ohci_soft_reset(ohci);
3476 free_irq(ohci->dev->irq, ohci);
3478 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3479 /* The ohci_soft_reset() stops all DMA contexts, so we
3480 * don't need to do this. */
3482 free_dma_rcv_ctx(&ohci->ar_req_context);
3483 free_dma_rcv_ctx(&ohci->ar_resp_context);
3486 free_dma_trm_ctx(&ohci->at_req_context);
3487 free_dma_trm_ctx(&ohci->at_resp_context);
3490 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3493 free_dma_trm_ctx(&ohci->it_legacy_context);
3495 /* Free IR legacy dma */
3496 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3499 case OHCI_INIT_HAVE_SELFID_BUFFER:
3500 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3501 ohci->selfid_buf_cpu,
3502 ohci->selfid_buf_bus);
3503 OHCI_DMA_FREE("consistent selfid_buf");
3505 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3506 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3507 ohci->csr_config_rom_cpu,
3508 ohci->csr_config_rom_bus);
3509 OHCI_DMA_FREE("consistent csr_config_rom");
3511 case OHCI_INIT_HAVE_IOMAPPING:
3512 iounmap(ohci->registers);
3514 case OHCI_INIT_HAVE_MEM_REGION:
3516 release_mem_region(pci_resource_start(ohci->dev, 0),
3517 OHCI1394_REGISTER_SIZE);
3520 #ifdef CONFIG_PPC_PMAC
3521 /* On UniNorth, power down the cable and turn off the chip
3522 * clock when the module is removed to save power on
3523 * laptops. Turning it back ON is done by the arch code when
3524 * pci_enable_device() is called */
3526 struct device_node* of_node;
3528 of_node = pci_device_to_OF_node(ohci->dev);
3530 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3531 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3534 #endif /* CONFIG_PPC_PMAC */
3536 case OHCI_INIT_ALLOC_HOST:
3537 pci_set_drvdata(ohci->dev, NULL);
3545 static int ohci1394_pci_resume (struct pci_dev *pdev)
3547 #ifdef CONFIG_PMAC_PBOOK
3549 struct device_node *of_node;
3551 /* Re-enable 1394 */
3552 of_node = pci_device_to_OF_node (pdev);
3554 pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
3558 pci_enable_device(pdev);
3564 static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3566 #ifdef CONFIG_PMAC_PBOOK
3568 struct device_node *of_node;
3571 of_node = pci_device_to_OF_node (pdev);
3573 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3581 #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
3583 static struct pci_device_id ohci1394_pci_tbl[] = {
3585 .class = PCI_CLASS_FIREWIRE_OHCI,
3586 .class_mask = PCI_ANY_ID,
3587 .vendor = PCI_ANY_ID,
3588 .device = PCI_ANY_ID,
3589 .subvendor = PCI_ANY_ID,
3590 .subdevice = PCI_ANY_ID,
3595 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3597 static struct pci_driver ohci1394_pci_driver = {
3598 .name = OHCI1394_DRIVER_NAME,
3599 .id_table = ohci1394_pci_tbl,
3600 .probe = ohci1394_pci_probe,
3601 .remove = ohci1394_pci_remove,
3602 .resume = ohci1394_pci_resume,
3603 .suspend = ohci1394_pci_suspend,
3608 /***********************************
3609 * OHCI1394 Video Interface *
3610 ***********************************/
3612 /* Essentially the only purpose of this code is to allow another
3613 * module to hook into ohci's interrupt handler. */
3615 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3619 /* stop the channel program if it's still running */
3620 reg_write(ohci, reg, 0x8000);
3622 /* Wait until it actually stops */
3623 while (reg_read(ohci, reg) & 0x400) {
3627 "Runaway loop while stopping context: %s...", msg ? msg : "");
3634 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
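/*
 * Note (illustrative): writing 0x8000 to the ContextControlClear
 * register clears the run bit; the loop then waits for the active bit
 * (0x400) to drop, i.e. for the controller to finish the descriptor it
 * is currently working on.  A typical caller passes a short tag, e.g.
 *
 *     ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
 *
 * as done in the interrupt handler above.
 */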
3638 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3639 void (*func)(unsigned long), unsigned long data)
3641 tasklet_init(&tasklet->tasklet, func, data);
3642 tasklet->type = type;
3643 /* We init the tasklet->link field, so we can list_del() it
3644 * without worrying whether it was added to the list or not. */
3645 INIT_LIST_HEAD(&tasklet->link);
3648 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3649 struct ohci1394_iso_tasklet *tasklet)
3651 unsigned long flags, *usage;
3652 int n, i, r = -EBUSY;
3654 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3655 n = ohci->nb_iso_xmit_ctx;
3656 usage = &ohci->it_ctx_usage;
3659 n = ohci->nb_iso_rcv_ctx;
3660 usage = &ohci->ir_ctx_usage;
3662 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3663 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3664 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3670 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3672 for (i = 0; i < n; i++)
3673 if (!test_and_set_bit(i, usage)) {
3674 tasklet->context = i;
3675 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3680 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
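/*
 * Example (illustrative): the usage word is a bitmap of contexts
 * already claimed.  Since the probe path initializes it as
 * ~0 << nb_iso_rcv_ctx (every bit above the last real context
 * pre-set), the test_and_set_bit() loop above can only hand out
 * context numbers that actually exist on the chip, and does so
 * atomically.
 */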
3685 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3686 struct ohci1394_iso_tasklet *tasklet)
3688 unsigned long flags;
3690 tasklet_kill(&tasklet->tasklet);
3692 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3694 if (tasklet->type == OHCI_ISO_TRANSMIT)
3695 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3697 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3699 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3700 clear_bit(0, &ohci->ir_multichannel_used);
3704 list_del(&tasklet->link);
3706 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3709 EXPORT_SYMBOL(ohci1394_stop_context);
3710 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3711 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3712 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3715 /***********************************
3716 * General module initialization *
3717 ***********************************/
3719 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3720 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3721 MODULE_LICENSE("GPL");
3723 static void __exit ohci1394_cleanup (void)
3725 pci_unregister_driver(&ohci1394_pci_driver);
3728 static int __init ohci1394_init(void)
3730 return pci_register_driver(&ohci1394_pci_driver);
3733 module_init(ohci1394_init);
3734 module_exit(ohci1394_cleanup);