ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
[linux-2.6.git] / drivers / ieee1394 / ohci1394.c
1 /*
2  * ohci1394.c - driver for OHCI 1394 boards
3  * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4  *                        Gord Peters <GordPeters@smarttech.com>
5  *              2001      Ben Collins <bcollins@debian.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software Foundation,
19  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20  */
21
22 /*
23  * Things known to be working:
24  * . Async Request Transmit
25  * . Async Response Receive
26  * . Async Request Receive
27  * . Async Response Transmit
28  * . Iso Receive
29  * . DMA mmap for iso receive
30  * . Config ROM generation
31  *
32  * Things implemented, but still in test phase:
33  * . Iso Transmit
34  * . Async Stream Packets Transmit (Receive done via Iso interface)
35  *
36  * Things not implemented:
37  * . DMA error recovery
38  *
39  * Known bugs:
40  * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41  *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42  */
43
44 /*
45  * Acknowledgments:
46  *
47  * Adam J Richter <adam@yggdrasil.com>
48  *  . Use of pci_class to find device
49  *
50  * Emilie Chung <emilie.chung@axis.com>
51  *  . Tip on Async Request Filter
52  *
53  * Pascal Drolet <pascal.drolet@informission.ca>
54  *  . Various tips for optimization and functionnalities
55  *
56  * Robert Ficklin <rficklin@westengineering.com>
57  *  . Loop in irq_handler
58  *
59  * James Goodwin <jamesg@Filanet.com>
60  *  . Various tips on initialization, self-id reception, etc.
61  *
62  * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63  *  . Apple PowerBook detection
64  *
65  * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66  *  . Reset the board properly before leaving + misc cleanups
67  *
68  * Leon van Stuivenberg <leonvs@iae.nl>
69  *  . Bug fixes
70  *
71  * Ben Collins <bcollins@debian.org>
72  *  . Working big-endian support
73  *  . Updated to 2.4.x module scheme (PCI aswell)
74  *  . Config ROM generation
75  *
76  * Manfred Weihs <weihs@ict.tuwien.ac.at>
77  *  . Reworked code for initiating bus resets
78  *    (long, short, with or without hold-off)
79  *
80  * Nandu Santhi <contactnandu@users.sourceforge.net>
81  *  . Added support for nVidia nForce2 onboard Firewire chipset
82  *
83  */
84
85 #include <linux/config.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/slab.h>
89 #include <linux/interrupt.h>
90 #include <linux/wait.h>
91 #include <linux/errno.h>
92 #include <linux/module.h>
93 #include <linux/moduleparam.h>
94 #include <linux/pci.h>
95 #include <linux/fs.h>
96 #include <linux/poll.h>
97 #include <linux/irq.h>
98 #include <asm/byteorder.h>
99 #include <asm/atomic.h>
100 #include <asm/uaccess.h>
101 #include <linux/delay.h>
102 #include <linux/spinlock.h>
103
104 #include <asm/pgtable.h>
105 #include <asm/page.h>
106 #include <linux/sched.h>
107 #include <linux/types.h>
108 #include <linux/vmalloc.h>
109 #include <linux/init.h>
110
111 #ifdef CONFIG_PPC_PMAC
112 #include <asm/machdep.h>
113 #include <asm/pmac_feature.h>
114 #include <asm/prom.h>
115 #include <asm/pci-bridge.h>
116 #endif
117
118 #include "csr1212.h"
119 #include "ieee1394.h"
120 #include "ieee1394_types.h"
121 #include "hosts.h"
122 #include "dma.h"
123 #include "iso.h"
124 #include "ieee1394_core.h"
125 #include "highlevel.h"
126 #include "ohci1394.h"
127
128 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
129 #define OHCI1394_DEBUG
130 #endif
131
132 #ifdef DBGMSG
133 #undef DBGMSG
134 #endif
135
136 #ifdef OHCI1394_DEBUG
137 #define DBGMSG(fmt, args...) \
138 printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
139 #else
140 #define DBGMSG(fmt, args...)
141 #endif
142
143 #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
144 #define OHCI_DMA_ALLOC(fmt, args...) \
145         HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
146                 ++global_outstanding_dmas, ## args)
147 #define OHCI_DMA_FREE(fmt, args...) \
148         HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
149                 --global_outstanding_dmas, ## args)
150 static int global_outstanding_dmas = 0;
151 #else
152 #define OHCI_DMA_ALLOC(fmt, args...)
153 #define OHCI_DMA_FREE(fmt, args...)
154 #endif
155
156 /* print general (card independent) information */
157 #define PRINT_G(level, fmt, args...) \
158 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
159
160 /* print card specific information */
161 #define PRINT(level, fmt, args...) \
162 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
163
164 static char version[] __devinitdata =
165         "$Rev: 1203 $ Ben Collins <bcollins@debian.org>";
166
167 /* Module Parameters */
168 static int phys_dma = 1;
169 module_param(phys_dma, int, 0644);
170 MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
171
172 static void dma_trm_tasklet(unsigned long data);
173 static void dma_trm_reset(struct dma_trm_ctx *d);
174
175 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
176                              enum context_type type, int ctx, int num_desc,
177                              int buf_size, int split_buf_size, int context_base);
178 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
179 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
180
181 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
182                              enum context_type type, int ctx, int num_desc,
183                              int context_base);
184
185 static void ohci1394_pci_remove(struct pci_dev *pdev);
186
187 #ifndef __LITTLE_ENDIAN
188 static unsigned hdr_sizes[] =
189 {
190         3,      /* TCODE_WRITEQ */
191         4,      /* TCODE_WRITEB */
192         3,      /* TCODE_WRITE_RESPONSE */
193         0,      /* ??? */
194         3,      /* TCODE_READQ */
195         4,      /* TCODE_READB */
196         3,      /* TCODE_READQ_RESPONSE */
197         4,      /* TCODE_READB_RESPONSE */
198         1,      /* TCODE_CYCLE_START (???) */
199         4,      /* TCODE_LOCK_REQUEST */
200         2,      /* TCODE_ISO_DATA */
201         4,      /* TCODE_LOCK_RESPONSE */
202 };
203
204 /* Swap headers */
205 static inline void packet_swab(quadlet_t *data, int tcode)
206 {
207         size_t size = hdr_sizes[tcode];
208
209         if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
210                 return;
211
212         while (size--)
213                 data[size] = swab32(data[size]);
214 }
215 #else
216 /* Don't waste cycles on same sex byte swaps */
217 #define packet_swab(w,x)
218 #endif /* !LITTLE_ENDIAN */
219
220 /***********************************
221  * IEEE-1394 functionality section *
222  ***********************************/
223
224 static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
225 {
226         int i;
227         unsigned long flags;
228         quadlet_t r;
229
230         spin_lock_irqsave (&ohci->phy_reg_lock, flags);
231
232         reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
233
234         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
235                 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
236                         break;
237
238                 mdelay(1);
239         }
240
241         r = reg_read(ohci, OHCI1394_PhyControl);
242
243         if (i >= OHCI_LOOP_COUNT)
244                 PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
245                        r, r & 0x80000000, i);
246
247         spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
248
249         return (r & 0x00ff0000) >> 16;
250 }
251
252 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
253 {
254         int i;
255         unsigned long flags;
256         u32 r = 0;
257
258         spin_lock_irqsave (&ohci->phy_reg_lock, flags);
259
260         reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
261
262         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
263                 r = reg_read(ohci, OHCI1394_PhyControl);
264                 if (!(r & 0x00004000))
265                         break;
266
267                 mdelay(1);
268         }
269
270         if (i == OHCI_LOOP_COUNT)
271                 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
272                        r, r & 0x00004000, i);
273
274         spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
275
276         return;
277 }
278
279 /* Or's our value into the current value */
280 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
281 {
282         u8 old;
283
284         old = get_phy_reg (ohci, addr);
285         old |= data;
286         set_phy_reg (ohci, addr, old);
287
288         return;
289 }
290
291 static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
292                                 int phyid, int isroot)
293 {
294         quadlet_t *q = ohci->selfid_buf_cpu;
295         quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
296         size_t size;
297         quadlet_t q0, q1;
298
299         /* Check status of self-id reception */
300
301         if (ohci->selfid_swap)
302                 q0 = le32_to_cpu(q[0]);
303         else
304                 q0 = q[0];
305
306         if ((self_id_count & 0x80000000) ||
307             ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
308                 PRINT(KERN_ERR,
309                       "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
310                       self_id_count, q0, ohci->self_id_errors);
311
312                 /* Tip by James Goodwin <jamesg@Filanet.com>:
313                  * We had an error, generate another bus reset in response.  */
314                 if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
315                         set_phy_reg_mask (ohci, 1, 0x40);
316                         ohci->self_id_errors++;
317                 } else {
318                         PRINT(KERN_ERR,
319                               "Too many errors on SelfID error reception, giving up!");
320                 }
321                 return;
322         }
323
324         /* SelfID Ok, reset error counter. */
325         ohci->self_id_errors = 0;
326
327         size = ((self_id_count & 0x00001FFC) >> 2) - 1;
328         q++;
329
330         while (size > 0) {
331                 if (ohci->selfid_swap) {
332                         q0 = le32_to_cpu(q[0]);
333                         q1 = le32_to_cpu(q[1]);
334                 } else {
335                         q0 = q[0];
336                         q1 = q[1];
337                 }
338
339                 if (q0 == ~q1) {
340                         DBGMSG ("SelfID packet 0x%x received", q0);
341                         hpsb_selfid_received(host, cpu_to_be32(q0));
342                         if (((q0 & 0x3f000000) >> 24) == phyid)
343                                 DBGMSG ("SelfID for this node is 0x%08x", q0);
344                 } else {
345                         PRINT(KERN_ERR,
346                               "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
347                 }
348                 q += 2;
349                 size -= 2;
350         }
351
352         DBGMSG("SelfID complete");
353
354         return;
355 }
356
357 static void ohci_soft_reset(struct ti_ohci *ohci) {
358         int i;
359
360         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
361
362         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
363                 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
364                         break;
365                 mdelay(1);
366         }
367         DBGMSG ("Soft reset finished");
368 }
369
370
/* Generate the dma receive prgs and start the context.
 *
 * Builds a circular ring of INPUT_MORE descriptors (one per receive
 * buffer), points the controller's command pointer at the first one
 * and sets the RUN bit.  When @generate_irq is non-zero, every
 * descriptor requests an interrupt on completion.  For DMA_CTX_ISO
 * contexts the IR context-control and match registers are programmed
 * as well. */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	/* Make sure the context is halted before rewriting its program. */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			/* Chain to the next descriptor; the low bit is the
			 * descriptor-block count (Z) telling the controller
			 * there is a valid block to prefetch. */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			/* Last descriptor branches back to the first with
			 * Z=0, so the context stalls there until software
			 * re-arms it. */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		/* resCount starts at the full buffer size (nothing
		 * received yet); hardware decrements it as data lands. */
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	/* Software read position: start at the first buffer, offset 0. */
	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
430
431 /* Initialize the dma transmit context */
432 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
433 {
434         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
435
436         /* Stop the context */
437         ohci1394_stop_context(ohci, d->ctrlClear, NULL);
438
439         d->prg_ind = 0;
440         d->sent_ind = 0;
441         d->free_prgs = d->num_desc;
442         d->branchAddrPtr = NULL;
443         INIT_LIST_HEAD(&d->fifo_list);
444         INIT_LIST_HEAD(&d->pending_list);
445
446         if (d->type == DMA_CTX_ISO) {
447                 /* enable interrupts */
448                 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
449         }
450
451         DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
452 }
453
454 /* Count the number of available iso contexts */
455 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
456 {
457         int i,ctx=0;
458         u32 tmp;
459
460         reg_write(ohci, reg, 0xffffffff);
461         tmp = reg_read(ohci, reg);
462
463         DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
464
465         /* Count the number of contexts */
466         for (i=0; i<32; i++) {
467                 if (tmp & 1) ctx++;
468                 tmp >>= 1;
469         }
470         return ctx;
471 }
472
/* Global initialization.
 *
 * Brings the controller to an operational state: programs bus options
 * and the node ID, enables the link side, sets up the self-ID buffer
 * and config ROM mapping, initializes all four async DMA contexts,
 * unmasks interrupts and enables the link.  Finally it walks the PHY
 * ports to enable any connected port and sanity-checks the max packet
 * size read from the serial EEPROM.
 *
 * NOTE(review): the register-write ordering here follows the hardware
 * bring-up sequence; do not reorder without consulting the OHCI-1394
 * specification. */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);
	spin_lock_init(&ohci->event_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0xE0000000; /* Enable IRMC, CMC and ISC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets. */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	set_phy_reg_mask(ohci, 4, 0xc0);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size: BusOptions bits 12-15 hold
	 * max_rec, and max packet size is 2^(max_rec+1). */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/*
	 * Accept AT requests from all nodes. This probably
	 * will have to be controlled from the subsystem
	 * on a per node basis.
	 */
	reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	/* Report version, IRQ, MMIO window and max packet size.
	 * (sparc IRQ numbers need __irq_itoa for printing.) */
	buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
	sprintf (irq_buf, "%d", ohci->dev->irq);
#else
	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s]  "
	      "MMIO=[%lx-%lx]  Max Packet=[%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		/* PHY register 7 selects the port; register 8 then holds
		 * that port's status. */
		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		/* Bit 0x20 is the connected bit; clearing bit 0 enables
		 * the port.  NOTE(review): bit meanings per PHY register
		 * layout — confirm against the 1394a PHY spec. */
		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
                      "attempting to setting max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		/* Wait (bounded) for the GUID ROM reset to complete. */
		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		/* Dump the first 0x20 EEPROM bytes for the bug report. */
		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
631
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 *
 * Builds the OUTPUT_MORE/OUTPUT_LAST descriptor program for @packet in
 * context @d at the current prg_ind slot, chains it onto the previous
 * program via *branchAddrPtr, maps the payload for DMA, and queues the
 * packet on d->fifo_list.  Three packet shapes are handled: async/raw
 * quadlet-only, async/raw with a data block, and iso transmit.
 *
 * NOTE(review): callers appear to hold d->lock (see dma_trm_flush's
 * locking contract) — confirm before adding new call sites.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		/* Timestamp format: 3-bit cycleSeconds (current + 1,
		 * mod 8) in bits 13-15, 13-bit cycleCount in bits 0-12,
		 * both extracted from the isochronous cycle timer. */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else 
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			/* Raw (PHY) packet: synthesized first quadlet with
			 * the PHY tcode, then the two caller-supplied
			 * header quadlets. */
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			/* First quadlet: speed code in the upper half,
			 * low half of header[0] (tcode etc.) below. */
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			/* Byte-swap the header on big-endian hosts. */
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			/* Immediate descriptor carries the header: 8 bytes
			 * for stream packets, 16 for normal async. */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			/* Map the payload for device reads; unmapped again
			 * after transmission completes (elsewhere). */
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			/* Chain the previous program's branch pointer to
			 * this program (Z=3: two-descriptor block). */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			/* Raw packets carry an extra trailing quadlet. */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			/* Chain from the previous program (Z=2:
			 * single-descriptor block). */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		/* 8-byte immediate iso header, then the payload block. */
		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
				packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	/* One descriptor program consumed. */
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	/* Advance the ring index for the next insertion. */
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
810
/*
 * This function fills the FIFO with any pending packets
 * and runs or wakes up the DMA program if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;	/* first DMA program slot we will fill */
	int z = 0;		/* Z (descriptor count) of the first inserted packet */

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only: a packet with a data payload
		 * uses 3 descriptors, a header-only packet uses 2 */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet: move it from the pending list into the
		 * hardware FIFO (insert_packet queues it on d->fifo_list) */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		/* Context not running: point CommandPtr at the first program
		 * we just built (low 4 bits carry the Z value) and start it */
		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 (63 is the
		 * broadcast node number, never a valid local ID) */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);	/* set RUN */
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition: the context may
		 * have gone idle between the read above and now */
		reg_write(ohci, d->ctrlSet, 0x1000);	/* set WAKE */
	}

	return;
}
865
866 /* Transmission of an async or iso packet */
867 static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
868 {
869         struct ti_ohci *ohci = host->hostdata;
870         struct dma_trm_ctx *d;
871         unsigned long flags;
872
873         if (packet->data_size > ohci->max_packet_size) {
874                 PRINT(KERN_ERR,
875                       "Transmit packet size %Zd is too big",
876                       packet->data_size);
877                 return -EOVERFLOW;
878         }
879
880         /* Decide whether we have an iso, a request, or a response packet */
881         if (packet->type == hpsb_raw)
882                 d = &ohci->at_req_context;
883         else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
884                 /* The legacy IT DMA context is initialized on first
885                  * use.  However, the alloc cannot be run from
886                  * interrupt context, so we bail out if that is the
887                  * case. I don't see anyone sending ISO packets from
888                  * interrupt context anyway... */
889
890                 if (ohci->it_legacy_context.ohci == NULL) {
891                         if (in_interrupt()) {
892                                 PRINT(KERN_ERR,
893                                       "legacy IT context cannot be initialized during interrupt");
894                                 return -EINVAL;
895                         }
896
897                         if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
898                                               DMA_CTX_ISO, 0, IT_NUM_DESC,
899                                               OHCI1394_IsoXmitContextBase) < 0) {
900                                 PRINT(KERN_ERR,
901                                       "error initializing legacy IT context");
902                                 return -ENOMEM;
903                         }
904
905                         initialize_dma_trm_ctx(&ohci->it_legacy_context);
906                 }
907
908                 d = &ohci->it_legacy_context;
909         } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
910                 d = &ohci->at_resp_context;
911         else
912                 d = &ohci->at_req_context;
913
914         spin_lock_irqsave(&d->lock,flags);
915
916         list_add_tail(&packet->driver_list, &d->pending_list);
917
918         dma_trm_flush(ohci, d);
919
920         spin_unlock_irqrestore(&d->lock,flags);
921
922         return 0;
923 }
924
925 static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
926 {
927         struct ti_ohci *ohci = host->hostdata;
928         int retval = 0;
929         unsigned long flags;
930         int phy_reg;
931
932         switch (cmd) {
933         case RESET_BUS:
934                 switch (arg) {
935                 case SHORT_RESET:
936                         phy_reg = get_phy_reg(ohci, 5);
937                         phy_reg |= 0x40;
938                         set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
939                         break;
940                 case LONG_RESET:
941                         phy_reg = get_phy_reg(ohci, 1);
942                         phy_reg |= 0x40;
943                         set_phy_reg(ohci, 1, phy_reg); /* set IBR */
944                         break;
945                 case SHORT_RESET_NO_FORCE_ROOT:
946                         phy_reg = get_phy_reg(ohci, 1);
947                         if (phy_reg & 0x80) {
948                                 phy_reg &= ~0x80;
949                                 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
950                         }
951
952                         phy_reg = get_phy_reg(ohci, 5);
953                         phy_reg |= 0x40;
954                         set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
955                         break;
956                 case LONG_RESET_NO_FORCE_ROOT:
957                         phy_reg = get_phy_reg(ohci, 1);
958                         phy_reg &= ~0x80;
959                         phy_reg |= 0x40;
960                         set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
961                         break;
962                 case SHORT_RESET_FORCE_ROOT:
963                         phy_reg = get_phy_reg(ohci, 1);
964                         if (!(phy_reg & 0x80)) {
965                                 phy_reg |= 0x80;
966                                 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
967                         }
968
969                         phy_reg = get_phy_reg(ohci, 5);
970                         phy_reg |= 0x40;
971                         set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
972                         break;
973                 case LONG_RESET_FORCE_ROOT:
974                         phy_reg = get_phy_reg(ohci, 1);
975                         phy_reg |= 0xc0;
976                         set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
977                         break;
978                 default:
979                         retval = -1;
980                 }
981                 break;
982
983         case GET_CYCLE_COUNTER:
984                 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
985                 break;
986
987         case SET_CYCLE_COUNTER:
988                 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
989                 break;
990
991         case SET_BUS_ID:
992                 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
993                 break;
994
995         case ACT_CYCLE_MASTER:
996                 if (arg) {
997                         /* check if we are root and other nodes are present */
998                         u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
999                         if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1000                                 /*
1001                                  * enable cycleTimer, cycleMaster
1002                                  */
1003                                 DBGMSG("Cycle master enabled");
1004                                 reg_write(ohci, OHCI1394_LinkControlSet,
1005                                           OHCI1394_LinkControl_CycleTimerEnable |
1006                                           OHCI1394_LinkControl_CycleMaster);
1007                         }
1008                 } else {
1009                         /* disable cycleTimer, cycleMaster, cycleSource */
1010                         reg_write(ohci, OHCI1394_LinkControlClear,
1011                                   OHCI1394_LinkControl_CycleTimerEnable |
1012                                   OHCI1394_LinkControl_CycleMaster |
1013                                   OHCI1394_LinkControl_CycleSource);
1014                 }
1015                 break;
1016
1017         case CANCEL_REQUESTS:
1018                 DBGMSG("Cancel request received");
1019                 dma_trm_reset(&ohci->at_req_context);
1020                 dma_trm_reset(&ohci->at_resp_context);
1021                 break;
1022
1023         case ISO_LISTEN_CHANNEL:
1024         {
1025                 u64 mask;
1026
1027                 if (arg<0 || arg>63) {
1028                         PRINT(KERN_ERR,
1029                               "%s: IS0 listen channel %d is out of range",
1030                               __FUNCTION__, arg);
1031                         return -EFAULT;
1032                 }
1033
1034                 /* activate the legacy IR context */
1035                 if (ohci->ir_legacy_context.ohci == NULL) {
1036                         if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
1037                                               DMA_CTX_ISO, 0, IR_NUM_DESC,
1038                                               IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
1039                                               OHCI1394_IsoRcvContextBase) < 0) {
1040                                 PRINT(KERN_ERR, "%s: failed to allocate an IR context",
1041                                       __FUNCTION__);
1042                                 return -ENOMEM;
1043                         }
1044                         ohci->ir_legacy_channels = 0;
1045                         initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1046
1047                         DBGMSG("ISO receive legacy context activated");
1048                 }
1049
1050                 mask = (u64)0x1<<arg;
1051
1052                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1053
1054                 if (ohci->ISO_channel_usage & mask) {
1055                         PRINT(KERN_ERR,
1056                               "%s: IS0 listen channel %d is already used",
1057                               __FUNCTION__, arg);
1058                         spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1059                         return -EFAULT;
1060                 }
1061
1062                 ohci->ISO_channel_usage |= mask;
1063                 ohci->ir_legacy_channels |= mask;
1064
1065                 if (arg>31)
1066                         reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1067                                   1<<(arg-32));
1068                 else
1069                         reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1070                                   1<<arg);
1071
1072                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1073                 DBGMSG("Listening enabled on channel %d", arg);
1074                 break;
1075         }
1076         case ISO_UNLISTEN_CHANNEL:
1077         {
1078                 u64 mask;
1079
1080                 if (arg<0 || arg>63) {
1081                         PRINT(KERN_ERR,
1082                               "%s: IS0 unlisten channel %d is out of range",
1083                               __FUNCTION__, arg);
1084                         return -EFAULT;
1085                 }
1086
1087                 mask = (u64)0x1<<arg;
1088
1089                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1090
1091                 if (!(ohci->ISO_channel_usage & mask)) {
1092                         PRINT(KERN_ERR,
1093                               "%s: IS0 unlisten channel %d is not used",
1094                               __FUNCTION__, arg);
1095                         spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1096                         return -EFAULT;
1097                 }
1098
1099                 ohci->ISO_channel_usage &= ~mask;
1100                 ohci->ir_legacy_channels &= ~mask;
1101
1102                 if (arg>31)
1103                         reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1104                                   1<<(arg-32));
1105                 else
1106                         reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1107                                   1<<arg);
1108
1109                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1110                 DBGMSG("Listening disabled on channel %d", arg);
1111
1112                 if (ohci->ir_legacy_channels == 0) {
1113                         stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1114                         free_dma_rcv_ctx(&ohci->ir_legacy_context);
1115                         DBGMSG("ISO receive legacy context deactivated");
1116                 }
1117                 break;
1118         }
1119         default:
1120                 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1121                         cmd);
1122                 break;
1123         }
1124         return retval;
1125 }
1126
1127 /***********************************
1128  * rawiso ISO reception            *
1129  ***********************************/
1130
1131 /*
1132   We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1133   buffer is split into "blocks" (regions described by one DMA
1134   descriptor). Each block must be one page or less in size, and
1135   must not cross a page boundary.
1136
1137   There is one little wrinkle with buffer-fill mode: a packet that
1138   starts in the final block may wrap around into the first block. But
1139   the user API expects all packets to be contiguous. Our solution is
1140   to keep the very last page of the DMA buffer in reserve - if a
1141   packet spans the gap, we copy its tail into this page.
1142 */
1143
/* Per-context state for rawiso ISO reception (hangs off hpsb_iso->hostdata). */
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	/* bottom half that drains completed DMA blocks */
	struct ohci1394_iso_tasklet task;
	int task_active;	/* nonzero once the tasklet is registered */

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks (bytes per block; one page in
	   buffer-fill mode, a power of two <= PAGE_SIZE otherwise) */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control register offsets (32 bytes per context) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1185
1186 static void ohci_iso_recv_task(unsigned long data);
1187 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1188 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1189 static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1190 static void ohci_iso_recv_program(struct hpsb_iso *iso);
1191
1192 static int ohci_iso_recv_init(struct hpsb_iso *iso)
1193 {
1194         struct ti_ohci *ohci = iso->host->hostdata;
1195         struct ohci_iso_recv *recv;
1196         int ctx;
1197         int ret = -ENOMEM;
1198
1199         recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
1200         if (!recv)
1201                 return -ENOMEM;
1202
1203         iso->hostdata = recv;
1204         recv->ohci = ohci;
1205         recv->task_active = 0;
1206         dma_prog_region_init(&recv->prog);
1207         recv->block = NULL;
1208
1209         /* use buffer-fill mode, unless irq_interval is 1
1210            (note: multichannel requires buffer-fill) */
1211
1212         if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
1213              iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
1214                 recv->dma_mode = PACKET_PER_BUFFER_MODE;
1215         } else {
1216                 recv->dma_mode = BUFFER_FILL_MODE;
1217         }
1218
1219         /* set nblocks, buf_stride, block_irq_interval */
1220
1221         if (recv->dma_mode == BUFFER_FILL_MODE) {
1222                 recv->buf_stride = PAGE_SIZE;
1223
1224                 /* one block per page of data in the DMA buffer, minus the final guard page */
1225                 recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1226                 if (recv->nblocks < 3) {
1227                         DBGMSG("ohci_iso_recv_init: DMA buffer too small");
1228                         goto err;
1229                 }
1230
1231                 /* iso->irq_interval is in packets - translate that to blocks */
1232                 if (iso->irq_interval == 1)
1233                         recv->block_irq_interval = 1;
1234                 else
1235                         recv->block_irq_interval = iso->irq_interval *
1236                                                         ((recv->nblocks+1)/iso->buf_packets);
1237                 if (recv->block_irq_interval*4 > recv->nblocks)
1238                         recv->block_irq_interval = recv->nblocks/4;
1239                 if (recv->block_irq_interval < 1)
1240                         recv->block_irq_interval = 1;
1241
1242         } else {
1243                 int max_packet_size;
1244
1245                 recv->nblocks = iso->buf_packets;
1246                 recv->block_irq_interval = iso->irq_interval;
1247                 if (recv->block_irq_interval * 4 > iso->buf_packets)
1248                         recv->block_irq_interval = iso->buf_packets / 4;
1249                 if (recv->block_irq_interval < 1)
1250                 recv->block_irq_interval = 1;
1251
1252                 /* choose a buffer stride */
1253                 /* must be a power of 2, and <= PAGE_SIZE */
1254
1255                 max_packet_size = iso->buf_size / iso->buf_packets;
1256
1257                 for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1258                     recv->buf_stride *= 2);
1259
1260                 if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1261                    recv->buf_stride > PAGE_SIZE) {
1262                         /* this shouldn't happen, but anyway... */
1263                         DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
1264                         goto err;
1265                 }
1266         }
1267
1268         recv->block_reader = 0;
1269         recv->released_bytes = 0;
1270         recv->block_dma = 0;
1271         recv->dma_offset = 0;
1272
1273         /* size of DMA program = one descriptor per block */
1274         if (dma_prog_region_alloc(&recv->prog,
1275                                  sizeof(struct dma_cmd) * recv->nblocks,
1276                                  recv->ohci->dev))
1277                 goto err;
1278
1279         recv->block = (struct dma_cmd*) recv->prog.kvirt;
1280
1281         ohci1394_init_iso_tasklet(&recv->task,
1282                                   iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1283                                                        OHCI_ISO_RECEIVE,
1284                                   ohci_iso_recv_task, (unsigned long) iso);
1285
1286         if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
1287                 goto err;
1288
1289         recv->task_active = 1;
1290
1291         /* recv context registers are spaced 32 bytes apart */
1292         ctx = recv->task.context;
1293         recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1294         recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1295         recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1296         recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1297
1298         if (iso->channel == -1) {
1299                 /* clear multi-channel selection mask */
1300                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1301                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1302         }
1303
1304         /* write the DMA program */
1305         ohci_iso_recv_program(iso);
1306
1307         DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1308                " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1309                recv->dma_mode == BUFFER_FILL_MODE ?
1310                "buffer-fill" : "packet-per-buffer",
1311                iso->buf_size/PAGE_SIZE, iso->buf_size,
1312                recv->nblocks, recv->buf_stride, recv->block_irq_interval);
1313
1314         return 0;
1315
1316 err:
1317         ohci_iso_recv_shutdown(iso);
1318         return ret;
1319 }
1320
1321 static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1322 {
1323         struct ohci_iso_recv *recv = iso->hostdata;
1324
1325         /* disable interrupts */
1326         reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1327
1328         /* halt DMA */
1329         ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1330 }
1331
1332 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1333 {
1334         struct ohci_iso_recv *recv = iso->hostdata;
1335
1336         if (recv->task_active) {
1337                 ohci_iso_recv_stop(iso);
1338                 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1339                 recv->task_active = 0;
1340         }
1341
1342         dma_prog_region_free(&recv->prog);
1343         kfree(recv);
1344         iso->hostdata = NULL;
1345 }
1346
1347 /* set up a "gapped" ring buffer DMA program */
1348 static void ohci_iso_recv_program(struct hpsb_iso *iso)
1349 {
1350         struct ohci_iso_recv *recv = iso->hostdata;
1351         int blk;
1352
1353         /* address of 'branch' field in previous DMA descriptor */
1354         u32 *prev_branch = NULL;
1355
1356         for (blk = 0; blk < recv->nblocks; blk++) {
1357                 u32 control;
1358
1359                 /* the DMA descriptor */
1360                 struct dma_cmd *cmd = &recv->block[blk];
1361
1362                 /* offset of the DMA descriptor relative to the DMA prog buffer */
1363                 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1364
1365                 /* offset of this packet's data within the DMA buffer */
1366                 unsigned long buf_offset = blk * recv->buf_stride;
1367
1368                 if (recv->dma_mode == BUFFER_FILL_MODE) {
1369                         control = 2 << 28; /* INPUT_MORE */
1370                 } else {
1371                         control = 3 << 28; /* INPUT_LAST */
1372                 }
1373
1374                 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1375
1376                 /* interrupt on last block, and at intervals */
1377                 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1378                         control |= 3 << 20; /* want interrupt */
1379                 }
1380
1381                 control |= 3 << 18; /* enable branch to address */
1382                 control |= recv->buf_stride;
1383
1384                 cmd->control = cpu_to_le32(control);
1385                 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1386                 cmd->branchAddress = 0; /* filled in on next loop */
1387                 cmd->status = cpu_to_le32(recv->buf_stride);
1388
1389                 /* link the previous descriptor to this one */
1390                 if (prev_branch) {
1391                         *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1392                 }
1393
1394                 prev_branch = &cmd->branchAddress;
1395         }
1396
1397         /* the final descriptor's branch address and Z should be left at 0 */
1398 }
1399
1400 /* listen or unlisten to a specific channel (multi-channel mode only) */
1401 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1402 {
1403         struct ohci_iso_recv *recv = iso->hostdata;
1404         int reg, i;
1405
1406         if (channel < 32) {
1407                 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1408                 i = channel;
1409         } else {
1410                 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1411                 i = channel - 32;
1412         }
1413
1414         reg_write(recv->ohci, reg, (1 << i));
1415
1416         /* issue a dummy read to force all PCI writes to be posted immediately */
1417         mb();
1418         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1419 }
1420
1421 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1422 {
1423         struct ohci_iso_recv *recv = iso->hostdata;
1424         int i;
1425
1426         for (i = 0; i < 64; i++) {
1427                 if (mask & (1ULL << i)) {
1428                         if (i < 32)
1429                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1430                         else
1431                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1432                 } else {
1433                         if (i < 32)
1434                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1435                         else
1436                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1437                 }
1438         }
1439
1440         /* issue a dummy read to force all PCI writes to be posted immediately */
1441         mb();
1442         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1443 }
1444
/* Start ISO reception on this context.
 *
 * 'cycle' = -1 starts immediately, otherwise waits for that bus cycle;
 * 'tag_mask' selects which iso tag values to accept; 'sync' = -1 means
 * no sync match, otherwise DMA waits for a packet with that sync field.
 * Returns 0 on success, -1 if the context failed to enter RUN. */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	/* NOTE(review): 'ohci' is presumably referenced by the PRINT macro
	   below — confirm against the macro definition */
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	/* start from a clean ContextControl */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
1535
/* Return 'block' to the DMA ring once the reader is done with it.
 *
 * The block becomes the new tail of the descriptor chain: its branch is
 * cleared so DMA stalls after it, and the previous descriptor is linked
 * to it so the hardware can continue into the freed space. */
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	/* the ring wraps: block 0's predecessor is the last block */
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	/* reset resCount so the whole block is available again */
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	/* descriptors must be visible to the device before we poke it */
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}
1569
1570 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1571                                              struct hpsb_iso_packet_info *info)
1572 {
1573         int len;
1574
1575         /* release the memory where the packet was */
1576         len = info->len;
1577
1578         /* add the wasted space for padding to 4 bytes */
1579         if (len % 4)
1580                 len += 4 - (len % 4);
1581
1582         /* add 8 bytes for the OHCI DMA data format overhead */
1583         len += 8;
1584
1585         recv->released_bytes += len;
1586
1587         /* have we released enough memory for one block? */
1588         while (recv->released_bytes > recv->buf_stride) {
1589                 ohci_iso_recv_release_block(recv, recv->block_reader);
1590                 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1591                 recv->released_bytes -= recv->buf_stride;
1592         }
1593 }
1594
1595 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1596 {
1597         struct ohci_iso_recv *recv = iso->hostdata;
1598         if (recv->dma_mode == BUFFER_FILL_MODE) {
1599                 ohci_iso_recv_bufferfill_release(recv, info);
1600         } else {
1601                 ohci_iso_recv_release_block(recv, info - iso->infos);
1602         }
1603 }
1604
/* parse all packets from blocks that have been fully received
 *
 * Walks the receive ring from recv->dma_offset up to (but not into) the
 * block the controller is currently filling, handing each packet to
 * hpsb_iso_packet_received().  Packets that wrap past the end of the ring
 * are made contiguous by copying their tail into the guard page. */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		/* first quadlet: dataLength in bytes 2-3 (little-endian) */
		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		/* 4096 is the maximum iso payload; a larger value means the
		   parser has lost sync with the DMA stream */
		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0  && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp from the trailing xferStatus/timeStamp quadlet;
		   low 13 bits are the cycle count */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
	}

	/* wake any reader only if at least one packet was delivered */
	if (wake)
		hpsb_iso_wake(iso);
}
1710
1711 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1712 {
1713         int loop;
1714         struct ti_ohci *ohci = recv->ohci;
1715
1716         /* loop over all blocks */
1717         for (loop = 0; loop < recv->nblocks; loop++) {
1718
1719                 /* check block_dma to see if it's done */
1720                 struct dma_cmd *im = &recv->block[recv->block_dma];
1721
1722                 /* check the DMA descriptor for new writes to xferStatus */
1723                 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1724
1725                 /* rescount is the number of bytes *remaining to be written* in the block */
1726                 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1727
1728                 unsigned char event = xferstatus & 0x1F;
1729
1730                 if (!event) {
1731                         /* nothing has happened to this block yet */
1732                         break;
1733                 }
1734
1735                 if (event != 0x11) {
1736                         atomic_inc(&iso->overflows);
1737                         PRINT(KERN_ERR,
1738                               "IR DMA error - OHCI error code 0x%02x\n", event);
1739                 }
1740
1741                 if (rescount != 0) {
1742                         /* the card is still writing to this block;
1743                            we can't touch it until it's done */
1744                         break;
1745                 }
1746
1747                 /* OK, the block is finished... */
1748
1749                 /* sync our view of the block */
1750                 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1751
1752                 /* reset the DMA descriptor */
1753                 im->status = recv->buf_stride;
1754
1755                 /* advance block_dma */
1756                 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1757
1758                 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1759                         atomic_inc(&iso->overflows);
1760                         DBGMSG("ISO reception overflow - "
1761                                "ran out of DMA blocks");
1762                 }
1763         }
1764
1765         /* parse any packets that have arrived */
1766         ohci_iso_recv_bufferfill_parse(iso, recv);
1767 }
1768
1769 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1770 {
1771         int count;
1772         int wake = 0;
1773         struct ti_ohci *ohci = recv->ohci;
1774
1775         /* loop over the entire buffer */
1776         for (count = 0; count < recv->nblocks; count++) {
1777                 u32 packet_len = 0;
1778
1779                 /* pointer to the DMA descriptor */
1780                 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1781
1782                 /* check the DMA descriptor for new writes to xferStatus */
1783                 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1784                 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1785
1786                 unsigned char event = xferstatus & 0x1F;
1787
1788                 if (!event) {
1789                         /* this packet hasn't come in yet; we are done for now */
1790                         goto out;
1791                 }
1792
1793                 if (event == 0x11) {
1794                         /* packet received successfully! */
1795
1796                         /* rescount is the number of bytes *remaining* in the packet buffer,
1797                            after the packet was written */
1798                         packet_len = recv->buf_stride - rescount;
1799
1800                 } else if (event == 0x02) {
1801                         PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1802                 } else if (event) {
1803                         PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1804                 }
1805
1806                 /* sync our view of the buffer */
1807                 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1808
1809                 /* record the per-packet info */
1810                 {
1811                         /* iso header is 8 bytes ahead of the data payload */
1812                         unsigned char *hdr;
1813
1814                         unsigned int offset;
1815                         unsigned short cycle;
1816                         unsigned char channel, tag, sy;
1817
1818                         offset = iso->pkt_dma * recv->buf_stride;
1819                         hdr = iso->data_buf.kvirt + offset;
1820
1821                         /* skip iso header */
1822                         offset += 8;
1823                         packet_len -= 8;
1824
1825                         cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1826                         channel = hdr[5] & 0x3F;
1827                         tag = hdr[5] >> 6;
1828                         sy = hdr[4] & 0xF;
1829
1830                         hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
1831                 }
1832
1833                 /* reset the DMA descriptor */
1834                 il->status = recv->buf_stride;
1835
1836                 wake = 1;
1837                 recv->block_dma = iso->pkt_dma;
1838         }
1839
1840 out:
1841         if (wake)
1842                 hpsb_iso_wake(iso);
1843 }
1844
1845 static void ohci_iso_recv_task(unsigned long data)
1846 {
1847         struct hpsb_iso *iso = (struct hpsb_iso*) data;
1848         struct ohci_iso_recv *recv = iso->hostdata;
1849
1850         if (recv->dma_mode == BUFFER_FILL_MODE)
1851                 ohci_iso_recv_bufferfill_task(iso, recv);
1852         else
1853                 ohci_iso_recv_packetperbuf_task(iso, recv);
1854 }
1855
1856 /***********************************
1857  * rawiso ISO transmission         *
1858  ***********************************/
1859
/* per-context state for rawiso transmission */
struct ohci_iso_xmit {
	struct ti_ohci *ohci;		/* host controller owning this IT context */
	struct dma_prog_region prog;	/* DMA program: one iso_xmit_cmd per packet slot */
	struct ohci1394_iso_tasklet task;	/* bottom half that reaps sent packets */
	int task_active;		/* nonzero once the tasklet is registered */

	/* register offsets of this IT context, computed at init
	   (contexts are spaced 16 bytes apart) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};
1870
/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data

   The OUTPUT_MORE_IMMEDIATE descriptor carries its data inline: the
   8-byte iso header plus 8 bytes of padding immediately follow the
   descriptor's control words, ahead of the OUTPUT_LAST descriptor. */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;	/* control word; immediate data follows */
	u8 iso_hdr[8];		/* iso packet header transmitted on the bus */
	u32 unused[2];		/* pad immediate-data area */
	struct dma_cmd output_last;	/* points at the payload in the user buffer */
};
1881
1882 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1883 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1884 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1885 static void ohci_iso_xmit_task(unsigned long data);
1886
/* Allocate and wire up the per-context transmit state: the DMA program
 * region (one iso_xmit_cmd per packet slot) and the completion tasklet.
 * Returns 0 on success or a negative errno; on failure all partial
 * state is torn down via ohci_iso_xmit_shutdown(). */
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	/* NOTE(review): -ENOMEM is also returned when tasklet registration
	   fails (no free IT context); -EBUSY might be more accurate there */
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	/* claims a free IT context for this stream */
	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0)
		goto err;

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	/* frees the prog region and xmit itself, clears iso->hostdata */
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
1929
1930 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1931 {
1932         struct ohci_iso_xmit *xmit = iso->hostdata;
1933         struct ti_ohci *ohci = xmit->ohci;
1934
1935         /* disable interrupts */
1936         reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1937
1938         /* halt DMA */
1939         if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1940                 /* XXX the DMA context will lock up if you try to send too much data! */
1941                 PRINT(KERN_ERR,
1942                       "you probably exceeded the OHCI card's bandwidth limit - "
1943                       "reload the module and reduce xmit bandwidth");
1944         }
1945 }
1946
1947 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1948 {
1949         struct ohci_iso_xmit *xmit = iso->hostdata;
1950
1951         if (xmit->task_active) {
1952                 ohci_iso_xmit_stop(iso);
1953                 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1954                 xmit->task_active = 0;
1955         }
1956
1957         dma_prog_region_free(&xmit->prog);
1958         kfree(xmit);
1959         iso->hostdata = NULL;
1960 }
1961
/* Tasklet for isochronous transmission: walk the descriptor ring from
 * pkt_dma, report each completed packet to the subsystem, and recycle
 * its descriptor. */
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8  event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		/* 0x11 is evt_ack_complete; anything else is a DMA error */
		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle: low 13 bits of the timeStamp written back
		   into the descriptor's status word */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out
		   (third argument flags a transmission error) */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
2006
/* Append one packet to the transmit DMA chain at iso->first_packet.
 * Builds an OUTPUT_MORE_IMMEDIATE (carrying the iso header) followed by
 * an OUTPUT_LAST pointing at the payload, links the previous descriptor
 * block to it, and wakes the context.  Returns 0 or -EINVAL if the
 * payload would cross a page boundary. */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor:
	   key = immediate data, reqCount = 8 (the iso header below) */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size (little-endian) */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);	/* cmd = OUTPUT_LAST */
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);	/* reqCount = payload size */

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now, so DMA stops here */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt on the previous descriptor, unless required by
	   the IRQ interval (limits completion-interrupt rate) */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	/* descriptor writes must be visible before waking the context */
	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
2102
/* Start the IT context, optionally waiting for a specific cycle.
 * 'cycle' is -1 to start immediately, otherwise the low 13 bits are
 * used as the cycleMatch value.  Returns 0 on success, -1 if the
 * context's RUN bit did not stick. */
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		/* 0x80000000 = cycleMatchEnable; match value in bits 16-30 */
		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run (set the RUN bit) */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	mb();

	/* wait 100 usec to give the card time to go active */
	udelay(100);

	/* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}
2151
2152 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2153 {
2154
2155         switch(cmd) {
2156         case XMIT_INIT:
2157                 return ohci_iso_xmit_init(iso);
2158         case XMIT_START:
2159                 return ohci_iso_xmit_start(iso, arg);
2160         case XMIT_STOP:
2161                 ohci_iso_xmit_stop(iso);
2162                 return 0;
2163         case XMIT_QUEUE:
2164                 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2165         case XMIT_SHUTDOWN:
2166                 ohci_iso_xmit_shutdown(iso);
2167                 return 0;
2168
2169         case RECV_INIT:
2170                 return ohci_iso_recv_init(iso);
2171         case RECV_START: {
2172                 int *args = (int*) arg;
2173                 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2174         }
2175         case RECV_STOP:
2176                 ohci_iso_recv_stop(iso);
2177                 return 0;
2178         case RECV_RELEASE:
2179                 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2180                 return 0;
2181         case RECV_FLUSH:
2182                 ohci_iso_recv_task((unsigned long) iso);
2183                 return 0;
2184         case RECV_SHUTDOWN:
2185                 ohci_iso_recv_shutdown(iso);
2186                 return 0;
2187         case RECV_LISTEN_CHANNEL:
2188                 ohci_iso_recv_change_channel(iso, arg, 1);
2189                 return 0;
2190         case RECV_UNLISTEN_CHANNEL:
2191                 ohci_iso_recv_change_channel(iso, arg, 0);
2192                 return 0;
2193         case RECV_SET_CHANNEL_MASK:
2194                 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2195                 return 0;
2196
2197         default:
2198                 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2199                         cmd);
2200                 break;
2201         }
2202         return -EINVAL;
2203 }
2204
2205 /***************************************
2206  * IEEE-1394 functionality section END *
2207  ***************************************/
2208
2209
2210 /********************************************************
2211  * Global stuff (interrupt handler, init/shutdown code) *
2212  ********************************************************/
2213
/* Abort every transmission queued on an async transmit (AT) context:
 * stop the hardware, reset the context's software state under its lock,
 * then complete each pending packet with ACKX_ABORTED. */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);
	struct ti_ohci *ohci = d->ohci;
	struct hpsb_packet *packet, *ptmp;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	list_splice(&d->fifo_list, &packet_list);
	list_splice(&d->pending_list, &packet_list);
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	/* reset descriptor-program bookkeeping: nothing queued, all
	   program slots free again */
	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;
	d->free_prgs = d->num_desc;

	spin_unlock_irqrestore(&d->lock, flags);

	if (list_empty(&packet_list))
		return;

	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

	/* Now process subsystem callbacks for the packets from this
	 * context.  Done outside the lock - presumably because
	 * hpsb_packet_sent() may re-enter this driver; confirm. */
	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
	}
}
2252
2253 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2254                                        quadlet_t rx_event,
2255                                        quadlet_t tx_event)
2256 {
2257         struct ohci1394_iso_tasklet *t;
2258         unsigned long mask;
2259
2260         spin_lock(&ohci->iso_tasklet_list_lock);
2261
2262         list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2263                 mask = 1 << t->context;
2264
2265                 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2266                         tasklet_schedule(&t->tasklet);
2267                 else if (rx_event & mask)
2268                         tasklet_schedule(&t->tasklet);
2269         }
2270
2271         spin_unlock(&ohci->iso_tasklet_list_lock);
2272
2273 }
2274
2275 static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
2276                              struct pt_regs *regs_are_unused)
2277 {
2278         quadlet_t event, node_id;
2279         struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2280         struct hpsb_host *host = ohci->host;
2281         int phyid = -1, isroot = 0;
2282         unsigned long flags;
2283
2284         /* Read and clear the interrupt event register.  Don't clear
2285          * the busReset event, though. This is done when we get the
2286          * selfIDComplete interrupt. */
2287         spin_lock_irqsave(&ohci->event_lock, flags);
2288         event = reg_read(ohci, OHCI1394_IntEventClear);
2289         reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2290         spin_unlock_irqrestore(&ohci->event_lock, flags);
2291
2292         if (!event)
2293                 return IRQ_NONE;
2294
2295         /* If event is ~(u32)0 cardbus card was ejected.  In this case
2296          * we just return, and clean up in the ohci1394_pci_remove
2297          * function. */
2298         if (event == ~(u32) 0) {
2299                 DBGMSG("Device removed.");
2300                 return IRQ_NONE;
2301         }
2302
2303         DBGMSG("IntEvent: %08x", event);
2304
2305         if (event & OHCI1394_unrecoverableError) {
2306                 int ctx;
2307                 PRINT(KERN_ERR, "Unrecoverable error!");
2308
2309                 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2310                         PRINT(KERN_ERR, "Async Req Tx Context died: "
2311                                 "ctrl[%08x] cmdptr[%08x]",
2312                                 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2313                                 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2314
2315                 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2316                         PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2317                                 "ctrl[%08x] cmdptr[%08x]",
2318                                 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2319                                 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2320
2321                 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2322                         PRINT(KERN_ERR, "Async Req Rcv Context died: "
2323                                 "ctrl[%08x] cmdptr[%08x]",
2324                                 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2325                                 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2326
2327                 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2328                         PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2329                                 "ctrl[%08x] cmdptr[%08x]",
2330                                 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2331                                 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2332
2333                 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2334                         if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2335                                 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2336                                         "ctrl[%08x] cmdptr[%08x]", ctx,
2337                                         reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2338                                         reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2339                 }
2340
2341                 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2342                         if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2343                                 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2344                                         "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2345                                         reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2346                                         reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2347                                         reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2348                 }
2349
2350                 event &= ~OHCI1394_unrecoverableError;
2351         }
2352
2353         if (event & OHCI1394_cycleInconsistent) {
2354                 /* We subscribe to the cycleInconsistent event only to
2355                  * clear the corresponding event bit... otherwise,
2356                  * isochronous cycleMatch DMA won't work. */
2357                 DBGMSG("OHCI1394_cycleInconsistent");
2358                 event &= ~OHCI1394_cycleInconsistent;
2359         }
2360
2361         if (event & OHCI1394_busReset) {
2362                 /* The busReset event bit can't be cleared during the
2363                  * selfID phase, so we disable busReset interrupts, to
2364                  * avoid burying the cpu in interrupt requests. */
2365                 spin_lock_irqsave(&ohci->event_lock, flags);
2366                 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2367
2368                 if (ohci->check_busreset) {
2369                         int loop_count = 0;
2370
2371                         udelay(10);
2372
2373                         while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2374                                 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2375
2376                                 spin_unlock_irqrestore(&ohci->event_lock, flags);
2377                                 udelay(10);
2378                                 spin_lock_irqsave(&ohci->event_lock, flags);
2379
2380                                 /* The loop counter check is to prevent the driver
2381                                  * from remaining in this state forever. For the
2382                                  * initial bus reset, the loop continues for ever
2383                                  * and the system hangs, until some device is plugged-in
2384                                  * or out manually into a port! The forced reset seems
2385                                  * to solve this problem. This mainly effects nForce2. */
2386                                 if (loop_count > 10000) {
2387                                         ohci_devctl(host, RESET_BUS, LONG_RESET);
2388                                         DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2389                                         loop_count = 0;
2390                                 }
2391
2392                                 loop_count++;
2393                         }
2394                 }
2395                 spin_unlock_irqrestore(&ohci->event_lock, flags);
2396                 if (!host->in_bus_reset) {
2397                         DBGMSG("irq_handler: Bus reset requested");
2398
2399                         /* Subsystem call */
2400                         hpsb_bus_reset(ohci->host);
2401                 }
2402                 event &= ~OHCI1394_busReset;
2403         }
2404
2405         if (event & OHCI1394_reqTxComplete) {
2406                 struct dma_trm_ctx *d = &ohci->at_req_context;
2407                 DBGMSG("Got reqTxComplete interrupt "
2408                        "status=0x%08X", reg_read(ohci, d->ctrlSet));
2409                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2410                         ohci1394_stop_context(ohci, d->ctrlClear,
2411                                               "reqTxComplete");
2412                 else
2413                         dma_trm_tasklet((unsigned long)d);
2414                         //tasklet_schedule(&d->task);
2415                 event &= ~OHCI1394_reqTxComplete;
2416         }
2417         if (event & OHCI1394_respTxComplete) {
2418                 struct dma_trm_ctx *d = &ohci->at_resp_context;
2419                 DBGMSG("Got respTxComplete interrupt "
2420                        "status=0x%08X", reg_read(ohci, d->ctrlSet));
2421                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2422                         ohci1394_stop_context(ohci, d->ctrlClear,
2423                                               "respTxComplete");
2424                 else
2425                         tasklet_schedule(&d->task);
2426                 event &= ~OHCI1394_respTxComplete;
2427         }
2428         if (event & OHCI1394_RQPkt) {
2429                 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2430                 DBGMSG("Got RQPkt interrupt status=0x%08X",
2431                        reg_read(ohci, d->ctrlSet));
2432                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2433                         ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2434                 else
2435                         tasklet_schedule(&d->task);
2436                 event &= ~OHCI1394_RQPkt;
2437         }
2438         if (event & OHCI1394_RSPkt) {
2439                 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2440                 DBGMSG("Got RSPkt interrupt status=0x%08X",
2441                        reg_read(ohci, d->ctrlSet));
2442                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2443                         ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2444                 else
2445                         tasklet_schedule(&d->task);
2446                 event &= ~OHCI1394_RSPkt;
2447         }
2448         if (event & OHCI1394_isochRx) {
2449                 quadlet_t rx_event;
2450
2451                 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2452                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2453                 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2454                 event &= ~OHCI1394_isochRx;
2455         }
2456         if (event & OHCI1394_isochTx) {
2457                 quadlet_t tx_event;
2458
2459                 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2460                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2461                 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2462                 event &= ~OHCI1394_isochTx;
2463         }
2464         if (event & OHCI1394_selfIDComplete) {
2465                 if (host->in_bus_reset) {
2466                         node_id = reg_read(ohci, OHCI1394_NodeID);
2467
2468                         if (!(node_id & 0x80000000)) {
2469                                 PRINT(KERN_ERR,
2470                                       "SelfID received, but NodeID invalid "
2471                                       "(probably new bus reset occurred): %08X",
2472                                       node_id);
2473                                 goto selfid_not_valid;
2474                         }
2475
2476                         phyid =  node_id & 0x0000003f;
2477                         isroot = (node_id & 0x40000000) != 0;
2478
2479                         DBGMSG("SelfID interrupt received "
2480                               "(phyid %d, %s)", phyid,
2481                               (isroot ? "root" : "not root"));
2482
2483                         handle_selfid(ohci, host, phyid, isroot);
2484
2485                         /* Clear the bus reset event and re-enable the
2486                          * busReset interrupt.  */
2487                         spin_lock_irqsave(&ohci->event_lock, flags);
2488                         reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2489                         reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2490                         spin_unlock_irqrestore(&ohci->event_lock, flags);
2491
2492                         /* Accept Physical requests from all nodes. */
2493                         reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
2494                         reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
2495
2496                         /* Turn on phys dma reception.
2497                          *
2498                          * TODO: Enable some sort of filtering management.
2499                          */
2500                         if (phys_dma) {
2501                                 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
2502                                 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
2503                                 reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
2504                         } else {
2505                                 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
2506                                 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
2507                         }
2508
2509                         DBGMSG("PhyReqFilter=%08x%08x",
2510                                reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
2511                                reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
2512
2513                         hpsb_selfid_complete(host, phyid, isroot);
2514                 } else
2515                         PRINT(KERN_ERR,
2516                               "SelfID received outside of bus reset sequence");
2517
2518 selfid_not_valid:
2519                 event &= ~OHCI1394_selfIDComplete;
2520         }
2521
2522         /* Make sure we handle everything, just in case we accidentally
2523          * enabled an interrupt that we didn't write a handler for.  */
2524         if (event)
2525                 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2526                       event);
2527
2528         return IRQ_HANDLED;
2529 }
2530
2531 /* Put the buffer back into the dma context */
2532 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2533 {
2534         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2535         DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2536
2537         d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2538         d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2539         idx = (idx + d->num_desc - 1 ) % d->num_desc;
2540         d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2541
2542         /* wake up the dma context if necessary */
2543         if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2544                 PRINT(KERN_INFO,
2545                       "Waking dma ctx=%d ... processing is probably too slow",
2546                       d->ctx);
2547         }
2548
2549         /* do this always, to avoid race condition */
2550         reg_write(ohci, d->ctrlSet, 0x1000);
2551 }
2552
/* Conditionally convert a little-endian quadlet to CPU byte order.
 * When "noswap" is set the controller already delivered the data in
 * host order, so the value is passed through untouched.  Arguments are
 * parenthesized so that expression operands expand safely. */
#define cond_le32_to_cpu(data, noswap) \
	((noswap) ? (data) : le32_to_cpu(data))

/* Packet size (header plus trailer, in bytes) indexed by tcode:
 * 0 means the size must be read from the data-length field of the
 * packet header, -1 marks tcodes not expected in an AR context. */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
                            -1, 0, -1, 0, -1, -1, 16, -1};
2558
2559 /*
2560  * Determine the length of a packet in the buffer
2561  * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2562  */
2563 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2564                          int offset, unsigned char tcode, int noswap)
2565 {
2566         int length = -1;
2567
2568         if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2569                 length = TCODE_SIZE[tcode];
2570                 if (length == 0) {
2571                         if (offset + 12 >= d->buf_size) {
2572                                 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2573                                                 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2574                         } else {
2575                                 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2576                         }
2577                         length += 20;
2578                 }
2579         } else if (d->type == DMA_CTX_ISO) {
2580                 /* Assumption: buffer fill mode with header/trailer */
2581                 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2582         }
2583
2584         if (length > 0 && length % 4)
2585                 length += 4 - (length % 4);
2586
2587         return length;
2588 }
2589
/* Tasklet that processes dma receive buffers.
 *
 * Drains complete packets out of the buffer-fill receive DMA buffers,
 * reassembling packets that span more than one descriptor buffer into
 * d->spb, and hands each one to the ieee1394 core via
 * hpsb_packet_received().  Progress (buffer index and byte offset) is
 * persisted in d->buf_ind / d->buf_offset between invocations.  Runs
 * entirely under d->lock. */
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	/* Resume from where the previous run stopped. */
	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	/* The low 16 bits of the descriptor status hold resCount: how
	 * many bytes of this buffer the controller has NOT yet filled. */
	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;

	while (bytes_left > 0) {
		/* tcode is bits 7:4 of the first header quadlet. */
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			/* Unknown tcode: the context is now out of sync
			 * with the buffer contents, so stop it rather
			 * than misparse everything that follows. */
			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}

		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor.  */
		if ((offset + length) > d->buf_size) {
			DBGMSG("Split packet rcv'd");
			if (length > d->split_buf_size) {
				/* Packet too big for the reassembly
				 * buffer; cannot recover in-line. */
				ohci1394_stop_context(ohci, d->ctrlClear,
					     "Split packet size exceeded");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call.  */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* Copy the tail of the current buffer into the
			 * split buffer, recycle the descriptor, then
			 * keep copying whole buffers until the packet
			 * is complete. */
			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset=0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr,buf_ptr,d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				/* Final partial buffer: copy the head
				 * and leave offset pointing past it. */
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG("Single packet rcv'd");
			/* Whole packet in one buffer: stage it in spb so
			 * the hand-off path below is uniform. */
			memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
			if (offset==d->buf_size) {
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset=0;
			}
		}

		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it.  */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				packet_swab(d->spb, tcode);
			/* NOTE(review): d->spb[1] below is read without
			 * cond_le32_to_cpu() unlike its neighbours —
			 * debug output only, but looks inconsistent;
			 * verify intent. */
			DBGMSG("Packet received from node"
				" %d ack=0x%02X spd=%d tcode=0x%X"
				" length=%d ctx=%d tlabel=%d",
				(d->spb[1]>>16)&0x3f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
				tcode, length, d->ctx,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>10)&0x3f);

			/* ack code 0x11 is ack_complete; anything else
			 * is reported as "not acked" to the core. */
			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
				== 0x11) ? 1 : 0;

			/* length-4 strips the trailer quadlet appended
			 * by the controller. */
			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			       d->ctx);
#endif

		/* Re-read resCount: the controller may have filled in
		 * more data while we were processing. */
		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;

		bytes_left = d->buf_size - rescount - offset;

	}

	/* Persist position for the next invocation. */
	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}
2728
2729 /* Bottom half that processes sent packets */
2730 static void dma_trm_tasklet (unsigned long data)
2731 {
2732         struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2733         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2734         struct hpsb_packet *packet, *ptmp;
2735         unsigned long flags;
2736         u32 status, ack;
2737         size_t datasize;
2738
2739         spin_lock_irqsave(&d->lock, flags);
2740
2741         list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2742                 datasize = packet->data_size;
2743                 if (datasize && packet->type != hpsb_raw)
2744                         status = le32_to_cpu(
2745                                 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2746                 else
2747                         status = le32_to_cpu(
2748                                 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2749
2750                 if (status == 0)
2751                         /* this packet hasn't been sent yet*/
2752                         break;
2753
2754 #ifdef OHCI1394_DEBUG
2755                 if (datasize)
2756                         if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2757                                 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2758                                        "ack=0x%X spd=%d dataLength=%d ctx=%d",
2759                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2760                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2761                                        status&0x1f, (status>>5)&0x3,
2762                                        le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2763                                        d->ctx);
2764                         else
2765                                 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2766                                        "0x%02X ack=0x%X spd=%d dataLength=%d ctx=%d",
2767                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2768                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2769                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2770                                        status&0x1f, (status>>5)&0x3,
2771                                        le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2772                                        d->ctx);
2773                 else
2774                         DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2775                                "0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
2776                                 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2777                                         >>16)&0x3f,
2778                                 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2779                                         >>4)&0xf,
2780                                 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2781                                         >>10)&0x3f,
2782                                 status&0x1f, (status>>5)&0x3,
2783                                 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2784                                 d->ctx);
2785 #endif
2786
2787                 if (status & 0x10) {
2788                         ack = status & 0xf;
2789                 } else {
2790                         switch (status & 0x1f) {
2791                         case EVT_NO_STATUS: /* that should never happen */
2792                         case EVT_RESERVED_A: /* that should never happen */
2793                         case EVT_LONG_PACKET: /* that should never happen */
2794                                 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2795                                 ack = ACKX_SEND_ERROR;
2796                                 break;
2797                         case EVT_MISSING_ACK:
2798                                 ack = ACKX_TIMEOUT;
2799                                 break;
2800                         case EVT_UNDERRUN:
2801                                 ack = ACKX_SEND_ERROR;
2802                                 break;
2803                         case EVT_OVERRUN: /* that should never happen */
2804                                 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2805                                 ack = ACKX_SEND_ERROR;
2806                                 break;
2807                         case EVT_DESCRIPTOR_READ:
2808                         case EVT_DATA_READ:
2809                         case EVT_DATA_WRITE:
2810                                 ack = ACKX_SEND_ERROR;
2811                                 break;
2812                         case EVT_BUS_RESET: /* that should never happen */
2813                                 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2814                                 ack = ACKX_SEND_ERROR;
2815                                 break;
2816                         case EVT_TIMEOUT:
2817                                 ack = ACKX_TIMEOUT;
2818                                 break;
2819                         case EVT_TCODE_ERR:
2820                                 ack = ACKX_SEND_ERROR;
2821                                 break;
2822                         case EVT_RESERVED_B: /* that should never happen */
2823                         case EVT_RESERVED_C: /* that should never happen */
2824                                 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2825                                 ack = ACKX_SEND_ERROR;
2826                                 break;
2827                         case EVT_UNKNOWN:
2828                         case EVT_FLUSHED:
2829                                 ack = ACKX_SEND_ERROR;
2830                                 break;
2831                         default:
2832                                 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2833                                 ack = ACKX_SEND_ERROR;
2834                                 BUG();
2835                         }
2836                 }
2837
2838                 list_del_init(&packet->driver_list);
2839                 hpsb_packet_sent(ohci->host, packet, ack);
2840
2841                 if (datasize) {
2842                         pci_unmap_single(ohci->dev,
2843                                          cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2844                                          datasize, PCI_DMA_TODEVICE);
2845                         OHCI_DMA_FREE("single Xmit data packet");
2846                 }
2847
2848                 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2849                 d->free_prgs++;
2850         }
2851
2852         dma_trm_flush(ohci, d);
2853
2854         spin_unlock_irqrestore(&d->lock, flags);
2855 }
2856
2857 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2858 {
2859         if (d->ctrlClear) {
2860                 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2861
2862                 if (d->type == DMA_CTX_ISO) {
2863                         /* disable interrupts */
2864                         reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2865                         ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2866                 } else {
2867                         tasklet_kill(&d->task);
2868                 }
2869         }
2870 }
2871
2872
2873 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2874 {
2875         int i;
2876         struct ti_ohci *ohci = d->ohci;
2877
2878         if (ohci == NULL)
2879                 return;
2880
2881         DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2882
2883         if (d->buf_cpu) {
2884                 for (i=0; i<d->num_desc; i++)
2885                         if (d->buf_cpu[i] && d->buf_bus[i]) {
2886                                 pci_free_consistent(
2887                                         ohci->dev, d->buf_size,
2888                                         d->buf_cpu[i], d->buf_bus[i]);
2889                                 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2890                         }
2891                 kfree(d->buf_cpu);
2892                 kfree(d->buf_bus);
2893         }
2894         if (d->prg_cpu) {
2895                 for (i=0; i<d->num_desc; i++)
2896                         if (d->prg_cpu[i] && d->prg_bus[i]) {
2897                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2898                                 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2899                         }
2900                 pci_pool_destroy(d->prg_pool);
2901                 OHCI_DMA_FREE("dma_rcv prg pool");
2902                 kfree(d->prg_cpu);
2903                 kfree(d->prg_bus);
2904         }
2905         if (d->spb) kfree(d->spb);
2906
2907         /* Mark this context as freed. */
2908         d->ohci = NULL;
2909 }
2910
2911 static int
2912 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2913                   enum context_type type, int ctx, int num_desc,
2914                   int buf_size, int split_buf_size, int context_base)
2915 {
2916         int i;
2917
2918         d->ohci = ohci;
2919         d->type = type;
2920         d->ctx = ctx;
2921
2922         d->num_desc = num_desc;
2923         d->buf_size = buf_size;
2924         d->split_buf_size = split_buf_size;
2925
2926         d->ctrlSet = 0;
2927         d->ctrlClear = 0;
2928         d->cmdPtr = 0;
2929
2930         d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_KERNEL);
2931         d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
2932
2933         if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2934                 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2935                 free_dma_rcv_ctx(d);
2936                 return -ENOMEM;
2937         }
2938         memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2939         memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2940
2941         d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
2942                              GFP_KERNEL);
2943         d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
2944
2945         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2946                 PRINT(KERN_ERR, "Failed to allocate dma prg");
2947                 free_dma_rcv_ctx(d);
2948                 return -ENOMEM;
2949         }
2950         memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2951         memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2952
2953         d->spb = kmalloc(d->split_buf_size, GFP_KERNEL);
2954
2955         if (d->spb == NULL) {
2956                 PRINT(KERN_ERR, "Failed to allocate split buffer");
2957                 free_dma_rcv_ctx(d);
2958                 return -ENOMEM;
2959         }
2960
2961         d->prg_pool = pci_pool_create("ohci1394 rcv prg", ohci->dev,
2962                                 sizeof(struct dma_cmd), 4, 0);
2963         OHCI_DMA_ALLOC("dma_rcv prg pool");
2964
2965         for (i=0; i<d->num_desc; i++) {
2966                 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2967                                                      d->buf_size,
2968                                                      d->buf_bus+i);
2969                 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
2970
2971                 if (d->buf_cpu[i] != NULL) {
2972                         memset(d->buf_cpu[i], 0, d->buf_size);
2973                 } else {
2974                         PRINT(KERN_ERR,
2975                               "Failed to allocate dma buffer");
2976                         free_dma_rcv_ctx(d);
2977                         return -ENOMEM;
2978                 }
2979
2980                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
2981                 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
2982
2983                 if (d->prg_cpu[i] != NULL) {
2984                         memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2985                 } else {
2986                         PRINT(KERN_ERR,
2987                               "Failed to allocate dma prg");
2988                         free_dma_rcv_ctx(d);
2989                         return -ENOMEM;
2990                 }
2991         }
2992
2993         spin_lock_init(&d->lock);
2994
2995         if (type == DMA_CTX_ISO) {
2996                 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
2997                                           OHCI_ISO_MULTICHANNEL_RECEIVE,
2998                                           dma_rcv_tasklet, (unsigned long) d);
2999                 if (ohci1394_register_iso_tasklet(ohci,
3000                                                   &ohci->ir_legacy_tasklet) < 0) {
3001                         PRINT(KERN_ERR, "No IR DMA context available");
3002                         free_dma_rcv_ctx(d);
3003                         return -EBUSY;
3004                 }
3005
3006                 /* the IR context can be assigned to any DMA context
3007                  * by ohci1394_register_iso_tasklet */
3008                 d->ctx = ohci->ir_legacy_tasklet.context;
3009                 d->ctrlSet = OHCI1394_IsoRcvContextControlSet + 32*d->ctx;
3010                 d->ctrlClear = OHCI1394_IsoRcvContextControlClear + 32*d->ctx;
3011                 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
3012                 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
3013         } else {
3014                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3015                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3016                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3017
3018                 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3019         }
3020
3021         return 0;
3022 }
3023
3024 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3025 {
3026         int i;
3027         struct ti_ohci *ohci = d->ohci;
3028
3029         if (ohci == NULL)
3030                 return;
3031
3032         DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3033
3034         if (d->prg_cpu) {
3035                 for (i=0; i<d->num_desc; i++)
3036                         if (d->prg_cpu[i] && d->prg_bus[i]) {
3037                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3038                                 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3039                         }
3040                 pci_pool_destroy(d->prg_pool);
3041                 OHCI_DMA_FREE("dma_trm prg pool");
3042                 kfree(d->prg_cpu);
3043                 kfree(d->prg_bus);
3044         }
3045
3046         /* Mark this context as freed. */
3047         d->ohci = NULL;
3048 }
3049
3050 static int
3051 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3052                   enum context_type type, int ctx, int num_desc,
3053                   int context_base)
3054 {
3055         int i;
3056
3057         d->ohci = ohci;
3058         d->type = type;
3059         d->ctx = ctx;
3060         d->num_desc = num_desc;
3061         d->ctrlSet = 0;
3062         d->ctrlClear = 0;
3063         d->cmdPtr = 0;
3064
3065         d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
3066                              GFP_KERNEL);
3067         d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3068
3069         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3070                 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3071                 free_dma_trm_ctx(d);
3072                 return -ENOMEM;
3073         }
3074         memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3075         memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3076
3077         d->prg_pool = pci_pool_create("ohci1394 trm prg", ohci->dev,
3078                                 sizeof(struct at_dma_prg), 4, 0);
3079         OHCI_DMA_ALLOC("dma_rcv prg pool");
3080
3081         for (i = 0; i < d->num_desc; i++) {
3082                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3083                 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3084
3085                 if (d->prg_cpu[i] != NULL) {
3086                         memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3087                 } else {
3088                         PRINT(KERN_ERR,
3089                               "Failed to allocate at dma prg");
3090                         free_dma_trm_ctx(d);
3091                         return -ENOMEM;
3092                 }
3093         }
3094
3095         spin_lock_init(&d->lock);
3096
3097         /* initialize tasklet */
3098         if (type == DMA_CTX_ISO) {
3099                 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3100                                           dma_trm_tasklet, (unsigned long) d);
3101                 if (ohci1394_register_iso_tasklet(ohci,
3102                                                   &ohci->it_legacy_tasklet) < 0) {
3103                         PRINT(KERN_ERR, "No IT DMA context available");
3104                         free_dma_trm_ctx(d);
3105                         return -EBUSY;
3106                 }
3107
3108                 /* IT can be assigned to any context by register_iso_tasklet */
3109                 d->ctx = ohci->it_legacy_tasklet.context;
3110                 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3111                 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3112                 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3113         } else {
3114                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3115                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3116                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3117                 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3118         }
3119
3120         return 0;
3121 }
3122
/* Install a new config ROM image on the controller.  Quadlets 0
 * (ROM header) and 2 (bus options) are mirrored into the chip's
 * dedicated registers, then the whole image is copied into the
 * DMA-visible shadow buffer (csr_config_rom_cpu). */
static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
{
	struct ti_ohci *ohci = host->hostdata;

	reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
	reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));

	memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
}
3132
3133
3134 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3135                                  quadlet_t data, quadlet_t compare)
3136 {
3137         struct ti_ohci *ohci = host->hostdata;
3138         int i;
3139
3140         reg_write(ohci, OHCI1394_CSRData, data);
3141         reg_write(ohci, OHCI1394_CSRCompareData, compare);
3142         reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3143
3144         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3145                 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3146                         break;
3147
3148                 mdelay(1);
3149         }
3150
3151         return reg_read(ohci, OHCI1394_CSRData);
3152 }
3153
/* Host-driver operations handed to the ieee1394 core via
 * hpsb_alloc_host() in ohci1394_pci_probe(). */
static struct hpsb_host_driver ohci1394_driver = {
	.owner =		THIS_MODULE,
	.name =			OHCI1394_DRIVER_NAME,
	.set_hw_config_rom =	ohci_set_hw_config_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =		ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};
3163
3164 \f
3165
3166 /***********************************
3167  * PCI Driver Interface functions  *
3168  ***********************************/
3169
/* Error-exit helper for ohci1394_pci_probe(): log the message, unwind
 * everything initialized so far (ohci1394_pci_remove() keys off
 * ohci->init_state to decide how much to tear down), and return 'err'
 * from the probe function.  #undef'd at the end of the probe. */
#define FAIL(err, fmt, args...)			\
do {						\
	PRINT_G(KERN_ERR, fmt , ## args);	\
	ohci1394_pci_remove(dev);		\
	return err;				\
} while (0)
3176
/* PCI probe: bring up one OHCI-1394 controller.  Each successful
 * initialization step advances ohci->init_state so that FAIL() /
 * ohci1394_pci_remove() can unwind exactly as far as we got. */
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	static int version_printed = 0;

	struct hpsb_host *host;
	struct ti_ohci *ohci;	/* shortcut to currently handled device */
	unsigned long ohci_base;

	/* Print the driver version once, on the first probed device. */
	if (version_printed++ == 0)
		PRINT_G(KERN_INFO, "%s", version);

	if (pci_enable_device(dev))
		FAIL(-ENXIO, "Failed to enable OHCI hardware");
	pci_set_master(dev);

	/* The ti_ohci struct lives in the host's hostdata area. */
	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");

	ohci = host->hostdata;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	host->pdev = dev;
	pci_set_drvdata(dev, ohci);

	/* We don't want hardware swapping */
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	/* Some oddball Apple controllers do not order the selfid
	 * properly, so we make up for it here.  */
#ifndef __LITTLE_ENDIAN
	/* XXX: Need a better way to check this. I'm wondering if we can
	 * read the values of the OHCI1394_PCI_HCI_Control and the
	 * noByteSwapData registers to see if they were not cleared to
	 * zero. Should this work? Obviously it's not defined what these
	 * registers will read when they aren't supported. Bleh! */
	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
		ohci->no_swap_incoming = 1;
		ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif


#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a busreset.  */
	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
	    (dev->vendor ==  PCI_VENDOR_ID_NVIDIA &&
	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
		ohci->check_busreset = 1;

	/* We hardwire the MMIO length, since some CardBus adaptors
	 * fail to report the right length.  Anyway, the ohci spec
	 * clearly says it's 2kb, so this shouldn't be a problem. */
	ohci_base = pci_resource_start(dev, 0);
	if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
		PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
		      pci_resource_len(dev, 0));

	/* Seems PCMCIA handles this internally. Not sure why. Seems
	 * pretty bogus to force a driver to special case this.  */
#ifndef PCMCIA
	if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
		FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
		     ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
#endif
	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;

	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL)
		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);

	/* csr_config rom allocation */
	ohci->csr_config_rom_cpu =
		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				     &ohci->csr_config_rom_bus);
	OHCI_DMA_ALLOC("consistent csr_config_rom");
	if (ohci->csr_config_rom_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;

	/* self-id dma buffer allocation */
	ohci->selfid_buf_cpu =
		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
		      &ohci->selfid_buf_bus);
	OHCI_DMA_ALLOC("consistent selfid_buf");

	if (ohci->selfid_buf_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;

	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
		      "8Kb boundary... may cause problems on some CXD3222 chip",
		      ohci->selfid_buf_cpu);

	/* No self-id errors at startup */
	ohci->self_id_errors = 0;

	/* "MAYBE": any subset of the four contexts below may have been
	 * allocated when a FAIL() fires; the free routines tolerate
	 * partially-initialized contexts. */
	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
	/* AR DMA request context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
			      OHCI1394_AsReqRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Req context");

	/* AR DMA response context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
			      OHCI1394_AsRspRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Resp context");

	/* AT DMA request context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
			      OHCI1394_AsReqTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Req context");

	/* AT DMA response context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
			      OHCI1394_AsRspTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Resp context");

	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
	ohci_soft_reset(ohci);

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	mdelay(50);

	/* Determine the number of available IR and IT contexts. */
	ohci->nb_iso_rcv_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
	DBGMSG("%d iso receive contexts available",
	       ohci->nb_iso_rcv_ctx);

	ohci->nb_iso_xmit_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
	DBGMSG("%d iso transmit contexts available",
	       ohci->nb_iso_xmit_ctx);

	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;

	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
	spin_lock_init(&ohci->iso_tasklet_list_lock);
	ohci->ISO_channel_usage = 0;
	spin_lock_init(&ohci->IR_channel_lock);

	/* the IR DMA context is allocated on-demand; mark it inactive */
	ohci->ir_legacy_context.ohci = NULL;

	/* same for the IT DMA context */
	ohci->it_legacy_context.ohci = NULL;

	if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
			 OHCI1394_DRIVER_NAME, ohci))
		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);

	ohci->init_state = OHCI_INIT_HAVE_IRQ;
	ohci_initialize(ohci);

	/* Set certain csr values */
	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
	host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;

	/* Tell the highlevel this host is ready */
	if (hpsb_add_host(host))
		FAIL(-ENOMEM, "Failed to register host with highlevel");

	ohci->init_state = OHCI_INIT_DONE;

	return 0;
#undef FAIL
}
3379
/* Tear down a (possibly partially-initialized) controller.  The
 * switch falls through deliberately: ohci->init_state records how far
 * probe got, each case undoes one initialization step, so starting at
 * the reached state unwinds everything in reverse order. */
static void ohci1394_pci_remove(struct pci_dev *pdev)
{
	struct ti_ohci *ohci;
	struct device *dev;

	ohci = pci_get_drvdata(pdev);
	if (!ohci)
		return;

	/* Hold a reference on the host device so the hostdata (ohci)
	 * stays valid for the whole teardown. */
	dev = get_device(&ohci->host->device);

	switch (ohci->init_state) {
	case OHCI_INIT_DONE:
		hpsb_remove_host(ohci->host);

		/* Clear out BUS Options */
		reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
			  0x00ff0000);
		memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);

		/* fall through */
	case OHCI_INIT_HAVE_IRQ:
		/* Clear interrupt registers */
		reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

		/* Disable IRM Contender */
		set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));

		/* Clear link control register */
		reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

		/* Let all other nodes know to ignore us */
		ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);

		/* Soft reset before we start - this disables
		 * interrupts and clears linkEnable and LPS. */
		ohci_soft_reset(ohci);
		free_irq(ohci->dev->irq, ohci);

		/* fall through */
	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
		/* The ohci_soft_reset() stops all DMA contexts, so we
		 * dont need to do this.  */
		/* Free AR dma */
		free_dma_rcv_ctx(&ohci->ar_req_context);
		free_dma_rcv_ctx(&ohci->ar_resp_context);

		/* Free AT dma */
		free_dma_trm_ctx(&ohci->at_req_context);
		free_dma_trm_ctx(&ohci->at_resp_context);

		/* Free IR dma */
		free_dma_rcv_ctx(&ohci->ir_legacy_context);

		/* Free IT dma */
		free_dma_trm_ctx(&ohci->it_legacy_context);

		/* fall through */
	case OHCI_INIT_HAVE_SELFID_BUFFER:
		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				    ohci->selfid_buf_cpu,
				    ohci->selfid_buf_bus);
		OHCI_DMA_FREE("consistent selfid_buf");

		/* fall through */
	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				    ohci->csr_config_rom_cpu,
				    ohci->csr_config_rom_bus);
		OHCI_DMA_FREE("consistent csr_config_rom");

		/* fall through */
	case OHCI_INIT_HAVE_IOMAPPING:
		iounmap(ohci->registers);

		/* fall through */
	case OHCI_INIT_HAVE_MEM_REGION:
#ifndef PCMCIA
		release_mem_region(pci_resource_start(ohci->dev, 0),
				   OHCI1394_REGISTER_SIZE);
#endif

#ifdef CONFIG_PPC_PMAC
	/* On UniNorth, power down the cable and turn off the chip
	 * clock when the module is removed to save power on
	 * laptops. Turning it back ON is done by the arch code when
	 * pci_enable_device() is called */
	{
		struct device_node* of_node;

		of_node = pci_device_to_OF_node(ohci->dev);
		if (of_node) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
		}
	}
#endif /* CONFIG_PPC_PMAC */

		/* fall through */
	case OHCI_INIT_ALLOC_HOST:
		pci_set_drvdata(ohci->dev, NULL);
	}

	if (dev)
		put_device(dev);
}
3486
3487
/* PCI resume hook: re-enable the 1394 cell on PowerMacs, then bring
 * the PCI device back up.  Returns 0 on success or the error from
 * pci_enable_device() (previously that error was silently dropped and
 * a dead device was reported as successfully resumed). */
static int ohci1394_pci_resume (struct pci_dev *pdev)
{
#ifdef CONFIG_PMAC_PBOOK
	{
		struct device_node *of_node;

		/* Re-enable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
	}
#endif

	/* BUG FIX: propagate pci_enable_device() failure to the PM core. */
	return pci_enable_device(pdev);
}
3505
3506
/* PCI suspend hook: power down the 1394 cell on PowerMacs.
 * NOTE(review): no pci_save_state()/pci_disable_device() is done here
 * and no controller quiescing — presumably relying on the resume path
 * and hardware defaults; verify against the PM requirements of the
 * targeted kernel. */
static int ohci1394_pci_suspend (struct pci_dev *pdev, u32 state)
{
#ifdef CONFIG_PMAC_PBOOK
	{
		struct device_node *of_node;

		/* Disable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
	}
#endif

	return 0;
}
3522
3523
/* PCI class code for an OHCI-programmed FireWire controller:
 * serial-bus/FireWire base class with programming interface 0x10. */
#define PCI_CLASS_FIREWIRE_OHCI     ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)

/* Match any vendor/device whose class code says "OHCI FireWire";
 * terminated by the zeroed sentinel entry. */
static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class = 	PCI_CLASS_FIREWIRE_OHCI,
		.class_mask = 	PCI_ANY_ID,
		.vendor =	PCI_ANY_ID,
		.device =	PCI_ANY_ID,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ 0, },
};
3537
3538 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3539
/* PCI driver glue; registered from ohci1394_init(). */
static struct pci_driver ohci1394_pci_driver = {
	.name =		OHCI1394_DRIVER_NAME,
	.id_table =	ohci1394_pci_tbl,
	.probe =	ohci1394_pci_probe,
	.remove =	ohci1394_pci_remove,
	.resume =	ohci1394_pci_resume,
	.suspend =	ohci1394_pci_suspend,
};
3548
3549 \f
3550
3551 /***********************************
3552  * OHCI1394 Video Interface        *
3553  ***********************************/
3554
3555 /* essentially the only purpose of this code is to allow another
3556    module to hook into ohci's interrupt handler */
3557
3558 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3559 {
3560         int i=0;
3561
3562         /* stop the channel program if it's still running */
3563         reg_write(ohci, reg, 0x8000);
3564
3565         /* Wait until it effectively stops */
3566         while (reg_read(ohci, reg) & 0x400) {
3567                 i++;
3568                 if (i>5000) {
3569                         PRINT(KERN_ERR,
3570                               "Runaway loop while stopping context: %s...", msg ? msg : "");
3571                         return 1;
3572                 }
3573
3574                 mb();
3575                 udelay(10);
3576         }
3577         if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3578         return 0;
3579 }
3580
/* Prepare an iso tasklet wrapper before registration.  'type' is one
 * of OHCI_ISO_TRANSMIT / receive variants; 'func'/'data' become the
 * underlying tasklet handler and argument. */
void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
			       void (*func)(unsigned long), unsigned long data)
{
	tasklet_init(&tasklet->tasklet, func, data);
	tasklet->type = type;
	/* We init the tasklet->link field, so we can list_del() it
	 * without worrying whether it was added to the list or not. */
	INIT_LIST_HEAD(&tasklet->link);
}
3590
3591 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3592                                   struct ohci1394_iso_tasklet *tasklet)
3593 {
3594         unsigned long flags, *usage;
3595         int n, i, r = -EBUSY;
3596
3597         if (tasklet->type == OHCI_ISO_TRANSMIT) {
3598                 n = ohci->nb_iso_xmit_ctx;
3599                 usage = &ohci->it_ctx_usage;
3600         }
3601         else {
3602                 n = ohci->nb_iso_rcv_ctx;
3603                 usage = &ohci->ir_ctx_usage;
3604
3605                 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3606                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3607                         if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3608                                 return r;
3609                         }
3610                 }
3611         }
3612
3613         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3614
3615         for (i = 0; i < n; i++)
3616                 if (!test_and_set_bit(i, usage)) {
3617                         tasklet->context = i;
3618                         list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3619                         r = 0;
3620                         break;
3621                 }
3622
3623         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3624
3625         return r;
3626 }
3627
/* Undo ohci1394_register_iso_tasklet(): kill the tasklet, release its
 * context-usage bit (and the multichannel claim, if any) and unlink it
 * from the ohci's tasklet list. */
void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
				     struct ohci1394_iso_tasklet *tasklet)
{
	unsigned long flags;

	/* Make sure the handler is not running / scheduled before we
	 * take its context away. */
	tasklet_kill(&tasklet->tasklet);

	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);

	if (tasklet->type == OHCI_ISO_TRANSMIT)
		clear_bit(tasklet->context, &ohci->it_ctx_usage);
	else {
		clear_bit(tasklet->context, &ohci->ir_ctx_usage);

		if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
			clear_bit(0, &ohci->ir_multichannel_used);
		}
	}

	list_del(&tasklet->link);

	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
}
3651
3652 EXPORT_SYMBOL(ohci1394_stop_context);
3653 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3654 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3655 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3656
3657
3658 /***********************************
3659  * General module initialization   *
3660  ***********************************/
3661
3662 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3663 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3664 MODULE_LICENSE("GPL");
3665
/* Module exit: unregister the PCI driver (triggers pci_remove for
 * every bound device). */
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}
3670
/* Module init: register the PCI driver; probing happens per-device
 * via ohci1394_pci_probe(). */
static int __init ohci1394_init(void)
{
	return pci_module_init(&ohci1394_pci_driver);
}
3675
3676 module_init(ohci1394_init);
3677 module_exit(ohci1394_cleanup);