patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / drivers / ieee1394 / ohci1394.c
1 /*
2  * ohci1394.c - driver for OHCI 1394 boards
3  * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4  *                        Gord Peters <GordPeters@smarttech.com>
5  *              2001      Ben Collins <bcollins@debian.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software Foundation,
19  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20  */
21
22 /*
23  * Things known to be working:
24  * . Async Request Transmit
25  * . Async Response Receive
26  * . Async Request Receive
27  * . Async Response Transmit
28  * . Iso Receive
29  * . DMA mmap for iso receive
30  * . Config ROM generation
31  *
32  * Things implemented, but still in test phase:
33  * . Iso Transmit
34  * . Async Stream Packets Transmit (Receive done via Iso interface)
35  *
36  * Things not implemented:
37  * . DMA error recovery
38  *
39  * Known bugs:
40  * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41  *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42  */
43
44 /*
45  * Acknowledgments:
46  *
47  * Adam J Richter <adam@yggdrasil.com>
48  *  . Use of pci_class to find device
49  *
50  * Emilie Chung <emilie.chung@axis.com>
51  *  . Tip on Async Request Filter
52  *
53  * Pascal Drolet <pascal.drolet@informission.ca>
54  *  . Various tips for optimization and functionnalities
55  *
56  * Robert Ficklin <rficklin@westengineering.com>
57  *  . Loop in irq_handler
58  *
59  * James Goodwin <jamesg@Filanet.com>
60  *  . Various tips on initialization, self-id reception, etc.
61  *
62  * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63  *  . Apple PowerBook detection
64  *
65  * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66  *  . Reset the board properly before leaving + misc cleanups
67  *
68  * Leon van Stuivenberg <leonvs@iae.nl>
69  *  . Bug fixes
70  *
71  * Ben Collins <bcollins@debian.org>
72  *  . Working big-endian support
73  *  . Updated to 2.4.x module scheme (PCI aswell)
74  *  . Config ROM generation
75  *
76  * Manfred Weihs <weihs@ict.tuwien.ac.at>
77  *  . Reworked code for initiating bus resets
78  *    (long, short, with or without hold-off)
79  *
80  * Nandu Santhi <contactnandu@users.sourceforge.net>
81  *  . Added support for nVidia nForce2 onboard Firewire chipset
82  *
83  */
84
85 #include <linux/config.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/slab.h>
89 #include <linux/interrupt.h>
90 #include <linux/wait.h>
91 #include <linux/errno.h>
92 #include <linux/module.h>
93 #include <linux/moduleparam.h>
94 #include <linux/pci.h>
95 #include <linux/fs.h>
96 #include <linux/poll.h>
97 #include <linux/irq.h>
98 #include <asm/byteorder.h>
99 #include <asm/atomic.h>
100 #include <asm/uaccess.h>
101 #include <linux/delay.h>
102 #include <linux/spinlock.h>
103
104 #include <asm/pgtable.h>
105 #include <asm/page.h>
106 #include <linux/sched.h>
107 #include <linux/types.h>
108 #include <linux/vmalloc.h>
109 #include <linux/init.h>
110
111 #ifdef CONFIG_PPC_PMAC
112 #include <asm/machdep.h>
113 #include <asm/pmac_feature.h>
114 #include <asm/prom.h>
115 #include <asm/pci-bridge.h>
116 #endif
117
118 #include "csr1212.h"
119 #include "ieee1394.h"
120 #include "ieee1394_types.h"
121 #include "hosts.h"
122 #include "dma.h"
123 #include "iso.h"
124 #include "ieee1394_core.h"
125 #include "highlevel.h"
126 #include "ohci1394.h"
127
/* Map the subsystem-wide verbose-debug option onto this driver's own
 * debug switch. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

/* Make sure we own the DBGMSG name, whatever a shared header defined. */
#ifdef DBGMSG
#undef DBGMSG
#endif

/* Per-card debug message; relies on an `ohci` variable being in scope
 * at every call site.  Compiles to nothing unless OHCI1394_DEBUG. */
#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...)
#endif

/* DMA mapping audit helpers: the global_outstanding_dmas counter is
 * incremented on every OHCI_DMA_ALLOC and decremented on every
 * OHCI_DMA_FREE, so a nonzero count at teardown flags a mapping leak.
 * Both expand to nothing when the config option is off. */
#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		--global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...)
#define OHCI_DMA_FREE(fmt, args...)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information (needs `ohci` in scope, like DBGMSG) */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
163
/* Driver version string, discarded after module init (__devinitdata). */
static char version[] __devinitdata =
	"$Rev: 1223 $ Ben Collins <bcollins@debian.org>";

/* Module Parameters */
/* When nonzero, physical-DMA (direct remote access to host memory) is
 * enabled.  Writable at runtime via sysfs (mode 0644). */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");

/* Forward declarations for the DMA context helpers defined later in
 * this file. */
static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);
186
187 #ifndef __LITTLE_ENDIAN
188 static unsigned hdr_sizes[] =
189 {
190         3,      /* TCODE_WRITEQ */
191         4,      /* TCODE_WRITEB */
192         3,      /* TCODE_WRITE_RESPONSE */
193         0,      /* ??? */
194         3,      /* TCODE_READQ */
195         4,      /* TCODE_READB */
196         3,      /* TCODE_READQ_RESPONSE */
197         4,      /* TCODE_READB_RESPONSE */
198         1,      /* TCODE_CYCLE_START (???) */
199         4,      /* TCODE_LOCK_REQUEST */
200         2,      /* TCODE_ISO_DATA */
201         4,      /* TCODE_LOCK_RESPONSE */
202 };
203
204 /* Swap headers */
205 static inline void packet_swab(quadlet_t *data, int tcode)
206 {
207         size_t size = hdr_sizes[tcode];
208
209         if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
210                 return;
211
212         while (size--)
213                 data[size] = swab32(data[size]);
214 }
215 #else
216 /* Don't waste cycles on same sex byte swaps */
217 #define packet_swab(w,x)
218 #endif /* !LITTLE_ENDIAN */
219
220 /***********************************
221  * IEEE-1394 functionality section *
222  ***********************************/
223
/* Read PHY register @addr through the OHCI PhyControl register.
 *
 * Serialized against set_phy_reg() by ohci->phy_reg_lock.  On timeout
 * an error is logged and whatever value was last latched is returned
 * anyway.  Returns the 8-bit PHY register contents. */
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	/* 0x00008000 requests a PHY register read of `addr`. */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	/* Poll for completion (bit 31 set), up to OHCI_LOOP_COUNT ms. */
	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	/* The read-back data lives in bits 16-23 of PhyControl. */
	return (r & 0x00ff0000) >> 16;
}
251
252 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
253 {
254         int i;
255         unsigned long flags;
256         u32 r = 0;
257
258         spin_lock_irqsave (&ohci->phy_reg_lock, flags);
259
260         reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
261
262         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
263                 r = reg_read(ohci, OHCI1394_PhyControl);
264                 if (!(r & 0x00004000))
265                         break;
266
267                 mdelay(1);
268         }
269
270         if (i == OHCI_LOOP_COUNT)
271                 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
272                        r, r & 0x00004000, i);
273
274         spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
275
276         return;
277 }
278
279 /* Or's our value into the current value */
280 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
281 {
282         u8 old;
283
284         old = get_phy_reg (ohci, addr);
285         old |= data;
286         set_phy_reg (ohci, addr, old);
287
288         return;
289 }
290
/* Parse the self-ID buffer the controller filled after a bus reset and
 * hand each valid self-ID packet to the ieee1394 core.
 *
 * @phyid:  our own physical node ID (used only for a debug message)
 * @isroot: unused in this function
 *
 * On a corrupted reception, forces another bus reset (bit 0x40 in PHY
 * register 1) up to OHCI1394_MAX_SELF_ID_ERRORS times before giving up. */
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	/* Bit 31 of SelfIDCount flags a reception error; bits 16-23
	 * (the self-ID generation, per OHCI) must also match the copy
	 * stored in the first quadlet of the buffer. */
	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response.  */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	/* Payload size in quadlets (bits 2-12), minus the header
	 * quadlet we just validated; then skip past that header. */
	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	/* Each self-ID packet occupies two quadlets: the packet itself
	 * followed by its bitwise complement for error checking. */
	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			/* Bits 24-29 carry the sender's phy ID. */
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
356
357 static void ohci_soft_reset(struct ti_ohci *ohci) {
358         int i;
359
360         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
361
362         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
363                 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
364                         break;
365                 mdelay(1);
366         }
367         DBGMSG ("Soft reset finished");
368 }
369
370
/* Generate the dma receive prgs and start the context.
 *
 * Builds one INPUT_MORE descriptor per receive buffer, linked into a
 * circular list, then points the context's command pointer at the
 * first descriptor and sets the RUN bit.  @generate_irq selects
 * whether each completed descriptor raises an interrupt. */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	/* Make sure the context is halted before reprogramming it. */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			/* Branch to the next descriptor; the low bit is
			 * the descriptor count (Z=1, per OHCI). */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			/* Last descriptor points back to the first but
			 * with Z=0, so the context stalls there until
			 * software re-arms the branch. */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		/* resCount starts equal to the buffer size (nothing
		 * received yet). */
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	/* Start consuming from the first buffer, offset zero. */
	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
430
431 /* Initialize the dma transmit context */
432 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
433 {
434         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
435
436         /* Stop the context */
437         ohci1394_stop_context(ohci, d->ctrlClear, NULL);
438
439         d->prg_ind = 0;
440         d->sent_ind = 0;
441         d->free_prgs = d->num_desc;
442         d->branchAddrPtr = NULL;
443         INIT_LIST_HEAD(&d->fifo_list);
444         INIT_LIST_HEAD(&d->pending_list);
445
446         if (d->type == DMA_CTX_ISO) {
447                 /* enable interrupts */
448                 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
449         }
450
451         DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
452 }
453
454 /* Count the number of available iso contexts */
455 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
456 {
457         int i,ctx=0;
458         u32 tmp;
459
460         reg_write(ohci, reg, 0xffffffff);
461         tmp = reg_read(ohci, reg);
462
463         DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
464
465         /* Count the number of contexts */
466         for (i=0; i<32; i++) {
467                 if (tmp & 1) ctx++;
468                 tmp >>= 1;
469         }
470         return ctx;
471 }
472
/* Global initialization.
 *
 * Brings the controller from soft-reset state to a fully running link:
 * programs bus options, self-ID buffer, config ROM, DMA contexts,
 * retries and the interrupt mask, enables the link, then enables any
 * connected PHY ports and sanity-checks the serial EEPROM values. */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);
	spin_lock_init(&ohci->event_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0xE0000000; /* Enable IRMC, CMC and ISC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets. */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	set_phy_reg_mask(ohci, 4, 0xc0);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size: 2^(max_rec+1) from BusOptions
	 * bits 12-15. */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	if (ohci->max_packet_size < 512) {
		HPSB_ERR("warning: Invalid max packet size of %d, setting to 512",
			     ohci->max_packet_size);
		ohci->max_packet_size = 512;
	}

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/*
	 * Accept AT requests from all nodes. This probably
	 * will have to be controlled from the subsystem
	 * on a per node basis.
	 */
	reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
	sprintf (irq_buf, "%d", ohci->dev->irq);
#else
	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s]  "
	      "MMIO=[%lx-%lx]  Max Packet=[%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		/* Bit 0x20 = port disabled; clear bit 0 to enable it.
		 * NOTE(review): presumably per the PHY register map —
		 * confirm against the PHY datasheet. */
		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check.
	 * NOTE(review): max_packet_size was already clamped to >= 512
	 * above, so only the "> 4096" arm of this test can still fire
	 * at this point. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to setting max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		/* Wait (bounded) for the GUID ROM address reset to finish. */
		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		/* Dump the first 0x20 EEPROM bytes for the bug report. */
		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
637
638 /*
639  * Insert a packet in the DMA fifo and generate the DMA prg
640  * FIXME: rewrite the program in order to accept packets crossing
641  *        page boundaries.
642  *        check also that a single dma descriptor doesn't cross a
643  *        page boundary.
644  */
/* Build the OUTPUT_MORE/OUTPUT_LAST DMA program for @packet in slot
 * d->prg_ind, chain it to the previous program via branchAddrPtr, and
 * queue the packet on d->fifo_list.
 * NOTE(review): called from dma_trm_flush(), which documents that
 * d->lock must be held. */
static void insert_packet(struct ti_ohci *ohci,
                          struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
        u32 cycleTimer;
        int idx = d->prg_ind;

        DBGMSG("Inserting packet for node " NODE_BUS_FMT
               ", tlabel=%d, tcode=0x%x, speed=%d",
               NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
               packet->tcode, packet->speed_code);

        d->prg_cpu[idx]->begin.address = 0;
        d->prg_cpu[idx]->begin.branchAddress = 0;

        if (d->type == DMA_CTX_ASYNC_RESP) {
                /*
                 * For response packets, we need to put a timeout value in
                 * the 16 lower bits of the status... let's try 1 sec timeout
                 */
                cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
                d->prg_cpu[idx]->begin.status = cpu_to_le32(
                        (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
                        ((cycleTimer&0x01fff000)>>12));

                DBGMSG("cycleTimer: %08x timeStamp: %08x",
                       cycleTimer, d->prg_cpu[idx]->begin.status);
        } else 
                d->prg_cpu[idx]->begin.status = 0;

        if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

                if (packet->type == hpsb_raw) {
                        /* Raw (PHY) packets get a synthetic PHY-tcode
                         * first quadlet, then the two header quadlets. */
                        d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
                        d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
                        d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
                } else {
                        /* First quadlet: speed code in the upper half,
                         * low half of header[0] below. */
                        d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
                                (packet->header[0] & 0xFFFF);

                        /* NOTE(review): this branch tests TCODE_ISO_DATA
                         * while the control-descriptor branch below tests
                         * TCODE_STREAM_DATA — presumably aliases for the
                         * same tcode value; confirm in ieee1394.h. */
                        if (packet->tcode == TCODE_ISO_DATA) {
                                /* Sending an async stream packet */
                                d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
                        } else {
                                /* Sending a normal async request or response */
                                d->prg_cpu[idx]->data[1] =
                                        (packet->header[1] & 0xFFFF) |
                                        (packet->header[0] & 0xFFFF0000);
                                d->prg_cpu[idx]->data[2] = packet->header[2];
                                d->prg_cpu[idx]->data[3] = packet->header[3];
                        }
                        /* Byte-swap the header on big-endian hosts. */
                        packet_swab(d->prg_cpu[idx]->data, packet->tcode);
                }

                if (packet->data_size) { /* block transmit */
                        /* Immediate header descriptor: 0x8 bytes for a
                         * stream header, 0x10 for a full async header. */
                        if (packet->tcode == TCODE_STREAM_DATA){
                                d->prg_cpu[idx]->begin.control =
                                        cpu_to_le32(DMA_CTL_OUTPUT_MORE |
                                                    DMA_CTL_IMMEDIATE | 0x8);
                        } else {
                                d->prg_cpu[idx]->begin.control =
                                        cpu_to_le32(DMA_CTL_OUTPUT_MORE |
                                                    DMA_CTL_IMMEDIATE | 0x10);
                        }
                        d->prg_cpu[idx]->end.control =
                                cpu_to_le32(DMA_CTL_OUTPUT_LAST |
                                            DMA_CTL_IRQ |
                                            DMA_CTL_BRANCH |
                                            packet->data_size);
                        /*
                         * Check that the packet data buffer
                         * does not cross a page boundary.
                         *
                         * XXX Fix this some day. eth1394 seems to trigger
                         * it, but ignoring it doesn't seem to cause a
                         * problem.
                         */
#if 0
                        if (cross_bound((unsigned long)packet->data,
                                        packet->data_size)>0) {
                                /* FIXME: do something about it */
                                PRINT(KERN_ERR,
                                      "%s: packet data addr: %p size %Zd bytes "
                                      "cross page boundary", __FUNCTION__,
                                      packet->data, packet->data_size);
                        }
#endif
                        /* Map the payload for device reads; unmapping is
                         * the completion path's responsibility. */
                        d->prg_cpu[idx]->end.address = cpu_to_le32(
                                pci_map_single(ohci->dev, packet->data,
                                               packet->data_size,
                                               PCI_DMA_TODEVICE));
                        OHCI_DMA_ALLOC("single, block transmit packet");

                        d->prg_cpu[idx]->end.branchAddress = 0;
                        d->prg_cpu[idx]->end.status = 0;
                        /* Chain the previous program to this one (Z=3:
                         * three descriptors at the branch target). */
                        if (d->branchAddrPtr)
                                *(d->branchAddrPtr) =
                                        cpu_to_le32(d->prg_bus[idx] | 0x3);
                        d->branchAddrPtr =
                                &(d->prg_cpu[idx]->end.branchAddress);
                } else { /* quadlet transmit */
                        /* Raw packets carry one extra quadlet of header. */
                        if (packet->type == hpsb_raw)
                                d->prg_cpu[idx]->begin.control =
                                        cpu_to_le32(DMA_CTL_OUTPUT_LAST |
                                                    DMA_CTL_IMMEDIATE |
                                                    DMA_CTL_IRQ |
                                                    DMA_CTL_BRANCH |
                                                    (packet->header_size + 4));
                        else
                                d->prg_cpu[idx]->begin.control =
                                        cpu_to_le32(DMA_CTL_OUTPUT_LAST |
                                                    DMA_CTL_IMMEDIATE |
                                                    DMA_CTL_IRQ |
                                                    DMA_CTL_BRANCH |
                                                    packet->header_size);

                        /* Chain from the previous program (Z=2 here:
                         * header-only program). */
                        if (d->branchAddrPtr)
                                *(d->branchAddrPtr) =
                                        cpu_to_le32(d->prg_bus[idx] | 0x2);
                        d->branchAddrPtr =
                                &(d->prg_cpu[idx]->begin.branchAddress);
                }

        } else { /* iso packet */
                d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
                        (packet->header[0] & 0xFFFF);
                d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
                packet_swab(d->prg_cpu[idx]->data, packet->tcode);

                d->prg_cpu[idx]->begin.control =
                        cpu_to_le32(DMA_CTL_OUTPUT_MORE |
                                    DMA_CTL_IMMEDIATE | 0x8);
                d->prg_cpu[idx]->end.control =
                        cpu_to_le32(DMA_CTL_OUTPUT_LAST |
                                    DMA_CTL_UPDATE |
                                    DMA_CTL_IRQ |
                                    DMA_CTL_BRANCH |
                                    packet->data_size);
                d->prg_cpu[idx]->end.address = cpu_to_le32(
                                pci_map_single(ohci->dev, packet->data,
                                packet->data_size, PCI_DMA_TODEVICE));
                OHCI_DMA_ALLOC("single, iso transmit packet");

                d->prg_cpu[idx]->end.branchAddress = 0;
                d->prg_cpu[idx]->end.status = 0;
                DBGMSG("Iso xmit context info: header[%08x %08x]\n"
                       "                       begin=%08x %08x %08x %08x\n"
                       "                             %08x %08x %08x %08x\n"
                       "                       end  =%08x %08x %08x %08x",
                       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
                       d->prg_cpu[idx]->begin.control,
                       d->prg_cpu[idx]->begin.address,
                       d->prg_cpu[idx]->begin.branchAddress,
                       d->prg_cpu[idx]->begin.status,
                       d->prg_cpu[idx]->data[0],
                       d->prg_cpu[idx]->data[1],
                       d->prg_cpu[idx]->data[2],
                       d->prg_cpu[idx]->data[3],
                       d->prg_cpu[idx]->end.control,
                       d->prg_cpu[idx]->end.address,
                       d->prg_cpu[idx]->end.branchAddress,
                       d->prg_cpu[idx]->end.status);
                if (d->branchAddrPtr)
                        *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
                d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
        }
        /* One descriptor slot consumed; caller checks free_prgs before
         * calling us (see dma_trm_flush). */
        d->free_prgs--;

        /* queue the packet in the appropriate context queue */
        list_add_tail(&packet->driver_list, &d->fifo_list);
        d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
816
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;	/* index of the first program queued by this flush */
	int z = 0;		/* Z value (descriptor count) of that first program;
				   stays 0 if nothing gets queued at all */

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only: 3 descriptors if it carries a
		   data block, 2 if it is header-only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {	/* run bit clear */
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		/* point the context at the first program queued above */
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);	/* set run */
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))	/* not active */
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);	/* wake */
	}

	return;
}
871
872 /* Transmission of an async or iso packet */
873 static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
874 {
875         struct ti_ohci *ohci = host->hostdata;
876         struct dma_trm_ctx *d;
877         unsigned long flags;
878
879         if (packet->data_size > ohci->max_packet_size) {
880                 PRINT(KERN_ERR,
881                       "Transmit packet size %Zd is too big",
882                       packet->data_size);
883                 return -EOVERFLOW;
884         }
885
886         /* Decide whether we have an iso, a request, or a response packet */
887         if (packet->type == hpsb_raw)
888                 d = &ohci->at_req_context;
889         else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
890                 /* The legacy IT DMA context is initialized on first
891                  * use.  However, the alloc cannot be run from
892                  * interrupt context, so we bail out if that is the
893                  * case. I don't see anyone sending ISO packets from
894                  * interrupt context anyway... */
895
896                 if (ohci->it_legacy_context.ohci == NULL) {
897                         if (in_interrupt()) {
898                                 PRINT(KERN_ERR,
899                                       "legacy IT context cannot be initialized during interrupt");
900                                 return -EINVAL;
901                         }
902
903                         if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
904                                               DMA_CTX_ISO, 0, IT_NUM_DESC,
905                                               OHCI1394_IsoXmitContextBase) < 0) {
906                                 PRINT(KERN_ERR,
907                                       "error initializing legacy IT context");
908                                 return -ENOMEM;
909                         }
910
911                         initialize_dma_trm_ctx(&ohci->it_legacy_context);
912                 }
913
914                 d = &ohci->it_legacy_context;
915         } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
916                 d = &ohci->at_resp_context;
917         else
918                 d = &ohci->at_req_context;
919
920         spin_lock_irqsave(&d->lock,flags);
921
922         list_add_tail(&packet->driver_list, &d->pending_list);
923
924         dma_trm_flush(ohci, d);
925
926         spin_unlock_irqrestore(&d->lock,flags);
927
928         return 0;
929 }
930
/* Miscellaneous host-controller operations, multiplexed by 'cmd';
 * 'arg' is interpreted per command.  Returns 0 on success (or the
 * cycle timer value for GET_CYCLE_COUNTER), negative on error. */
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		/* All reset flavors are driven through PHY register 1
		   (IBR = 0x40, RHB = 0x80) and PHY register 5 (ISBR = 0x40);
		   the *_FORCE_ROOT / *_NO_FORCE_ROOT variants additionally
		   set or clear the root holdoff bit first. */
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		/* return the raw 32-bit isochronous cycle timer */
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		/* not supported by this driver */
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		/* drop everything queued on both AT contexts */
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	case ISO_LISTEN_CHANNEL:
	{
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: IS0 listen channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		/* activate the legacy IR context (lazily, on first listen) */
		if (ohci->ir_legacy_context.ohci == NULL) {
			if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
					      DMA_CTX_ISO, 0, IR_NUM_DESC,
					      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
					      OHCI1394_IsoRcvContextBase) < 0) {
				PRINT(KERN_ERR, "%s: failed to allocate an IR context",
				      __FUNCTION__);
				return -ENOMEM;
			}
			ohci->ir_legacy_channels = 0;
			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);

			DBGMSG("ISO receive legacy context activated");
		}

		mask = (u64)0x1<<arg;

		/* channel bookkeeping is protected by IR_channel_lock */
		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			PRINT(KERN_ERR,
			      "%s: IS0 listen channel %d is already used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		/* channels 0-31 live in the Lo mask register, 32-63 in Hi */
		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening enabled on channel %d", arg);
		break;
	}
	case ISO_UNLISTEN_CHANNEL:
	{
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: IS0 unlisten channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			PRINT(KERN_ERR,
			      "%s: IS0 unlisten channel %d is not used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening disabled on channel %d", arg);

		/* tear the legacy IR context back down once the last
		   legacy channel is gone */
		if (ohci->ir_legacy_channels == 0) {
			stop_dma_rcv_ctx(&ohci->ir_legacy_context);
			free_dma_rcv_ctx(&ohci->ir_legacy_context);
			DBGMSG("ISO receive legacy context deactivated");
		}
		break;
	}
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}
1132
1133 /***********************************
1134  * rawiso ISO reception            *
1135  ***********************************/
1136
1137 /*
1138   We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1139   buffer is split into "blocks" (regions described by one DMA
1140   descriptor). Each block must be one page or less in size, and
1141   must not cross a page boundary.
1142
1143   There is one little wrinkle with buffer-fill mode: a packet that
1144   starts in the final block may wrap around into the first block. But
1145   the user API expects all packets to be contiguous. Our solution is
1146   to keep the very last page of the DMA buffer in reserve - if a
1147   packet spans the gap, we copy its tail into this page.
1148 */
1149
/* per-context state for rawiso reception (see the mode discussion above) */
struct ohci_iso_recv {
	struct ti_ohci *ohci;	/* owning host controller */

	struct ohci1394_iso_tasklet task;	/* bottom half for completed blocks */
	int task_active;	/* nonzero once the tasklet is registered */

	/* buffer-fill packs packets back-to-back in the buffer;
	   packet-per-buffer gives each packet its own block */
	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers (offsets for this context) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1191
1192 static void ohci_iso_recv_task(unsigned long data);
1193 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1194 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1195 static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1196 static void ohci_iso_recv_program(struct hpsb_iso *iso);
1197
/* Allocate and set up the per-context receive state: choose the DMA
 * mode, size the block ring and interrupt batching, allocate the DMA
 * program, register the completion tasklet and write the descriptor
 * program.  Returns 0 or -ENOMEM; on any failure everything already
 * set up is torn down via ohci_iso_recv_shutdown(). */
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
							((recv->nblocks+1)/iso->buf_packets);
		/* clamp: at most one interrupt per quarter of the ring,
		   at least one block between interrupts */
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		/* packet-per-buffer: one block per packet */
		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
		recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		/* smallest power of two (>= 8) that holds one max-size packet */
		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		    recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		   recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				 sizeof(struct dma_cmd) * recv->nblocks,
				 recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
		goto err;

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
1326
/* Quiesce the receive context: mask its interrupt, then halt the DMA.
 * Descriptor memory stays allocated (see ohci_iso_recv_shutdown). */
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}
1337
/* Full teardown counterpart of ohci_iso_recv_init(): stop the context
 * and unregister the tasklet if it was started, then free the DMA
 * program and the per-context state.  Also used as the init error
 * path, so it tolerates a partially-initialized 'recv'. */
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}
1352
1353 /* set up a "gapped" ring buffer DMA program */
1354 static void ohci_iso_recv_program(struct hpsb_iso *iso)
1355 {
1356         struct ohci_iso_recv *recv = iso->hostdata;
1357         int blk;
1358
1359         /* address of 'branch' field in previous DMA descriptor */
1360         u32 *prev_branch = NULL;
1361
1362         for (blk = 0; blk < recv->nblocks; blk++) {
1363                 u32 control;
1364
1365                 /* the DMA descriptor */
1366                 struct dma_cmd *cmd = &recv->block[blk];
1367
1368                 /* offset of the DMA descriptor relative to the DMA prog buffer */
1369                 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1370
1371                 /* offset of this packet's data within the DMA buffer */
1372                 unsigned long buf_offset = blk * recv->buf_stride;
1373
1374                 if (recv->dma_mode == BUFFER_FILL_MODE) {
1375                         control = 2 << 28; /* INPUT_MORE */
1376                 } else {
1377                         control = 3 << 28; /* INPUT_LAST */
1378                 }
1379
1380                 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1381
1382                 /* interrupt on last block, and at intervals */
1383                 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1384                         control |= 3 << 20; /* want interrupt */
1385                 }
1386
1387                 control |= 3 << 18; /* enable branch to address */
1388                 control |= recv->buf_stride;
1389
1390                 cmd->control = cpu_to_le32(control);
1391                 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1392                 cmd->branchAddress = 0; /* filled in on next loop */
1393                 cmd->status = cpu_to_le32(recv->buf_stride);
1394
1395                 /* link the previous descriptor to this one */
1396                 if (prev_branch) {
1397                         *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1398                 }
1399
1400                 prev_branch = &cmd->branchAddress;
1401         }
1402
1403         /* the final descriptor's branch address and Z should be left at 0 */
1404 }
1405
1406 /* listen or unlisten to a specific channel (multi-channel mode only) */
1407 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1408 {
1409         struct ohci_iso_recv *recv = iso->hostdata;
1410         int reg, i;
1411
1412         if (channel < 32) {
1413                 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1414                 i = channel;
1415         } else {
1416                 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1417                 i = channel - 32;
1418         }
1419
1420         reg_write(recv->ohci, reg, (1 << i));
1421
1422         /* issue a dummy read to force all PCI writes to be posted immediately */
1423         mb();
1424         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1425 }
1426
1427 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1428 {
1429         struct ohci_iso_recv *recv = iso->hostdata;
1430         int i;
1431
1432         for (i = 0; i < 64; i++) {
1433                 if (mask & (1ULL << i)) {
1434                         if (i < 32)
1435                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1436                         else
1437                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1438                 } else {
1439                         if (i < 32)
1440                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1441                         else
1442                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1443                 }
1444         }
1445
1446         /* issue a dummy read to force all PCI writes to be posted immediately */
1447         mb();
1448         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1449 }
1450
/* Program the context-match and command-pointer registers and start the
 * IR context.  cycle == -1 means start immediately, sync == -1 means no
 * sync-field match.  Returns 0 on success, -1 if the context refuses
 * to run. */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	/* NOTE(review): 'ohci' appears unused here directly — presumably
	   referenced by the PRINT macro below; confirm before removing */
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	/* start from a clean ContextControl */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);	/* buffer-fill mode bit */

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
1541
/* Hand a consumed block back to the hardware by making its descriptor
 * the new tail of the ring: the previous descriptor is branched to it,
 * and its own branch is cleared so DMA stops there until the next
 * release.  Finally wake the context in case it went dead. */
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	/* ring wrap: block 0's predecessor is the last block */
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);	/* reset resCount */

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	/* descriptor updates must be visible before waking the context */
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}
1575
/* Account for the buffer space consumed by one released packet in
 * buffer-fill mode; once a whole block's worth of bytes has been
 * released, return the block(s) to the DMA program. */
static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	int len;

	/* release the memory where the packet was */
	len = info->len;

	/* add the wasted space for padding to 4 bytes */
	if (len % 4)
		len += 4 - (len % 4);

	/* add 8 bytes for the OHCI DMA data format overhead
	   (4-byte packet header + 4-byte xferStatus/timeStamp trailer) */
	len += 8;

	recv->released_bytes += len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}
1600
1601 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1602 {
1603         struct ohci_iso_recv *recv = iso->hostdata;
1604         if (recv->dma_mode == BUFFER_FILL_MODE) {
1605                 ohci_iso_recv_bufferfill_release(recv, info);
1606         } else {
1607                 ohci_iso_recv_release_block(recv, info - iso->infos);
1608         }
1609 }
1610
/* parse all packets from blocks that have been fully received */
/* Walks the buffer-fill ring starting at recv->dma_offset, extracting the
 * OHCI per-packet header (len/tag/channel/sy), the payload offset, and the
 * trailing timestamp, and delivers each packet upward via
 * hpsb_iso_packet_received(). Stops at the block the DMA engine is still
 * writing (recv->block_dma). Handles payloads that wrap from the last to
 * the first block by copying the tail into the guard page. */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		/* NOTE(review): a bogus length is only reported here; parsing
		   continues and dma_offset may drift out of sync with the
		   stream afterwards - confirm whether bailing out would be safer */
		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0  && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF; /* low 13 bits = cycleCount */

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
1716
1717 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1718 {
1719         int loop;
1720         struct ti_ohci *ohci = recv->ohci;
1721
1722         /* loop over all blocks */
1723         for (loop = 0; loop < recv->nblocks; loop++) {
1724
1725                 /* check block_dma to see if it's done */
1726                 struct dma_cmd *im = &recv->block[recv->block_dma];
1727
1728                 /* check the DMA descriptor for new writes to xferStatus */
1729                 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1730
1731                 /* rescount is the number of bytes *remaining to be written* in the block */
1732                 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1733
1734                 unsigned char event = xferstatus & 0x1F;
1735
1736                 if (!event) {
1737                         /* nothing has happened to this block yet */
1738                         break;
1739                 }
1740
1741                 if (event != 0x11) {
1742                         atomic_inc(&iso->overflows);
1743                         PRINT(KERN_ERR,
1744                               "IR DMA error - OHCI error code 0x%02x\n", event);
1745                 }
1746
1747                 if (rescount != 0) {
1748                         /* the card is still writing to this block;
1749                            we can't touch it until it's done */
1750                         break;
1751                 }
1752
1753                 /* OK, the block is finished... */
1754
1755                 /* sync our view of the block */
1756                 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1757
1758                 /* reset the DMA descriptor */
1759                 im->status = recv->buf_stride;
1760
1761                 /* advance block_dma */
1762                 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1763
1764                 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1765                         atomic_inc(&iso->overflows);
1766                         DBGMSG("ISO reception overflow - "
1767                                "ran out of DMA blocks");
1768                 }
1769         }
1770
1771         /* parse any packets that have arrived */
1772         ohci_iso_recv_bufferfill_parse(iso, recv);
1773 }
1774
1775 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1776 {
1777         int count;
1778         int wake = 0;
1779         struct ti_ohci *ohci = recv->ohci;
1780
1781         /* loop over the entire buffer */
1782         for (count = 0; count < recv->nblocks; count++) {
1783                 u32 packet_len = 0;
1784
1785                 /* pointer to the DMA descriptor */
1786                 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1787
1788                 /* check the DMA descriptor for new writes to xferStatus */
1789                 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1790                 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1791
1792                 unsigned char event = xferstatus & 0x1F;
1793
1794                 if (!event) {
1795                         /* this packet hasn't come in yet; we are done for now */
1796                         goto out;
1797                 }
1798
1799                 if (event == 0x11) {
1800                         /* packet received successfully! */
1801
1802                         /* rescount is the number of bytes *remaining* in the packet buffer,
1803                            after the packet was written */
1804                         packet_len = recv->buf_stride - rescount;
1805
1806                 } else if (event == 0x02) {
1807                         PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1808                 } else if (event) {
1809                         PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1810                 }
1811
1812                 /* sync our view of the buffer */
1813                 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1814
1815                 /* record the per-packet info */
1816                 {
1817                         /* iso header is 8 bytes ahead of the data payload */
1818                         unsigned char *hdr;
1819
1820                         unsigned int offset;
1821                         unsigned short cycle;
1822                         unsigned char channel, tag, sy;
1823
1824                         offset = iso->pkt_dma * recv->buf_stride;
1825                         hdr = iso->data_buf.kvirt + offset;
1826
1827                         /* skip iso header */
1828                         offset += 8;
1829                         packet_len -= 8;
1830
1831                         cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1832                         channel = hdr[5] & 0x3F;
1833                         tag = hdr[5] >> 6;
1834                         sy = hdr[4] & 0xF;
1835
1836                         hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
1837                 }
1838
1839                 /* reset the DMA descriptor */
1840                 il->status = recv->buf_stride;
1841
1842                 wake = 1;
1843                 recv->block_dma = iso->pkt_dma;
1844         }
1845
1846 out:
1847         if (wake)
1848                 hpsb_iso_wake(iso);
1849 }
1850
1851 static void ohci_iso_recv_task(unsigned long data)
1852 {
1853         struct hpsb_iso *iso = (struct hpsb_iso*) data;
1854         struct ohci_iso_recv *recv = iso->hostdata;
1855
1856         if (recv->dma_mode == BUFFER_FILL_MODE)
1857                 ohci_iso_recv_bufferfill_task(iso, recv);
1858         else
1859                 ohci_iso_recv_packetperbuf_task(iso, recv);
1860 }
1861
1862 /***********************************
1863  * rawiso ISO transmission         *
1864  ***********************************/
1865
/* per-context state for rawiso transmission */
struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;      /* DMA program: array of iso_xmit_cmd */
	struct ohci1394_iso_tasklet task; /* completion tasklet */
	int task_active;                  /* nonzero once the tasklet is registered */

	/* register offsets of this IT context (16 bytes apart per context) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};
1876
1877 /* transmission DMA program:
1878    one OUTPUT_MORE_IMMEDIATE for the IT header
1879    one OUTPUT_LAST for the buffer data */
1880
struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate; /* carries the 8-byte IT header */
	u8 iso_hdr[8];                        /* immediate data: iso packet header */
	u32 unused[2];                        /* pad descriptor pair to 16-byte units */
	struct dma_cmd output_last;           /* points at the payload buffer */
};
1887
1888 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1889 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1890 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1891 static void ohci_iso_xmit_task(unsigned long data);
1892
/* Allocate per-iso transmit state: the DMA program (one iso_xmit_cmd per
 * buffer packet), an OHCI IT context/tasklet, and the context register
 * offsets. Returns 0 on success or -ENOMEM on failure; the error path
 * calls ohci_iso_xmit_shutdown() to unwind whatever was set up. */
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0; /* shutdown skips stop/unregister until this is set */

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0)
		goto err;

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
1935
/* Disable this IT context's interrupt and halt its DMA. A failed stop is
 * only reported; there is no recovery path (see "DMA error recovery" in
 * the not-implemented list at the top of the file). */
static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}
1952
/* Tear down transmit state created by ohci_iso_xmit_init(): stop the
 * context and unregister the tasklet (if active), then free the DMA
 * program and the hostdata itself. Safe on partially-initialized state. */
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}
1967
/* Transmit completion tasklet: scan descriptors starting at pkt_dma for
 * packets the card has sent (xferStatus written), report each completion
 * (with its timestamp cycle) to the subsystem, and rearm the descriptor. */
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8  event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle (low 13 bits of the timeStamp written by the card) */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
2012
/* Append one packet (described by info) to the IT DMA chain: build its
 * OUTPUT_MORE_IMMEDIATE (carrying the iso header) + OUTPUT_LAST pair at
 * iso->first_packet, then link the previous descriptor to it and wake
 * the context. Returns 0, or -EINVAL if the payload crosses a page. */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	/* local copies, so the user can't change them via the mmap'ed area */
	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
2108
/* Start the IT context: point CommandPtr at the first descriptor block,
 * optionally arm a cycleMatch start (cycle != -1), enable the context's
 * interrupt, and set RUN. Returns 0 on success, -1 if the context did
 * not go active. */
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		/* cycleMatchEnable (bit 31) + 15-bit match in bits 30:16 */
		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	mb();

	/* wait 100 usec to give the card time to go active */
	udelay(100);

	/* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}
2157
2158 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2159 {
2160
2161         switch(cmd) {
2162         case XMIT_INIT:
2163                 return ohci_iso_xmit_init(iso);
2164         case XMIT_START:
2165                 return ohci_iso_xmit_start(iso, arg);
2166         case XMIT_STOP:
2167                 ohci_iso_xmit_stop(iso);
2168                 return 0;
2169         case XMIT_QUEUE:
2170                 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2171         case XMIT_SHUTDOWN:
2172                 ohci_iso_xmit_shutdown(iso);
2173                 return 0;
2174
2175         case RECV_INIT:
2176                 return ohci_iso_recv_init(iso);
2177         case RECV_START: {
2178                 int *args = (int*) arg;
2179                 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2180         }
2181         case RECV_STOP:
2182                 ohci_iso_recv_stop(iso);
2183                 return 0;
2184         case RECV_RELEASE:
2185                 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2186                 return 0;
2187         case RECV_FLUSH:
2188                 ohci_iso_recv_task((unsigned long) iso);
2189                 return 0;
2190         case RECV_SHUTDOWN:
2191                 ohci_iso_recv_shutdown(iso);
2192                 return 0;
2193         case RECV_LISTEN_CHANNEL:
2194                 ohci_iso_recv_change_channel(iso, arg, 1);
2195                 return 0;
2196         case RECV_UNLISTEN_CHANNEL:
2197                 ohci_iso_recv_change_channel(iso, arg, 0);
2198                 return 0;
2199         case RECV_SET_CHANNEL_MASK:
2200                 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2201                 return 0;
2202
2203         default:
2204                 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2205                         cmd);
2206                 break;
2207         }
2208         return -EINVAL;
2209 }
2210
2211 /***************************************
2212  * IEEE-1394 functionality section END *
2213  ***************************************/
2214
2215
2216 /********************************************************
2217  * Global stuff (interrupt handler, init/shutdown code) *
2218  ********************************************************/
2219
/* Reset an async transmit DMA context: stop it, reclaim every packet that
 * was queued or in flight, and report each to the subsystem as aborted.
 * Callbacks are run after dropping d->lock, on a private list. */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);
	struct ti_ohci *ohci = d->ohci;
	struct hpsb_packet *packet, *ptmp;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	list_splice(&d->fifo_list, &packet_list);
	list_splice(&d->pending_list, &packet_list);
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	/* back to a pristine program state: nothing chained, nothing sent */
	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;
	d->free_prgs = d->num_desc;

	spin_unlock_irqrestore(&d->lock, flags);

	if (list_empty(&packet_list))
		return;

	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

	/* Now process subsystem callbacks for the packets from this
	 * context. */
	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
	}
}
2258
/* Schedule the iso tasklets whose context bits are set in the rx/tx
 * interrupt event words. Called from the interrupt handler. */
static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
				       quadlet_t rx_event,
				       quadlet_t tx_event)
{
	struct ohci1394_iso_tasklet *t;
	unsigned long mask;

	spin_lock(&ohci->iso_tasklet_list_lock);

	list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
		mask = 1 << t->context;

		/* NOTE(review): only the transmit branch checks t->type;
		   the else-branch schedules ANY tasklet whose bit is set in
		   rx_event, regardless of type - confirm this is intended */
		if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
			tasklet_schedule(&t->tasklet);
		else if (rx_event & mask)
			tasklet_schedule(&t->tasklet);
	}

	spin_unlock(&ohci->iso_tasklet_list_lock);

}
2280
2281 static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
2282                              struct pt_regs *regs_are_unused)
2283 {
2284         quadlet_t event, node_id;
2285         struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2286         struct hpsb_host *host = ohci->host;
2287         int phyid = -1, isroot = 0;
2288         unsigned long flags;
2289
2290         /* Read and clear the interrupt event register.  Don't clear
2291          * the busReset event, though. This is done when we get the
2292          * selfIDComplete interrupt. */
2293         spin_lock_irqsave(&ohci->event_lock, flags);
2294         event = reg_read(ohci, OHCI1394_IntEventClear);
2295         reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2296         spin_unlock_irqrestore(&ohci->event_lock, flags);
2297
2298         if (!event)
2299                 return IRQ_NONE;
2300
2301         /* If event is ~(u32)0 cardbus card was ejected.  In this case
2302          * we just return, and clean up in the ohci1394_pci_remove
2303          * function. */
2304         if (event == ~(u32) 0) {
2305                 DBGMSG("Device removed.");
2306                 return IRQ_NONE;
2307         }
2308
2309         DBGMSG("IntEvent: %08x", event);
2310
2311         if (event & OHCI1394_unrecoverableError) {
2312                 int ctx;
2313                 PRINT(KERN_ERR, "Unrecoverable error!");
2314
2315                 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2316                         PRINT(KERN_ERR, "Async Req Tx Context died: "
2317                                 "ctrl[%08x] cmdptr[%08x]",
2318                                 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2319                                 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2320
2321                 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2322                         PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2323                                 "ctrl[%08x] cmdptr[%08x]",
2324                                 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2325                                 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2326
2327                 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2328                         PRINT(KERN_ERR, "Async Req Rcv Context died: "
2329                                 "ctrl[%08x] cmdptr[%08x]",
2330                                 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2331                                 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2332
2333                 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2334                         PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2335                                 "ctrl[%08x] cmdptr[%08x]",
2336                                 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2337                                 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2338
2339                 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2340                         if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2341                                 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2342                                         "ctrl[%08x] cmdptr[%08x]", ctx,
2343                                         reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2344                                         reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2345                 }
2346
2347                 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2348                         if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2349                                 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2350                                         "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2351                                         reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2352                                         reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2353                                         reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2354                 }
2355
2356                 event &= ~OHCI1394_unrecoverableError;
2357         }
2358
2359         if (event & OHCI1394_cycleInconsistent) {
2360                 /* We subscribe to the cycleInconsistent event only to
2361                  * clear the corresponding event bit... otherwise,
2362                  * isochronous cycleMatch DMA won't work. */
2363                 DBGMSG("OHCI1394_cycleInconsistent");
2364                 event &= ~OHCI1394_cycleInconsistent;
2365         }
2366
2367         if (event & OHCI1394_busReset) {
2368                 /* The busReset event bit can't be cleared during the
2369                  * selfID phase, so we disable busReset interrupts, to
2370                  * avoid burying the cpu in interrupt requests. */
2371                 spin_lock_irqsave(&ohci->event_lock, flags);
2372                 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2373
2374                 if (ohci->check_busreset) {
2375                         int loop_count = 0;
2376
2377                         udelay(10);
2378
2379                         while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2380                                 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2381
2382                                 spin_unlock_irqrestore(&ohci->event_lock, flags);
2383                                 udelay(10);
2384                                 spin_lock_irqsave(&ohci->event_lock, flags);
2385
2386                                 /* The loop counter check is to prevent the driver
2387                                  * from remaining in this state forever. For the
2388                                  * initial bus reset, the loop continues for ever
2389                                  * and the system hangs, until some device is plugged-in
2390                                  * or out manually into a port! The forced reset seems
2391                                  * to solve this problem. This mainly effects nForce2. */
2392                                 if (loop_count > 10000) {
2393                                         ohci_devctl(host, RESET_BUS, LONG_RESET);
2394                                         DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2395                                         loop_count = 0;
2396                                 }
2397
2398                                 loop_count++;
2399                         }
2400                 }
2401                 spin_unlock_irqrestore(&ohci->event_lock, flags);
2402                 if (!host->in_bus_reset) {
2403                         DBGMSG("irq_handler: Bus reset requested");
2404
2405                         /* Subsystem call */
2406                         hpsb_bus_reset(ohci->host);
2407                 }
2408                 event &= ~OHCI1394_busReset;
2409         }
2410
2411         if (event & OHCI1394_reqTxComplete) {
2412                 struct dma_trm_ctx *d = &ohci->at_req_context;
2413                 DBGMSG("Got reqTxComplete interrupt "
2414                        "status=0x%08X", reg_read(ohci, d->ctrlSet));
2415                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2416                         ohci1394_stop_context(ohci, d->ctrlClear,
2417                                               "reqTxComplete");
2418                 else
2419                         dma_trm_tasklet((unsigned long)d);
2420                         //tasklet_schedule(&d->task);
2421                 event &= ~OHCI1394_reqTxComplete;
2422         }
2423         if (event & OHCI1394_respTxComplete) {
2424                 struct dma_trm_ctx *d = &ohci->at_resp_context;
2425                 DBGMSG("Got respTxComplete interrupt "
2426                        "status=0x%08X", reg_read(ohci, d->ctrlSet));
2427                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2428                         ohci1394_stop_context(ohci, d->ctrlClear,
2429                                               "respTxComplete");
2430                 else
2431                         tasklet_schedule(&d->task);
2432                 event &= ~OHCI1394_respTxComplete;
2433         }
2434         if (event & OHCI1394_RQPkt) {
2435                 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2436                 DBGMSG("Got RQPkt interrupt status=0x%08X",
2437                        reg_read(ohci, d->ctrlSet));
2438                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2439                         ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2440                 else
2441                         tasklet_schedule(&d->task);
2442                 event &= ~OHCI1394_RQPkt;
2443         }
2444         if (event & OHCI1394_RSPkt) {
2445                 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2446                 DBGMSG("Got RSPkt interrupt status=0x%08X",
2447                        reg_read(ohci, d->ctrlSet));
2448                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2449                         ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2450                 else
2451                         tasklet_schedule(&d->task);
2452                 event &= ~OHCI1394_RSPkt;
2453         }
2454         if (event & OHCI1394_isochRx) {
2455                 quadlet_t rx_event;
2456
2457                 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2458                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2459                 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2460                 event &= ~OHCI1394_isochRx;
2461         }
2462         if (event & OHCI1394_isochTx) {
2463                 quadlet_t tx_event;
2464
2465                 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2466                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2467                 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2468                 event &= ~OHCI1394_isochTx;
2469         }
2470         if (event & OHCI1394_selfIDComplete) {
2471                 if (host->in_bus_reset) {
2472                         node_id = reg_read(ohci, OHCI1394_NodeID);
2473
2474                         if (!(node_id & 0x80000000)) {
2475                                 PRINT(KERN_ERR,
2476                                       "SelfID received, but NodeID invalid "
2477                                       "(probably new bus reset occurred): %08X",
2478                                       node_id);
2479                                 goto selfid_not_valid;
2480                         }
2481
2482                         phyid =  node_id & 0x0000003f;
2483                         isroot = (node_id & 0x40000000) != 0;
2484
2485                         DBGMSG("SelfID interrupt received "
2486                               "(phyid %d, %s)", phyid,
2487                               (isroot ? "root" : "not root"));
2488
2489                         handle_selfid(ohci, host, phyid, isroot);
2490
2491                         /* Clear the bus reset event and re-enable the
2492                          * busReset interrupt.  */
2493                         spin_lock_irqsave(&ohci->event_lock, flags);
2494                         reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2495                         reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2496                         spin_unlock_irqrestore(&ohci->event_lock, flags);
2497
2498                         /* Accept Physical requests from all nodes. */
2499                         reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
2500                         reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
2501
2502                         /* Turn on phys dma reception.
2503                          *
2504                          * TODO: Enable some sort of filtering management.
2505                          */
2506                         if (phys_dma) {
2507                                 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
2508                                 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
2509                                 reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
2510                         } else {
2511                                 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
2512                                 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
2513                         }
2514
2515                         DBGMSG("PhyReqFilter=%08x%08x",
2516                                reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
2517                                reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
2518
2519                         hpsb_selfid_complete(host, phyid, isroot);
2520                 } else
2521                         PRINT(KERN_ERR,
2522                               "SelfID received outside of bus reset sequence");
2523
2524 selfid_not_valid:
2525                 event &= ~OHCI1394_selfIDComplete;
2526         }
2527
2528         /* Make sure we handle everything, just in case we accidentally
2529          * enabled an interrupt that we didn't write a handler for.  */
2530         if (event)
2531                 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2532                       event);
2533
2534         return IRQ_HANDLED;
2535 }
2536
2537 /* Put the buffer back into the dma context */
2538 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2539 {
2540         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2541         DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2542
2543         d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2544         d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2545         idx = (idx + d->num_desc - 1 ) % d->num_desc;
2546         d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2547
2548         /* wake up the dma context if necessary */
2549         if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2550                 PRINT(KERN_INFO,
2551                       "Waking dma ctx=%d ... processing is probably too slow",
2552                       d->ctx);
2553         }
2554
2555         /* do this always, to avoid race condition */
2556         reg_write(ohci, d->ctrlSet, 0x1000);
2557 }
2558
/* Convert a quadlet read from a DMA buffer to host order, unless the
 * controller was configured not to swap incoming data (@noswap), in
 * which case it is already host order.  Arguments are parenthesized so
 * the macro expands safely inside larger expressions; @data should not
 * carry side effects, as it appears in both branches. */
#define cond_le32_to_cpu(data, noswap) \
	((noswap) ? (data) : le32_to_cpu(data))
2561
/* Packet size, in bytes, for each IEEE 1394 transaction code, as
 * consumed by packet_length():
 *   16 / 20 : fixed-size packet (header, plus data quadlet where present)
 *   0       : block packet — the real size must be read from the
 *             packet's data_length field
 *   -1      : invalid/unexpected tcode; makes packet_length() fail */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
                            -1, 0, -1, 0, -1, -1, 16, -1};
2564
2565 /*
2566  * Determine the length of a packet in the buffer
2567  * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2568  */
2569 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2570                          int offset, unsigned char tcode, int noswap)
2571 {
2572         int length = -1;
2573
2574         if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2575                 length = TCODE_SIZE[tcode];
2576                 if (length == 0) {
2577                         if (offset + 12 >= d->buf_size) {
2578                                 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2579                                                 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2580                         } else {
2581                                 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2582                         }
2583                         length += 20;
2584                 }
2585         } else if (d->type == DMA_CTX_ISO) {
2586                 /* Assumption: buffer fill mode with header/trailer */
2587                 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2588         }
2589
2590         if (length > 0 && length % 4)
2591                 length += 4 - (length % 4);
2592
2593         return length;
2594 }
2595
/* Tasklet that processes dma receive buffers.
 *
 * Drains completed packets from the buffer-fill receive ring under
 * d->lock.  Each packet is copied into the context's split buffer
 * (d->spb) — reassembling packets that straddle one or more DMA
 * buffers — and then handed to the hpsb layer.  Progress is recorded
 * back into d->buf_ind / d->buf_offset so the next invocation resumes
 * where this one stopped.  On an unrecognized tcode the context is
 * stopped entirely (no per-packet error recovery; see "DMA error
 * recovery" in the file header TODO list). */
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	/* resume from where the previous run left off */
	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	/* The low 16 bits of the descriptor status hold the residual
	 * count: bytes of the buffer NOT yet written by the controller.
	 * So buf_size - rescount - offset is what remains unprocessed. */
	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;

	while (bytes_left > 0) {
		/* tcode lives in bits 7:4 of the first header quadlet */
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}

		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor.  */
		if ((offset + length) > d->buf_size) {
			DBGMSG("Split packet rcv'd");
			if (length > d->split_buf_size) {
				ohci1394_stop_context(ohci, d->ctrlClear,
					     "Split packet size exceeded");
				/* save position so state stays consistent
				 * even though the context is now stopped */
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call.  */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* Reassemble the packet into d->spb, recycling each
			 * fully-consumed DMA buffer as we go. */
			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset=0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr,buf_ptr,d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG("Single packet rcv'd");
			memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
			if (offset==d->buf_size) {
				/* buffer exhausted exactly: recycle it and
				 * advance to the next one */
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset=0;
			}
		}

		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it.  */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				packet_swab(d->spb, tcode);
			DBGMSG("Packet received from node"
				" %d ack=0x%02X spd=%d tcode=0x%X"
				" length=%d ctx=%d tlabel=%d",
				(d->spb[1]>>16)&0x3f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
				tcode, length, d->ctx,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>10)&0x3f);

			/* ack code is in the trailing status quadlet;
			 * 0x11 presumably means ack_complete — confirm
			 * against the hpsb ack definitions */
			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
				== 0x11) ? 1 : 0;

			/* length-4: strip the trailing status quadlet */
			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			       d->ctx);
#endif

		/* re-read residual count for the (possibly new) current
		 * buffer and loop while complete packets remain */
		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;

		bytes_left = d->buf_size - rescount - offset;

	}

	/* persist position for the next tasklet run */
	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}
2734
/* Bottom half that processes sent packets.
 *
 * Walks the context's fifo_list in order under d->lock, reading each
 * packet's completion status from its transmit descriptor.  A zero
 * status means the controller has not reached that packet yet, so the
 * walk stops there.  For completed packets the OHCI status is mapped
 * to an hpsb ack code, the hpsb layer is notified, the data mapping is
 * released, and the descriptor slot is recycled.  Finally any packets
 * queued while the FIFO was full are flushed into the ring. */
static void dma_trm_tasklet (unsigned long data)
{
	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	struct hpsb_packet *packet, *ptmp;
	unsigned long flags;
	u32 status, ack;
	size_t datasize;

	spin_lock_irqsave(&d->lock, flags);

	list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
		datasize = packet->data_size;
		/* The controller writes xferStatus into the last descriptor
		 * it executed: 'end' for packets with a data block, 'begin'
		 * for header-only and raw packets. */
		if (datasize && packet->type != hpsb_raw)
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->end.status) >> 16;
		else
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->begin.status) >> 16;

		if (status == 0)
			/* this packet hasn't been sent yet*/
			break;

#ifdef OHCI1394_DEBUG
		if (datasize)
			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
				       d->ctx);
			else
				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
				       "0x%02X ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
				       d->ctx);
		else
			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
			       "0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
					>>16)&0x3f,
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>4)&0xf,
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>10)&0x3f,
				status&0x1f, (status>>5)&0x3,
				le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
				d->ctx);
#endif

		/* Bit 4 of the status distinguishes a real 1394 ack code
		 * (low nibble) from an OHCI evt_* error (low 5 bits), which
		 * must be translated into a driver ACKX_* code. */
		if (status & 0x10) {
			ack = status & 0xf;
		} else {
			switch (status & 0x1f) {
			case EVT_NO_STATUS: /* that should never happen */
			case EVT_RESERVED_A: /* that should never happen */
			case EVT_LONG_PACKET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_MISSING_ACK:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_UNDERRUN:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_OVERRUN: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_DESCRIPTOR_READ:
			case EVT_DATA_READ:
			case EVT_DATA_WRITE:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_BUS_RESET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_TIMEOUT:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_TCODE_ERR:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_RESERVED_B: /* that should never happen */
			case EVT_RESERVED_C: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_UNKNOWN:
			case EVT_FLUSHED:
				ack = ACKX_SEND_ERROR;
				break;
			default:
				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				BUG();
			}
		}

		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ack);

		if (datasize) {
			/* NOTE(review): cpu_to_le32 on an address read back
			 * from the descriptor looks like it should be
			 * le32_to_cpu (same bit pattern on all arches, but
			 * states the wrong conversion direction) — and it
			 * would truncate a 64-bit dma_addr_t.  Confirm. */
			pci_unmap_single(ohci->dev,
					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
					 datasize, PCI_DMA_TODEVICE);
			OHCI_DMA_FREE("single Xmit data packet");
		}

		/* recycle the descriptor slot */
		d->sent_ind = (d->sent_ind+1)%d->num_desc;
		d->free_prgs++;
	}

	/* push any packets that were waiting for free descriptors */
	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);
}
2862
2863 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2864 {
2865         if (d->ctrlClear) {
2866                 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2867
2868                 if (d->type == DMA_CTX_ISO) {
2869                         /* disable interrupts */
2870                         reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2871                         ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2872                 } else {
2873                         tasklet_kill(&d->task);
2874                 }
2875         }
2876 }
2877
2878
2879 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2880 {
2881         int i;
2882         struct ti_ohci *ohci = d->ohci;
2883
2884         if (ohci == NULL)
2885                 return;
2886
2887         DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2888
2889         if (d->buf_cpu) {
2890                 for (i=0; i<d->num_desc; i++)
2891                         if (d->buf_cpu[i] && d->buf_bus[i]) {
2892                                 pci_free_consistent(
2893                                         ohci->dev, d->buf_size,
2894                                         d->buf_cpu[i], d->buf_bus[i]);
2895                                 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2896                         }
2897                 kfree(d->buf_cpu);
2898                 kfree(d->buf_bus);
2899         }
2900         if (d->prg_cpu) {
2901                 for (i=0; i<d->num_desc; i++)
2902                         if (d->prg_cpu[i] && d->prg_bus[i]) {
2903                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2904                                 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2905                         }
2906                 pci_pool_destroy(d->prg_pool);
2907                 OHCI_DMA_FREE("dma_rcv prg pool");
2908                 kfree(d->prg_cpu);
2909                 kfree(d->prg_bus);
2910         }
2911         if (d->spb) kfree(d->spb);
2912
2913         /* Mark this context as freed. */
2914         d->ohci = NULL;
2915 }
2916
2917 static int
2918 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2919                   enum context_type type, int ctx, int num_desc,
2920                   int buf_size, int split_buf_size, int context_base)
2921 {
2922         int i;
2923
2924         d->ohci = ohci;
2925         d->type = type;
2926         d->ctx = ctx;
2927
2928         d->num_desc = num_desc;
2929         d->buf_size = buf_size;
2930         d->split_buf_size = split_buf_size;
2931
2932         d->ctrlSet = 0;
2933         d->ctrlClear = 0;
2934         d->cmdPtr = 0;
2935
2936         d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_KERNEL);
2937         d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
2938
2939         if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2940                 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2941                 free_dma_rcv_ctx(d);
2942                 return -ENOMEM;
2943         }
2944         memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2945         memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2946
2947         d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
2948                              GFP_KERNEL);
2949         d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
2950
2951         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2952                 PRINT(KERN_ERR, "Failed to allocate dma prg");
2953                 free_dma_rcv_ctx(d);
2954                 return -ENOMEM;
2955         }
2956         memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2957         memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2958
2959         d->spb = kmalloc(d->split_buf_size, GFP_KERNEL);
2960
2961         if (d->spb == NULL) {
2962                 PRINT(KERN_ERR, "Failed to allocate split buffer");
2963                 free_dma_rcv_ctx(d);
2964                 return -ENOMEM;
2965         }
2966
2967         d->prg_pool = pci_pool_create("ohci1394 rcv prg", ohci->dev,
2968                                 sizeof(struct dma_cmd), 4, 0);
2969         OHCI_DMA_ALLOC("dma_rcv prg pool");
2970
2971         for (i=0; i<d->num_desc; i++) {
2972                 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2973                                                      d->buf_size,
2974                                                      d->buf_bus+i);
2975                 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
2976
2977                 if (d->buf_cpu[i] != NULL) {
2978                         memset(d->buf_cpu[i], 0, d->buf_size);
2979                 } else {
2980                         PRINT(KERN_ERR,
2981                               "Failed to allocate dma buffer");
2982                         free_dma_rcv_ctx(d);
2983                         return -ENOMEM;
2984                 }
2985
2986                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
2987                 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
2988
2989                 if (d->prg_cpu[i] != NULL) {
2990                         memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2991                 } else {
2992                         PRINT(KERN_ERR,
2993                               "Failed to allocate dma prg");
2994                         free_dma_rcv_ctx(d);
2995                         return -ENOMEM;
2996                 }
2997         }
2998
2999         spin_lock_init(&d->lock);
3000
3001         if (type == DMA_CTX_ISO) {
3002                 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3003                                           OHCI_ISO_MULTICHANNEL_RECEIVE,
3004                                           dma_rcv_tasklet, (unsigned long) d);
3005                 if (ohci1394_register_iso_tasklet(ohci,
3006                                                   &ohci->ir_legacy_tasklet) < 0) {
3007                         PRINT(KERN_ERR, "No IR DMA context available");
3008                         free_dma_rcv_ctx(d);
3009                         return -EBUSY;
3010                 }
3011
3012                 /* the IR context can be assigned to any DMA context
3013                  * by ohci1394_register_iso_tasklet */
3014                 d->ctx = ohci->ir_legacy_tasklet.context;
3015                 d->ctrlSet = OHCI1394_IsoRcvContextControlSet + 32*d->ctx;
3016                 d->ctrlClear = OHCI1394_IsoRcvContextControlClear + 32*d->ctx;
3017                 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
3018                 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
3019         } else {
3020                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3021                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3022                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3023
3024                 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3025         }
3026
3027         return 0;
3028 }
3029
3030 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3031 {
3032         int i;
3033         struct ti_ohci *ohci = d->ohci;
3034
3035         if (ohci == NULL)
3036                 return;
3037
3038         DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3039
3040         if (d->prg_cpu) {
3041                 for (i=0; i<d->num_desc; i++)
3042                         if (d->prg_cpu[i] && d->prg_bus[i]) {
3043                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3044                                 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3045                         }
3046                 pci_pool_destroy(d->prg_pool);
3047                 OHCI_DMA_FREE("dma_trm prg pool");
3048                 kfree(d->prg_cpu);
3049                 kfree(d->prg_bus);
3050         }
3051
3052         /* Mark this context as freed. */
3053         d->ohci = NULL;
3054 }
3055
3056 static int
3057 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3058                   enum context_type type, int ctx, int num_desc,
3059                   int context_base)
3060 {
3061         int i;
3062
3063         d->ohci = ohci;
3064         d->type = type;
3065         d->ctx = ctx;
3066         d->num_desc = num_desc;
3067         d->ctrlSet = 0;
3068         d->ctrlClear = 0;
3069         d->cmdPtr = 0;
3070
3071         d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
3072                              GFP_KERNEL);
3073         d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3074
3075         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3076                 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3077                 free_dma_trm_ctx(d);
3078                 return -ENOMEM;
3079         }
3080         memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3081         memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3082
3083         d->prg_pool = pci_pool_create("ohci1394 trm prg", ohci->dev,
3084                                 sizeof(struct at_dma_prg), 4, 0);
3085         OHCI_DMA_ALLOC("dma_rcv prg pool");
3086
3087         for (i = 0; i < d->num_desc; i++) {
3088                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3089                 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3090
3091                 if (d->prg_cpu[i] != NULL) {
3092                         memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3093                 } else {
3094                         PRINT(KERN_ERR,
3095                               "Failed to allocate at dma prg");
3096                         free_dma_trm_ctx(d);
3097                         return -ENOMEM;
3098                 }
3099         }
3100
3101         spin_lock_init(&d->lock);
3102
3103         /* initialize tasklet */
3104         if (type == DMA_CTX_ISO) {
3105                 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3106                                           dma_trm_tasklet, (unsigned long) d);
3107                 if (ohci1394_register_iso_tasklet(ohci,
3108                                                   &ohci->it_legacy_tasklet) < 0) {
3109                         PRINT(KERN_ERR, "No IT DMA context available");
3110                         free_dma_trm_ctx(d);
3111                         return -EBUSY;
3112                 }
3113
3114                 /* IT can be assigned to any context by register_iso_tasklet */
3115                 d->ctx = ohci->it_legacy_tasklet.context;
3116                 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3117                 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3118                 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3119         } else {
3120                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3121                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3122                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3123                 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3124         }
3125
3126         return 0;
3127 }
3128
/* Install a new config ROM image on the controller.  The ROM header
 * (quadlet 0) and bus options (quadlet 2) are mirrored into chip
 * registers; the full image is copied into the consistent DMA buffer
 * the controller reads the rest of the ROM from. */
static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
{
	struct ti_ohci *ohci = host->hostdata;

	/* config_rom is big-endian on the wire; registers take host order */
	reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
	reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));

	memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
}
3138
3139
3140 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3141                                  quadlet_t data, quadlet_t compare)
3142 {
3143         struct ti_ohci *ohci = host->hostdata;
3144         int i;
3145
3146         reg_write(ohci, OHCI1394_CSRData, data);
3147         reg_write(ohci, OHCI1394_CSRCompareData, compare);
3148         reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3149
3150         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3151                 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3152                         break;
3153
3154                 mdelay(1);
3155         }
3156
3157         return reg_read(ohci, OHCI1394_CSRData);
3158 }
3159
/* Operations handed to the ieee1394 core for hosts driven by this
 * module.  The core calls these for packet transmission, device
 * control, isochronous control and hardware CSR access. */
static struct hpsb_host_driver ohci1394_driver = {
	.owner =		THIS_MODULE,
	.name =			OHCI1394_DRIVER_NAME,
	.set_hw_config_rom =	ohci_set_hw_config_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =		ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};
3169
3170 \f
3171
3172 /***********************************
3173  * PCI Driver Interface functions  *
3174  ***********************************/
3175
/* Probe error-exit helper: log the message, unwind everything set up so
 * far (ohci1394_pci_remove() keys off ohci->init_state), and return
 * 'err' from the *enclosing* function.  NOTE the hidden 'return': this
 * macro is only valid inside ohci1394_pci_probe(), which #undef's it at
 * its end. */
#define FAIL(err, fmt, args...)			\
do {						\
	PRINT_G(KERN_ERR, fmt , ## args);	\
	ohci1394_pci_remove(dev);		\
	return err;				\
} while (0)
3182
/* PCI probe: bring up one OHCI-1394 controller.
 *
 * Enables the PCI device, maps the 2KB MMIO register window, allocates
 * the config-ROM and self-ID DMA buffers and the four async DMA
 * contexts (AR req/resp, AT req/resp), soft-resets and initializes the
 * chip, then registers the host with the ieee1394 core.
 *
 * Each completed step advances ohci->init_state, so that FAIL() ->
 * ohci1394_pci_remove() unwinds exactly what has been set up so far.
 * Returns 0 on success or a negative errno via FAIL(). */
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	static int version_printed = 0;

	struct hpsb_host *host;
	struct ti_ohci *ohci;	/* shortcut to currently handled device */
	unsigned long ohci_base;

	if (version_printed++ == 0)
		PRINT_G(KERN_INFO, "%s", version);

	if (pci_enable_device(dev))
		FAIL(-ENXIO, "Failed to enable OHCI hardware");
	pci_set_master(dev);

	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");

	ohci = host->hostdata;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	host->pdev = dev;
	pci_set_drvdata(dev, ohci);

	/* We don't want hardware swapping */
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	/* Some oddball Apple controllers do not order the selfid
	 * properly, so we make up for it here.  */
#ifndef __LITTLE_ENDIAN
	/* XXX: Need a better way to check this. I'm wondering if we can
	 * read the values of the OHCI1394_PCI_HCI_Control and the
	 * noByteSwapData registers to see if they were not cleared to
	 * zero. Should this work? Obviously it's not defined what these
	 * registers will read when they aren't supported. Bleh! */
	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
		ohci->no_swap_incoming = 1;
		ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif


#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a busreset.  */
	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
	    (dev->vendor ==  PCI_VENDOR_ID_NVIDIA &&
	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
		ohci->check_busreset = 1;

	/* We hardwire the MMIO length, since some CardBus adaptors
	 * fail to report the right length.  Anyway, the ohci spec
	 * clearly says it's 2kb, so this shouldn't be a problem. */
	ohci_base = pci_resource_start(dev, 0);
	if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
		PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
		      pci_resource_len(dev, 0));

	/* Seems PCMCIA handles this internally. Not sure why. Seems
	 * pretty bogus to force a driver to special case this.  */
#ifndef PCMCIA
	if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
		FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
		     ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
#endif
	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;

	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL)
		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);

	/* csr_config rom allocation */
	ohci->csr_config_rom_cpu =
		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				     &ohci->csr_config_rom_bus);
	OHCI_DMA_ALLOC("consistent csr_config_rom");
	if (ohci->csr_config_rom_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;

	/* self-id dma buffer allocation */
	ohci->selfid_buf_cpu =
		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
			&ohci->selfid_buf_bus);
	OHCI_DMA_ALLOC("consistent selfid_buf");

	if (ohci->selfid_buf_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;

	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
		      "8Kb boundary... may cause problems on some CXD3222 chip",
		      ohci->selfid_buf_cpu);

	/* No self-id errors at startup */
	ohci->self_id_errors = 0;

	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
	/* AR DMA request context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
			      OHCI1394_AsReqRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Req context");

	/* AR DMA response context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
			      OHCI1394_AsRspRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Resp context");

	/* AT DMA request context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
			      OHCI1394_AsReqTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Req context");

	/* AT DMA response context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
			      OHCI1394_AsRspTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Resp context");

	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
	ohci_soft_reset(ohci);

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	/* 50ms settle time for LPS/link, see comment above */
	mdelay(50);

	/* Determine the number of available IR and IT contexts. */
	ohci->nb_iso_rcv_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
	DBGMSG("%d iso receive contexts available",
	       ohci->nb_iso_rcv_ctx);

	ohci->nb_iso_xmit_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
	DBGMSG("%d iso transmit contexts available",
	       ohci->nb_iso_xmit_ctx);

	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;

	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
	spin_lock_init(&ohci->iso_tasklet_list_lock);
	ohci->ISO_channel_usage = 0;
	spin_lock_init(&ohci->IR_channel_lock);

	/* the IR DMA context is allocated on-demand; mark it inactive */
	ohci->ir_legacy_context.ohci = NULL;

	/* same for the IT DMA context */
	ohci->it_legacy_context.ohci = NULL;

	if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
			 OHCI1394_DRIVER_NAME, ohci))
		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);

	ohci->init_state = OHCI_INIT_HAVE_IRQ;
	ohci_initialize(ohci);

	/* Set certain csr values */
	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
	host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;

	/* Tell the highlevel this host is ready */
	if (hpsb_add_host(host))
		FAIL(-ENOMEM, "Failed to register host with highlevel");

	ohci->init_state = OHCI_INIT_DONE;

	return 0;
#undef FAIL
}
3385
/* PCI remove / probe-failure unwind.  The switch intentionally falls
 * through from the most-complete init state down to the least, undoing
 * each step recorded in ohci->init_state.  Called both by the PCI core
 * on device removal and by FAIL() in ohci1394_pci_probe(). */
static void ohci1394_pci_remove(struct pci_dev *pdev)
{
	struct ti_ohci *ohci;
	struct device *dev;

	ohci = pci_get_drvdata(pdev);
	if (!ohci)
		return;

	/* pin the host's device so it can't go away mid-teardown */
	dev = get_device(&ohci->host->device);

	switch (ohci->init_state) {
	case OHCI_INIT_DONE:
		hpsb_remove_host(ohci->host);

		/* Clear out BUS Options */
		reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
			  0x00ff0000);
		memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);

		/* fall through */
	case OHCI_INIT_HAVE_IRQ:
		/* Clear interrupt registers */
		reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

		/* Disable IRM Contender */
		set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));

		/* Clear link control register */
		reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

		/* Let all other nodes know to ignore us */
		ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);

		/* Soft reset before we start - this disables
		 * interrupts and clears linkEnable and LPS. */
		ohci_soft_reset(ohci);
		free_irq(ohci->dev->irq, ohci);

		/* fall through */
	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
		/* The ohci_soft_reset() stops all DMA contexts, so we
		 * dont need to do this.  */
		/* Free AR dma */
		free_dma_rcv_ctx(&ohci->ar_req_context);
		free_dma_rcv_ctx(&ohci->ar_resp_context);

		/* Free AT dma */
		free_dma_trm_ctx(&ohci->at_req_context);
		free_dma_trm_ctx(&ohci->at_resp_context);

		/* Free IR dma */
		free_dma_rcv_ctx(&ohci->ir_legacy_context);

		/* Free IT dma */
		free_dma_trm_ctx(&ohci->it_legacy_context);

		/* fall through */
	case OHCI_INIT_HAVE_SELFID_BUFFER:
		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				    ohci->selfid_buf_cpu,
				    ohci->selfid_buf_bus);
		OHCI_DMA_FREE("consistent selfid_buf");

		/* fall through */
	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				    ohci->csr_config_rom_cpu,
				    ohci->csr_config_rom_bus);
		OHCI_DMA_FREE("consistent csr_config_rom");

		/* fall through */
	case OHCI_INIT_HAVE_IOMAPPING:
		iounmap(ohci->registers);

		/* fall through */
	case OHCI_INIT_HAVE_MEM_REGION:
#ifndef PCMCIA
		release_mem_region(pci_resource_start(ohci->dev, 0),
				   OHCI1394_REGISTER_SIZE);
#endif

#ifdef CONFIG_PPC_PMAC
	/* On UniNorth, power down the cable and turn off the chip
	 * clock when the module is removed to save power on
	 * laptops. Turning it back ON is done by the arch code when
	 * pci_enable_device() is called */
	{
		struct device_node* of_node;

		of_node = pci_device_to_OF_node(ohci->dev);
		if (of_node) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
		}
	}
#endif /* CONFIG_PPC_PMAC */

		/* fall through */
	case OHCI_INIT_ALLOC_HOST:
		pci_set_drvdata(ohci->dev, NULL);
	}

	if (dev)
		put_device(dev);
}
3492
3493
/* PCI resume hook: re-power the FireWire cell on PowerMac laptops and
 * re-enable the PCI device.  Returns 0 on success or the errno from
 * pci_enable_device(). */
static int ohci1394_pci_resume (struct pci_dev *pdev)
{
#ifdef CONFIG_PMAC_PBOOK
	{
		struct device_node *of_node;

		/* Re-enable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
	}
#endif

	/* BUGFIX: pci_enable_device() can fail; propagate the error
	 * instead of discarding it and reporting a successful resume. */
	return pci_enable_device(pdev);
}
3511
3512
/* PCI suspend hook: on PowerMac laptops, power down the FireWire cell
 * via the platform feature call; nothing is done on other hardware.
 * NOTE(review): no pci_save_state()/pci_set_power_state() here -- the
 * driver appears to rely on resume's pci_enable_device() alone; confirm
 * this is sufficient for the targeted kernel version. */
static int ohci1394_pci_suspend (struct pci_dev *pdev, u32 state)
{
#ifdef CONFIG_PMAC_PBOOK
	{
		struct device_node *of_node;

		/* Disable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
	}
#endif

	return 0;
}
3528
3529
/* PCI class code for a FireWire controller with the OHCI programming
 * interface (prog-if 0x10). */
#define PCI_CLASS_FIREWIRE_OHCI     ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)

/* Match any device advertising the FireWire/OHCI class, regardless of
 * vendor or device ID. */
static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class =	PCI_CLASS_FIREWIRE_OHCI,
		.class_mask =	PCI_ANY_ID,
		.vendor =	PCI_ANY_ID,
		.device =	PCI_ANY_ID,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ 0, },			/* terminator */
};

MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3545
/* Hooks registered with the PCI core; probe/remove drive the whole
 * per-controller lifecycle above. */
static struct pci_driver ohci1394_pci_driver = {
	.name =		OHCI1394_DRIVER_NAME,
	.id_table =	ohci1394_pci_tbl,
	.probe =	ohci1394_pci_probe,
	.remove =	ohci1394_pci_remove,
	.resume =	ohci1394_pci_resume,
	.suspend =	ohci1394_pci_suspend,
};
3554
3555 \f
3556
3557 /***********************************
3558  * OHCI1394 Video Interface        *
3559  ***********************************/
3560
3561 /* essentially the only purpose of this code is to allow another
3562    module to hook into ohci's interrupt handler */
3563
3564 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3565 {
3566         int i=0;
3567
3568         /* stop the channel program if it's still running */
3569         reg_write(ohci, reg, 0x8000);
3570
3571         /* Wait until it effectively stops */
3572         while (reg_read(ohci, reg) & 0x400) {
3573                 i++;
3574                 if (i>5000) {
3575                         PRINT(KERN_ERR,
3576                               "Runaway loop while stopping context: %s...", msg ? msg : "");
3577                         return 1;
3578                 }
3579
3580                 mb();
3581                 udelay(10);
3582         }
3583         if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3584         return 0;
3585 }
3586
3587 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3588                                void (*func)(unsigned long), unsigned long data)
3589 {
3590         tasklet_init(&tasklet->tasklet, func, data);
3591         tasklet->type = type;
3592         /* We init the tasklet->link field, so we can list_del() it
3593          * without worrying whether it was added to the list or not. */
3594         INIT_LIST_HEAD(&tasklet->link);
3595 }
3596
3597 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3598                                   struct ohci1394_iso_tasklet *tasklet)
3599 {
3600         unsigned long flags, *usage;
3601         int n, i, r = -EBUSY;
3602
3603         if (tasklet->type == OHCI_ISO_TRANSMIT) {
3604                 n = ohci->nb_iso_xmit_ctx;
3605                 usage = &ohci->it_ctx_usage;
3606         }
3607         else {
3608                 n = ohci->nb_iso_rcv_ctx;
3609                 usage = &ohci->ir_ctx_usage;
3610
3611                 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3612                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3613                         if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3614                                 return r;
3615                         }
3616                 }
3617         }
3618
3619         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3620
3621         for (i = 0; i < n; i++)
3622                 if (!test_and_set_bit(i, usage)) {
3623                         tasklet->context = i;
3624                         list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3625                         r = 0;
3626                         break;
3627                 }
3628
3629         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3630
3631         return r;
3632 }
3633
/* Release the ISO context claimed by ohci1394_register_iso_tasklet():
 * kill the tasklet, clear its context usage bit (plus the single
 * multichannel reservation when applicable) and unlink it from the
 * controller's tasklet list. */
void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
				     struct ohci1394_iso_tasklet *tasklet)
{
	unsigned long flags;

	tasklet_kill(&tasklet->tasklet);

	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);

	if (tasklet->type == OHCI_ISO_TRANSMIT)
		clear_bit(tasklet->context, &ohci->it_ctx_usage);
	else {
		clear_bit(tasklet->context, &ohci->ir_ctx_usage);

		if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
			clear_bit(0, &ohci->ir_multichannel_used);
		}
	}

	/* link was initialized by ohci1394_init_iso_tasklet(), so this is
	 * safe even if registration never happened */
	list_del(&tasklet->link);

	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
}
3657
/* Interface exported to other 1394 client drivers so they can hook into
 * this controller's ISO context machinery. */
EXPORT_SYMBOL(ohci1394_stop_context);
EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3662
3663
3664 /***********************************
3665  * General module initialization   *
3666  ***********************************/
3667
3668 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3669 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3670 MODULE_LICENSE("GPL");
3671
/* Module exit: detach from the PCI core; per-device teardown happens in
 * ohci1394_pci_remove(). */
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}
3676
/* Module init: register the PCI driver; controllers are brought up
 * per-device in ohci1394_pci_probe(). */
static int __init ohci1394_init(void)
{
	return pci_module_init(&ohci1394_pci_driver);
}
3681
/* Wire up module entry/exit points. */
module_init(ohci1394_init);
module_exit(ohci1394_cleanup);