upgrade to fedora-2.6.12-1.1398.FC4 + vserver 2.0.rc7
[linux-2.6.git] / drivers / ieee1394 / ohci1394.c
1 /*
2  * ohci1394.c - driver for OHCI 1394 boards
3  * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4  *                        Gord Peters <GordPeters@smarttech.com>
5  *              2001      Ben Collins <bcollins@debian.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software Foundation,
19  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20  */
21
22 /*
23  * Things known to be working:
24  * . Async Request Transmit
25  * . Async Response Receive
26  * . Async Request Receive
27  * . Async Response Transmit
28  * . Iso Receive
29  * . DMA mmap for iso receive
30  * . Config ROM generation
31  *
32  * Things implemented, but still in test phase:
33  * . Iso Transmit
34  * . Async Stream Packets Transmit (Receive done via Iso interface)
35  *
36  * Things not implemented:
37  * . DMA error recovery
38  *
39  * Known bugs:
40  * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41  *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42  */
43
44 /*
45  * Acknowledgments:
46  *
47  * Adam J Richter <adam@yggdrasil.com>
48  *  . Use of pci_class to find device
49  *
50  * Emilie Chung <emilie.chung@axis.com>
51  *  . Tip on Async Request Filter
52  *
53  * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionality
55  *
56  * Robert Ficklin <rficklin@westengineering.com>
57  *  . Loop in irq_handler
58  *
59  * James Goodwin <jamesg@Filanet.com>
60  *  . Various tips on initialization, self-id reception, etc.
61  *
62  * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63  *  . Apple PowerBook detection
64  *
65  * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66  *  . Reset the board properly before leaving + misc cleanups
67  *
68  * Leon van Stuivenberg <leonvs@iae.nl>
69  *  . Bug fixes
70  *
71  * Ben Collins <bcollins@debian.org>
72  *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
74  *  . Config ROM generation
75  *
76  * Manfred Weihs <weihs@ict.tuwien.ac.at>
77  *  . Reworked code for initiating bus resets
78  *    (long, short, with or without hold-off)
79  *
80  * Nandu Santhi <contactnandu@users.sourceforge.net>
81  *  . Added support for nVidia nForce2 onboard Firewire chipset
82  *
83  */
84
85 #include <linux/config.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/slab.h>
89 #include <linux/interrupt.h>
90 #include <linux/wait.h>
91 #include <linux/errno.h>
92 #include <linux/module.h>
93 #include <linux/moduleparam.h>
94 #include <linux/pci.h>
95 #include <linux/fs.h>
96 #include <linux/poll.h>
97 #include <asm/byteorder.h>
98 #include <asm/atomic.h>
99 #include <asm/uaccess.h>
100 #include <linux/delay.h>
101 #include <linux/spinlock.h>
102
103 #include <asm/pgtable.h>
104 #include <asm/page.h>
105 #include <asm/irq.h>
106 #include <linux/sched.h>
107 #include <linux/types.h>
108 #include <linux/vmalloc.h>
109 #include <linux/init.h>
110
111 #ifdef CONFIG_PPC_PMAC
112 #include <asm/machdep.h>
113 #include <asm/pmac_feature.h>
114 #include <asm/prom.h>
115 #include <asm/pci-bridge.h>
116 #endif
117
118 #include "csr1212.h"
119 #include "ieee1394.h"
120 #include "ieee1394_types.h"
121 #include "hosts.h"
122 #include "dma.h"
123 #include "iso.h"
124 #include "ieee1394_core.h"
125 #include "highlevel.h"
126 #include "ohci1394.h"
127
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

/* Make sure the DBGMSG defined below is the only one in effect */
#ifdef DBGMSG
#undef DBGMSG
#endif

/* Per-card debug message; compiled to nothing unless verbose debug
 * (OHCI1394_DEBUG) is enabled.  Expects a local `ohci` in scope. */
#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...)
#endif

/* Optional DMA mapping tracing: global_outstanding_dmas counts live
 * mappings, so a leak shows up as a non-zero balance in the log. */
#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
        HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
                ++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
        HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
                --global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...)
#define OHCI_DMA_FREE(fmt, args...)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information (expects a local `ohci` in scope) */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
163
/* Driver revision string, reported at probe time */
static char version[] __devinitdata =
        "$Rev: 1250 $ Ben Collins <bcollins@debian.org>";

/* Module Parameters */
/* Allow remote nodes direct DMA access to host memory ("physical DMA") */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
171
172 static void dma_trm_tasklet(unsigned long data);
173 static void dma_trm_reset(struct dma_trm_ctx *d);
174
175 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
176                              enum context_type type, int ctx, int num_desc,
177                              int buf_size, int split_buf_size, int context_base);
178 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
179 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
180
181 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
182                              enum context_type type, int ctx, int num_desc,
183                              int context_base);
184
185 static void ohci1394_pci_remove(struct pci_dev *pdev);
186
187 #ifndef __LITTLE_ENDIAN
/* Number of header quadlets to byte-swap for each transaction code,
 * indexed by tcode.  A value of 0 marks codes with nothing to swap. */
static unsigned hdr_sizes[] =
{
        3,      /* TCODE_WRITEQ */
        4,      /* TCODE_WRITEB */
        3,      /* TCODE_WRITE_RESPONSE */
        0,      /* ??? */
        3,      /* TCODE_READQ */
        4,      /* TCODE_READB */
        3,      /* TCODE_READQ_RESPONSE */
        4,      /* TCODE_READB_RESPONSE */
        1,      /* TCODE_CYCLE_START (???) */
        4,      /* TCODE_LOCK_REQUEST */
        2,      /* TCODE_ISO_DATA */
        4,      /* TCODE_LOCK_RESPONSE */
};
203
204 /* Swap headers */
205 static inline void packet_swab(quadlet_t *data, int tcode)
206 {
207         size_t size = hdr_sizes[tcode];
208
209         if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
210                 return;
211
212         while (size--)
213                 data[size] = swab32(data[size]);
214 }
215 #else
216 /* Don't waste cycles on same sex byte swaps */
217 #define packet_swab(w,x)
218 #endif /* !LITTLE_ENDIAN */
219
220 /***********************************
221  * IEEE-1394 functionality section *
222  ***********************************/
223
/* Read an 8-bit PHY register through the OHCI PhyControl register.
 *
 * Writes a read request (register address in bits 11:8, request bit
 * 15 set), then polls up to OHCI_LOOP_COUNT ms for the done bit
 * (bit 31).  Serialized against set_phy_reg() by ohci->phy_reg_lock.
 * Returns the register value from bits 23:16 of PhyControl; on
 * timeout an error is logged and whatever the hardware returned is
 * passed back anyway.
 */
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
        int i;
        unsigned long flags;
        quadlet_t r;

        spin_lock_irqsave (&ohci->phy_reg_lock, flags);

        /* Issue the read request for PHY register `addr` */
        reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

        /* Busy-wait (1 ms steps) for the completion bit */
        for (i = 0; i < OHCI_LOOP_COUNT; i++) {
                if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
                        break;

                mdelay(1);
        }

        r = reg_read(ohci, OHCI1394_PhyControl);

        if (i >= OHCI_LOOP_COUNT)
                PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
                       r, r & 0x80000000, i);

        spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

        /* Read data lives in bits 23:16 */
        return (r & 0x00ff0000) >> 16;
}
251
/* Write an 8-bit value to a PHY register through OHCI PhyControl.
 *
 * Issues a write request (address in bits 11:8, data in bits 7:0,
 * request bit 14 set) and polls up to OHCI_LOOP_COUNT ms for the
 * hardware to clear the request bit.  Serialized against
 * get_phy_reg() by ohci->phy_reg_lock.  Timeouts are logged but not
 * reported to the caller.
 */
static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
        int i;
        unsigned long flags;
        u32 r = 0;

        spin_lock_irqsave (&ohci->phy_reg_lock, flags);

        reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

        /* Wait for the controller to clear the write-request bit */
        for (i = 0; i < OHCI_LOOP_COUNT; i++) {
                r = reg_read(ohci, OHCI1394_PhyControl);
                if (!(r & 0x00004000))
                        break;

                mdelay(1);
        }

        if (i == OHCI_LOOP_COUNT)
                PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
                       r, r & 0x00004000, i);

        spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

        return;
}
278
279 /* Or's our value into the current value */
280 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
281 {
282         u8 old;
283
284         old = get_phy_reg (ohci, addr);
285         old |= data;
286         set_phy_reg (ohci, addr, old);
287
288         return;
289 }
290
/* Parse the self-ID buffer after a bus reset and hand each valid
 * self-ID packet to the ieee1394 core.
 *
 * The SelfIDCount register is cross-checked against the first buffer
 * quadlet (error bit 31, and the 0x00FF0000 generation field must
 * match).  On a bad reception another bus reset is forced through
 * PHY register 1, up to OHCI1394_MAX_SELF_ID_ERRORS attempts.
 */
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
                                int phyid, int isroot)
{
        quadlet_t *q = ohci->selfid_buf_cpu;
        quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
        size_t size;
        quadlet_t q0, q1;

        /* Check status of self-id reception */

        if (ohci->selfid_swap)
                q0 = le32_to_cpu(q[0]);
        else
                q0 = q[0];

        /* Error bit set, or generation fields disagree? */
        if ((self_id_count & 0x80000000) ||
            ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
                PRINT(KERN_ERR,
                      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
                      self_id_count, q0, ohci->self_id_errors);

                /* Tip by James Goodwin <jamesg@Filanet.com>:
                 * We had an error, generate another bus reset in response.  */
                if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
                        set_phy_reg_mask (ohci, 1, 0x40);
                        ohci->self_id_errors++;
                } else {
                        PRINT(KERN_ERR,
                              "Too many errors on SelfID error reception, giving up!");
                }
                return;
        }

        /* SelfID Ok, reset error counter. */
        ohci->self_id_errors = 0;

        /* Quadlets received, minus the leading status quadlet.
         * NOTE(review): size is size_t and is decremented by 2 per
         * loop iteration; an odd value here would wrap around rather
         * than terminate -- presumably the hardware always delivers
         * self-ID data as quadlet pairs.  Verify against OHCI spec. */
        size = ((self_id_count & 0x00001FFC) >> 2) - 1;
        q++;

        /* Each self-ID packet is a quadlet followed by its bitwise
         * inverse; mismatched pairs are reported and skipped. */
        while (size > 0) {
                if (ohci->selfid_swap) {
                        q0 = le32_to_cpu(q[0]);
                        q1 = le32_to_cpu(q[1]);
                } else {
                        q0 = q[0];
                        q1 = q[1];
                }

                if (q0 == ~q1) {
                        DBGMSG ("SelfID packet 0x%x received", q0);
                        hpsb_selfid_received(host, cpu_to_be32(q0));
                        if (((q0 & 0x3f000000) >> 24) == phyid)
                                DBGMSG ("SelfID for this node is 0x%08x", q0);
                } else {
                        PRINT(KERN_ERR,
                              "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
                }
                q += 2;
                size -= 2;
        }

        DBGMSG("SelfID complete");

        return;
}
356
357 static void ohci_soft_reset(struct ti_ohci *ohci) {
358         int i;
359
360         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
361
362         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
363                 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
364                         break;
365                 mdelay(1);
366         }
367         DBGMSG ("Soft reset finished");
368 }
369
370
/* Generate the dma receive prgs and start the context.
 *
 * Builds a circular chain of INPUT_MORE descriptors over the
 * context's num_desc buffers (the last descriptor branches back to
 * the first).  For iso contexts, also programs buffer-fill mode,
 * the context match register, the channel masks, and unmasks the
 * receive interrupt, before finally starting the context.
 */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
        struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
        int i;

        /* Make sure the hardware context is halted first */
        ohci1394_stop_context(ohci, d->ctrlClear, NULL);

        for (i=0; i<d->num_desc; i++) {
                u32 c;

                c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
                if (generate_irq)
                        c |= DMA_CTL_IRQ;

                d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

                /* End of descriptor list? */
                if (i + 1 < d->num_desc) {
                        d->prg_cpu[i]->branchAddress =
                                cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
                } else {
                        /* Last descriptor branches back to the first,
                         * with the low (Z) bits left zero */
                        d->prg_cpu[i]->branchAddress =
                                cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
                }

                d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
                d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
        }

        /* Software read position starts at the first buffer */
        d->buf_ind = 0;
        d->buf_offset = 0;

        if (d->type == DMA_CTX_ISO) {
                /* Clear contextControl */
                reg_write(ohci, d->ctrlClear, 0xffffffff);

                /* Set bufferFill, isochHeader, multichannel for IR context */
                reg_write(ohci, d->ctrlSet, 0xd0000000);

                /* Set the context match register to match on all tags */
                reg_write(ohci, d->ctxtMatch, 0xf0000000);

                /* Clear the multi channel mask high and low registers */
                reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
                reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

                /* Set up isoRecvIntMask to generate interrupts */
                reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
        }

        /* Tell the controller where the first AR program is */
        reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

        /* Run context */
        reg_write(ohci, d->ctrlSet, 0x00008000);

        DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
430
431 /* Initialize the dma transmit context */
432 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
433 {
434         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
435
436         /* Stop the context */
437         ohci1394_stop_context(ohci, d->ctrlClear, NULL);
438
439         d->prg_ind = 0;
440         d->sent_ind = 0;
441         d->free_prgs = d->num_desc;
442         d->branchAddrPtr = NULL;
443         INIT_LIST_HEAD(&d->fifo_list);
444         INIT_LIST_HEAD(&d->pending_list);
445
446         if (d->type == DMA_CTX_ISO) {
447                 /* enable interrupts */
448                 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
449         }
450
451         DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
452 }
453
454 /* Count the number of available iso contexts */
455 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
456 {
457         int i,ctx=0;
458         u32 tmp;
459
460         reg_write(ohci, reg, 0xffffffff);
461         tmp = reg_read(ohci, reg);
462
463         DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
464
465         /* Count the number of contexts */
466         for (i=0; i<32; i++) {
467                 if (tmp & 1) ctx++;
468                 tmp >>= 1;
469         }
470         return ctx;
471 }
472
/* Global initialization.
 *
 * One-time controller bring-up: program bus options and link control,
 * configure the PHY (LCTRL/contender, port enables), set up self-ID
 * reception and the config ROM mapping, initialize all async DMA
 * contexts, unmask interrupts and enable the link.  Finally sanity-
 * check max_packet_size, which is derived from (possibly bogus)
 * serial EEPROM contents.
 */
static void ohci_initialize(struct ti_ohci *ohci)
{
        char irq_buf[16];
        quadlet_t buf;
        int num_ports, i;

        spin_lock_init(&ohci->phy_reg_lock);

        /* Put some defaults to these undefined bus options */
        buf = reg_read(ohci, OHCI1394_BusOptions);
        buf |=  0x60000000; /* Enable CMC and ISC */
        if (!hpsb_disable_irm)
                buf |=  0x80000000; /* Enable IRMC */
        buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
        buf &= ~0x18000000; /* Disable PMC and BMC */
        reg_write(ohci, OHCI1394_BusOptions, buf);

        /* Set the bus number */
        reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

        /* Enable posted writes */
        reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

        /* Clear link control register */
        reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

        /* Enable cycle timer and cycle master and set the IRM
         * contender bit in our self ID packets if appropriate. */
        reg_write(ohci, OHCI1394_LinkControlSet,
                  OHCI1394_LinkControl_CycleTimerEnable |
                  OHCI1394_LinkControl_CycleMaster);
        set_phy_reg_mask(ohci, 4, PHY_04_LCTRL |
                         (hpsb_disable_irm ? 0 : PHY_04_CONTENDER));

        /* Set up self-id dma buffer */
        reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

        /* enable self-id and phys */
        reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
                  OHCI1394_LinkControl_RcvPhyPkt);

        /* Set the Config ROM mapping register */
        reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

        /* Now get our max packet size: 2^(field+1) bytes, where the
         * field is BusOptions bits 15:12 (presumably max_rec) */
        ohci->max_packet_size =
                1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

        /* Don't accept phy packets into AR request context */
        reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

        /* Clear the interrupt mask */
        reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
        reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

        /* Clear the interrupt mask */
        reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
        reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

        /* Initialize AR dma */
        initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
        initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

        /* Initialize AT dma */
        initialize_dma_trm_ctx(&ohci->at_req_context);
        initialize_dma_trm_ctx(&ohci->at_resp_context);

        /* Initialize IR Legacy DMA channel mask */
        ohci->ir_legacy_channels = 0;

        /*
         * Accept AT requests from all nodes. This probably
         * will have to be controlled from the subsystem
         * on a per node basis.
         */
        reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);

        /* Specify AT retries */
        reg_write(ohci, OHCI1394_ATRetries,
                  OHCI1394_MAX_AT_REQ_RETRIES |
                  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
                  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

        /* We don't want hardware swapping */
        reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

        /* Enable interrupts */
        reg_write(ohci, OHCI1394_IntMaskSet,
                  OHCI1394_unrecoverableError |
                  OHCI1394_masterIntEnable |
                  OHCI1394_busReset |
                  OHCI1394_selfIDComplete |
                  OHCI1394_RSPkt |
                  OHCI1394_RQPkt |
                  OHCI1394_respTxComplete |
                  OHCI1394_reqTxComplete |
                  OHCI1394_isochRx |
                  OHCI1394_isochTx |
                  OHCI1394_cycleInconsistent);

        /* Enable link */
        reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

        buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
        sprintf (irq_buf, "%d", ohci->dev->irq);
#else
        sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
        /* Version register carries BCD-encoded major/minor revision */
        PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s]  "
              "MMIO=[%lx-%lx]  Max Packet=[%d]",
              ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
              ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
              pci_resource_start(ohci->dev, 0),
              pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
              ohci->max_packet_size);

        /* Check all of our ports to make sure that if anything is
         * connected, we enable that port. */
        num_ports = get_phy_reg(ohci, 2) & 0xf;
        for (i = 0; i < num_ports; i++) {
                unsigned int status;

                /* PHY reg 7 selects the port, reg 8 is its status */
                set_phy_reg(ohci, 7, i);
                status = get_phy_reg(ohci, 8);

                /* Clear the low (disable) bit if bit 5 is set */
                if (status & 0x20)
                        set_phy_reg(ohci, 8, status & ~1);
        }

        /* Serial EEPROM Sanity check. */
        if ((ohci->max_packet_size < 512) ||
            (ohci->max_packet_size > 4096)) {
                /* Serial EEPROM contents are suspect, set a sane max packet
                 * size and print the raw contents for bug reports if verbose
                 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
                int i;
#endif

                PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
                      "attempting to setting max_packet_size to 512 bytes");
                reg_write(ohci, OHCI1394_BusOptions,
                          (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
                ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
                PRINT(KERN_DEBUG, "    EEPROM Present: %d",
                      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
                reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

                /* Wait (bounded) for the GUID ROM access to complete */
                for (i = 0;
                     ((i < 1000) &&
                      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
                        udelay(10);

                /* Dump the first 0x20 EEPROM bytes, one read per loop */
                for (i = 0; i < 0x20; i++) {
                        reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
                        PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
                              (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
                }
#endif
        }
}
637
/*
 * Insert a packet in the DMA fifo and generate the DMA prg.
 *
 * Builds the OHCI AT/IT descriptor program for one outgoing packet:
 * an immediate descriptor carrying the (rearranged) header, plus a
 * payload descriptor when the packet has data.  Consumes one program
 * slot (d->free_prgs), maps packet->data for DMA when present, and
 * chains the new program onto the previous one via branchAddrPtr.
 * Called from dma_trm_flush() with d->lock held.
 *
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
                          struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
        u32 cycleTimer;
        int idx = d->prg_ind;

        DBGMSG("Inserting packet for node " NODE_BUS_FMT
               ", tlabel=%d, tcode=0x%x, speed=%d",
               NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
               packet->tcode, packet->speed_code);

        d->prg_cpu[idx]->begin.address = 0;
        d->prg_cpu[idx]->begin.branchAddress = 0;

        if (d->type == DMA_CTX_ASYNC_RESP) {
                /*
                 * For response packets, we need to put a timeout value in
                 * the 16 lower bits of the status... let's try 1 sec timeout
                 */
                cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
                d->prg_cpu[idx]->begin.status = cpu_to_le32(
                        (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
                        ((cycleTimer&0x01fff000)>>12));

                DBGMSG("cycleTimer: %08x timeStamp: %08x",
                       cycleTimer, d->prg_cpu[idx]->begin.status);
        } else
                d->prg_cpu[idx]->begin.status = 0;

        if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

                if (packet->type == hpsb_raw) {
                        /* Raw (PHY) packet: header quadlets are sent
                         * verbatim, prefixed with the PHY tcode */
                        d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
                        d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
                        d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
                } else {
                        /* Rearrange the 1394 header into the layout the
                         * AT context expects: speed code in the upper
                         * half of the first quadlet */
                        d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
                                (packet->header[0] & 0xFFFF);

                        if (packet->tcode == TCODE_ISO_DATA) {
                                /* Sending an async stream packet */
                                d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
                        } else {
                                /* Sending a normal async request or response */
                                d->prg_cpu[idx]->data[1] =
                                        (packet->header[1] & 0xFFFF) |
                                        (packet->header[0] & 0xFFFF0000);
                                d->prg_cpu[idx]->data[2] = packet->header[2];
                                d->prg_cpu[idx]->data[3] = packet->header[3];
                        }
                        /* Byte-swap the header on big-endian hosts */
                        packet_swab(d->prg_cpu[idx]->data, packet->tcode);
                }

                if (packet->data_size) { /* block transmit */
                        /* Immediate descriptor for the header: 8 bytes
                         * for stream packets, 16 bytes otherwise */
                        if (packet->tcode == TCODE_STREAM_DATA){
                                d->prg_cpu[idx]->begin.control =
                                        cpu_to_le32(DMA_CTL_OUTPUT_MORE |
                                                    DMA_CTL_IMMEDIATE | 0x8);
                        } else {
                                d->prg_cpu[idx]->begin.control =
                                        cpu_to_le32(DMA_CTL_OUTPUT_MORE |
                                                    DMA_CTL_IMMEDIATE | 0x10);
                        }
                        /* Second descriptor transmits the payload */
                        d->prg_cpu[idx]->end.control =
                                cpu_to_le32(DMA_CTL_OUTPUT_LAST |
                                            DMA_CTL_IRQ |
                                            DMA_CTL_BRANCH |
                                            packet->data_size);
                        /*
                         * Check that the packet data buffer
                         * does not cross a page boundary.
                         *
                         * XXX Fix this some day. eth1394 seems to trigger
                         * it, but ignoring it doesn't seem to cause a
                         * problem.
                         */
#if 0
                        if (cross_bound((unsigned long)packet->data,
                                        packet->data_size)>0) {
                                /* FIXME: do something about it */
                                PRINT(KERN_ERR,
                                      "%s: packet data addr: %p size %Zd bytes "
                                      "cross page boundary", __FUNCTION__,
                                      packet->data, packet->data_size);
                        }
#endif
                        d->prg_cpu[idx]->end.address = cpu_to_le32(
                                pci_map_single(ohci->dev, packet->data,
                                               packet->data_size,
                                               PCI_DMA_TODEVICE));
                        OHCI_DMA_ALLOC("single, block transmit packet");

                        d->prg_cpu[idx]->end.branchAddress = 0;
                        d->prg_cpu[idx]->end.status = 0;
                        /* Chain onto the previous program (low bits 0x3:
                         * two descriptors in this program) and remember
                         * where the next program must be linked */
                        if (d->branchAddrPtr)
                                *(d->branchAddrPtr) =
                                        cpu_to_le32(d->prg_bus[idx] | 0x3);
                        d->branchAddrPtr =
                                &(d->prg_cpu[idx]->end.branchAddress);
                } else { /* quadlet transmit */
                        if (packet->type == hpsb_raw)
                                d->prg_cpu[idx]->begin.control =
                                        cpu_to_le32(DMA_CTL_OUTPUT_LAST |
                                                    DMA_CTL_IMMEDIATE |
                                                    DMA_CTL_IRQ |
                                                    DMA_CTL_BRANCH |
                                                    (packet->header_size + 4));
                        else
                                d->prg_cpu[idx]->begin.control =
                                        cpu_to_le32(DMA_CTL_OUTPUT_LAST |
                                                    DMA_CTL_IMMEDIATE |
                                                    DMA_CTL_IRQ |
                                                    DMA_CTL_BRANCH |
                                                    packet->header_size);

                        /* Single-descriptor program (low bits 0x2) */
                        if (d->branchAddrPtr)
                                *(d->branchAddrPtr) =
                                        cpu_to_le32(d->prg_bus[idx] | 0x2);
                        d->branchAddrPtr =
                                &(d->prg_cpu[idx]->begin.branchAddress);
                }

        } else { /* iso packet */
                d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
                        (packet->header[0] & 0xFFFF);
                d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
                packet_swab(d->prg_cpu[idx]->data, packet->tcode);

                /* 8-byte immediate header + payload descriptor */
                d->prg_cpu[idx]->begin.control =
                        cpu_to_le32(DMA_CTL_OUTPUT_MORE |
                                    DMA_CTL_IMMEDIATE | 0x8);
                d->prg_cpu[idx]->end.control =
                        cpu_to_le32(DMA_CTL_OUTPUT_LAST |
                                    DMA_CTL_UPDATE |
                                    DMA_CTL_IRQ |
                                    DMA_CTL_BRANCH |
                                    packet->data_size);
                d->prg_cpu[idx]->end.address = cpu_to_le32(
                                pci_map_single(ohci->dev, packet->data,
                                packet->data_size, PCI_DMA_TODEVICE));
                OHCI_DMA_ALLOC("single, iso transmit packet");

                d->prg_cpu[idx]->end.branchAddress = 0;
                d->prg_cpu[idx]->end.status = 0;
                DBGMSG("Iso xmit context info: header[%08x %08x]\n"
                       "                       begin=%08x %08x %08x %08x\n"
                       "                             %08x %08x %08x %08x\n"
                       "                       end  =%08x %08x %08x %08x",
                       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
                       d->prg_cpu[idx]->begin.control,
                       d->prg_cpu[idx]->begin.address,
                       d->prg_cpu[idx]->begin.branchAddress,
                       d->prg_cpu[idx]->begin.status,
                       d->prg_cpu[idx]->data[0],
                       d->prg_cpu[idx]->data[1],
                       d->prg_cpu[idx]->data[2],
                       d->prg_cpu[idx]->data[3],
                       d->prg_cpu[idx]->end.control,
                       d->prg_cpu[idx]->end.address,
                       d->prg_cpu[idx]->end.branchAddress,
                       d->prg_cpu[idx]->end.status);
                if (d->branchAddrPtr)
                        *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
                d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
        }
        d->free_prgs--;

        /* queue the packet in the appropriate context queue */
        list_add_tail(&packet->driver_list, &d->fifo_list);
        d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
816
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	/* idx is captured BEFORE inserting: it names the first descriptor of
	 * the chain we build below, which is what cmdPtr must point at. */
	int idx = d->prg_ind;
	/* z stays 0 if nothing is inserted; otherwise it is the Z value
	 * (descriptor count) written alongside the command pointer. */
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only: 3 descriptors if it carries a
		 * data payload, 2 if it is header-only. */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context)
	   — bit 0x8000 of ContextControl is the run bit. */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		/* point the context at the first descriptor before running */
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63
		 * (0x80000000 = idValid flag, 63 = the unaddressed node id) */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary (0x400 = active bit) */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}
871
872 /* Transmission of an async or iso packet */
873 static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
874 {
875         struct ti_ohci *ohci = host->hostdata;
876         struct dma_trm_ctx *d;
877         unsigned long flags;
878
879         if (packet->data_size > ohci->max_packet_size) {
880                 PRINT(KERN_ERR,
881                       "Transmit packet size %Zd is too big",
882                       packet->data_size);
883                 return -EOVERFLOW;
884         }
885
886         /* Decide whether we have an iso, a request, or a response packet */
887         if (packet->type == hpsb_raw)
888                 d = &ohci->at_req_context;
889         else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
890                 /* The legacy IT DMA context is initialized on first
891                  * use.  However, the alloc cannot be run from
892                  * interrupt context, so we bail out if that is the
893                  * case. I don't see anyone sending ISO packets from
894                  * interrupt context anyway... */
895
896                 if (ohci->it_legacy_context.ohci == NULL) {
897                         if (in_interrupt()) {
898                                 PRINT(KERN_ERR,
899                                       "legacy IT context cannot be initialized during interrupt");
900                                 return -EINVAL;
901                         }
902
903                         if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
904                                               DMA_CTX_ISO, 0, IT_NUM_DESC,
905                                               OHCI1394_IsoXmitContextBase) < 0) {
906                                 PRINT(KERN_ERR,
907                                       "error initializing legacy IT context");
908                                 return -ENOMEM;
909                         }
910
911                         initialize_dma_trm_ctx(&ohci->it_legacy_context);
912                 }
913
914                 d = &ohci->it_legacy_context;
915         } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
916                 d = &ohci->at_resp_context;
917         else
918                 d = &ohci->at_req_context;
919
920         spin_lock_irqsave(&d->lock,flags);
921
922         list_add_tail(&packet->driver_list, &d->pending_list);
923
924         dma_trm_flush(ohci, d);
925
926         spin_unlock_irqrestore(&d->lock,flags);
927
928         return 0;
929 }
930
931 static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
932 {
933         struct ti_ohci *ohci = host->hostdata;
934         int retval = 0;
935         unsigned long flags;
936         int phy_reg;
937
938         switch (cmd) {
939         case RESET_BUS:
940                 switch (arg) {
941                 case SHORT_RESET:
942                         phy_reg = get_phy_reg(ohci, 5);
943                         phy_reg |= 0x40;
944                         set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
945                         break;
946                 case LONG_RESET:
947                         phy_reg = get_phy_reg(ohci, 1);
948                         phy_reg |= 0x40;
949                         set_phy_reg(ohci, 1, phy_reg); /* set IBR */
950                         break;
951                 case SHORT_RESET_NO_FORCE_ROOT:
952                         phy_reg = get_phy_reg(ohci, 1);
953                         if (phy_reg & 0x80) {
954                                 phy_reg &= ~0x80;
955                                 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
956                         }
957
958                         phy_reg = get_phy_reg(ohci, 5);
959                         phy_reg |= 0x40;
960                         set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
961                         break;
962                 case LONG_RESET_NO_FORCE_ROOT:
963                         phy_reg = get_phy_reg(ohci, 1);
964                         phy_reg &= ~0x80;
965                         phy_reg |= 0x40;
966                         set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
967                         break;
968                 case SHORT_RESET_FORCE_ROOT:
969                         phy_reg = get_phy_reg(ohci, 1);
970                         if (!(phy_reg & 0x80)) {
971                                 phy_reg |= 0x80;
972                                 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
973                         }
974
975                         phy_reg = get_phy_reg(ohci, 5);
976                         phy_reg |= 0x40;
977                         set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
978                         break;
979                 case LONG_RESET_FORCE_ROOT:
980                         phy_reg = get_phy_reg(ohci, 1);
981                         phy_reg |= 0xc0;
982                         set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
983                         break;
984                 default:
985                         retval = -1;
986                 }
987                 break;
988
989         case GET_CYCLE_COUNTER:
990                 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
991                 break;
992
993         case SET_CYCLE_COUNTER:
994                 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
995                 break;
996
997         case SET_BUS_ID:
998                 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
999                 break;
1000
1001         case ACT_CYCLE_MASTER:
1002                 if (arg) {
1003                         /* check if we are root and other nodes are present */
1004                         u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
1005                         if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1006                                 /*
1007                                  * enable cycleTimer, cycleMaster
1008                                  */
1009                                 DBGMSG("Cycle master enabled");
1010                                 reg_write(ohci, OHCI1394_LinkControlSet,
1011                                           OHCI1394_LinkControl_CycleTimerEnable |
1012                                           OHCI1394_LinkControl_CycleMaster);
1013                         }
1014                 } else {
1015                         /* disable cycleTimer, cycleMaster, cycleSource */
1016                         reg_write(ohci, OHCI1394_LinkControlClear,
1017                                   OHCI1394_LinkControl_CycleTimerEnable |
1018                                   OHCI1394_LinkControl_CycleMaster |
1019                                   OHCI1394_LinkControl_CycleSource);
1020                 }
1021                 break;
1022
1023         case CANCEL_REQUESTS:
1024                 DBGMSG("Cancel request received");
1025                 dma_trm_reset(&ohci->at_req_context);
1026                 dma_trm_reset(&ohci->at_resp_context);
1027                 break;
1028
1029         case ISO_LISTEN_CHANNEL:
1030         {
1031                 u64 mask;
1032                 struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
1033                 int ir_legacy_active;
1034
1035                 if (arg<0 || arg>63) {
1036                         PRINT(KERN_ERR,
1037                               "%s: IS0 listen channel %d is out of range",
1038                               __FUNCTION__, arg);
1039                         return -EFAULT;
1040                 }
1041
1042                 mask = (u64)0x1<<arg;
1043
1044                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1045
1046                 if (ohci->ISO_channel_usage & mask) {
1047                         PRINT(KERN_ERR,
1048                               "%s: IS0 listen channel %d is already used",
1049                               __FUNCTION__, arg);
1050                         spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1051                         return -EFAULT;
1052                 }
1053
1054                 ir_legacy_active = ohci->ir_legacy_channels;
1055
1056                 ohci->ISO_channel_usage |= mask;
1057                 ohci->ir_legacy_channels |= mask;
1058
1059                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1060
1061                 if (!ir_legacy_active) {
1062                         if (ohci1394_register_iso_tasklet(ohci,
1063                                           &ohci->ir_legacy_tasklet) < 0) {
1064                                 PRINT(KERN_ERR, "No IR DMA context available");
1065                                 return -EBUSY;
1066                         }
1067
1068                         /* the IR context can be assigned to any DMA context
1069                          * by ohci1394_register_iso_tasklet */
1070                         d->ctx = ohci->ir_legacy_tasklet.context;
1071                         d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
1072                                 32*d->ctx;
1073                         d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
1074                                 32*d->ctx;
1075                         d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
1076                         d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
1077
1078                         initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1079
1080                         PRINT(KERN_ERR, "IR legacy activated");
1081                 }
1082
1083                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1084
1085                 if (arg>31)
1086                         reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1087                                   1<<(arg-32));
1088                 else
1089                         reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1090                                   1<<arg);
1091
1092                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1093                 DBGMSG("Listening enabled on channel %d", arg);
1094                 break;
1095         }
1096         case ISO_UNLISTEN_CHANNEL:
1097         {
1098                 u64 mask;
1099
1100                 if (arg<0 || arg>63) {
1101                         PRINT(KERN_ERR,
1102                               "%s: IS0 unlisten channel %d is out of range",
1103                               __FUNCTION__, arg);
1104                         return -EFAULT;
1105                 }
1106
1107                 mask = (u64)0x1<<arg;
1108
1109                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1110
1111                 if (!(ohci->ISO_channel_usage & mask)) {
1112                         PRINT(KERN_ERR,
1113                               "%s: IS0 unlisten channel %d is not used",
1114                               __FUNCTION__, arg);
1115                         spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1116                         return -EFAULT;
1117                 }
1118
1119                 ohci->ISO_channel_usage &= ~mask;
1120                 ohci->ir_legacy_channels &= ~mask;
1121
1122                 if (arg>31)
1123                         reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1124                                   1<<(arg-32));
1125                 else
1126                         reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1127                                   1<<arg);
1128
1129                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1130                 DBGMSG("Listening disabled on channel %d", arg);
1131
1132                 if (ohci->ir_legacy_channels == 0) {
1133                         stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1134                         DBGMSG("ISO legacy receive context stopped");
1135                 }
1136
1137                 break;
1138         }
1139         default:
1140                 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1141                         cmd);
1142                 break;
1143         }
1144         return retval;
1145 }
1146
1147 /***********************************
1148  * rawiso ISO reception            *
1149  ***********************************/
1150
1151 /*
1152   We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1153   buffer is split into "blocks" (regions described by one DMA
1154   descriptor). Each block must be one page or less in size, and
1155   must not cross a page boundary.
1156
1157   There is one little wrinkle with buffer-fill mode: a packet that
1158   starts in the final block may wrap around into the first block. But
1159   the user API expects all packets to be contiguous. Our solution is
1160   to keep the very last page of the DMA buffer in reserve - if a
1161   packet spans the gap, we copy its tail into this page.
1162 */
1163
/* Per-iso-handle state for rawiso reception. */
struct ohci_iso_recv {
	/* owning host controller */
	struct ti_ohci *ohci;

	/* bottom-half that processes completed blocks */
	struct ohci1394_iso_tasklet task;
	/* nonzero once the tasklet/DMA context has been registered */
	int task_active;

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks (bytes per block in the data buffer) */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers (offsets for this context) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1205
1206 static void ohci_iso_recv_task(unsigned long data);
1207 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1208 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1209 static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1210 static void ohci_iso_recv_program(struct hpsb_iso *iso);
1211
1212 static int ohci_iso_recv_init(struct hpsb_iso *iso)
1213 {
1214         struct ti_ohci *ohci = iso->host->hostdata;
1215         struct ohci_iso_recv *recv;
1216         int ctx;
1217         int ret = -ENOMEM;
1218
1219         recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
1220         if (!recv)
1221                 return -ENOMEM;
1222
1223         iso->hostdata = recv;
1224         recv->ohci = ohci;
1225         recv->task_active = 0;
1226         dma_prog_region_init(&recv->prog);
1227         recv->block = NULL;
1228
1229         /* use buffer-fill mode, unless irq_interval is 1
1230            (note: multichannel requires buffer-fill) */
1231
1232         if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
1233              iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
1234                 recv->dma_mode = PACKET_PER_BUFFER_MODE;
1235         } else {
1236                 recv->dma_mode = BUFFER_FILL_MODE;
1237         }
1238
1239         /* set nblocks, buf_stride, block_irq_interval */
1240
1241         if (recv->dma_mode == BUFFER_FILL_MODE) {
1242                 recv->buf_stride = PAGE_SIZE;
1243
1244                 /* one block per page of data in the DMA buffer, minus the final guard page */
1245                 recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1246                 if (recv->nblocks < 3) {
1247                         DBGMSG("ohci_iso_recv_init: DMA buffer too small");
1248                         goto err;
1249                 }
1250
1251                 /* iso->irq_interval is in packets - translate that to blocks */
1252                 if (iso->irq_interval == 1)
1253                         recv->block_irq_interval = 1;
1254                 else
1255                         recv->block_irq_interval = iso->irq_interval *
1256                                                         ((recv->nblocks+1)/iso->buf_packets);
1257                 if (recv->block_irq_interval*4 > recv->nblocks)
1258                         recv->block_irq_interval = recv->nblocks/4;
1259                 if (recv->block_irq_interval < 1)
1260                         recv->block_irq_interval = 1;
1261
1262         } else {
1263                 int max_packet_size;
1264
1265                 recv->nblocks = iso->buf_packets;
1266                 recv->block_irq_interval = iso->irq_interval;
1267                 if (recv->block_irq_interval * 4 > iso->buf_packets)
1268                         recv->block_irq_interval = iso->buf_packets / 4;
1269                 if (recv->block_irq_interval < 1)
1270                 recv->block_irq_interval = 1;
1271
1272                 /* choose a buffer stride */
1273                 /* must be a power of 2, and <= PAGE_SIZE */
1274
1275                 max_packet_size = iso->buf_size / iso->buf_packets;
1276
1277                 for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1278                     recv->buf_stride *= 2);
1279
1280                 if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1281                    recv->buf_stride > PAGE_SIZE) {
1282                         /* this shouldn't happen, but anyway... */
1283                         DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
1284                         goto err;
1285                 }
1286         }
1287
1288         recv->block_reader = 0;
1289         recv->released_bytes = 0;
1290         recv->block_dma = 0;
1291         recv->dma_offset = 0;
1292
1293         /* size of DMA program = one descriptor per block */
1294         if (dma_prog_region_alloc(&recv->prog,
1295                                  sizeof(struct dma_cmd) * recv->nblocks,
1296                                  recv->ohci->dev))
1297                 goto err;
1298
1299         recv->block = (struct dma_cmd*) recv->prog.kvirt;
1300
1301         ohci1394_init_iso_tasklet(&recv->task,
1302                                   iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1303                                                        OHCI_ISO_RECEIVE,
1304                                   ohci_iso_recv_task, (unsigned long) iso);
1305
1306         if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
1307                 ret = -EBUSY;
1308                 goto err;
1309         }
1310
1311         recv->task_active = 1;
1312
1313         /* recv context registers are spaced 32 bytes apart */
1314         ctx = recv->task.context;
1315         recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1316         recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1317         recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1318         recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1319
1320         if (iso->channel == -1) {
1321                 /* clear multi-channel selection mask */
1322                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1323                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1324         }
1325
1326         /* write the DMA program */
1327         ohci_iso_recv_program(iso);
1328
1329         DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1330                " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1331                recv->dma_mode == BUFFER_FILL_MODE ?
1332                "buffer-fill" : "packet-per-buffer",
1333                iso->buf_size/PAGE_SIZE, iso->buf_size,
1334                recv->nblocks, recv->buf_stride, recv->block_irq_interval);
1335
1336         return 0;
1337
1338 err:
1339         ohci_iso_recv_shutdown(iso);
1340         return ret;
1341 }
1342
1343 static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1344 {
1345         struct ohci_iso_recv *recv = iso->hostdata;
1346
1347         /* disable interrupts */
1348         reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1349
1350         /* halt DMA */
1351         ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1352 }
1353
1354 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1355 {
1356         struct ohci_iso_recv *recv = iso->hostdata;
1357
1358         if (recv->task_active) {
1359                 ohci_iso_recv_stop(iso);
1360                 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1361                 recv->task_active = 0;
1362         }
1363
1364         dma_prog_region_free(&recv->prog);
1365         kfree(recv);
1366         iso->hostdata = NULL;
1367 }
1368
/* set up a "gapped" ring buffer DMA program: one descriptor per block,
 * each linked to the next; the last descriptor is left unlinked so the
 * ring has a gap until blocks are released back to the hardware. */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride; /* low bits = reqCount (block size) */

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		/* resCount starts at the full block size (nothing received yet) */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one (| 1 sets Z=1) */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
1421
1422 /* listen or unlisten to a specific channel (multi-channel mode only) */
1423 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1424 {
1425         struct ohci_iso_recv *recv = iso->hostdata;
1426         int reg, i;
1427
1428         if (channel < 32) {
1429                 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1430                 i = channel;
1431         } else {
1432                 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1433                 i = channel - 32;
1434         }
1435
1436         reg_write(recv->ohci, reg, (1 << i));
1437
1438         /* issue a dummy read to force all PCI writes to be posted immediately */
1439         mb();
1440         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1441 }
1442
1443 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1444 {
1445         struct ohci_iso_recv *recv = iso->hostdata;
1446         int i;
1447
1448         for (i = 0; i < 64; i++) {
1449                 if (mask & (1ULL << i)) {
1450                         if (i < 32)
1451                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1452                         else
1453                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1454                 } else {
1455                         if (i < 32)
1456                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1457                         else
1458                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1459                 }
1460         }
1461
1462         /* issue a dummy read to force all PCI writes to be posted immediately */
1463         mb();
1464         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1465 }
1466
/*
 * Program and start the IR DMA context: configure receive mode, the
 * channel/tag/cycle/sync match conditions, point the context at the
 * first descriptor, enable its interrupt and set the run bit.
 *
 * cycle == -1 means start immediately; sync == -1 means no sync match.
 * Returns 0 on success, -1 if the context refused to run.
 */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	/* clear the whole ContextControl before reprogramming it */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	/* bit 31 selects buffer-fill mode */
	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags (tag mask lives in bits 31:28) */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run (0x8000 = run bit) */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
1557
/* Return one buffer-fill DMA block to the hardware.
 *
 * The freed block becomes the new tail of the circular descriptor chain:
 * its branchAddress is cleared (end of chain) and the previous block is
 * re-linked to point at it, so the controller can keep streaming without
 * a context restart. Finally the context is poked awake in case it
 * stalled on the old chain end. */
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	/* descriptor ring wraps: predecessor of block 0 is the last block */
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	/* reset status: resCount = full block size, xferStatus = 0 */
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals
	 * (every block_irq_interval blocks we still want an IRQ so the
	 * tasklet keeps getting scheduled) */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	/* make sure descriptor updates are visible before poking the context */
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}
1591
1592 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1593                                              struct hpsb_iso_packet_info *info)
1594 {
1595         int len;
1596
1597         /* release the memory where the packet was */
1598         len = info->len;
1599
1600         /* add the wasted space for padding to 4 bytes */
1601         if (len % 4)
1602                 len += 4 - (len % 4);
1603
1604         /* add 8 bytes for the OHCI DMA data format overhead */
1605         len += 8;
1606
1607         recv->released_bytes += len;
1608
1609         /* have we released enough memory for one block? */
1610         while (recv->released_bytes > recv->buf_stride) {
1611                 ohci_iso_recv_release_block(recv, recv->block_reader);
1612                 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1613                 recv->released_bytes -= recv->buf_stride;
1614         }
1615 }
1616
1617 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1618 {
1619         struct ohci_iso_recv *recv = iso->hostdata;
1620         if (recv->dma_mode == BUFFER_FILL_MODE) {
1621                 ohci_iso_recv_bufferfill_release(recv, info);
1622         } else {
1623                 ohci_iso_recv_release_block(recv, info - iso->infos);
1624         }
1625 }
1626
1627 /* parse all packets from blocks that have been fully received */
/* Walk the buffer-fill receive buffer from recv->dma_offset, extracting
 * every complete packet up to (but not into) the block the controller is
 * currently writing (recv->block_dma), and hand each one to the rawiso
 * layer via hpsb_iso_packet_received().
 *
 * Packet layout in the buffer is: 4-byte iso header, payload padded to a
 * quadlet boundary, 4-byte xferStatus/timeStamp trailer (OHCI spec
 * section 10.6.1.1). A packet whose payload wraps past the end of the
 * circular buffer is made contiguous by copying its tail into the guard
 * page that follows the last block. */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely - a corrupt buffer could otherwise
		 * keep us parsing garbage forever */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		/* iso header bytes 2-3: little-endian dataLength */
		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		/* NOTE(review): a bogus length is reported but parsing still
		 * continues with it - there is no resynchronization here */
		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload (past the 4-byte iso header) */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0  && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp from the trailer (low 13 bits = cycle count) */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet (past the 4-byte trailer) */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
	}

	/* only wake sleepers if at least one packet was delivered */
	if (wake)
		hpsb_iso_wake(iso);
}
1732
1733 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1734 {
1735         int loop;
1736         struct ti_ohci *ohci = recv->ohci;
1737
1738         /* loop over all blocks */
1739         for (loop = 0; loop < recv->nblocks; loop++) {
1740
1741                 /* check block_dma to see if it's done */
1742                 struct dma_cmd *im = &recv->block[recv->block_dma];
1743
1744                 /* check the DMA descriptor for new writes to xferStatus */
1745                 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1746
1747                 /* rescount is the number of bytes *remaining to be written* in the block */
1748                 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1749
1750                 unsigned char event = xferstatus & 0x1F;
1751
1752                 if (!event) {
1753                         /* nothing has happened to this block yet */
1754                         break;
1755                 }
1756
1757                 if (event != 0x11) {
1758                         atomic_inc(&iso->overflows);
1759                         PRINT(KERN_ERR,
1760                               "IR DMA error - OHCI error code 0x%02x\n", event);
1761                 }
1762
1763                 if (rescount != 0) {
1764                         /* the card is still writing to this block;
1765                            we can't touch it until it's done */
1766                         break;
1767                 }
1768
1769                 /* OK, the block is finished... */
1770
1771                 /* sync our view of the block */
1772                 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1773
1774                 /* reset the DMA descriptor */
1775                 im->status = recv->buf_stride;
1776
1777                 /* advance block_dma */
1778                 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1779
1780                 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1781                         atomic_inc(&iso->overflows);
1782                         DBGMSG("ISO reception overflow - "
1783                                "ran out of DMA blocks");
1784                 }
1785         }
1786
1787         /* parse any packets that have arrived */
1788         ohci_iso_recv_bufferfill_parse(iso, recv);
1789 }
1790
1791 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1792 {
1793         int count;
1794         int wake = 0;
1795         struct ti_ohci *ohci = recv->ohci;
1796
1797         /* loop over the entire buffer */
1798         for (count = 0; count < recv->nblocks; count++) {
1799                 u32 packet_len = 0;
1800
1801                 /* pointer to the DMA descriptor */
1802                 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1803
1804                 /* check the DMA descriptor for new writes to xferStatus */
1805                 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1806                 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1807
1808                 unsigned char event = xferstatus & 0x1F;
1809
1810                 if (!event) {
1811                         /* this packet hasn't come in yet; we are done for now */
1812                         goto out;
1813                 }
1814
1815                 if (event == 0x11) {
1816                         /* packet received successfully! */
1817
1818                         /* rescount is the number of bytes *remaining* in the packet buffer,
1819                            after the packet was written */
1820                         packet_len = recv->buf_stride - rescount;
1821
1822                 } else if (event == 0x02) {
1823                         PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1824                 } else if (event) {
1825                         PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1826                 }
1827
1828                 /* sync our view of the buffer */
1829                 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1830
1831                 /* record the per-packet info */
1832                 {
1833                         /* iso header is 8 bytes ahead of the data payload */
1834                         unsigned char *hdr;
1835
1836                         unsigned int offset;
1837                         unsigned short cycle;
1838                         unsigned char channel, tag, sy;
1839
1840                         offset = iso->pkt_dma * recv->buf_stride;
1841                         hdr = iso->data_buf.kvirt + offset;
1842
1843                         /* skip iso header */
1844                         offset += 8;
1845                         packet_len -= 8;
1846
1847                         cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1848                         channel = hdr[5] & 0x3F;
1849                         tag = hdr[5] >> 6;
1850                         sy = hdr[4] & 0xF;
1851
1852                         hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
1853                 }
1854
1855                 /* reset the DMA descriptor */
1856                 il->status = recv->buf_stride;
1857
1858                 wake = 1;
1859                 recv->block_dma = iso->pkt_dma;
1860         }
1861
1862 out:
1863         if (wake)
1864                 hpsb_iso_wake(iso);
1865 }
1866
1867 static void ohci_iso_recv_task(unsigned long data)
1868 {
1869         struct hpsb_iso *iso = (struct hpsb_iso*) data;
1870         struct ohci_iso_recv *recv = iso->hostdata;
1871
1872         if (recv->dma_mode == BUFFER_FILL_MODE)
1873                 ohci_iso_recv_bufferfill_task(iso, recv);
1874         else
1875                 ohci_iso_recv_packetperbuf_task(iso, recv);
1876 }
1877
1878 /***********************************
1879  * rawiso ISO transmission         *
1880  ***********************************/
1881
/* Per-context state for rawiso transmission. */
struct ohci_iso_xmit {
	struct ti_ohci *ohci;		/* owning host controller */
	struct dma_prog_region prog;	/* DMA program (one iso_xmit_cmd per packet) */
	struct ohci1394_iso_tasklet task;	/* completion tasklet */
	int task_active;		/* nonzero once the tasklet is registered */

	/* register offsets for this context (spaced 16 bytes apart) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};
1892
/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;	/* carries the iso header inline */
	u8 iso_hdr[8];		/* immediate data: 1394 iso packet header */
	u32 unused[2];		/* padding to align output_last */
	struct dma_cmd output_last;	/* points at the payload in data_buf */
};
1903
/* forward declarations for the iso transmit implementation below */
static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);
1908
/* Allocate and initialize the per-context transmit state for 'iso':
 * the ohci_iso_xmit structure, its DMA program (one iso_xmit_cmd per
 * queued packet), and a transmit tasklet/context.
 *
 * Returns 0 on success, -ENOMEM if allocation fails, -EBUSY if no
 * transmit context is free. On failure, partially-acquired resources
 * are torn down via ohci_iso_xmit_shutdown(). */
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	/* one descriptor pair per packet slot in the ring */
	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	/* claims a free hardware transmit context, if any */
	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	/* shutdown frees the prog region and xmit, and clears iso->hostdata */
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
1953
1954 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1955 {
1956         struct ohci_iso_xmit *xmit = iso->hostdata;
1957         struct ti_ohci *ohci = xmit->ohci;
1958
1959         /* disable interrupts */
1960         reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1961
1962         /* halt DMA */
1963         if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1964                 /* XXX the DMA context will lock up if you try to send too much data! */
1965                 PRINT(KERN_ERR,
1966                       "you probably exceeded the OHCI card's bandwidth limit - "
1967                       "reload the module and reduce xmit bandwidth");
1968         }
1969 }
1970
1971 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1972 {
1973         struct ohci_iso_xmit *xmit = iso->hostdata;
1974
1975         if (xmit->task_active) {
1976                 ohci_iso_xmit_stop(iso);
1977                 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1978                 xmit->task_active = 0;
1979         }
1980
1981         dma_prog_region_free(&xmit->prog);
1982         kfree(xmit);
1983         iso->hostdata = NULL;
1984 }
1985
/* Transmit completion tasklet: scan descriptors starting at iso->pkt_dma,
 * report each packet the controller has finished sending (via
 * hpsb_iso_packet_sent), reset its descriptor, and wake any writer
 * waiting for ring space. */
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8  event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		/* 0x11 is evt_ack_complete; anything else is an error */
		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle (low 13 bits of the timeStamp written back
		 * into the status word) */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
2030
/* Append one packet to the transmit DMA program.
 *
 * Builds an OUTPUT_MORE_IMMEDIATE descriptor carrying the 1394 iso
 * header plus an OUTPUT_LAST pointing at the payload in iso->data_buf,
 * then splices the pair onto the end of the running DMA chain by
 * rewriting the previous descriptor's branchAddress, and finally wakes
 * the context. Returns 0 on success, -EINVAL if the payload crosses a
 * page boundary (OUTPUT_MORE support would be needed for that). */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	/* ring wraps: predecessor of slot 0 is the last slot */
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor
	 * (key=immediate, 8 bytes of header data follow inline) */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size (little-endian, split across two bytes) */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now - this slot is the new chain end */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	/* make all descriptor writes visible before waking the context */
	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
2126
/* Start the transmit DMA context, optionally waiting for a specific
 * start cycle. 'cycle' is the 13-bit cycle number to begin on, or -1
 * to start immediately. Returns 0 on success, -1 if the RUN bit failed
 * to stick. */
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		/* cycleMatchEnable (bit 31) + 15-bit match value in bits 16-30 */
		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run (set the RUN bit) */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	mb();

	/* wait 100 usec to give the card time to go active */
	udelay(100);

	/* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}
2175
2176 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2177 {
2178
2179         switch(cmd) {
2180         case XMIT_INIT:
2181                 return ohci_iso_xmit_init(iso);
2182         case XMIT_START:
2183                 return ohci_iso_xmit_start(iso, arg);
2184         case XMIT_STOP:
2185                 ohci_iso_xmit_stop(iso);
2186                 return 0;
2187         case XMIT_QUEUE:
2188                 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2189         case XMIT_SHUTDOWN:
2190                 ohci_iso_xmit_shutdown(iso);
2191                 return 0;
2192
2193         case RECV_INIT:
2194                 return ohci_iso_recv_init(iso);
2195         case RECV_START: {
2196                 int *args = (int*) arg;
2197                 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2198         }
2199         case RECV_STOP:
2200                 ohci_iso_recv_stop(iso);
2201                 return 0;
2202         case RECV_RELEASE:
2203                 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2204                 return 0;
2205         case RECV_FLUSH:
2206                 ohci_iso_recv_task((unsigned long) iso);
2207                 return 0;
2208         case RECV_SHUTDOWN:
2209                 ohci_iso_recv_shutdown(iso);
2210                 return 0;
2211         case RECV_LISTEN_CHANNEL:
2212                 ohci_iso_recv_change_channel(iso, arg, 1);
2213                 return 0;
2214         case RECV_UNLISTEN_CHANNEL:
2215                 ohci_iso_recv_change_channel(iso, arg, 0);
2216                 return 0;
2217         case RECV_SET_CHANNEL_MASK:
2218                 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2219                 return 0;
2220
2221         default:
2222                 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2223                         cmd);
2224                 break;
2225         }
2226         return -EINVAL;
2227 }
2228
2229 /***************************************
2230  * IEEE-1394 functionality section END *
2231  ***************************************/
2232
2233
2234 /********************************************************
2235  * Global stuff (interrupt handler, init/shutdown code) *
2236  ********************************************************/
2237
/* Reset an async transmit DMA context after stopping it: detach all
 * queued packets under the context lock, restore the context's free
 * descriptor accounting, then (outside the lock) complete each aborted
 * packet back to the subsystem with ACKX_ABORTED. */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);
	struct ti_ohci *ohci = d->ohci;
	struct hpsb_packet *packet, *ptmp;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	/* collect both in-flight and not-yet-programmed packets */
	list_splice(&d->fifo_list, &packet_list);
	list_splice(&d->pending_list, &packet_list);
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	/* all descriptors are free again */
	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;
	d->free_prgs = d->num_desc;

	spin_unlock_irqrestore(&d->lock, flags);

	if (list_empty(&packet_list))
		return;

	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

	/* Now process subsystem callbacks for the packets from this
	 * context. hpsb_packet_sent may free the packet, hence the
	 * _safe iteration. */
	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
	}
}
2276
2277 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2278                                        quadlet_t rx_event,
2279                                        quadlet_t tx_event)
2280 {
2281         struct ohci1394_iso_tasklet *t;
2282         unsigned long mask;
2283
2284         spin_lock(&ohci->iso_tasklet_list_lock);
2285
2286         list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2287                 mask = 1 << t->context;
2288
2289                 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2290                         tasklet_schedule(&t->tasklet);
2291                 else if (rx_event & mask)
2292                         tasklet_schedule(&t->tasklet);
2293         }
2294
2295         spin_unlock(&ohci->iso_tasklet_list_lock);
2296
2297 }
2298
/* Top-half interrupt handler for the OHCI-1394 controller.
 *
 * Reads and acknowledges IntEvent (except busReset, which is only
 * cleared once selfIDComplete arrives), then dispatches each asserted
 * event bit: dead-context reporting, bus-reset throttling, AT/AR and
 * isochronous completion handling, and self-ID processing.  Returns
 * IRQ_NONE when the interrupt was not ours or the card was ejected.
 */
static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
			     struct pt_regs *regs_are_unused)
{
	quadlet_t event, node_id;
	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
	struct hpsb_host *host = ohci->host;
	int phyid = -1, isroot = 0;
	unsigned long flags;

	/* Read and clear the interrupt event register.  Don't clear
	 * the busReset event, though. This is done when we get the
	 * selfIDComplete interrupt. */
	spin_lock_irqsave(&ohci->event_lock, flags);
	event = reg_read(ohci, OHCI1394_IntEventClear);
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	spin_unlock_irqrestore(&ohci->event_lock, flags);

	/* Shared interrupt line and nothing pending: not ours. */
	if (!event)
		return IRQ_NONE;

	/* If event is ~(u32)0 cardbus card was ejected.  In this case
	 * we just return, and clean up in the ohci1394_pci_remove
	 * function. */
	if (event == ~(u32) 0) {
		DBGMSG("Device removed.");
		return IRQ_NONE;
	}

	DBGMSG("IntEvent: %08x", event);

	if (event & OHCI1394_unrecoverableError) {
		int ctx;
		PRINT(KERN_ERR, "Unrecoverable error!");

		/* Bit 0x800 of a ContextControl register is the "dead"
		 * flag; report every context that died. */
		if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
				reg_read(ohci, OHCI1394_AsReqTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
				reg_read(ohci, OHCI1394_AsRspTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));

		/* Iso xmit context register sets are spaced 16 bytes
		 * apart, iso rcv sets 32 bytes apart. */
		for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Xmit %d Context died: "
					"ctrl[%08x] cmdptr[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
					reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
		}

		for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Recv %d Context died: "
					"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
		}

		event &= ~OHCI1394_unrecoverableError;
	}

	if (event & OHCI1394_cycleInconsistent) {
		/* We subscribe to the cycleInconsistent event only to
		 * clear the corresponding event bit... otherwise,
		 * isochronous cycleMatch DMA won't work. */
		DBGMSG("OHCI1394_cycleInconsistent");
		event &= ~OHCI1394_cycleInconsistent;
	}

	if (event & OHCI1394_busReset) {
		/* The busReset event bit can't be cleared during the
		 * selfID phase, so we disable busReset interrupts, to
		 * avoid burying the cpu in interrupt requests. */
		spin_lock_irqsave(&ohci->event_lock, flags);
		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

		if (ohci->check_busreset) {
			int loop_count = 0;

			udelay(10);

			/* Spin until the hardware stops re-asserting
			 * busReset, dropping the lock around each delay. */
			while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
				reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

				spin_unlock_irqrestore(&ohci->event_lock, flags);
				udelay(10);
				spin_lock_irqsave(&ohci->event_lock, flags);

				/* The loop counter check is to prevent the driver
				 * from remaining in this state forever. For the
				 * initial bus reset, the loop continues for ever
				 * and the system hangs, until some device is plugged-in
				 * or out manually into a port! The forced reset seems
				 * to solve this problem. This mainly effects nForce2. */
				if (loop_count > 10000) {
					ohci_devctl(host, RESET_BUS, LONG_RESET);
					DBGMSG("Detected bus-reset loop. Forced a bus reset!");
					loop_count = 0;
				}

				loop_count++;
			}
		}
		spin_unlock_irqrestore(&ohci->event_lock, flags);
		if (!host->in_bus_reset) {
			DBGMSG("irq_handler: Bus reset requested");

			/* Subsystem call */
			hpsb_bus_reset(ohci->host);
		}
		event &= ~OHCI1394_busReset;
	}

	if (event & OHCI1394_reqTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_req_context;
		DBGMSG("Got reqTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "reqTxComplete");
		else
			/* AT request completion is handled inline here
			 * instead of via tasklet_schedule (see the
			 * commented-out call below). */
			dma_trm_tasklet((unsigned long)d);
			//tasklet_schedule(&d->task);
		event &= ~OHCI1394_reqTxComplete;
	}
	if (event & OHCI1394_respTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_resp_context;
		DBGMSG("Got respTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "respTxComplete");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_respTxComplete;
	}
	if (event & OHCI1394_RQPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_req_context;
		DBGMSG("Got RQPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RQPkt;
	}
	if (event & OHCI1394_RSPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_resp_context;
		DBGMSG("Got RSPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RSPkt;
	}
	if (event & OHCI1394_isochRx) {
		quadlet_t rx_event;

		/* Ack the per-context iso receive bits and fan out to the
		 * registered iso tasklets. */
		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
		event &= ~OHCI1394_isochRx;
	}
	if (event & OHCI1394_isochTx) {
		quadlet_t tx_event;

		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
		event &= ~OHCI1394_isochTx;
	}
	if (event & OHCI1394_selfIDComplete) {
		if (host->in_bus_reset) {
			node_id = reg_read(ohci, OHCI1394_NodeID);

			/* Bit 31 of NodeID is the "ID valid" flag. */
			if (!(node_id & 0x80000000)) {
				PRINT(KERN_ERR,
				      "SelfID received, but NodeID invalid "
				      "(probably new bus reset occurred): %08X",
				      node_id);
				goto selfid_not_valid;
			}

			/* Low 6 bits: our phy ID; bit 30: root node flag. */
			phyid =  node_id & 0x0000003f;
			isroot = (node_id & 0x40000000) != 0;

			DBGMSG("SelfID interrupt received "
			      "(phyid %d, %s)", phyid,
			      (isroot ? "root" : "not root"));

			handle_selfid(ohci, host, phyid, isroot);

			/* Clear the bus reset event and re-enable the
			 * busReset interrupt.  */
			spin_lock_irqsave(&ohci->event_lock, flags);
			reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
			spin_unlock_irqrestore(&ohci->event_lock, flags);

			/* Accept Physical requests from all nodes. */
			reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
			reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);

			/* Turn on phys dma reception.
			 *
			 * TODO: Enable some sort of filtering management.
			 */
			if (phys_dma) {
				reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
				reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
				reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
			} else {
				reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
				reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
			}

			DBGMSG("PhyReqFilter=%08x%08x",
			       reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
			       reg_read(ohci,OHCI1394_PhyReqFilterLoSet));

			hpsb_selfid_complete(host, phyid, isroot);
		} else
			PRINT(KERN_ERR,
			      "SelfID received outside of bus reset sequence");

selfid_not_valid:
		event &= ~OHCI1394_selfIDComplete;
	}

	/* Make sure we handle everything, just in case we accidentally
	 * enabled an interrupt that we didn't write a handler for.  */
	if (event)
		PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
		      event);

	return IRQ_HANDLED;
}
2554
2555 /* Put the buffer back into the dma context */
2556 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2557 {
2558         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2559         DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2560
2561         d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2562         d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2563         idx = (idx + d->num_desc - 1 ) % d->num_desc;
2564         d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2565
2566         /* To avoid a race, ensure 1394 interface hardware sees the inserted
2567          * context program descriptors before it sees the wakeup bit set. */
2568         wmb();
2569         
2570         /* wake up the dma context if necessary */
2571         if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2572                 PRINT(KERN_INFO,
2573                       "Waking dma ctx=%d ... processing is probably too slow",
2574                       d->ctx);
2575         }
2576
2577         /* do this always, to avoid race condition */
2578         reg_write(ohci, d->ctrlSet, 0x1000);
2579 }
2580
/* Convert a quadlet read from a DMA buffer to CPU byte order, unless
 * 'noswap' is set (controller already delivered it in CPU order).
 * Arguments are parenthesized so compound expressions expand safely;
 * 'data' is still evaluated exactly once per expansion. */
#define cond_le32_to_cpu(data, noswap) \
	((noswap) ? (data) : le32_to_cpu(data))
2583
/* Fixed packet size in bytes, indexed by IEEE-1394 transaction code.
 * 0 marks block tcodes whose total size must be computed from the
 * packet's data_length field; -1 marks tcodes not expected in an async
 * receive buffer (see packet_length(), which adds 20 bytes of
 * header+trailer to data_length for the 0 entries). */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
			    -1, 0, -1, 0, -1, -1, 16, -1};
2586
2587 /*
2588  * Determine the length of a packet in the buffer
2589  * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2590  */
2591 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2592                          int offset, unsigned char tcode, int noswap)
2593 {
2594         int length = -1;
2595
2596         if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2597                 length = TCODE_SIZE[tcode];
2598                 if (length == 0) {
2599                         if (offset + 12 >= d->buf_size) {
2600                                 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2601                                                 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2602                         } else {
2603                                 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2604                         }
2605                         length += 20;
2606                 }
2607         } else if (d->type == DMA_CTX_ISO) {
2608                 /* Assumption: buffer fill mode with header/trailer */
2609                 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2610         }
2611
2612         if (length > 0 && length % 4)
2613                 length += 4 - (length % 4);
2614
2615         return length;
2616 }
2617
/* Tasklet that processes dma receive buffers.
 *
 * Walks the receive ring from the position saved in d->buf_ind /
 * d->buf_offset, copying each complete packet into the split buffer
 * d->spb and handing it to the ieee1394 core.  Packets that straddle
 * buffer boundaries are reassembled piecewise; fully-consumed buffers
 * are recycled to the controller via insert_dma_buffer(). */
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	/* Resume where the previous run left off. */
	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	/* Low 16 bits of the descriptor status are resCount: bytes of
	 * this buffer the controller has not yet filled. */
	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;

	while (bytes_left > 0) {
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}

		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor.  */
		if ((offset + length) > d->buf_size) {
			DBGMSG("Split packet rcv'd");
			if (length > d->split_buf_size) {
				ohci1394_stop_context(ohci, d->ctrlClear,
					     "Split packet size exceeded");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call.  */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* Reassemble into the split buffer: tail of the
			 * current buffer first, then whole buffers, then
			 * the leading fragment of the last one. */
			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset=0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr,buf_ptr,d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG("Single packet rcv'd");
			/* Whole packet is in this buffer; copy it out and
			 * recycle the buffer if we consumed it exactly. */
			memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
			if (offset==d->buf_size) {
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset=0;
			}
		}

		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it.  */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				packet_swab(d->spb, tcode);
			DBGMSG("Packet received from node"
				" %d ack=0x%02X spd=%d tcode=0x%X"
				" length=%d ctx=%d tlabel=%d",
				(d->spb[1]>>16)&0x3f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
				tcode, length, d->ctx,
				(cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f);

			/* ack=1 only when the event/ack field of the
			 * trailer quadlet equals 0x11. */
			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
				== 0x11) ? 1 : 0;

			/* Hand the packet (minus the trailer quadlet) to
			 * the ieee1394 core. */
			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			       d->ctx);
#endif

		/* Refresh the fill level; the controller may have written
		 * more data while we were copying. */
		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;

		bytes_left = d->buf_size - rescount - offset;

	}

	/* Remember our position for the next invocation. */
	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}
2756
/* Bottom half that processes sent packets.
 *
 * Walks the transmit fifo in submission order, retiring every packet
 * whose descriptor carries a nonzero completion status: translate the
 * status into an ack code, notify the core via hpsb_packet_sent(),
 * unmap the payload, and free the program slot.  Stops at the first
 * still-pending packet, then flushes any queued packets that were
 * waiting for free program slots. */
static void dma_trm_tasklet (unsigned long data)
{
	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	struct hpsb_packet *packet, *ptmp;
	unsigned long flags;
	u32 status, ack;
	size_t datasize;

	spin_lock_irqsave(&d->lock, flags);

	list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
		datasize = packet->data_size;
		/* The controller writes completion status into the last
		 * descriptor of the program: 'end' for packets with a
		 * payload, 'begin' for header-only or raw packets. */
		if (datasize && packet->type != hpsb_raw)
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->end.status) >> 16;
		else
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->begin.status) >> 16;

		if (status == 0)
			/* this packet hasn't been sent yet*/
			break;

#ifdef OHCI1394_DEBUG
		if (datasize)
			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
				       d->ctx);
			else
				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
				       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
				       d->ctx);
		else
			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
			       "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
					>>16)&0x3f,
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>4)&0xf,
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>10)&0x3f,
				status&0x1f, (status>>5)&0x3,
				le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
				d->ctx);
#endif

		/* Bit 0x10 set: the low nibble is a valid 1394 ack code.
		 * Otherwise the low 5 bits are an OHCI evt_* code, which
		 * we map onto the core's ACKX_* pseudo-acks. */
		if (status & 0x10) {
			ack = status & 0xf;
		} else {
			switch (status & 0x1f) {
			case EVT_NO_STATUS: /* that should never happen */
			case EVT_RESERVED_A: /* that should never happen */
			case EVT_LONG_PACKET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_MISSING_ACK:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_UNDERRUN:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_OVERRUN: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_DESCRIPTOR_READ:
			case EVT_DATA_READ:
			case EVT_DATA_WRITE:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_BUS_RESET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_TIMEOUT:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_TCODE_ERR:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_RESERVED_B: /* that should never happen */
			case EVT_RESERVED_C: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_UNKNOWN:
			case EVT_FLUSHED:
				ack = ACKX_SEND_ERROR;
				break;
			default:
				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				BUG();
			}
		}

		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ack);

		/* NOTE(review): cpu_to_le32 on end.address here reverses
		 * the stored LE value back to a bus address (same byte
		 * swap as le32_to_cpu); looks questionable for 64-bit
		 * dma_addr_t — confirm against the descriptor layout. */
		if (datasize) {
			pci_unmap_single(ohci->dev,
					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
					 datasize, PCI_DMA_TODEVICE);
			OHCI_DMA_FREE("single Xmit data packet");
		}

		/* Retire this program slot and advance to the next. */
		d->sent_ind = (d->sent_ind+1)%d->num_desc;
		d->free_prgs++;
	}

	/* Submit any packets that were waiting for free programs. */
	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);
}
2884
2885 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2886 {
2887         if (d->ctrlClear) {
2888                 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2889
2890                 if (d->type == DMA_CTX_ISO) {
2891                         /* disable interrupts */
2892                         reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2893                         ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2894                 } else {
2895                         tasklet_kill(&d->task);
2896                 }
2897         }
2898 }
2899
2900
2901 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2902 {
2903         int i;
2904         struct ti_ohci *ohci = d->ohci;
2905
2906         if (ohci == NULL)
2907                 return;
2908
2909         DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2910
2911         if (d->buf_cpu) {
2912                 for (i=0; i<d->num_desc; i++)
2913                         if (d->buf_cpu[i] && d->buf_bus[i]) {
2914                                 pci_free_consistent(
2915                                         ohci->dev, d->buf_size,
2916                                         d->buf_cpu[i], d->buf_bus[i]);
2917                                 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2918                         }
2919                 kfree(d->buf_cpu);
2920                 kfree(d->buf_bus);
2921         }
2922         if (d->prg_cpu) {
2923                 for (i=0; i<d->num_desc; i++)
2924                         if (d->prg_cpu[i] && d->prg_bus[i]) {
2925                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2926                                 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2927                         }
2928                 pci_pool_destroy(d->prg_pool);
2929                 OHCI_DMA_FREE("dma_rcv prg pool");
2930                 kfree(d->prg_cpu);
2931                 kfree(d->prg_bus);
2932         }
2933         kfree(d->spb);
2934
2935         /* Mark this context as freed. */
2936         d->ohci = NULL;
2937 }
2938
2939 static int
2940 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2941                   enum context_type type, int ctx, int num_desc,
2942                   int buf_size, int split_buf_size, int context_base)
2943 {
2944         int i, len;
2945         static int num_allocs;
2946         static char pool_name[20];
2947
2948         d->ohci = ohci;
2949         d->type = type;
2950         d->ctx = ctx;
2951
2952         d->num_desc = num_desc;
2953         d->buf_size = buf_size;
2954         d->split_buf_size = split_buf_size;
2955
2956         d->ctrlSet = 0;
2957         d->ctrlClear = 0;
2958         d->cmdPtr = 0;
2959
2960         d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_ATOMIC);
2961         d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2962
2963         if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2964                 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2965                 free_dma_rcv_ctx(d);
2966                 return -ENOMEM;
2967         }
2968         memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2969         memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2970
2971         d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
2972                                 GFP_ATOMIC);
2973         d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2974
2975         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2976                 PRINT(KERN_ERR, "Failed to allocate dma prg");
2977                 free_dma_rcv_ctx(d);
2978                 return -ENOMEM;
2979         }
2980         memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2981         memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2982
2983         d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2984
2985         if (d->spb == NULL) {
2986                 PRINT(KERN_ERR, "Failed to allocate split buffer");
2987                 free_dma_rcv_ctx(d);
2988                 return -ENOMEM;
2989         }
2990         
2991         len = sprintf(pool_name, "ohci1394_rcv_prg");
2992         sprintf(pool_name+len, "%d", num_allocs);
2993         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2994                                 sizeof(struct dma_cmd), 4, 0);
2995         if(d->prg_pool == NULL)
2996         {
2997                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2998                 free_dma_rcv_ctx(d);
2999                 return -ENOMEM;
3000         }
3001         num_allocs++;
3002
3003         OHCI_DMA_ALLOC("dma_rcv prg pool");
3004
3005         for (i=0; i<d->num_desc; i++) {
3006                 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3007                                                      d->buf_size,
3008                                                      d->buf_bus+i);
3009                 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3010
3011                 if (d->buf_cpu[i] != NULL) {
3012                         memset(d->buf_cpu[i], 0, d->buf_size);
3013                 } else {
3014                         PRINT(KERN_ERR,
3015                               "Failed to allocate dma buffer");
3016                         free_dma_rcv_ctx(d);
3017                         return -ENOMEM;
3018                 }
3019
3020                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3021                 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3022
3023                 if (d->prg_cpu[i] != NULL) {
3024                         memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3025                 } else {
3026                         PRINT(KERN_ERR,
3027                               "Failed to allocate dma prg");
3028                         free_dma_rcv_ctx(d);
3029                         return -ENOMEM;
3030                 }
3031         }
3032
3033         spin_lock_init(&d->lock);
3034
3035         if (type == DMA_CTX_ISO) {
3036                 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3037                                           OHCI_ISO_MULTICHANNEL_RECEIVE,
3038                                           dma_rcv_tasklet, (unsigned long) d);
3039         } else {
3040                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3041                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3042                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3043
3044                 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3045         }
3046
3047         return 0;
3048 }
3049
3050 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3051 {
3052         int i;
3053         struct ti_ohci *ohci = d->ohci;
3054
3055         if (ohci == NULL)
3056                 return;
3057
3058         DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3059
3060         if (d->prg_cpu) {
3061                 for (i=0; i<d->num_desc; i++)
3062                         if (d->prg_cpu[i] && d->prg_bus[i]) {
3063                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3064                                 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3065                         }
3066                 pci_pool_destroy(d->prg_pool);
3067                 OHCI_DMA_FREE("dma_trm prg pool");
3068                 kfree(d->prg_cpu);
3069                 kfree(d->prg_bus);
3070         }
3071
3072         /* Mark this context as freed. */
3073         d->ohci = NULL;
3074 }
3075
3076 static int
3077 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3078                   enum context_type type, int ctx, int num_desc,
3079                   int context_base)
3080 {
3081         int i, len;
3082         static char pool_name[20];
3083         static int num_allocs=0;
3084
3085         d->ohci = ohci;
3086         d->type = type;
3087         d->ctx = ctx;
3088         d->num_desc = num_desc;
3089         d->ctrlSet = 0;
3090         d->ctrlClear = 0;
3091         d->cmdPtr = 0;
3092
3093         d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
3094                              GFP_KERNEL);
3095         d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3096
3097         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3098                 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3099                 free_dma_trm_ctx(d);
3100                 return -ENOMEM;
3101         }
3102         memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3103         memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3104
3105         len = sprintf(pool_name, "ohci1394_trm_prg");
3106         sprintf(pool_name+len, "%d", num_allocs);
3107         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3108                                 sizeof(struct at_dma_prg), 4, 0);
3109         if (d->prg_pool == NULL) {
3110                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3111                 free_dma_trm_ctx(d);
3112                 return -ENOMEM;
3113         }
3114         num_allocs++;
3115
3116         OHCI_DMA_ALLOC("dma_rcv prg pool");
3117
3118         for (i = 0; i < d->num_desc; i++) {
3119                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3120                 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3121
3122                 if (d->prg_cpu[i] != NULL) {
3123                         memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3124                 } else {
3125                         PRINT(KERN_ERR,
3126                               "Failed to allocate at dma prg");
3127                         free_dma_trm_ctx(d);
3128                         return -ENOMEM;
3129                 }
3130         }
3131
3132         spin_lock_init(&d->lock);
3133
3134         /* initialize tasklet */
3135         if (type == DMA_CTX_ISO) {
3136                 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3137                                           dma_trm_tasklet, (unsigned long) d);
3138                 if (ohci1394_register_iso_tasklet(ohci,
3139                                                   &ohci->it_legacy_tasklet) < 0) {
3140                         PRINT(KERN_ERR, "No IT DMA context available");
3141                         free_dma_trm_ctx(d);
3142                         return -EBUSY;
3143                 }
3144
3145                 /* IT can be assigned to any context by register_iso_tasklet */
3146                 d->ctx = ohci->it_legacy_tasklet.context;
3147                 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3148                 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3149                 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3150         } else {
3151                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3152                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3153                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3154                 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3155         }
3156
3157         return 0;
3158 }
3159
3160 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3161 {
3162         struct ti_ohci *ohci = host->hostdata;
3163
3164         reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3165         reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3166
3167         memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
3168 }
3169
3170
3171 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3172                                  quadlet_t data, quadlet_t compare)
3173 {
3174         struct ti_ohci *ohci = host->hostdata;
3175         int i;
3176
3177         reg_write(ohci, OHCI1394_CSRData, data);
3178         reg_write(ohci, OHCI1394_CSRCompareData, compare);
3179         reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3180
3181         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3182                 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3183                         break;
3184
3185                 mdelay(1);
3186         }
3187
3188         return reg_read(ohci, OHCI1394_CSRData);
3189 }
3190
/* Operations this host controller driver exposes to the ieee1394 core. */
static struct hpsb_host_driver ohci1394_driver = {
	.owner =		THIS_MODULE,
	.name =			OHCI1394_DRIVER_NAME,
	.set_hw_config_rom =	ohci_set_hw_config_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =		ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};
3200
3201 \f
3202
3203 /***********************************
3204  * PCI Driver Interface functions  *
3205  ***********************************/
3206
/* Probe-time bail-out helper: log the error, tear down whatever the
 * probe managed to initialize so far (ohci1394_pci_remove() consults
 * init_state), and return `err` from the enclosing probe function.
 * Only valid inside ohci1394_pci_probe(), which #undefs it at the end. */
#define FAIL(err, fmt, args...)			\
do {						\
	PRINT_G(KERN_ERR, fmt , ## args);	\
	ohci1394_pci_remove(dev);		\
	return err;				\
} while (0)
3213
/*
 * Bring up one OHCI controller: enable the PCI device, map its 2kB
 * register window, allocate the config ROM / self-ID / DMA context
 * buffers, reset and initialize the chip, hook the (shared) IRQ and
 * finally register the host with the ieee1394 core.
 *
 * Each successfully completed stage advances ohci->init_state so that
 * the FAIL() macro (which calls ohci1394_pci_remove()) and a later
 * device removal unwind exactly what was set up.
 *
 * Returns 0 on success or a negative errno via FAIL().
 */
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	static int version_printed = 0;

	struct hpsb_host *host;
	struct ti_ohci *ohci;	/* shortcut to currently handled device */
	unsigned long ohci_base;

	/* Announce the driver version once, on the first probed device. */
	if (version_printed++ == 0)
		PRINT_G(KERN_INFO, "%s", version);

	if (pci_enable_device(dev))
		FAIL(-ENXIO, "Failed to enable OHCI hardware");
	pci_set_master(dev);

	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");

	ohci = host->hostdata;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	host->pdev = dev;
	pci_set_drvdata(dev, ohci);

	/* We don't want hardware swapping */
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	/* Some oddball Apple controllers do not order the selfid
	 * properly, so we make up for it here.  */
#ifndef __LITTLE_ENDIAN
	/* XXX: Need a better way to check this. I'm wondering if we can
	 * read the values of the OHCI1394_PCI_HCI_Control and the
	 * noByteSwapData registers to see if they were not cleared to
	 * zero. Should this work? Obviously it's not defined what these
	 * registers will read when they aren't supported. Bleh! */
	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
		ohci->no_swap_incoming = 1;
		ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif


#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a busreset.  */
	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
	    (dev->vendor ==  PCI_VENDOR_ID_NVIDIA &&
	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
		ohci->check_busreset = 1;

	/* We hardwire the MMIO length, since some CardBus adaptors
	 * fail to report the right length.  Anyway, the ohci spec
	 * clearly says it's 2kb, so this shouldn't be a problem. */
	ohci_base = pci_resource_start(dev, 0);
	if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
		PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
		      pci_resource_len(dev, 0));

	/* Seems PCMCIA handles this internally. Not sure why. Seems
	 * pretty bogus to force a driver to special case this.  */
#ifndef PCMCIA
	if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
		FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
		     ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
#endif
	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;

	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL)
		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);

	/* csr_config rom allocation */
	ohci->csr_config_rom_cpu =
		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				     &ohci->csr_config_rom_bus);
	OHCI_DMA_ALLOC("consistent csr_config_rom");
	if (ohci->csr_config_rom_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;

	/* self-id dma buffer allocation */
	ohci->selfid_buf_cpu =
		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
		      &ohci->selfid_buf_bus);
	OHCI_DMA_ALLOC("consistent selfid_buf");

	if (ohci->selfid_buf_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;

	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
		      "8Kb boundary... may cause problems on some CXD3222 chip",
		      ohci->selfid_buf_cpu);

	/* No self-id errors at startup */
	ohci->self_id_errors = 0;

	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
	/* AR DMA request context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
			      OHCI1394_AsReqRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Req context");

	/* AR DMA response context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
			      OHCI1394_AsRspRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Resp context");

	/* AT DMA request context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
			      OHCI1394_AsReqTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Req context");

	/* AT DMA response context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
			      OHCI1394_AsRspTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Resp context");

	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
	ohci_soft_reset(ohci);

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	mdelay(50);

	/* Determine the number of available IR and IT contexts. */
	ohci->nb_iso_rcv_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
	DBGMSG("%d iso receive contexts available",
	       ohci->nb_iso_rcv_ctx);

	ohci->nb_iso_xmit_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
	DBGMSG("%d iso transmit contexts available",
	       ohci->nb_iso_xmit_ctx);

	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;

	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
	spin_lock_init(&ohci->iso_tasklet_list_lock);
	ohci->ISO_channel_usage = 0;
	spin_lock_init(&ohci->IR_channel_lock);

	/* Allocate the IR DMA context right here so we don't have
	 * to do it in interrupt path - note that this doesn't
	 * waste much memory and avoids the jugglery required to
	 * allocate it in IRQ path. */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
			      DMA_CTX_ISO, 0, IR_NUM_DESC,
			      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
			      OHCI1394_IsoRcvContextBase) < 0) {
		FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
	}

	/* We hopefully don't have to pre-allocate IT DMA like we did
	 * for IR DMA above. Allocate it on-demand and mark inactive. */
	ohci->it_legacy_context.ohci = NULL;
	spin_lock_init(&ohci->event_lock);

	/*
	 * interrupts are disabled, all right, but... due to SA_SHIRQ we
	 * might get called anyway.  We'll see no event, of course, but
	 * we need to get to that "no event", so enough should be initialized
	 * by that point.
	 */
	if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
			 OHCI1394_DRIVER_NAME, ohci))
		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);

	ohci->init_state = OHCI_INIT_HAVE_IRQ;
	ohci_initialize(ohci);

	/* Set certain csr values */
	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
	host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;

	/* Tell the highlevel this host is ready */
	if (hpsb_add_host(host))
		FAIL(-ENOMEM, "Failed to register host with highlevel");

	ohci->init_state = OHCI_INIT_DONE;

	return 0;
#undef FAIL
}
3432
3433 static void ohci1394_pci_remove(struct pci_dev *pdev)
3434 {
3435         struct ti_ohci *ohci;
3436         struct device *dev;
3437
3438         ohci = pci_get_drvdata(pdev);
3439         if (!ohci)
3440                 return;
3441
3442         dev = get_device(&ohci->host->device);
3443
3444         switch (ohci->init_state) {
3445         case OHCI_INIT_DONE:
3446                 hpsb_remove_host(ohci->host);
3447
3448                 /* Clear out BUS Options */
3449                 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3450                 reg_write(ohci, OHCI1394_BusOptions,
3451                           (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3452                           0x00ff0000);
3453                 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3454
3455         case OHCI_INIT_HAVE_IRQ:
3456                 /* Clear interrupt registers */
3457                 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3458                 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3459                 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3460                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3461                 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3462                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3463
3464                 /* Disable IRM Contender */
3465                 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3466
3467                 /* Clear link control register */
3468                 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3469
3470                 /* Let all other nodes know to ignore us */
3471                 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3472
3473                 /* Soft reset before we start - this disables
3474                  * interrupts and clears linkEnable and LPS. */
3475                 ohci_soft_reset(ohci);
3476                 free_irq(ohci->dev->irq, ohci);
3477
3478         case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3479                 /* The ohci_soft_reset() stops all DMA contexts, so we
3480                  * dont need to do this.  */
3481                 /* Free AR dma */
3482                 free_dma_rcv_ctx(&ohci->ar_req_context);
3483                 free_dma_rcv_ctx(&ohci->ar_resp_context);
3484
3485                 /* Free AT dma */
3486                 free_dma_trm_ctx(&ohci->at_req_context);
3487                 free_dma_trm_ctx(&ohci->at_resp_context);
3488
3489                 /* Free IR dma */
3490                 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3491
3492                 /* Free IT dma */
3493                 free_dma_trm_ctx(&ohci->it_legacy_context);
3494
3495                 /* Free IR legacy dma */
3496                 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3497
3498
3499         case OHCI_INIT_HAVE_SELFID_BUFFER:
3500                 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3501                                     ohci->selfid_buf_cpu,
3502                                     ohci->selfid_buf_bus);
3503                 OHCI_DMA_FREE("consistent selfid_buf");
3504
3505         case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3506                 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3507                                     ohci->csr_config_rom_cpu,
3508                                     ohci->csr_config_rom_bus);
3509                 OHCI_DMA_FREE("consistent csr_config_rom");
3510
3511         case OHCI_INIT_HAVE_IOMAPPING:
3512                 iounmap(ohci->registers);
3513
3514         case OHCI_INIT_HAVE_MEM_REGION:
3515 #ifndef PCMCIA
3516                 release_mem_region(pci_resource_start(ohci->dev, 0),
3517                                    OHCI1394_REGISTER_SIZE);
3518 #endif
3519
3520 #ifdef CONFIG_PPC_PMAC
3521         /* On UniNorth, power down the cable and turn off the chip
3522          * clock when the module is removed to save power on
3523          * laptops. Turning it back ON is done by the arch code when
3524          * pci_enable_device() is called */
3525         {
3526                 struct device_node* of_node;
3527
3528                 of_node = pci_device_to_OF_node(ohci->dev);
3529                 if (of_node) {
3530                         pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3531                         pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3532                 }
3533         }
3534 #endif /* CONFIG_PPC_PMAC */
3535
3536         case OHCI_INIT_ALLOC_HOST:
3537                 pci_set_drvdata(ohci->dev, NULL);
3538         }
3539
3540         if (dev)
3541                 put_device(dev);
3542 }
3543
3544
/*
 * PCI resume hook: on PowerMac laptops re-enable the FireWire cell via
 * the platform feature calls, then re-enable the PCI device.
 *
 * Fix: the return value of pci_enable_device() was silently discarded,
 * so a failed resume was reported as success.  Propagate it instead.
 */
static int ohci1394_pci_resume (struct pci_dev *pdev)
{
#ifdef CONFIG_PMAC_PBOOK
	{
		struct device_node *of_node;

		/* Re-enable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
	}
#endif

	return pci_enable_device(pdev);
}
3562
3563
/* PCI suspend hook: on PowerMac laptops, power down the FireWire cell
 * via the platform feature call.  `state` is currently unused and no
 * generic PCI state save is done here.
 * NOTE(review): no pci_save_state()/pci_disable_device() - presumably
 * the chip is fully re-initialized on resume; verify against the
 * resume path. */
static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{
#ifdef CONFIG_PMAC_PBOOK
	{
		struct device_node *of_node;

		/* Disable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
	}
#endif

	return 0;
}
3579
3580
/* PCI class code for an OHCI-programmed FireWire controller:
 * base class "serial bus" / sub-class FireWire, prog-if 0x10 (OHCI). */
#define PCI_CLASS_FIREWIRE_OHCI     ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)

/* Match any vendor/device whose class code says "OHCI FireWire" -
 * the driver is written against the OHCI spec, not specific chips. */
static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class =	PCI_CLASS_FIREWIRE_OHCI,
		.class_mask =	PCI_ANY_ID,
		.vendor =	PCI_ANY_ID,
		.device =	PCI_ANY_ID,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ 0, },		/* terminator */
};
3594
3595 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3596
/* Glue between the PCI core and this driver's probe/remove/PM hooks. */
static struct pci_driver ohci1394_pci_driver = {
	.name =		OHCI1394_DRIVER_NAME,
	.id_table =	ohci1394_pci_tbl,
	.probe =	ohci1394_pci_probe,
	.remove =	ohci1394_pci_remove,
	.resume =	ohci1394_pci_resume,
	.suspend =	ohci1394_pci_suspend,
};
3605
3606 \f
3607
3608 /***********************************
3609  * OHCI1394 Video Interface        *
3610  ***********************************/
3611
3612 /* essentially the only purpose of this code is to allow another
3613    module to hook into ohci's interrupt handler */
3614
3615 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3616 {
3617         int i=0;
3618
3619         /* stop the channel program if it's still running */
3620         reg_write(ohci, reg, 0x8000);
3621
3622         /* Wait until it effectively stops */
3623         while (reg_read(ohci, reg) & 0x400) {
3624                 i++;
3625                 if (i>5000) {
3626                         PRINT(KERN_ERR,
3627                               "Runaway loop while stopping context: %s...", msg ? msg : "");
3628                         return 1;
3629                 }
3630
3631                 mb();
3632                 udelay(10);
3633         }
3634         if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3635         return 0;
3636 }
3637
3638 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3639                                void (*func)(unsigned long), unsigned long data)
3640 {
3641         tasklet_init(&tasklet->tasklet, func, data);
3642         tasklet->type = type;
3643         /* We init the tasklet->link field, so we can list_del() it
3644          * without worrying whether it was added to the list or not. */
3645         INIT_LIST_HEAD(&tasklet->link);
3646 }
3647
3648 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3649                                   struct ohci1394_iso_tasklet *tasklet)
3650 {
3651         unsigned long flags, *usage;
3652         int n, i, r = -EBUSY;
3653
3654         if (tasklet->type == OHCI_ISO_TRANSMIT) {
3655                 n = ohci->nb_iso_xmit_ctx;
3656                 usage = &ohci->it_ctx_usage;
3657         }
3658         else {
3659                 n = ohci->nb_iso_rcv_ctx;
3660                 usage = &ohci->ir_ctx_usage;
3661
3662                 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3663                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3664                         if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3665                                 return r;
3666                         }
3667                 }
3668         }
3669
3670         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3671
3672         for (i = 0; i < n; i++)
3673                 if (!test_and_set_bit(i, usage)) {
3674                         tasklet->context = i;
3675                         list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3676                         r = 0;
3677                         break;
3678                 }
3679
3680         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3681
3682         return r;
3683 }
3684
3685 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3686                                      struct ohci1394_iso_tasklet *tasklet)
3687 {
3688         unsigned long flags;
3689
3690         tasklet_kill(&tasklet->tasklet);
3691
3692         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3693
3694         if (tasklet->type == OHCI_ISO_TRANSMIT)
3695                 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3696         else {
3697                 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3698
3699                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3700                         clear_bit(0, &ohci->ir_multichannel_used);
3701                 }
3702         }
3703
3704         list_del(&tasklet->link);
3705
3706         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3707 }
3708
/* Entry points for other ieee1394 client modules that hook into this
 * driver's interrupt handling (see the video interface section above). */
EXPORT_SYMBOL(ohci1394_stop_context);
EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3713
3714
3715 /***********************************
3716  * General module initialization   *
3717  ***********************************/
3718
/* Module metadata. */
MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
MODULE_LICENSE("GPL");
3722
/* Module exit: unregister from the PCI core, which triggers
 * ohci1394_pci_remove() for every bound device. */
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}

/* Module init: register with the PCI core; probing happens per-device
 * via ohci1394_pci_probe(). */
static int __init ohci1394_init(void)
{
	return pci_register_driver(&ohci1394_pci_driver);
}

module_init(ohci1394_init);
module_exit(ohci1394_cleanup);