/*
 * linux/drivers/ide/arm/icside.c
 *
 * Copyright (c) 1996-2003 Russell King.
 */

#include <linux/config.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/init.h>

#include <asm/dma.h>
#include <asm/ecard.h>
#include <asm/io.h>

#define ICS_IDENT_OFFSET                0x2280

#define ICS_ARCIN_V5_INTRSTAT           0x000
#define ICS_ARCIN_V5_INTROFFSET         0x001
#define ICS_ARCIN_V5_IDEOFFSET          0xa00
#define ICS_ARCIN_V5_IDEALTOFFSET       0xae0
#define ICS_ARCIN_V5_IDESTEPPING        4

#define ICS_ARCIN_V6_IDEOFFSET_1        0x800
#define ICS_ARCIN_V6_INTROFFSET_1       0x880
#define ICS_ARCIN_V6_INTRSTAT_1         0x8a4
#define ICS_ARCIN_V6_IDEALTOFFSET_1     0x8e0
#define ICS_ARCIN_V6_IDEOFFSET_2        0xc00
#define ICS_ARCIN_V6_INTROFFSET_2       0xc80
#define ICS_ARCIN_V6_INTRSTAT_2         0xca4
#define ICS_ARCIN_V6_IDEALTOFFSET_2     0xce0
#define ICS_ARCIN_V6_IDESTEPPING        4

struct cardinfo {
        unsigned int dataoffset;
        unsigned int ctrloffset;
        unsigned int stepping;
};

static struct cardinfo icside_cardinfo_v5 = {
        ICS_ARCIN_V5_IDEOFFSET,
        ICS_ARCIN_V5_IDEALTOFFSET,
        ICS_ARCIN_V5_IDESTEPPING
};

static struct cardinfo icside_cardinfo_v6_1 = {
        ICS_ARCIN_V6_IDEOFFSET_1,
        ICS_ARCIN_V6_IDEALTOFFSET_1,
        ICS_ARCIN_V6_IDESTEPPING
};

static struct cardinfo icside_cardinfo_v6_2 = {
        ICS_ARCIN_V6_IDEOFFSET_2,
        ICS_ARCIN_V6_IDEALTOFFSET_2,
        ICS_ARCIN_V6_IDESTEPPING
};

struct icside_state {
        unsigned int channel;
        unsigned int enabled;
        unsigned long irq_port;
        unsigned long slot_port;
        unsigned int type;
        /* parent device... until the IDE core gets one of its own */
        struct device *dev;
        ide_hwif_t *hwif[2];
};

#define ICS_TYPE_A3IN   0
#define ICS_TYPE_A3USER 1
#define ICS_TYPE_V6     3
#define ICS_TYPE_V5     15
#define ICS_TYPE_NOTYPE ((unsigned int)-1)

/* ---------------- Version 5 PCB Support Functions --------------------- */
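/*
 * On the V5 card, the interrupt latch appears to be controlled by simple
 * accesses to ICS_ARCIN_V5_INTROFFSET: a write (of any value) enables the
 * card's interrupt, and a read disables it.  This is inferred from the
 * enable/disable routines below rather than from hardware documentation.
 */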
/* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
        struct icside_state *state = ec->irq_data;
        unsigned int base = state->irq_port;

        outb(0, base + ICS_ARCIN_V5_INTROFFSET);
}

/* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
        struct icside_state *state = ec->irq_data;
        unsigned int base = state->irq_port;

        inb(base + ICS_ARCIN_V5_INTROFFSET);
}

static const expansioncard_ops_t icside_ops_arcin_v5 = {
        .irqenable      = icside_irqenable_arcin_v5,
        .irqdisable     = icside_irqdisable_arcin_v5,
};


/* ---------------- Version 6 PCB Support Functions --------------------- */
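/*
 * The V6 card has one interrupt latch per IDE channel, at
 * ICS_ARCIN_V6_INTROFFSET_1 and _2, with the same write-to-enable,
 * read-to-disable behaviour assumed for the V5 latch above.  Only the
 * currently active channel's interrupt is left enabled at any time;
 * icside_maskproc() below switches the routing between channels.
 */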
/* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
        struct icside_state *state = ec->irq_data;
        unsigned int base = state->irq_port;

        state->enabled = 1;

        switch (state->channel) {
        case 0:
                outb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
                inb(base + ICS_ARCIN_V6_INTROFFSET_2);
                break;
        case 1:
                outb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
                inb(base + ICS_ARCIN_V6_INTROFFSET_1);
                break;
        }
}

/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
        struct icside_state *state = ec->irq_data;

        state->enabled = 0;

        inb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
        inb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}

/* Prototype: icside_irqpending_arcin_v6(struct expansion_card *ec)
 * Purpose  : detect an active interrupt from card
 */
static int icside_irqpending_arcin_v6(struct expansion_card *ec)
{
        struct icside_state *state = ec->irq_data;

        return inb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
               inb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
}

static const expansioncard_ops_t icside_ops_arcin_v6 = {
        .irqenable      = icside_irqenable_arcin_v6,
        .irqdisable     = icside_irqdisable_arcin_v6,
        .irqpending     = icside_irqpending_arcin_v6,
};

/*
 * Handle routing of interrupts.  This is called before we write
 * the command to the drive.  Both channels on a V6 card share one
 * expansion card interrupt line, so only the channel that is about
 * to be used has its interrupt unmasked; the other channel's latch
 * is read back to keep it disabled.
 */
static void icside_maskproc(ide_drive_t *drive, int mask)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct icside_state *state = hwif->hwif_data;
        unsigned long flags;

        local_irq_save(flags);

        state->channel = hwif->channel;

        if (state->enabled && !mask) {
                switch (hwif->channel) {
                case 0:
                        outb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
                        inb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
                        break;
                case 1:
                        outb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
                        inb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
                        break;
                }
        } else {
                inb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
                inb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
        }

        local_irq_restore(flags);
}

#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
/*
 * SG-DMA support.
 *
 * Similar to BM-DMA, but we use the RiscPC's IOMD DMA controller.
 * There is only one DMA controller per card, which means that only
 * one drive can be accessed at one time.  NOTE! We do not enforce that
 * here, but we rely on the main IDE driver spotting that both
 * interfaces use the same IRQ, which should guarantee this.
 */
#define NR_ENTRIES 256
#define TABLE_SIZE (NR_ENTRIES * 8)

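/*
 * Build the scatterlist for a request and map it for DMA.  Taskfile
 * (REQ_DRIVE_TASKFILE) requests carry a single raw buffer, so a one
 * entry list is constructed by hand; normal block requests are mapped
 * with blk_rq_map_sg().  The DMA direction is recorded in the hwif so
 * that icside_dma_end() can unmap with the same direction later.
 */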
static void icside_build_sglist(ide_drive_t *drive, struct request *rq)
{
        ide_hwif_t *hwif = drive->hwif;
        struct icside_state *state = hwif->hwif_data;
        struct scatterlist *sg = hwif->sg_table;
        int nents;

        if (rq->flags & REQ_DRIVE_TASKFILE) {
                ide_task_t *args = rq->special;

                if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
                        hwif->sg_dma_direction = DMA_TO_DEVICE;
                else
                        hwif->sg_dma_direction = DMA_FROM_DEVICE;

                memset(sg, 0, sizeof(*sg));
                sg->page   = virt_to_page(rq->buffer);
                sg->offset = offset_in_page(rq->buffer);
                sg->length = rq->nr_sectors * SECTOR_SIZE;
                nents = 1;
        } else {
                nents = blk_rq_map_sg(drive->queue, rq, sg);

                if (rq_data_dir(rq) == READ)
                        hwif->sg_dma_direction = DMA_FROM_DEVICE;
                else
                        hwif->sg_dma_direction = DMA_TO_DEVICE;
        }

        nents = dma_map_sg(state->dev, sg, nents, hwif->sg_dma_direction);

        hwif->sg_nents = nents;
}


/*
 * Configure the IOMD to give the appropriate timings for the transfer
 * mode being requested.  We take the advice of the ATA standards, and
 * calculate the cycle time based on the transfer mode, and the EIDE
 * MW DMA specs that the drive provides in the IDENTIFY command.
 *
 * We have the following IOMD DMA modes to choose from:
 *
 *      Type    Active          Recovery        Cycle
 *      A       250 (250)       312 (550)       562 (800)
 *      B       187             250             437
 *      C       125 (125)       125 (375)       250 (500)
 *      D       62              125             187
 *
 * (figures in brackets are actual measured timings)
 *
 * However, we also need to take care of the read/write active and
 * recovery timings:
 *
 *                      Read    Write
 *      Mode    Active  -- Recovery --  Cycle   IOMD type
 *      MW0     215     50      215     480     A
 *      MW1     80      50      50      150     C
 *      MW2     70      25      25      120     C
 */
static int icside_set_speed(ide_drive_t *drive, u8 xfer_mode)
{
        int on = 0, cycle_time = 0, use_dma_info = 0;

        /*
         * Limit the transfer speed to MW_DMA_2.
         */
        if (xfer_mode > XFER_MW_DMA_2)
                xfer_mode = XFER_MW_DMA_2;

        switch (xfer_mode) {
        case XFER_MW_DMA_2:
                cycle_time = 250;
                use_dma_info = 1;
                break;

        case XFER_MW_DMA_1:
                cycle_time = 250;
                use_dma_info = 1;
                break;

        case XFER_MW_DMA_0:
                cycle_time = 480;
                break;

        case XFER_SW_DMA_2:
        case XFER_SW_DMA_1:
        case XFER_SW_DMA_0:
                cycle_time = 480;
                break;
        }

        /*
         * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
         * take care to note the values in the ID...
         */
        if (use_dma_info && drive->id->eide_dma_time > cycle_time)
                cycle_time = drive->id->eide_dma_time;

        drive->drive_data = cycle_time;

        if (cycle_time && ide_config_drive_speed(drive, xfer_mode) == 0)
                on = 1;
        else
                drive->drive_data = 480;

        printk("%s: %s selected (peak %dMB/s)\n", drive->name,
                ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);

        drive->current_speed = xfer_mode;

        return on;
}

static int icside_dma_host_off(ide_drive_t *drive)
{
        return 0;
}

static int icside_dma_off_quietly(ide_drive_t *drive)
{
        drive->using_dma = 0;
        return icside_dma_host_off(drive);
}

static int icside_dma_host_on(ide_drive_t *drive)
{
        return 0;
}

static int icside_dma_on(ide_drive_t *drive)
{
        drive->using_dma = 1;
        return icside_dma_host_on(drive);
}

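/*
 * Decide whether to use DMA for this drive.  DMA is enabled only if
 * the drive advertises DMA capability, autodma is on for the
 * interface, the drive is not on the known-bad list, and it either
 * reports a multiword DMA mode or is on the known-good list with an
 * EIDE DMA cycle time no worse than 150ns; otherwise we fall back
 * to PIO.
 */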
static int icside_dma_check(ide_drive_t *drive)
{
        struct hd_driveid *id = drive->id;
        ide_hwif_t *hwif = HWIF(drive);
        int xfer_mode = XFER_PIO_2;
        int on;

        if (!(id->capability & 1) || !hwif->autodma)
                goto out;

        /*
         * Consult the list of known "bad" drives
         */
        if (__ide_dma_bad_drive(drive))
                goto out;

        /*
         * Enable DMA on any drive that has multiword DMA
         */
        if (id->field_valid & 2) {
                xfer_mode = ide_dma_speed(drive, 0);
                goto out;
        }

        /*
         * Consult the list of known "good" drives
         */
        if (__ide_dma_good_drive(drive)) {
                if (id->eide_dma_time > 150)
                        goto out;
                xfer_mode = XFER_MW_DMA_1;
        }

out:
        on = icside_set_speed(drive, xfer_mode);

        if (on)
                return icside_dma_on(drive);
        else
                return icside_dma_off_quietly(drive);
}

static int icside_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct icside_state *state = hwif->hwif_data;

        drive->waiting_for_dma = 0;

        disable_dma(hwif->hw.dma);

        /* Teardown mappings after DMA has completed. */
        dma_unmap_sg(state->dev, hwif->sg_table, hwif->sg_nents,
                     hwif->sg_dma_direction);

        return get_dma_residue(hwif->hw.dma) != 0;
}

static int icside_dma_begin(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        /* We cannot enable DMA on both channels simultaneously. */
        BUG_ON(dma_channel_active(hwif->hw.dma));
        enable_dma(hwif->hw.dma);
        return 0;
}

/*
 * dma_intr() is the handler for disk read/write DMA interrupts
 */
static ide_startstop_t icside_dmaintr(ide_drive_t *drive)
{
        unsigned int stat;
        int dma_stat;

        dma_stat = icside_dma_end(drive);
        stat = HWIF(drive)->INB(IDE_STATUS_REG);
        if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
                if (!dma_stat) {
                        struct request *rq = HWGROUP(drive)->rq;
                        int i;

                        for (i = rq->nr_sectors; i > 0; ) {
                                i -= rq->current_nr_sectors;
                                DRIVER(drive)->end_request(drive, 1, rq->nr_sectors);
                        }

                        return ide_stopped;
                }
                printk(KERN_ERR "%s: bad DMA status (dma_stat=%x)\n",
                       drive->name, dma_stat);
        }

        return DRIVER(drive)->error(drive, __FUNCTION__, stat);
}

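/*
 * Common DMA setup for both reads and writes: map the request into a
 * scatterlist, make sure this channel's interrupt is the one routed,
 * steer the card's DMA signals to the correct interface, and program
 * the IOMD with the drive's timing, the SG table and the transfer
 * direction.  The actual transfer is started later by
 * icside_dma_begin().
 */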
static int
icside_dma_common(ide_drive_t *drive, struct request *rq,
                  unsigned int dma_mode)
{
        ide_hwif_t *hwif = HWIF(drive);

        /*
         * We cannot enable DMA on both channels simultaneously.
         */
        BUG_ON(dma_channel_active(hwif->hw.dma));

        icside_build_sglist(drive, rq);

        /*
         * Ensure that we have the right interrupt routed.
         */
        icside_maskproc(drive, 0);

        /*
         * Route the DMA signals to the correct interface.
         */
        outb(hwif->select_data, hwif->config_data);

        /*
         * Select the correct timing for this drive.
         */
        set_dma_speed(hwif->hw.dma, drive->drive_data);

        /*
         * Tell the DMA engine about the SG table and
         * data direction.
         */
        set_dma_sg(hwif->hw.dma, hwif->sg_table, hwif->sg_nents);
        set_dma_mode(hwif->hw.dma, dma_mode);

        drive->waiting_for_dma = 1;

        return 0;
}

static int icside_dma_read(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;
        task_ioreg_t cmd;

        if (icside_dma_common(drive, rq, DMA_MODE_READ))
                return 1;

        if (drive->media != ide_disk)
                return 0;

        BUG_ON(HWGROUP(drive)->handler != NULL);

        /*
         * FIXME: use only the ACB ide_task_t args structure.
         */
#if 0
        {
                ide_task_t *args = rq->special;
                cmd = args->tfRegister[IDE_COMMAND_OFFSET];
        }
#else
        if (rq->flags & REQ_DRIVE_TASKFILE) {
                ide_task_t *args = rq->special;
                cmd = args->tfRegister[IDE_COMMAND_OFFSET];
        } else if (drive->addressing == 1) {
                cmd = WIN_READDMA_EXT;
        } else {
                cmd = WIN_READDMA;
        }
#endif
        /* issue cmd to drive */
        ide_execute_command(drive, cmd, icside_dmaintr, 2*WAIT_CMD, NULL);

        return icside_dma_begin(drive);
}

static int icside_dma_write(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;
        task_ioreg_t cmd;

        if (icside_dma_common(drive, rq, DMA_MODE_WRITE))
                return 1;

        if (drive->media != ide_disk)
                return 0;

        BUG_ON(HWGROUP(drive)->handler != NULL);

        /*
         * FIXME: use only the ACB ide_task_t args structure.
         */
#if 0
        {
                ide_task_t *args = rq->special;
                cmd = args->tfRegister[IDE_COMMAND_OFFSET];
        }
#else
        if (rq->flags & REQ_DRIVE_TASKFILE) {
                ide_task_t *args = rq->special;
                cmd = args->tfRegister[IDE_COMMAND_OFFSET];
        } else if (drive->addressing == 1) {
                cmd = WIN_WRITEDMA_EXT;
        } else {
                cmd = WIN_WRITEDMA;
        }
#endif

        /* issue cmd to drive */
        ide_execute_command(drive, cmd, icside_dmaintr, 2*WAIT_CMD, NULL);

        return icside_dma_begin(drive);
}

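/*
 * Poll the active channel's interrupt status latch to see whether the
 * card is asserting an interrupt, without acknowledging it.
 */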
static int icside_dma_test_irq(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct icside_state *state = hwif->hwif_data;

        return inb(state->irq_port +
                   (hwif->channel ?
                        ICS_ARCIN_V6_INTRSTAT_2 :
                        ICS_ARCIN_V6_INTRSTAT_1)) & 1;
}

static int icside_dma_verbose(ide_drive_t *drive)
{
        printk(", %s (peak %dMB/s)",
                ide_xfer_verbose(drive->current_speed),
                2000 / drive->drive_data);
        return 1;
}

static int icside_dma_timeout(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

        if (icside_dma_test_irq(drive))
                return 0;

        ide_dump_status(drive, "DMA timeout",
                HWIF(drive)->INB(IDE_STATUS_REG));

        return icside_dma_end(drive);
}

static int icside_dma_lostirq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: IRQ lost\n", drive->name);
        return 1;
}

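/*
 * Per-interface DMA initialisation: allocate the scatterlist table,
 * advertise the supported multiword/simple DMA modes and hook up the
 * icside DMA method pointers.  Returns 1 on success, 0 if the table
 * could not be allocated (DMA is then left disabled).
 */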
static int icside_dma_init(ide_hwif_t *hwif)
{
        int autodma = 0;

#ifdef CONFIG_IDEDMA_ICS_AUTO
        autodma = 1;
#endif

        printk("    %s: SG-DMA", hwif->name);

        hwif->sg_table = kmalloc(sizeof(struct scatterlist) * NR_ENTRIES,
                                 GFP_KERNEL);
        if (!hwif->sg_table)
                goto failed;

        hwif->atapi_dma         = 1;
        hwif->mwdma_mask        = 7; /* MW0..2 */
        hwif->swdma_mask        = 7; /* SW0..2 */

        hwif->dmatable_cpu      = NULL;
        hwif->dmatable_dma      = 0;
        hwif->speedproc         = icside_set_speed;
        hwif->autodma           = autodma;

        hwif->ide_dma_check     = icside_dma_check;
        hwif->ide_dma_host_off  = icside_dma_host_off;
        hwif->ide_dma_off_quietly = icside_dma_off_quietly;
        hwif->ide_dma_host_on   = icside_dma_host_on;
        hwif->ide_dma_on        = icside_dma_on;
        hwif->ide_dma_read      = icside_dma_read;
        hwif->ide_dma_write     = icside_dma_write;
        hwif->ide_dma_begin     = icside_dma_begin;
        hwif->ide_dma_end       = icside_dma_end;
        hwif->ide_dma_test_irq  = icside_dma_test_irq;
        hwif->ide_dma_verbose   = icside_dma_verbose;
        hwif->ide_dma_timeout   = icside_dma_timeout;
        hwif->ide_dma_lostirq   = icside_dma_lostirq;

        hwif->drives[0].autodma = hwif->autodma;
        hwif->drives[1].autodma = hwif->autodma;

        printk(" capable%s\n", hwif->autodma ? ", auto-enable" : "");

        return 1;

failed:
        printk(" disabled, unable to allocate DMA table\n");
        return 0;
}

static void icside_dma_exit(ide_hwif_t *hwif)
{
        if (hwif->sg_table) {
                kfree(hwif->sg_table);
                hwif->sg_table = NULL;
        }
}
#else
#define icside_dma_init(hwif)   (0)
#define icside_dma_exit(hwif)   do { } while (0)
#endif

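/*
 * Find the IDE interface to use for the card at the given data port:
 * prefer a hwif whose data port already matches (perhaps one set up
 * earlier, e.g. via an ide= boot parameter), otherwise take the first
 * hwif that has no I/O ports assigned yet.
 */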
static ide_hwif_t *icside_find_hwif(unsigned long dataport)
{
        ide_hwif_t *hwif;
        int index;

        for (index = 0; index < MAX_HWIFS; ++index) {
                hwif = &ide_hwifs[index];
                if (hwif->io_ports[IDE_DATA_OFFSET] == dataport)
                        goto found;
        }

        for (index = 0; index < MAX_HWIFS; ++index) {
                hwif = &ide_hwifs[index];
                if (!hwif->io_ports[IDE_DATA_OFFSET])
                        goto found;
        }

        hwif = NULL;
found:
        return hwif;
}

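/*
 * Fill in the register layout for one interface on this card: the
 * taskfile registers are spaced 1 << stepping bytes apart starting at
 * the data register, and the control register lives at its own
 * offset.  The expansion card's IRQ and parent device are recorded
 * for the IDE core.
 */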
static ide_hwif_t *
icside_setup(unsigned long base, struct cardinfo *info, struct expansion_card *ec)
{
        unsigned long port = base + info->dataoffset;
        ide_hwif_t *hwif;

        hwif = icside_find_hwif(base);
        if (hwif) {
                int i;

                memset(&hwif->hw, 0, sizeof(hw_regs_t));

                for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
                        hwif->hw.io_ports[i] = port;
                        hwif->io_ports[i] = port;
                        port += 1 << info->stepping;
                }
                hwif->hw.io_ports[IDE_CONTROL_OFFSET] = base + info->ctrloffset;
                hwif->io_ports[IDE_CONTROL_OFFSET] = base + info->ctrloffset;
                hwif->hw.irq  = ec->irq;
                hwif->irq     = ec->irq;
                hwif->noprobe = 0;
                hwif->chipset = ide_acorn;
                hwif->gendev.parent = &ec->dev;
        }

        return hwif;
}

static int __init
icside_register_v5(struct icside_state *state, struct expansion_card *ec)
{
        unsigned long slot_port;
        ide_hwif_t *hwif;

        slot_port = ecard_address(ec, ECARD_MEMC, 0);

        state->irq_port  = slot_port;
        /* remove() and shutdown() also use slot_port for the V5 card */
        state->slot_port = slot_port;

        ec->irqaddr  = (unsigned char *)ioaddr(slot_port + ICS_ARCIN_V5_INTRSTAT);
        ec->irqmask  = 1;
        ec->irq_data = state;
        ec->ops      = &icside_ops_arcin_v5;

        /*
         * Be on the safe side - disable interrupts
         */
        inb(slot_port + ICS_ARCIN_V5_INTROFFSET);

        hwif = icside_setup(slot_port, &icside_cardinfo_v5, ec);

        state->hwif[0] = hwif;

        return hwif ? 0 : -ENODEV;
}

static int __init
icside_register_v6(struct icside_state *state, struct expansion_card *ec)
{
        unsigned long slot_port, port;
        ide_hwif_t *hwif, *mate;
        unsigned int sel = 0;

        slot_port = ecard_address(ec, ECARD_IOC, ECARD_FAST);
        port      = ecard_address(ec, ECARD_EASI, ECARD_FAST);

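        /*
         * If the card is in an EASI slot, use the EASI address space
         * for the IDE registers rather than the IOC space.  Writing
         * bit 5 of the slot's control latch appears to select EASI
         * addressing on the V6 card; this is inferred from the code
         * below rather than from hardware documentation.
         */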
        if (port == 0)
                port = slot_port;
        else
                sel = 1 << 5;

        outb(sel, slot_port);

        /*
         * Be on the safe side - disable interrupts
         */
        inb(port + ICS_ARCIN_V6_INTROFFSET_1);
        inb(port + ICS_ARCIN_V6_INTROFFSET_2);

        /*
         * Find and register the interfaces.
         */
        hwif = icside_setup(port, &icside_cardinfo_v6_1, ec);
        mate = icside_setup(port, &icside_cardinfo_v6_2, ec);

        if (!hwif || !mate)
                return -ENODEV;

        state->irq_port   = port;
        state->slot_port  = slot_port;
        state->hwif[0]    = hwif;
        state->hwif[1]    = mate;

        ec->irq_data      = state;
        ec->ops           = &icside_ops_arcin_v6;

        hwif->maskproc    = icside_maskproc;
        hwif->channel     = 0;
        hwif->hwif_data   = state;
        hwif->mate        = mate;
        hwif->serialized  = 1;
        hwif->config_data = slot_port;
        hwif->select_data = sel;
        hwif->hw.dma      = ec->dma;

        mate->maskproc    = icside_maskproc;
        mate->channel     = 1;
        mate->hwif_data   = state;
        mate->mate        = hwif;
        mate->serialized  = 1;
        mate->config_data = slot_port;
        mate->select_data = sel | 1;
        mate->hw.dma      = ec->dma;

        if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) {
                icside_dma_init(hwif);
                icside_dma_init(mate);
        }

        return 0;
}

static int __devinit
icside_probe(struct expansion_card *ec, const struct ecard_id *id)
{
        struct icside_state *state;
        void *idmem;
        int ret;

        state = kmalloc(sizeof(struct icside_state), GFP_KERNEL);
        if (!state) {
                ret = -ENOMEM;
                goto out;
        }

        memset(state, 0, sizeof(*state));
        state->type     = ICS_TYPE_NOTYPE;
        state->dev      = &ec->dev;

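        /*
         * The card type is encoded in four ID bits read from the
         * podule's IOC-fast space at ICS_IDENT_OFFSET, one bit every
         * four bytes, least significant bit first.
         */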
        idmem = ioremap(ecard_resource_start(ec, ECARD_RES_IOCFAST),
                        ecard_resource_len(ec, ECARD_RES_IOCFAST));
        if (idmem) {
                unsigned int type;

                type = readb(idmem + ICS_IDENT_OFFSET) & 1;
                type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
                type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
                type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
                iounmap(idmem);

                state->type = type;
        }

        switch (state->type) {
        case ICS_TYPE_A3IN:
                printk(KERN_WARNING "icside: A3IN unsupported\n");
                ret = -ENODEV;
                break;

        case ICS_TYPE_A3USER:
                printk(KERN_WARNING "icside: A3USER unsupported\n");
                ret = -ENODEV;
                break;

        case ICS_TYPE_V5:
                ret = icside_register_v5(state, ec);
                break;

        case ICS_TYPE_V6:
                ret = icside_register_v6(state, ec);
                break;

        default:
                printk(KERN_WARNING "icside: unknown interface type\n");
                ret = -ENODEV;
                break;
        }

        if (ret == 0)
                ecard_set_drvdata(ec, state);
        else
                kfree(state);
 out:
        return ret;
}

static void __devexit icside_remove(struct expansion_card *ec)
{
        struct icside_state *state = ecard_get_drvdata(ec);

        switch (state->type) {
        case ICS_TYPE_V5:
                /* FIXME: tell IDE to stop using the interface */

                /* Disable interrupts */
                inb(state->slot_port + ICS_ARCIN_V5_INTROFFSET);
                break;

        case ICS_TYPE_V6:
                /* FIXME: tell IDE to stop using the interface */
                icside_dma_exit(state->hwif[1]);
                icside_dma_exit(state->hwif[0]);

                if (ec->dma != NO_DMA)
                        free_dma(ec->dma);

                /* Disable interrupts */
                inb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
                inb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);

                /* Reset the ROM pointer/EASI selection */
                outb(0, state->slot_port);
                break;
        }

        ecard_set_drvdata(ec, NULL);
        ec->ops = NULL;
        ec->irq_data = NULL;

        kfree(state);
}

static void icside_shutdown(struct expansion_card *ec)
{
        struct icside_state *state = ecard_get_drvdata(ec);

        switch (state->type) {
        case ICS_TYPE_V5:
                /* Disable interrupts */
                inb(state->slot_port + ICS_ARCIN_V5_INTROFFSET);
                break;

        case ICS_TYPE_V6:
                /* Disable interrupts */
                inb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
                inb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);

                /* Reset the ROM pointer/EASI selection */
                outb(0, state->slot_port);
                break;
        }
}

static const struct ecard_id icside_ids[] = {
        { MANU_ICS,  PROD_ICS_IDE  },
        { MANU_ICS2, PROD_ICS2_IDE },
        { 0xffff, 0xffff }
};

static struct ecard_driver icside_driver = {
        .probe          = icside_probe,
        .remove         = __devexit_p(icside_remove),
        .shutdown       = icside_shutdown,
        .id_table       = icside_ids,
        .drv = {
                .name   = "icside",
        },
};

static int __init icside_init(void)
{
        return ecard_register_driver(&icside_driver);
}

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS IDE driver");

module_init(icside_init);