1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * $Id: cfi_cmdset_0001.c,v 1.126 2003/06/23 07:45:48 dwmw2 Exp $
8  *
9  * 
10  * 10/10/2000   Nicolas Pitre <nico@cam.org>
11  *      - completely revamped method functions so they are aware and
12  *        independent of the flash geometry (buswidth, interleave, etc.)
13  *      - scalability vs code size is completely set at compile-time
14  *        (see include/linux/mtd/cfi.h for selection)
15  *      - optimized write buffer method
16  * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17  *      - reworked lock/unlock/erase support for var size flash
18  */
19
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
27
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/mtd/map.h>
33 #include <linux/mtd/mtd.h>
34 #include <linux/mtd/compatmac.h>
35 #include <linux/mtd/cfi.h>
36
37 /* debugging aid: set to 1 to disable buffer writes and force word writes */
38 #define FORCE_WORD_WRITE 0
39
40 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
41 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
42 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
43 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
44 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
45 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
46 static void cfi_intelext_sync (struct mtd_info *);
47 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
48 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
49 static int cfi_intelext_suspend (struct mtd_info *);
50 static void cfi_intelext_resume (struct mtd_info *);
51
52 static void cfi_intelext_destroy(struct mtd_info *);
53
54 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
55
56 static struct mtd_info *cfi_intelext_setup (struct map_info *);
57
58 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
59                      size_t *retlen, u_char **mtdbuf);
60 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
61                         size_t len);
62
63
64 /*
65  *  *********** SETUP AND PROBE BITS  ***********
66  */
67
68 static struct mtd_chip_driver cfi_intelext_chipdrv = {
69         .probe          = NULL, /* Not usable directly */
70         .destroy        = cfi_intelext_destroy,
71         .name           = "cfi_cmdset_0001",
72         .module         = THIS_MODULE
73 };
74
75 /* #define DEBUG_LOCK_BITS */
76 /* #define DEBUG_CFI_FEATURES */
77
78 #ifdef DEBUG_CFI_FEATURES
79 static void cfi_tell_features(struct cfi_pri_intelext *extp)
80 {
81         int i;
82         printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
83         printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
84         printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
85         printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
86         printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
87         printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
88         printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
89         printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
90         printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
91         printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
92         for (i=9; i<32; i++) {
93                 if (extp->FeatureSupport & (1<<i)) 
94                         printk("     - Unknown Bit %X:      supported\n", i);
95         }
96         
97         printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
98         printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
99         for (i=1; i<8; i++) {
100                 if (extp->SuspendCmdSupport & (1<<i))
101                         printk("     - Unknown Bit %X:               supported\n", i);
102         }
103         
104         printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
105         printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
106         printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
107         for (i=2; i<16; i++) {
108                 if (extp->BlkStatusRegMask & (1<<i))
109                         printk("     - Unknown Bit %X Active: yes\n",i);
110         }
111         
112         printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", 
113                extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
114         if (extp->VppOptimal)
115                 printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", 
116                        extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
117 }
118 #endif
119
120 /* This routine is made available to other mtd code via
121  * inter_module_register.  It must only be accessed through
122  * inter_module_get which will bump the use count of this module.  The
123  * addresses passed back in cfi are valid as long as the use count of
124  * this module is non-zero, i.e. between inter_module_get and
125  * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
126  */
127 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
128 {
129         struct cfi_private *cfi = map->fldrv_priv;
130         int i;
131         __u32 base = cfi->chips[0].start;
132
133         if (cfi->cfi_mode == CFI_MODE_CFI) {
134                 /* 
135                  * It's a real CFI chip, not one for which the probe
136                  * routine faked a CFI structure. So we read the feature
137                  * table from it.
138                  */
139                 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
140                 struct cfi_pri_intelext *extp;
141                 int ofs_factor = cfi->interleave * cfi->device_type;
142
143                 //printk(" Intel/Sharp Extended Query Table at 0x%4.4X\n", adr);
144                 if (!adr)
145                         return NULL;
146
147                 /* Switch it into Query Mode */
148                 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
149
150                 extp = kmalloc(sizeof(*extp), GFP_KERNEL);
151                 if (!extp) {
152                         printk(KERN_ERR "Failed to allocate memory\n");
153                         return NULL;
154                 }
155                 
156                 /* Read in the Extended Query Table */
157                 for (i=0; i<sizeof(*extp); i++) {
158                         ((unsigned char *)extp)[i] = 
159                                 cfi_read_query(map, (base+((adr+i)*ofs_factor)));
160                 }
161                 
162                 if (extp->MajorVersion != '1' || 
163                     (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
164                         printk(KERN_WARNING "  Unknown IntelExt Extended Query "
165                                "version %c.%c.\n",  extp->MajorVersion,
166                                extp->MinorVersion);
167                         kfree(extp);
168                         return NULL;
169                 }
170                 
171                 /* Do some byteswapping if necessary */
172                 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
173                 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
174                 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
175                         
176 #ifdef DEBUG_CFI_FEATURES
177                 /* Tell the user about it in lots of lovely detail */
178                 cfi_tell_features(extp);
179 #endif  
180
181                 if(extp->SuspendCmdSupport & 1) {
182 //#define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
183 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
184 /* Some Intel StrataFlash parts prior to FPO revision C have bugs in this area */
185                         printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
186                                "erase on write disabled.\n");
187                         extp->SuspendCmdSupport &= ~1;
188 #else
189                         printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
190 #endif
191                 }
192                 /* Install our own private info structure */
193                 cfi->cmdset_priv = extp;        
194         }
195
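        /* CFI reports typical timings as powers of two: word and buffer
           write timeouts in microseconds, block erase timeout in
           milliseconds (per the CFI specification). */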
196         for (i=0; i< cfi->numchips; i++) {
197                 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
198                 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
199                 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
200                 cfi->chips[i].ref_point_counter = 0;
201         }               
202
203         map->fldrv = &cfi_intelext_chipdrv;
204         
205         /* Make sure it's in read mode */
206         cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
207         return cfi_intelext_setup(map);
208 }
209
210 static struct mtd_info *cfi_intelext_setup(struct map_info *map)
211 {
212         struct cfi_private *cfi = map->fldrv_priv;
213         struct mtd_info *mtd;
214         unsigned long offset = 0;
215         int i,j;
216         unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
217
218         mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
219         //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
220
221         if (!mtd) {
222                 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
223                 goto setup_err;
224         }
225
226         memset(mtd, 0, sizeof(*mtd));
227         mtd->priv = map;
228         mtd->type = MTD_NORFLASH;
229         mtd->size = devsize * cfi->numchips;
230
231         mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
232         mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 
233                         * mtd->numeraseregions, GFP_KERNEL);
234         if (!mtd->eraseregions) { 
235                 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
236                 goto setup_err;
237         }
238         
239         for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
240                 unsigned long ernum, ersize;
241                 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
242                 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
243
244                 if (mtd->erasesize < ersize) {
245                         mtd->erasesize = ersize;
246                 }
247                 for (j=0; j<cfi->numchips; j++) {
248                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
249                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
250                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
251                 }
252                 offset += (ersize * ernum);
253         }
254
255         if (offset != devsize) {
256                 /* Argh */
257                 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
258                 goto setup_err;
259         }
260
261         for (i=0; i<mtd->numeraseregions;i++){
262                 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
263                        i,mtd->eraseregions[i].offset,
264                        mtd->eraseregions[i].erasesize,
265                        mtd->eraseregions[i].numblocks);
266         }
267
268         /* Also select the correct geometry setup too */ 
269         mtd->erase = cfi_intelext_erase_varsize;
270         mtd->read = cfi_intelext_read;
271
272         if (map_is_linear(map)) {
273                 mtd->point = cfi_intelext_point;
274                 mtd->unpoint = cfi_intelext_unpoint;
275         }
276
277         if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) {
278                 printk(KERN_INFO "Using buffer write method\n" );
279                 mtd->write = cfi_intelext_write_buffers;
280         } else {
281                 printk(KERN_INFO "Using word write method\n" );
282                 mtd->write = cfi_intelext_write_words;
283         }
284         mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
285         mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
286         mtd->sync = cfi_intelext_sync;
287         mtd->lock = cfi_intelext_lock;
288         mtd->unlock = cfi_intelext_unlock;
289         mtd->suspend = cfi_intelext_suspend;
290         mtd->resume = cfi_intelext_resume;
291         mtd->flags = MTD_CAP_NORFLASH;
292         map->fldrv = &cfi_intelext_chipdrv;
293         mtd->name = map->name;
294         __module_get(THIS_MODULE);
295         return mtd;
296
297  setup_err:
298         if(mtd) {
299                 if(mtd->eraseregions)
300                         kfree(mtd->eraseregions);
301                 kfree(mtd);
302         }
303         kfree(cfi->cmdset_priv);
304         kfree(cfi->cfiq);
305         return NULL;
306 }
307
308 /*
309  *  *********** CHIP ACCESS FUNCTIONS ***********
310  */
311
312 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
313 {
314         DECLARE_WAITQUEUE(wait, current);
315         struct cfi_private *cfi = map->fldrv_priv;
316         cfi_word status, status_OK = CMD(0x80);
317         unsigned long timeo;
318         struct cfi_pri_intelext *cfip = (struct cfi_pri_intelext *)cfi->cmdset_priv;
319
320  resettime:
321         timeo = jiffies + HZ;
322  retry:
323         switch (chip->state) {
324
325         case FL_STATUS:
326                 for (;;) {
327                         status = cfi_read(map, adr);
328                         if ((status & status_OK) == status_OK)
329                                 break;
330
331                         if (time_after(jiffies, timeo)) {
332                                 printk(KERN_ERR "Waiting for chip to be ready timed out. Status %llx\n", 
333                                        (long long)status);
334                                 spin_unlock(chip->mutex);
335                                 return -EIO;
336                         }
337                         spin_unlock(chip->mutex);
338                         cfi_udelay(1);
339                         spin_lock(chip->mutex);
340                         /* Someone else might have been playing with it. */
341                         goto retry;
342                 }
343                                 
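                /* Chip reported ready: fall through to the idle states */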
344         case FL_READY:
345         case FL_CFI_QUERY:
346         case FL_JEDEC_QUERY:
347                 return 0;
348
349         case FL_ERASING:
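                /* Suspend the running erase only if the chip advertises the
                   "Suspend Erase" feature bit, and only for reads/points, or
                   for writes when "program after erase suspend" is supported. */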
350                 if (!(cfip->FeatureSupport & 2) ||
351                     !(mode == FL_READY || mode == FL_POINT ||
352                      (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
353                         goto sleep;
354
355
356                 /* Erase suspend */
357                 cfi_write(map, CMD(0xB0), adr);
358
359                 /* If the flash has finished erasing, then 'erase suspend'
360                  * appears to make some (28F320) flash devices switch to
361                  * 'read' mode.  Make sure that we switch to 'read status'
362                  * mode so we get the right data. --rmk
363                  */
364                 cfi_write(map, CMD(0x70), adr);
365                 chip->oldstate = FL_ERASING;
366                 chip->state = FL_ERASE_SUSPENDING;
367                 chip->erase_suspended = 1;
368                 for (;;) {
369                         status = cfi_read(map, adr);
370                         if ((status & status_OK) == status_OK)
371                                 break;
372
373                         if (time_after(jiffies, timeo)) {
374                                 /* Urgh. Resume and pretend we weren't here.  */
375                                 cfi_write(map, CMD(0xd0), adr);
376                                 /* Make sure we're in 'read status' mode if it had finished */
377                                 cfi_write(map, CMD(0x70), adr);
378                                 chip->state = FL_ERASING;
379                                 chip->oldstate = FL_READY;
380                                 printk(KERN_ERR "Chip not ready after erase "
381                                        "suspended: status = 0x%llx\n", (__u64)status);
382                                 return -EIO;
383                         }
384
385                         spin_unlock(chip->mutex);
386                         cfi_udelay(1);
387                         spin_lock(chip->mutex);
388                         /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
389                            So we can just loop here. */
390                 }
391                 chip->state = FL_STATUS;
392                 return 0;
393
394         case FL_POINT:
395                 /* Only if there's no operation suspended... */
396                 if (mode == FL_READY && chip->oldstate == FL_READY)
397                         return 0;
398
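                /* Something is still suspended: fall through and wait */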
399         default:
400         sleep:
401                 set_current_state(TASK_UNINTERRUPTIBLE);
402                 add_wait_queue(&chip->wq, &wait);
403                 spin_unlock(chip->mutex);
404                 schedule();
405                 remove_wait_queue(&chip->wq, &wait);
406                 spin_lock(chip->mutex);
407                 goto resettime;
408         }
409 }
410
411 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
412 {
413         struct cfi_private *cfi = map->fldrv_priv;
414
415         switch(chip->oldstate) {
416         case FL_ERASING:
417                 chip->state = chip->oldstate;
418                 /* What if one interleaved chip has finished and the 
419                    other hasn't? The old code would leave the finished
420                    one in READY mode. That's bad, and caused -EROFS 
421                    errors to be returned from do_erase_oneblock because
422                    that's the only bit it checked for at the time.
423                    As the state machine appears to explicitly allow 
424                    sending the 0x70 (Read Status) command to an erasing
425                    chip and expecting it to be ignored, that's what we 
426                    do. */
427                 cfi_write(map, CMD(0xd0), adr);
428                 cfi_write(map, CMD(0x70), adr);
429                 chip->oldstate = FL_READY;
430                 chip->state = FL_ERASING;
431                 break;
432
433         case FL_READY:
434                 /* We should really make set_vpp() count, rather than doing this */
435                 DISABLE_VPP(map);
436                 break;
437         default:
438                 printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
439         }
440         wake_up(&chip->wq);
441 }
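
/*
 * Illustrative call pattern for get_chip()/put_chip(), as used by the
 * access methods below (sketch only, error handling abbreviated):
 *
 *	spin_lock(chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_xxx);
 *	if (ret) {
 *		spin_unlock(chip->mutex);
 *		return ret;
 *	}
 *	... issue commands and/or access the flash array ...
 *	put_chip(map, chip, adr);
 *	spin_unlock(chip->mutex);
 */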
442
443 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
444 {
445         unsigned long cmd_addr;
446         struct cfi_private *cfi = map->fldrv_priv;
447         int ret = 0;
448
449         adr += chip->start;
450
451         /* Ensure cmd read/writes are aligned. */ 
452         cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1); 
453
454         spin_lock(chip->mutex);
455
456         ret = get_chip(map, chip, cmd_addr, FL_POINT);
457
458         if (!ret) {
459                 if (chip->state != FL_POINT && chip->state != FL_READY)
460                         cfi_write(map, CMD(0xff), cmd_addr);
461
462                 chip->state = FL_POINT;
463                 chip->ref_point_counter++;
464         }
465         spin_unlock(chip->mutex);
466
467         return ret;
468 }
469
470 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
471 {
472         struct map_info *map = mtd->priv;
473         struct cfi_private *cfi = map->fldrv_priv;
474         unsigned long ofs;
475         int chipnum;
476         int ret = 0;
477
478         if (from + len > mtd->size)
479                 return -EINVAL;
480         
481         *mtdbuf = (void *)map->virt + from;
482         if(*mtdbuf == NULL)
483                 return -EINVAL; /* can not point this region */
484         *retlen = 0;
485
486         /* Now lock the chip(s) to POINT state */
487
488         /* ofs: offset within the first chip that the first read should start */
489         chipnum = (from >> cfi->chipshift);
490         ofs = from - (chipnum << cfi->chipshift);
491
492         while (len) {
493                 unsigned long thislen;
494
495                 if (chipnum >= cfi->numchips)
496                         break;
497
498                 if ((len + ofs -1) >> cfi->chipshift)
499                         thislen = (1<<cfi->chipshift) - ofs;
500                 else
501                         thislen = len;
502
503                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
504                 if (ret)
505                         break;
506
507                 *retlen += thislen;
508                 len -= thislen;
509                 
510                 ofs = 0;
511                 chipnum++;
512         }
513         return 0;
514 }
515
516 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
517 {
518         struct map_info *map = mtd->priv;
519         struct cfi_private *cfi = map->fldrv_priv;
520         unsigned long ofs;
521         int chipnum;
522
523         /* Now unlock the chip(s) POINT state */
524
525         /* ofs: offset within the first chip that the first read should start */
526         chipnum = (from >> cfi->chipshift);
527         ofs = from - (chipnum <<  cfi->chipshift);
528
529         while (len) {
530                 unsigned long thislen;
531                 struct flchip *chip;
532
533                 chip = &cfi->chips[chipnum];
534                 if (chipnum >= cfi->numchips)
535                         break;
536
537                 if ((len + ofs -1) >> cfi->chipshift)
538                         thislen = (1<<cfi->chipshift) - ofs;
539                 else
540                         thislen = len;
541
542                 spin_lock(chip->mutex);
543                 if (chip->state == FL_POINT) {
544                         chip->ref_point_counter--;
545                         if(chip->ref_point_counter == 0)
546                                 chip->state = FL_READY;
547                 } else
548                         printk(KERN_ERR "Warning: unpoint called on non-pointed region\n"); /* Should this give an error? */
549
550                 put_chip(map, chip, chip->start);
551                 spin_unlock(chip->mutex);
552
553                 len -= thislen;
554                 ofs = 0;
555                 chipnum++;
556         }
557 }
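
/*
 * Illustrative use of the point/unpoint interface by an MTD client
 * (sketch only; matches the prototypes declared at the top of this file):
 *
 *	u_char *ptr;
 *	size_t retlen;
 *	if (!mtd->point(mtd, ofs, len, &retlen, &ptr)) {
 *		... read directly from 'ptr' ...
 *		mtd->unpoint(mtd, ptr, ofs, retlen);
 *	}
 */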
558
559 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
560 {
561         unsigned long cmd_addr;
562         struct cfi_private *cfi = map->fldrv_priv;
563         int ret;
564
565         adr += chip->start;
566
567         /* Ensure cmd read/writes are aligned. */ 
568         cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1); 
569
570         spin_lock(chip->mutex);
571         ret = get_chip(map, chip, cmd_addr, FL_READY);
572         if (ret) {
573                 spin_unlock(chip->mutex);
574                 return ret;
575         }
576
577         if (chip->state != FL_POINT && chip->state != FL_READY) {
578                 cfi_write(map, CMD(0xff), cmd_addr);
579
580                 chip->state = FL_READY;
581         }
582
583         map_copy_from(map, buf, adr, len);
584
585         put_chip(map, chip, cmd_addr);
586
587         spin_unlock(chip->mutex);
588         return 0;
589 }
590
591 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
592 {
593         struct map_info *map = mtd->priv;
594         struct cfi_private *cfi = map->fldrv_priv;
595         unsigned long ofs;
596         int chipnum;
597         int ret = 0;
598
599         /* ofs: offset within the first chip that the first read should start */
600         chipnum = (from >> cfi->chipshift);
601         ofs = from - (chipnum <<  cfi->chipshift);
602
603         *retlen = 0;
604
605         while (len) {
606                 unsigned long thislen;
607
608                 if (chipnum >= cfi->numchips)
609                         break;
610
611                 if ((len + ofs -1) >> cfi->chipshift)
612                         thislen = (1<<cfi->chipshift) - ofs;
613                 else
614                         thislen = len;
615
616                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
617                 if (ret)
618                         break;
619
620                 *retlen += thislen;
621                 len -= thislen;
622                 buf += thislen;
623                 
624                 ofs = 0;
625                 chipnum++;
626         }
627         return ret;
628 }
629
630 static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
631 {
632         struct map_info *map = mtd->priv;
633         struct cfi_private *cfi = map->fldrv_priv;
634         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
635         struct flchip *chip;
636         int ofs_factor = cfi->interleave * cfi->device_type;
637         int count = len;
638         int chip_num, offst;
639         int ret;
640
641         chip_num = ((unsigned int)from/reg_sz);
642         offst = from - (reg_sz*chip_num)+base_offst;
643
644         while (count) {
645         /* Calculate which chip & protection register offset we need */
646
647                 if (chip_num >= cfi->numchips)
648                         goto out;
649
650                 chip = &cfi->chips[chip_num];
651                 
652                 spin_lock(chip->mutex);
653                 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
654                 if (ret) {
655                         spin_unlock(chip->mutex);
656                         return (len-count)?:ret;
657                 }
658
659                 if (chip->state != FL_JEDEC_QUERY) {
660                         cfi_write(map, CMD(0x90), chip->start);
661                         chip->state = FL_JEDEC_QUERY;
662                 }
663
664                 while (count && ((offst-base_offst) < reg_sz)) {
665                         *buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
666                         buf++;
667                         offst++;
668                         count--;
669                 }
670
671                 put_chip(map, chip, chip->start);
672                 spin_unlock(chip->mutex);
673
674                 /* Move on to the next chip */
675                 chip_num++;
676                 offst = base_offst;
677         }
678         
679  out:   
680         return len-count;
681 }
682         
683 static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
684 {
685         struct map_info *map = mtd->priv;
686         struct cfi_private *cfi = map->fldrv_priv;
687         struct cfi_pri_intelext *extp=cfi->cmdset_priv;
688         int base_offst,reg_sz;
689         
690         /* Check that we actually have some protection registers */
691         if(!(extp->FeatureSupport&64)){
692                 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
693                 return 0;
694         }
695
696         base_offst=(1<<extp->FactProtRegSize);
697         reg_sz=(1<<extp->UserProtRegSize);
698
699         return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
700 }
701
702 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
703 {
704         struct map_info *map = mtd->priv;
705         struct cfi_private *cfi = map->fldrv_priv;
706         struct cfi_pri_intelext *extp=cfi->cmdset_priv;
707         int base_offst,reg_sz;
708         
709         /* Check that we actually have some protection registers */
710         if(!(extp->FeatureSupport&64)){
711                 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
712                 return 0;
713         }
714
715         base_offst=0;
716         reg_sz=(1<<extp->FactProtRegSize);
717
718         return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
719 }
720
721
722 static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, cfi_word datum)
723 {
724         struct cfi_private *cfi = map->fldrv_priv;
725         cfi_word status, status_OK;
726         unsigned long timeo;
727         int z, ret=0;
728
729         adr += chip->start;
730
731         /* Let's determine this according to the interleave only once */
732         status_OK = CMD(0x80);
733
734         spin_lock(chip->mutex);
735         ret = get_chip(map, chip, adr, FL_WRITING);
736         if (ret) {
737                 spin_unlock(chip->mutex);
738                 return ret;
739         }
740
741         ENABLE_VPP(map);
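        /* Word/byte program: issue the program setup command (0x40), then
           write the data itself to the target address. */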
742         cfi_write(map, CMD(0x40), adr);
743         cfi_write(map, datum, adr);
744         chip->state = FL_WRITING;
745
746         spin_unlock(chip->mutex);
747         cfi_udelay(chip->word_write_time);
748         spin_lock(chip->mutex);
749
750         timeo = jiffies + (HZ/2);
751         z = 0;
752         for (;;) {
753                 if (chip->state != FL_WRITING) {
754                         /* Someone's suspended the write. Sleep */
755                         DECLARE_WAITQUEUE(wait, current);
756
757                         set_current_state(TASK_UNINTERRUPTIBLE);
758                         add_wait_queue(&chip->wq, &wait);
759                         spin_unlock(chip->mutex);
760                         schedule();
761                         remove_wait_queue(&chip->wq, &wait);
762                         timeo = jiffies + (HZ / 2); /* FIXME */
763                         spin_lock(chip->mutex);
764                         continue;
765                 }
766
767                 status = cfi_read(map, adr);
768                 if ((status & status_OK) == status_OK)
769                         break;
770                 
771                 /* OK Still waiting */
772                 if (time_after(jiffies, timeo)) {
773                         chip->state = FL_STATUS;
774                         printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
775                         ret = -EIO;
776                         goto out;
777                 }
778
779                 /* Latency issues. Drop the lock, wait a while and retry */
780                 spin_unlock(chip->mutex);
781                 z++;
782                 cfi_udelay(1);
783                 spin_lock(chip->mutex);
784         }
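        /* Adapt the initial delay: shorten it if the write was already
           complete on the first status check, lengthen it if we had to
           poll more than once. */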
785         if (!z) {
786                 chip->word_write_time--;
787                 if (!chip->word_write_time)
788                         chip->word_write_time++;
789         }
790         if (z > 1) 
791                 chip->word_write_time++;
792
793         /* Done and happy. */
794         chip->state = FL_STATUS;
795         /* check for lock bit */
796         if (status & CMD(0x02)) {
797                 /* clear status */
798                 cfi_write(map, CMD(0x50), adr);
799                 /* put back into read status register mode */
800                 cfi_write(map, CMD(0x70), adr);
801                 ret = -EROFS;
802         }
803  out:
804         put_chip(map, chip, adr);
805         spin_unlock(chip->mutex);
806
807         return ret;
808 }
809
810
811 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
812 {
813         struct map_info *map = mtd->priv;
814         struct cfi_private *cfi = map->fldrv_priv;
815         int ret = 0;
816         int chipnum;
817         unsigned long ofs;
818
819         *retlen = 0;
820         if (!len)
821                 return 0;
822
823         chipnum = to >> cfi->chipshift;
824         ofs = to  - (chipnum << cfi->chipshift);
825
826         /* If it's not bus-aligned, do the first byte write */
827         if (ofs & (CFIDEV_BUSWIDTH-1)) {
828                 unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
829                 int gap = ofs - bus_ofs;
830                 int i = 0, n = 0;
831                 u_char tmp_buf[8];
832                 cfi_word datum;
833
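                /* Pad with 0xff: programming a NOR flash bit to 1 leaves the
                   cell unchanged, so the neighbouring bytes within the bus
                   word are not disturbed. */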
834                 while (gap--)
835                         tmp_buf[i++] = 0xff;
836                 while (len && i < CFIDEV_BUSWIDTH)
837                         tmp_buf[i++] = buf[n++], len--;
838                 while (i < CFIDEV_BUSWIDTH)
839                         tmp_buf[i++] = 0xff;
840
841                 if (cfi_buswidth_is_2()) {
842                         datum = *(__u16*)tmp_buf;
843                 } else if (cfi_buswidth_is_4()) {
844                         datum = *(__u32*)tmp_buf;
845                 } else if (cfi_buswidth_is_8()) {
846                         datum = *(__u64*)tmp_buf;
847                 } else {
848                         return -EINVAL;  /* should never happen, but be safe */
849                 }
850
851                 ret = do_write_oneword(map, &cfi->chips[chipnum],
852                                                bus_ofs, datum);
853                 if (ret) 
854                         return ret;
855                 
856                 ofs += n;
857                 buf += n;
858                 (*retlen) += n;
859
860                 if (ofs >> cfi->chipshift) {
861                         chipnum ++; 
862                         ofs = 0;
863                         if (chipnum == cfi->numchips)
864                                 return 0;
865                 }
866         }
867         
868         while(len >= CFIDEV_BUSWIDTH) {
869                 cfi_word datum;
870
871                 if (cfi_buswidth_is_1()) {
872                         datum = *(__u8*)buf;
873                 } else if (cfi_buswidth_is_2()) {
874                         datum = *(__u16*)buf;
875                 } else if (cfi_buswidth_is_4()) {
876                         datum = *(__u32*)buf;
877                 } else if (cfi_buswidth_is_8()) {
878                         datum = *(__u64*)buf;
879                 } else {
880                         return -EINVAL;
881                 }
882
883                 ret = do_write_oneword(map, &cfi->chips[chipnum],
884                                 ofs, datum);
885                 if (ret)
886                         return ret;
887
888                 ofs += CFIDEV_BUSWIDTH;
889                 buf += CFIDEV_BUSWIDTH;
890                 (*retlen) += CFIDEV_BUSWIDTH;
891                 len -= CFIDEV_BUSWIDTH;
892
893                 if (ofs >> cfi->chipshift) {
894                         chipnum ++; 
895                         ofs = 0;
896                         if (chipnum == cfi->numchips)
897                                 return 0;
898                 }
899         }
900
901         if (len & (CFIDEV_BUSWIDTH-1)) {
902                 int i = 0, n = 0;
903                 u_char tmp_buf[8];
904                 cfi_word datum;
905
906                 while (len--)
907                         tmp_buf[i++] = buf[n++];
908                 while (i < CFIDEV_BUSWIDTH)
909                         tmp_buf[i++] = 0xff;
910
911                 if (cfi_buswidth_is_2()) {
912                         datum = *(__u16*)tmp_buf;
913                 } else if (cfi_buswidth_is_4()) {
914                         datum = *(__u32*)tmp_buf;
915                 } else if (cfi_buswidth_is_8()) {
916                         datum = *(__u64*)tmp_buf;
917                 } else {
918                         return -EINVAL;  /* should never happen, but be safe */
919                 }
920
921                 ret = do_write_oneword(map, &cfi->chips[chipnum],
922                                                ofs, datum);
923                 if (ret) 
924                         return ret;
925                 
926                 (*retlen) += n;
927         }
928
929         return 0;
930 }
931
932
933 static inline int do_write_buffer(struct map_info *map, struct flchip *chip, 
934                                   unsigned long adr, const u_char *buf, int len)
935 {
936         struct cfi_private *cfi = map->fldrv_priv;
937         cfi_word status, status_OK;
938         unsigned long cmd_adr, timeo;
939         int wbufsize, z, ret=0, bytes, words;
940
941         wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
942         adr += chip->start;
943         cmd_adr = adr & ~(wbufsize-1);
944         
945         /* Let's determine this according to the interleave only once */
946         status_OK = CMD(0x80);
947
948         spin_lock(chip->mutex);
949         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
950         if (ret) {
951                 spin_unlock(chip->mutex);
952                 return ret;
953         }
954
955         if (chip->state != FL_STATUS)
956                 cfi_write(map, CMD(0x70), cmd_adr);
957
958         status = cfi_read(map, cmd_adr);
959
960         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
961            [...], the device will not accept any more Write to Buffer commands". 
962            So we must check here and reset those bits if they're set. Otherwise
963            we're just pissing in the wind */
964         if (status & CMD(0x30)) {
965                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %llx). Clearing.\n", (__u64)status);
966                 cfi_write(map, CMD(0x50), cmd_adr);
967                 cfi_write(map, CMD(0x70), cmd_adr);
968         }
969         ENABLE_VPP(map);
970         chip->state = FL_WRITING_TO_BUFFER;
971
972         z = 0;
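        /* Issue "Write to Buffer" (0xe8) and poll the returned status until
           the chip reports the write buffer as available. */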
973         for (;;) {
974                 cfi_write(map, CMD(0xe8), cmd_adr);
975
976                 status = cfi_read(map, cmd_adr);
977                 if ((status & status_OK) == status_OK)
978                         break;
979
980                 spin_unlock(chip->mutex);
981                 cfi_udelay(1);
982                 spin_lock(chip->mutex);
983
984                 if (++z > 20) {
985                         /* Argh. Not ready for write to buffer */
986                         cfi_write(map, CMD(0x70), cmd_adr);
987                         chip->state = FL_STATUS;
988                         printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %llx, status = %llx\n", (__u64)status, (__u64)cfi_read(map, cmd_adr));
989                         /* Odd. Clear status bits */
990                         cfi_write(map, CMD(0x50), cmd_adr);
991                         cfi_write(map, CMD(0x70), cmd_adr);
992                         ret = -EIO;
993                         goto out;
994                 }
995         }
996
997         /* Write length of data to come */
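        /* The chip expects (number of bus words to be written - 1); the
           "- !bytes" term accounts for a trailing partial word which is
           padded out and written below. */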
998         bytes = len & (CFIDEV_BUSWIDTH-1);
999         words = len / CFIDEV_BUSWIDTH;
1000         cfi_write(map, CMD(words - !bytes), cmd_adr );
1001
1002         /* Write data */
1003         z = 0;
1004         while(z < words * CFIDEV_BUSWIDTH) {
1005                 if (cfi_buswidth_is_1()) {
1006                         u8 *b = (u8 *)buf;
1007
1008                         map_write8 (map, *b++, adr+z);
1009                         buf = (const u_char *)b;
1010                 } else if (cfi_buswidth_is_2()) {
1011                         u16 *b = (u16 *)buf;
1012
1013                         map_write16 (map, *b++, adr+z);
1014                         buf = (const u_char *)b;
1015                 } else if (cfi_buswidth_is_4()) {
1016                         u32 *b = (u32 *)buf;
1017
1018                         map_write32 (map, *b++, adr+z);
1019                         buf = (const u_char *)b;
1020                 } else if (cfi_buswidth_is_8()) {
1021                         u64 *b = (u64 *)buf;
1022
1023                         map_write64 (map, *b++, adr+z);
1024                         buf = (const u_char *)b;
1025                 } else {
1026                         ret = -EINVAL;
1027                         goto out;
1028                 }
1029                 z += CFIDEV_BUSWIDTH;
1030         }
1031         if (bytes) {
1032                 int i = 0, n = 0;
1033                 u_char tmp_buf[8], *tmp_p = tmp_buf;
1034
1035                 while (bytes--)
1036                         tmp_buf[i++] = buf[n++];
1037                 while (i < CFIDEV_BUSWIDTH)
1038                         tmp_buf[i++] = 0xff;
1039                 if (cfi_buswidth_is_2()) {
1040                         u16 *b = (u16 *)tmp_p;
1041
1042                         map_write16 (map, *b++, adr+z);
1043                         tmp_p = (u_char *)b;
1044                 } else if (cfi_buswidth_is_4()) {
1045                         u32 *b = (u32 *)tmp_p;
1046
1047                         map_write32 (map, *b++, adr+z);
1048                         tmp_p = (u_char *)b;
1049                 } else if (cfi_buswidth_is_8()) {
1050                         u64 *b = (u64 *)tmp_p;
1051
1052                         map_write64 (map, *b++, adr+z);
1053                         tmp_p = (u_char *)b;
1054                 } else {
1055                         ret = -EINVAL;
1056                         goto out;
1057                 }
1058         }
1059         /* GO GO GO */
1060         cfi_write(map, CMD(0xd0), cmd_adr);
1061         chip->state = FL_WRITING;
1062
1063         spin_unlock(chip->mutex);
1064         cfi_udelay(chip->buffer_write_time);
1065         spin_lock(chip->mutex);
1066
1067         timeo = jiffies + (HZ/2);
1068         z = 0;
1069         for (;;) {
1070                 if (chip->state != FL_WRITING) {
1071                         /* Someone's suspended the write. Sleep */
1072                         DECLARE_WAITQUEUE(wait, current);
1073                         set_current_state(TASK_UNINTERRUPTIBLE);
1074                         add_wait_queue(&chip->wq, &wait);
1075                         spin_unlock(chip->mutex);
1076                         schedule();
1077                         remove_wait_queue(&chip->wq, &wait);
1078                         timeo = jiffies + (HZ / 2); /* FIXME */
1079                         spin_lock(chip->mutex);
1080                         continue;
1081                 }
1082
1083                 status = cfi_read(map, cmd_adr);
1084                 if ((status & status_OK) == status_OK)
1085                         break;
1086
1087                 /* OK Still waiting */
1088                 if (time_after(jiffies, timeo)) {
1089                         chip->state = FL_STATUS;
1090                         printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1091                         ret = -EIO;
1092                         goto out;
1093                 }
1094                 
1095                 /* Latency issues. Drop the lock, wait a while and retry */
1096                 spin_unlock(chip->mutex);
1097                 cfi_udelay(1);
1098                 z++;
1099                 spin_lock(chip->mutex);
1100         }
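        /* Adapt the initial buffer-write delay, as done for word writes */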
1101         if (!z) {
1102                 chip->buffer_write_time--;
1103                 if (!chip->buffer_write_time)
1104                         chip->buffer_write_time++;
1105         }
1106         if (z > 1) 
1107                 chip->buffer_write_time++;
1108
1109         /* Done and happy. */
1110         chip->state = FL_STATUS;
1111
1112         /* check for lock bit */
1113         if (status & CMD(0x02)) {
1114                 /* clear status */
1115                 cfi_write(map, CMD(0x50), cmd_adr);
1116                 /* put back into read status register mode */
1117                 cfi_write(map, CMD(0x70), adr);
1118                 ret = -EROFS;
1119         }
1120
1121  out:
1122         put_chip(map, chip, cmd_adr);
1123         spin_unlock(chip->mutex);
1124         return ret;
1125 }
1126
1127 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, 
1128                                        size_t len, size_t *retlen, const u_char *buf)
1129 {
1130         struct map_info *map = mtd->priv;
1131         struct cfi_private *cfi = map->fldrv_priv;
1132         int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
1133         int ret = 0;
1134         int chipnum;
1135         unsigned long ofs;
1136
1137         *retlen = 0;
1138         if (!len)
1139                 return 0;
1140
1141         chipnum = to >> cfi->chipshift;
1142         ofs = to  - (chipnum << cfi->chipshift);
1143
1144         /* If it's not bus-aligned, do the first word write */
1145         if (ofs & (CFIDEV_BUSWIDTH-1)) {
1146                 size_t local_len = (-ofs)&(CFIDEV_BUSWIDTH-1);
1147                 if (local_len > len)
1148                         local_len = len;
1149                 ret = cfi_intelext_write_words(mtd, to, local_len,
1150                                                retlen, buf);
1151                 if (ret)
1152                         return ret;
1153                 ofs += local_len;
1154                 buf += local_len;
1155                 len -= local_len;
1156
1157                 if (ofs >> cfi->chipshift) {
1158                         chipnum ++;
1159                         ofs = 0;
1160                         if (chipnum == cfi->numchips)
1161                                 return 0;
1162                 }
1163         }
1164
1165         /* Write buffer is worth it only if more than one word to write... */
1166         while(len) {
1167                 /* We must not cross write block boundaries */
1168                 int size = wbufsize - (ofs & (wbufsize-1));
1169
1170                 if (size > len)
1171                         size = len;
1172                 ret = do_write_buffer(map, &cfi->chips[chipnum], 
1173                                       ofs, buf, size);
1174                 if (ret)
1175                         return ret;
1176
1177                 ofs += size;
1178                 buf += size;
1179                 (*retlen) += size;
1180                 len -= size;
1181
1182                 if (ofs >> cfi->chipshift) {
1183                         chipnum ++; 
1184                         ofs = 0;
1185                         if (chipnum == cfi->numchips)
1186                                 return 0;
1187                 }
1188         }
1189         return 0;
1190 }
1191
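/* Walk the range [ofs, ofs+len) block by block, honouring the variable
   erase-region geometry, and call 'frob' on each block.  Used below for
   erase, and by the lock/unlock operations. */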
1192 typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
1193                               unsigned long adr, void *thunk);
1194
1195 static int cfi_intelext_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
1196                                      loff_t ofs, size_t len, void *thunk)
1197 {
1198         struct map_info *map = mtd->priv;
1199         struct cfi_private *cfi = map->fldrv_priv;
1200         unsigned long adr;
1201         int chipnum, ret = 0;
1202         int i, first;
1203         struct mtd_erase_region_info *regions = mtd->eraseregions;
1204
1205         if (ofs > mtd->size)
1206                 return -EINVAL;
1207
1208         if ((len + ofs) > mtd->size)
1209                 return -EINVAL;
1210
1211         /* Check that both start and end of the requested erase are
1212          * aligned with the erasesize at the appropriate addresses.
1213          */
1214
1215         i = 0;
1216
1217         /* Skip all erase regions which are ended before the start of 
1218            the requested erase. Actually, to save on the calculations,
1219            we skip to the first erase region which starts after the
1220            start of the requested erase, and then go back one.
1221         */
1222         
1223         while (i < mtd->numeraseregions && ofs >= regions[i].offset)
1224                i++;
1225         i--;
1226
1227         /* OK, now i is pointing at the erase region in which this 
1228            erase request starts. Check the start of the requested
1229            erase range is aligned with the erase size which is in
1230            effect here.
1231         */
1232
1233         if (ofs & (regions[i].erasesize-1))
1234                 return -EINVAL;
1235
1236         /* Remember the erase region we start on */
1237         first = i;
1238
1239         /* Next, check that the end of the requested erase is aligned
1240          * with the erase region at that address.
1241          */
1242
1243         while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
1244                 i++;
1245
1246         /* As before, drop back one to point at the region in which
1247            the address actually falls
1248         */
1249         i--;
1250         
1251         if ((ofs + len) & (regions[i].erasesize-1))
1252                 return -EINVAL;
1253
1254         chipnum = ofs >> cfi->chipshift;
1255         adr = ofs - (chipnum << cfi->chipshift);
1256
1257         i=first;
1258
1259         while(len) {
1260                 ret = (*frob)(map, &cfi->chips[chipnum], adr, thunk);
1261                 
1262                 if (ret)
1263                         return ret;
1264
1265                 adr += regions[i].erasesize;
1266                 len -= regions[i].erasesize;
1267
1268                 if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
1269                         i++;
1270
1271                 if (adr >> cfi->chipshift) {
1272                         adr = 0;
1273                         chipnum++;
1274                         
1275                         if (chipnum >= cfi->numchips)
1276                         break;
1277                 }
1278         }
1279
1280         return 0;
1281 }
1282
1283
1284 static int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1285 {
1286         struct cfi_private *cfi = map->fldrv_priv;
1287         cfi_word status, status_OK;
1288         unsigned long timeo;
1289         int retries = 3;
1290         DECLARE_WAITQUEUE(wait, current);
1291         int ret = 0;
1292
1293         adr += chip->start;
1294
1295         /* Let's determine this according to the interleave only once */
1296         status_OK = CMD(0x80);
1297
1298  retry:
1299         spin_lock(chip->mutex);
1300         ret = get_chip(map, chip, adr, FL_ERASING);
1301         if (ret) {
1302                 spin_unlock(chip->mutex);
1303                 return ret;
1304         }
1305
1306         ENABLE_VPP(map);
1307         /* Clear the status register first */
1308         cfi_write(map, CMD(0x50), adr);
1309
1310         /* Now erase */
1311         cfi_write(map, CMD(0x20), adr);
1312         cfi_write(map, CMD(0xD0), adr);
1313         chip->state = FL_ERASING;
1314         chip->erase_suspended = 0;
1315
1316         spin_unlock(chip->mutex);
1317         set_current_state(TASK_UNINTERRUPTIBLE);
1318         schedule_timeout((chip->erase_time*HZ)/(2*1000));
1319         spin_lock(chip->mutex);
1320
1321         /* FIXME. Use a timer to check this, and return immediately. */
1322         /* Once the state machine's known to be working I'll do that */
1323
1324         timeo = jiffies + (HZ*20);
1325         for (;;) {
1326                 if (chip->state != FL_ERASING) {
1327                         /* Someone's suspended the erase. Sleep */
1328                         set_current_state(TASK_UNINTERRUPTIBLE);
1329                         add_wait_queue(&chip->wq, &wait);
1330                         spin_unlock(chip->mutex);
1331                         schedule();
1332                         remove_wait_queue(&chip->wq, &wait);
1333                         spin_lock(chip->mutex);
1334                         continue;
1335                 }
1336                 if (chip->erase_suspended) {
1337                         /* This erase was suspended and resumed.
1338                            Adjust the timeout */
1339                         timeo = jiffies + (HZ*20); /* FIXME */
1340                         chip->erase_suspended = 0;
1341                 }
1342
1343                 status = cfi_read(map, adr);
1344                 if ((status & status_OK) == status_OK)
1345                         break;
1346                 
1347                 /* OK Still waiting */
1348                 if (time_after(jiffies, timeo)) {
1349                         cfi_write(map, CMD(0x70), adr);
1350                         chip->state = FL_STATUS;
1351                         printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %llx, status = %llx.\n",
1352                                adr, (__u64)status, (__u64)cfi_read(map, adr));
1353                         /* Clear status bits */
1354                         cfi_write(map, CMD(0x50), adr);
1355                         cfi_write(map, CMD(0x70), adr);
1356                         DISABLE_VPP(map);
1357                         spin_unlock(chip->mutex);
1358                         return -EIO;
1359                 }
1360                 
1361                 /* Latency issues. Drop the lock, wait a while and retry */
1362                 spin_unlock(chip->mutex);
1363                 set_current_state(TASK_UNINTERRUPTIBLE);
1364                 schedule_timeout(1);
1365                 spin_lock(chip->mutex);
1366         }
1367         
1368         DISABLE_VPP(map);
1369         ret = 0;
1370
1371         /* We've broken this before. It doesn't hurt to be safe */
1372         cfi_write(map, CMD(0x70), adr);
1373         chip->state = FL_STATUS;
1374         status = cfi_read(map, adr);
1375
1376         /* Check the error bits: SR.5 (erase), SR.4 (program), SR.3 (Vpp low), SR.1 (block locked) */
1377         if (status & CMD(0x3a)) {
1378                 unsigned char chipstatus = status;
1379                 if (status != CMD(status & 0xff)) {
1380                         int i;
1381                         for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
1382                                 chipstatus |= status >> (cfi->device_type * 8 * i);
1383                         }
1384                         printk(KERN_WARNING "Status is not identical for all chips: 0x%llx. Merging to give 0x%02x\n", (__u64)status, chipstatus);
1385                 }
1386                 /* Reset the error bits */
1387                 cfi_write(map, CMD(0x50), adr);
1388                 cfi_write(map, CMD(0x70), adr);
1389                 
1390                 if ((chipstatus & 0x30) == 0x30) {
1391                         printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%llx\n", (__u64)status);
1392                         ret = -EIO;
1393                 } else if (chipstatus & 0x02) {
1394                         /* Protection bit set */
1395                         ret = -EROFS;
1396                 } else if (chipstatus & 0x8) {
1397                         /* Voltage */
1398                         printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%llx\n", (__u64)status);
1399                         ret = -EIO;
1400                 } else if (chipstatus & 0x20) {
1401                         if (retries--) {
1402                                 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx. Retrying...\n", adr, (__u64)status);
1403                                 timeo = jiffies + HZ;
1404                                 chip->state = FL_STATUS;
1405                                 spin_unlock(chip->mutex);
1406                                 goto retry;
1407                         }
1408                         printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx\n", adr, (__u64)status);
1409                         ret = -EIO;
1410                 }
1411         }
1412
1413         wake_up(&chip->wq);
1414         spin_unlock(chip->mutex);
1415         return ret;
1416 }
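
/*
 * Worked example of the status handling above (a sketch, assuming a
 * 32-bit map built from two interleaved x16 chips, i.e.
 * CFIDEV_INTERLEAVE = 2 and cfi->device_type = 2):
 *
 *      status read  = 0x00A00080
 *        lane 0 (bits  0-15) = 0x0080   chip 0: ready, no error
 *        lane 1 (bits 16-31) = 0x00A0   chip 1: ready, erase error (0x20)
 *
 *      status & CMD(0x3a) is non-zero, and status != CMD(status & 0xff),
 *      so the per-chip status bytes are merged:
 *
 *      chipstatus = 0x80 | (0x00A00080 >> 16)  ->  0xA0
 *
 * and the (chipstatus & 0x20) branch reports the erase failure.
 */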
1417
1418 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1419 {
1420         unsigned long ofs, len;
1421         int ret;
1422
1423         ofs = instr->addr;
1424         len = instr->len;
1425
1426         ret = cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, 0);
1427         if (ret)
1428                 return ret;
1429
1430         instr->state = MTD_ERASE_DONE;
1431         if (instr->callback)
1432                 instr->callback(instr);
1433         
1434         return 0;
1435 }
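
/*
 * Usage sketch (illustrative, not part of this driver): MTD clients
 * drive the erase above through the generic mtd_info interface and
 * wait for MTD_ERASE_DONE/MTD_ERASE_FAILED, much as mtdchar does.
 * The wait queue and the block offset below are the caller's own:
 *
 *      static void erase_done(struct erase_info *instr)
 *      {
 *              wake_up((wait_queue_head_t *)instr->priv);
 *      }
 *
 *      struct erase_info instr = {
 *              .mtd      = mtd,
 *              .addr     = block_ofs,
 *              .len      = mtd->erasesize,
 *              .callback = erase_done,
 *              .priv     = (u_long)&waitq,
 *      };
 *      ret = mtd->erase(mtd, &instr);
 */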
1436
1437 static void cfi_intelext_sync (struct mtd_info *mtd)
1438 {
1439         struct map_info *map = mtd->priv;
1440         struct cfi_private *cfi = map->fldrv_priv;
1441         int i;
1442         struct flchip *chip;
1443         int ret = 0;
1444
1445         for (i=0; !ret && i<cfi->numchips; i++) {
1446                 chip = &cfi->chips[i];
1447
1448                 spin_lock(chip->mutex);
1449                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1450
1451                 if (!ret) {
1452                         chip->oldstate = chip->state;
1453                         chip->state = FL_SYNCING;
1454                         /* No need to wake_up() on this state change - 
1455                          * as the whole point is that nobody can do anything
1456                          * with the chip now anyway.
1457                          */
1458                 }
1459                 spin_unlock(chip->mutex);
1460         }
1461
1462         /* Unlock the chips again */
1463
1464         for (i--; i >=0; i--) {
1465                 chip = &cfi->chips[i];
1466
1467                 spin_lock(chip->mutex);
1468                 
1469                 if (chip->state == FL_SYNCING) {
1470                         chip->state = chip->oldstate;
1471                         wake_up(&chip->wq);
1472                 }
1473                 spin_unlock(chip->mutex);
1474         }
1475 }
1476
1477 #ifdef DEBUG_LOCK_BITS
1478 static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1479 {
1480         struct cfi_private *cfi = map->fldrv_priv;
1481         int ofs_factor = cfi->interleave * cfi->device_type;
1482
1483         cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1484         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1485                adr, cfi_read_query(map, adr+(2*ofs_factor)));
1486         cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1487         
1488         return 0;
1489 }
1490 #endif
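
/*
 * Offset arithmetic used above, worked through for one assumed
 * geometry: in Read Identifier/Query mode (command 0x90) the block
 * lock status sits at device address <block base> + 2, so the map
 * offset is 2 * ofs_factor, with ofs_factor = interleave * device_type
 * (bytes each device contributes to the bus).  For two interleaved
 * x16 chips:
 *
 *      ofs_factor = 2 * 2 = 4   =>   status read at adr + 8
 */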
1491
1492 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1493 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1494
1495 static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1496 {
1497         struct cfi_private *cfi = map->fldrv_priv;
1498         cfi_word status, status_OK;
1499         unsigned long timeo = jiffies + HZ;
1500         int ret;
1501
1502         adr += chip->start;
1503
1504         /* Let's determine this according to the interleave only once */
1505         status_OK = CMD(0x80);
1506
1507         spin_lock(chip->mutex);
1508         ret = get_chip(map, chip, adr, FL_LOCKING);
1509         if (ret) {
1510                 spin_unlock(chip->mutex);
1511                 return ret;
1512         }
1513
1514         ENABLE_VPP(map);
1515         cfi_write(map, CMD(0x60), adr);
1516
1517         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1518                 cfi_write(map, CMD(0x01), adr);
1519                 chip->state = FL_LOCKING;
1520         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1521                 cfi_write(map, CMD(0xD0), adr);
1522                 chip->state = FL_UNLOCKING;
1523         } else
1524                 BUG();
1525
1526         spin_unlock(chip->mutex);
             set_current_state(TASK_UNINTERRUPTIBLE);
1527         schedule_timeout(HZ);
1528         spin_lock(chip->mutex);
1529
1530         /* FIXME. Use a timer to check this, and return immediately. */
1531         /* Once the state machine's known to be working I'll do that */
1532
1533         timeo = jiffies + (HZ*20);
1534         for (;;) {
1535
1536                 status = cfi_read(map, adr);
1537                 if ((status & status_OK) == status_OK)
1538                         break;
1539                 
1540                 /* OK Still waiting */
1541                 if (time_after(jiffies, timeo)) {
1542                         cfi_write(map, CMD(0x70), adr);
1543                         chip->state = FL_STATUS;
1544                         printk(KERN_ERR "Waiting for lock/unlock to complete timed out. Last status = %llx, current status = %llx.\n", (__u64)status, (__u64)cfi_read(map, adr));
1545                         DISABLE_VPP(map);
1546                         spin_unlock(chip->mutex);
1547                         return -EIO;
1548                 }
1549                 
1550                 /* Latency issues. Drop the lock, wait a while and retry */
1551                 spin_unlock(chip->mutex);
1552                 cfi_udelay(1);
1553                 spin_lock(chip->mutex);
1554         }
1555         
1556         /* Done and happy. */
1557         chip->state = FL_STATUS;
1558         put_chip(map, chip, adr);
1559         spin_unlock(chip->mutex);
1560         return 0;
1561 }
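
/*
 * Command sequence issued above (Intel/Sharp block locking):
 *
 *      0x60, 0x01  at the block address  ->  set the block lock bit
 *      0x60, 0xD0  at the block address  ->  clear block lock bit(s)
 *
 * After either sequence the part answers with status reads, so
 * completion is polled by waiting for status bit 7 (0x80, write state
 * machine ready), replicated across interleaved chips by CMD(0x80).
 */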
1562
1563 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1564 {
1565         int ret;
1566
1567 #ifdef DEBUG_LOCK_BITS
1568         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08lx\n",
1569                __FUNCTION__, (unsigned long long)ofs, (unsigned long)len);
1570         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1571                                   ofs, len, 0);
1572 #endif
1573
1574         ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock, 
1575                                         ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1576         
1577 #ifdef DEBUG_LOCK_BITS
1578         printk(KERN_DEBUG
1579                "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
1580         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1581                                   ofs, len, 0);
1582 #endif
1583
1584         return ret;
1585 }
1586
1587 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1588 {
1589         int ret;
1590
1591 #ifdef DEBUG_LOCK_BITS
1592         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08lx\n",
1593                __FUNCTION__, (unsigned long long)ofs, (unsigned long)len);
1594         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1595                                   ofs, len, 0);
1596 #endif
1597
1598         ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1599                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1600         
1601 #ifdef DEBUG_LOCK_BITS
1602         printk(KERN_DEBUG "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
1603         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock, 
1604                                   ofs, len, 0);
1605 #endif
1606         
1607         return ret;
1608 }
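
/*
 * Usage sketch (illustrative): callers such as mtdchar's MEMUNLOCK
 * ioctl, or a board driver preparing a region for writing, go through
 * the mtd_info hooks and guard against chips without lock support:
 *
 *      if (mtd->unlock && mtd->unlock(mtd, ofs, mtd->erasesize))
 *              printk(KERN_WARNING "unlock failed at 0x%llx\n",
 *                     (unsigned long long)ofs);
 */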
1609
1610 static int cfi_intelext_suspend(struct mtd_info *mtd)
1611 {
1612         struct map_info *map = mtd->priv;
1613         struct cfi_private *cfi = map->fldrv_priv;
1614         int i;
1615         struct flchip *chip;
1616         int ret = 0;
1617
1618         for (i=0; !ret && i<cfi->numchips; i++) {
1619                 chip = &cfi->chips[i];
1620
1621                 spin_lock(chip->mutex);
1622
1623                 switch (chip->state) {
1624                 case FL_READY:
1625                 case FL_STATUS:
1626                 case FL_CFI_QUERY:
1627                 case FL_JEDEC_QUERY:
1628                         if (chip->oldstate == FL_READY) {
1629                                 chip->oldstate = chip->state;
1630                                 chip->state = FL_PM_SUSPENDED;
1631                                 /* No need to wake_up() on this state change - 
1632                                  * as the whole point is that nobody can do anything
1633                                  * with the chip now anyway.
1634                                  */
1635                         } else {
                                     /* An operation (e.g. an erase) is suspended
                                        under this state; refuse to power down
                                        until it has completed. */
                                     ret = -EAGAIN;
                             }
1636                         break;
1637                 default:
1638                         ret = -EAGAIN;
1639                 case FL_PM_SUSPENDED:
1640                         break;
1641                 }
1642                 spin_unlock(chip->mutex);
1643         }
1644
1645         /* Unlock the chips again */
1646
1647         if (ret) {
1648                 for (i--; i >=0; i--) {
1649                         chip = &cfi->chips[i];
1650                         
1651                         spin_lock(chip->mutex);
1652                         
1653                         if (chip->state == FL_PM_SUSPENDED) {
1654                                 /* No need to force it into a known state here,
1655                                    because we're returning failure, and it didn't
1656                                    get power cycled */
1657                                 chip->state = chip->oldstate;
1658                                 wake_up(&chip->wq);
1659                         }
1660                         spin_unlock(chip->mutex);
1661                 }
1662         } 
1663         
1664         return ret;
1665 }
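
/*
 * Power-management sketch (illustrative): a map driver's PM hooks
 * typically propagate the -EAGAIN returned here while the flash is
 * busy, and reset the chips on the way back up:
 *
 *      if (mtd->suspend && mtd->suspend(mtd))
 *              return -EAGAIN;         // chip busy, retry the suspend
 *      ...
 *      if (mtd->resume)
 *              mtd->resume(mtd);
 */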
1666
1667 static void cfi_intelext_resume(struct mtd_info *mtd)
1668 {
1669         struct map_info *map = mtd->priv;
1670         struct cfi_private *cfi = map->fldrv_priv;
1671         int i;
1672         struct flchip *chip;
1673
1674         for (i=0; i<cfi->numchips; i++) {
1675         
1676                 chip = &cfi->chips[i];
1677
1678                 spin_lock(chip->mutex);
1679                 
1680                 /* Go to known state. Chip may have been power cycled */
1681                 if (chip->state == FL_PM_SUSPENDED) {
1682                         cfi_write(map, CMD(0xFF), chip->start);
1683                         chip->state = FL_READY;
1684                         wake_up(&chip->wq);
1685                 }
1686
1687                 spin_unlock(chip->mutex);
1688         }
1689 }
1690
1691 static void cfi_intelext_destroy(struct mtd_info *mtd)
1692 {
1693         struct map_info *map = mtd->priv;
1694         struct cfi_private *cfi = map->fldrv_priv;
1695         kfree(cfi->cmdset_priv);
1696         kfree(cfi->cfiq);
1697         kfree(cfi);
1698         kfree(mtd->eraseregions);
1699 }
1700
1701 static char im_name_1[]="cfi_cmdset_0001";
1702 static char im_name_3[]="cfi_cmdset_0003";
1703
1704 int __init cfi_intelext_init(void)
1705 {
1706         inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
1707         inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
1708         return 0;
1709 }
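
/*
 * Registration note and usage sketch: the CFI probe code looks these
 * entries up by name (of the form "cfi_cmdset_%4.4X"), so command set
 * 0x0003 (Intel Standard) is deliberately served by the 0x0001 handler
 * above.  A map driver reaches this code with something like:
 *
 *      struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map_info);
 *      if (!mtd)
 *              return -ENXIO;
 */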
1710
1711 static void __exit cfi_intelext_exit(void)
1712 {
1713         inter_module_unregister(im_name_1);
1714         inter_module_unregister(im_name_3);
1715 }
1716
1717 module_init(cfi_intelext_init);
1718 module_exit(cfi_intelext_exit);
1719
1720 MODULE_LICENSE("GPL");
1721 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
1722 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");