[linux-2.6.git] / drivers / mtd / chips / cfi_cmdset_0001.c
1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * $Id: cfi_cmdset_0001.c,v 1.126 2003/06/23 07:45:48 dwmw2 Exp $
8  *
9  * 
10  * 10/10/2000   Nicolas Pitre <nico@cam.org>
11  *      - completely revamped method functions so they are aware and
12  *        independent of the flash geometry (buswidth, interleave, etc.)
13  *      - scalability vs code size is completely set at compile-time
14  *        (see include/linux/mtd/cfi.h for selection)
15  *      - optimized write buffer method
16  * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17  *      - reworked lock/unlock/erase support for var size flash
18  */
19
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
27
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/mtd/map.h>
33 #include <linux/mtd/mtd.h>
34 #include <linux/mtd/compatmac.h>
35 #include <linux/mtd/cfi.h>
36
37 // debugging, turns off buffer write mode if set to 1
38 #define FORCE_WORD_WRITE 0
39
40 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
41 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
42 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
43 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
44 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
45 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
46 static void cfi_intelext_sync (struct mtd_info *);
47 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
48 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
49 static int cfi_intelext_suspend (struct mtd_info *);
50 static void cfi_intelext_resume (struct mtd_info *);
51
52 static void cfi_intelext_destroy(struct mtd_info *);
53
54 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
55
56 static struct mtd_info *cfi_intelext_setup (struct map_info *);
57
58 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
59                      size_t *retlen, u_char **mtdbuf);
60 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
61                         size_t len);
62
63
64 /*
65  *  *********** SETUP AND PROBE BITS  ***********
66  */
67
68 static struct mtd_chip_driver cfi_intelext_chipdrv = {
69         .probe          = NULL, /* Not usable directly */
70         .destroy        = cfi_intelext_destroy,
71         .name           = "cfi_cmdset_0001",
72         .module         = THIS_MODULE
73 };
74
75 /* #define DEBUG_LOCK_BITS */
76 /* #define DEBUG_CFI_FEATURES */
77
78 #ifdef DEBUG_CFI_FEATURES
79 static void cfi_tell_features(struct cfi_pri_intelext *extp)
80 {
81         int i;
82         printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
83         printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
84         printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
85         printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
86         printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
87         printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
88         printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
89         printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
90         printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
91         printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
92         for (i=9; i<32; i++) {
93                 if (extp->FeatureSupport & (1<<i)) 
94                         printk("     - Unknown Bit %X:      supported\n", i);
95         }
96         
97         printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
98         printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
99         for (i=1; i<8; i++) {
100                 if (extp->SuspendCmdSupport & (1<<i))
101                         printk("     - Unknown Bit %X:               supported\n", i);
102         }
103         
104         printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
105         printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
106         printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
107         for (i=2; i<16; i++) {
108                 if (extp->BlkStatusRegMask & (1<<i))
109                         printk("     - Unknown Bit %X Active: yes\n",i);
110         }
111         
112         printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", 
113                extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
114         if (extp->VppOptimal)
115                 printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", 
116                        extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
117 }
118 #endif
119
120 /* This routine is made available to other mtd code via
121  * inter_module_register.  It must only be accessed through
122  * inter_module_get which will bump the use count of this module.  The
123  * addresses passed back in cfi are valid as long as the use count of
124  * this module is non-zero, i.e. between inter_module_get and
125  * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
126  */
127 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
128 {
129         struct cfi_private *cfi = map->fldrv_priv;
130         int i;
131         __u32 base = cfi->chips[0].start;
132
133         if (cfi->cfi_mode == CFI_MODE_CFI) {
134                 /* 
135                  * It's a real CFI chip, not one for which the probe
136                  * routine faked a CFI structure. So we read the feature
137                  * table from it.
138                  */
139                 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
140                 struct cfi_pri_intelext *extp;
141                 int ofs_factor = cfi->interleave * cfi->device_type;
142
143                 //printk(" Intel/Sharp Extended Query Table at 0x%4.4X\n", adr);
144                 if (!adr)
145                         return NULL;
146
147                 /* Switch it into Query Mode */
148                 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
149
150                 extp = kmalloc(sizeof(*extp), GFP_KERNEL);
151                 if (!extp) {
152                         printk(KERN_ERR "Failed to allocate memory\n");
153                         return NULL;
154                 }
155                 
156                 /* Read in the Extended Query Table */
157                 for (i=0; i<sizeof(*extp); i++) {
158                         ((unsigned char *)extp)[i] = 
159                                 cfi_read_query(map, (base+((adr+i)*ofs_factor)));
160                 }
161                 
162                 if (extp->MajorVersion != '1' || 
163                     (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
164                         printk(KERN_WARNING "  Unknown IntelExt Extended Query "
165                                "version %c.%c.\n",  extp->MajorVersion,
166                                extp->MinorVersion);
167                         kfree(extp);
168                         return NULL;
169                 }
170                 
171                 /* Do some byteswapping if necessary */
172                 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
173                 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
174                 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
175                         
176 #ifdef DEBUG_CFI_FEATURES
177                 /* Tell the user about it in lots of lovely detail */
178                 cfi_tell_features(extp);
179 #endif  
180
181                 if(extp->SuspendCmdSupport & 1) {
182 //#define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
183 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
184 /* Some Intel Strata Flash chips prior to FPO revision C have bugs in this area */ 
185                         printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
186                                "erase on write disabled.\n");
187                         extp->SuspendCmdSupport &= ~1;
188 #else
189                         printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
190 #endif
191                 }
192                 /* Install our own private info structure */
193                 cfi->cmdset_priv = extp;        
194         }
195
196         for (i=0; i< cfi->numchips; i++) {
197                 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
198                 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
199                 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
200                 cfi->chips[i].ref_point_counter = 0;
201         }               
202
203         map->fldrv = &cfi_intelext_chipdrv;
204         
205         /* Make sure it's in read mode */
206         cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
207         return cfi_intelext_setup(map);
208 }
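/*
 * Typical usage from a board map driver (a sketch, not part of this file;
 * "my_map" is a hypothetical map_info filled in by that driver):
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 *
 * The generic CFI probe ends up calling cfi_cmdset_0001() above for chips
 * that report primary vendor command set ID 0x0001.
 */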
209
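/*
 * Build the mtd_info for the probed chip(s): total size, per-chip erase
 * region table, and the read/write/erase/lock methods.  Buffer writes are
 * used when the chip advertises a buffer write timeout, unless
 * FORCE_WORD_WRITE is set above.
 */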
210 static struct mtd_info *cfi_intelext_setup(struct map_info *map)
211 {
212         struct cfi_private *cfi = map->fldrv_priv;
213         struct mtd_info *mtd;
214         unsigned long offset = 0;
215         int i,j;
216         unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
217
218         mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
219         //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
220
221         if (!mtd) {
222                 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
223                 goto setup_err;
224         }
225
226         memset(mtd, 0, sizeof(*mtd));
227         mtd->priv = map;
228         mtd->type = MTD_NORFLASH;
229         mtd->size = devsize * cfi->numchips;
230
231         mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
232         mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 
233                         * mtd->numeraseregions, GFP_KERNEL);
234         if (!mtd->eraseregions) { 
235                 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
236                 goto setup_err;
237         }
238         
239         for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
240                 unsigned long ernum, ersize;
241                 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
242                 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
243
244                 if (mtd->erasesize < ersize) {
245                         mtd->erasesize = ersize;
246                 }
247                 for (j=0; j<cfi->numchips; j++) {
248                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
249                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
250                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
251                 }
252                 offset += (ersize * ernum);
253         }
254
255         if (offset != devsize) {
256                 /* Argh */
257                 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
258                 goto setup_err;
259         }
260
261         for (i=0; i<mtd->numeraseregions;i++){
262                 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
263                        i,mtd->eraseregions[i].offset,
264                        mtd->eraseregions[i].erasesize,
265                        mtd->eraseregions[i].numblocks);
266         }
267
268         /* Also select the correct geometry setup too */ 
269         mtd->erase = cfi_intelext_erase_varsize;
270         mtd->read = cfi_intelext_read;
271
272         if (map_is_linear(map)) {
273                 mtd->point = cfi_intelext_point;
274                 mtd->unpoint = cfi_intelext_unpoint;
275         }
276
277         if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) {
278                 printk(KERN_INFO "Using buffer write method\n" );
279                 mtd->write = cfi_intelext_write_buffers;
280         } else {
281                 printk(KERN_INFO "Using word write method\n" );
282                 mtd->write = cfi_intelext_write_words;
283         }
284         mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
285         mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
286         mtd->sync = cfi_intelext_sync;
287         mtd->lock = cfi_intelext_lock;
288         mtd->unlock = cfi_intelext_unlock;
289         mtd->suspend = cfi_intelext_suspend;
290         mtd->resume = cfi_intelext_resume;
291         mtd->flags = MTD_CAP_NORFLASH;
292         map->fldrv = &cfi_intelext_chipdrv;
293         mtd->name = map->name;
294         __module_get(THIS_MODULE);
295         return mtd;
296
297  setup_err:
298         if(mtd) {
299                 if(mtd->eraseregions)
300                         kfree(mtd->eraseregions);
301                 kfree(mtd);
302         }
303         kfree(cfi->cmdset_priv);
304         kfree(cfi->cfiq);
305         return NULL;
306 }
307
308 /*
309  *  *********** CHIP ACCESS FUNCTIONS ***********
310  */
311
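/*
 * Wait until the chip is ready for the requested operation and claim it.
 * Called with chip->mutex held and returns with it still held: 0 on
 * success, -EIO if the chip never becomes ready.  If an erase is in
 * progress and the chip supports erase suspend for this mode, the erase
 * is suspended; otherwise we sleep on chip->wq until the current
 * operation completes.
 */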
312 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
313 {
314         DECLARE_WAITQUEUE(wait, current);
315         struct cfi_private *cfi = map->fldrv_priv;
316         cfi_word status, status_OK = CMD(0x80);
317         unsigned long timeo;
318         struct cfi_pri_intelext *cfip = (struct cfi_pri_intelext *)cfi->cmdset_priv;
319
320  resettime:
321         timeo = jiffies + HZ;
322  retry:
323         switch (chip->state) {
324
325         case FL_STATUS:
326                 for (;;) {
327                         status = cfi_read(map, adr);
328                         if ((status & status_OK) == status_OK)
329                                 break;
330
331                         if (time_after(jiffies, timeo)) {
332                                 printk(KERN_ERR "Waiting for chip to be ready timed out. Status %llx\n", 
333                                        (long long)status);
334                                 /* keep chip->mutex held; the caller drops it */
335                                 return -EIO;
336                         }
337                         spin_unlock(chip->mutex);
338                         cfi_udelay(1);
339                         spin_lock(chip->mutex);
340                         /* Someone else might have been playing with it. */
341                         goto retry;
342                 }
343                                 
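                /* Fall through: the chip is idle and in read-status mode */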
344         case FL_READY:
345         case FL_CFI_QUERY:
346         case FL_JEDEC_QUERY:
347                 return 0;
348
349         case FL_ERASING:
350                 if (!(cfip->FeatureSupport & 2) ||
351                     !(mode == FL_READY || mode == FL_POINT ||
352                      (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
353                         goto sleep;
354
355
356                 /* Erase suspend */
357                 cfi_write(map, CMD(0xB0), adr);
358
359                 /* If the flash has finished erasing, then 'erase suspend'
360                  * appears to make some (28F320) flash devices switch to
361                  * 'read' mode.  Make sure that we switch to 'read status'
362                  * mode so we get the right data. --rmk
363                  */
364                 cfi_write(map, CMD(0x70), adr);
365                 chip->oldstate = FL_ERASING;
366                 chip->state = FL_ERASE_SUSPENDING;
367                 chip->erase_suspended = 1;
368                 for (;;) {
369                         status = cfi_read(map, adr);
370                         if ((status & status_OK) == status_OK)
371                                 break;
372
373                         if (time_after(jiffies, timeo)) {
374                                 /* Urgh. Resume and pretend we weren't here.  */
375                                 cfi_write(map, CMD(0xd0), adr);
376                                 /* Make sure we're in 'read status' mode if it had finished */
377                                 cfi_write(map, CMD(0x70), adr);
378                                 chip->state = FL_ERASING;
379                                 chip->oldstate = FL_READY;
380                                 printk(KERN_ERR "Chip not ready after erase "
381                        "suspended: status = 0x%llx\n", (__u64)status);
382                                 return -EIO;
383                         }
384
385                         spin_unlock(chip->mutex);
386                         cfi_udelay(1);
387                         spin_lock(chip->mutex);
388                         /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
389                            So we can just loop here. */
390                 }
391                 chip->state = FL_STATUS;
392                 return 0;
393
394         case FL_POINT:
395                 /* Only if there's no operation suspended... */
396                 if (mode == FL_READY && chip->oldstate == FL_READY)
397                         return 0;
398
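                /* Otherwise fall through and wait for the pending operation */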
399         default:
400         sleep:
401                 set_current_state(TASK_UNINTERRUPTIBLE);
402                 add_wait_queue(&chip->wq, &wait);
403                 spin_unlock(chip->mutex);
404                 schedule();
405                 remove_wait_queue(&chip->wq, &wait);
406                 spin_lock(chip->mutex);
407                 goto resettime;
408         }
409 }
410
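/*
 * Undo get_chip(): resume a suspended erase (re-issuing Read Status so
 * interleaved chips stay in a known state) or drop Vpp, then wake up
 * anyone sleeping on the chip.  Called with chip->mutex held.
 */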
411 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
412 {
413         struct cfi_private *cfi = map->fldrv_priv;
414
415         switch(chip->oldstate) {
416         case FL_ERASING:
417                 chip->state = chip->oldstate;
418                 /* What if one interleaved chip has finished and the 
419                    other hasn't? The old code would leave the finished
420                    one in READY mode. That's bad, and caused -EROFS 
421                    errors to be returned from do_erase_oneblock because
422                    that's the only bit it checked for at the time.
423                    As the state machine appears to explicitly allow 
424                    sending the 0x70 (Read Status) command to an erasing
425                    chip and expecting it to be ignored, that's what we 
426                    do. */
427                 cfi_write(map, CMD(0xd0), adr);
428                 cfi_write(map, CMD(0x70), adr);
429                 chip->oldstate = FL_READY;
430                 chip->state = FL_ERASING;
431                 break;
432
433         case FL_READY:
434         case FL_STATUS:
435                 /* We should really make set_vpp() count, rather than doing this */
436                 DISABLE_VPP(map);
437                 break;
438         default:
439                 printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
440         }
441         wake_up(&chip->wq);
442 }
443
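/*
 * point/unpoint give callers a direct pointer into the memory-mapped
 * flash instead of copying through a buffer.  Each chip touched is put
 * into FL_POINT (array read mode), and ref_point_counter tracks nested
 * point calls so unpoint knows when FL_READY can be restored.
 */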
444 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
445 {
446         unsigned long cmd_addr;
447         struct cfi_private *cfi = map->fldrv_priv;
448         int ret = 0;
449
450         adr += chip->start;
451
452         /* Ensure cmd read/writes are aligned. */ 
453         cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1); 
454
455         spin_lock(chip->mutex);
456
457         ret = get_chip(map, chip, cmd_addr, FL_POINT);
458
459         if (!ret) {
460                 if (chip->state != FL_POINT && chip->state != FL_READY)
461                         cfi_write(map, CMD(0xff), cmd_addr);
462
463                 chip->state = FL_POINT;
464                 chip->ref_point_counter++;
465         }
466         spin_unlock(chip->mutex);
467
468         return ret;
469 }
470
471 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
472 {
473         struct map_info *map = mtd->priv;
474         struct cfi_private *cfi = map->fldrv_priv;
475         unsigned long ofs;
476         int chipnum;
477         int ret = 0;
478
479         if (from + len > mtd->size)
480                 return -EINVAL;
481         
482         *mtdbuf = (void *)map->virt + from;
483         if(*mtdbuf == NULL)
484                 return -EINVAL; /* can not point this region */
485         *retlen = 0;
486
487         /* Now lock the chip(s) to POINT state */
488
489         /* ofs: offset within the first chip at which the first read should start */
490         chipnum = (from >> cfi->chipshift);
491         ofs = from - (chipnum << cfi->chipshift);
492
493         while (len) {
494                 unsigned long thislen;
495
496                 if (chipnum >= cfi->numchips)
497                         break;
498
499                 if ((len + ofs -1) >> cfi->chipshift)
500                         thislen = (1<<cfi->chipshift) - ofs;
501                 else
502                         thislen = len;
503
504                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
505                 if (ret)
506                         break;
507
508                 *retlen += thislen;
509                 len -= thislen;
510                 
511                 ofs = 0;
512                 chipnum++;
513         }
514         return 0;
515 }
516
517 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
518 {
519         struct map_info *map = mtd->priv;
520         struct cfi_private *cfi = map->fldrv_priv;
521         unsigned long ofs;
522         int chipnum;
523
524         /* Now take the chip(s) out of POINT state */
525
526         /* ofs: offset within the first chip at which the first read should start */
527         chipnum = (from >> cfi->chipshift);
528         ofs = from - (chipnum <<  cfi->chipshift);
529
530         while (len) {
531                 unsigned long thislen;
532                 struct flchip *chip;
533
534                 chip = &cfi->chips[chipnum];
535                 if (chipnum >= cfi->numchips)
536                         break;
537
538                 if ((len + ofs -1) >> cfi->chipshift)
539                         thislen = (1<<cfi->chipshift) - ofs;
540                 else
541                         thislen = len;
542
543                 spin_lock(chip->mutex);
544                 if (chip->state == FL_POINT) {
545                         chip->ref_point_counter--;
546                         if(chip->ref_point_counter == 0)
547                                 chip->state = FL_READY;
548                 } else
549                         printk(KERN_WARNING "Warning: unpoint called on non-pointed region\n"); /* Should this give an error? */
550
551                 put_chip(map, chip, chip->start);
552                 spin_unlock(chip->mutex);
553
554                 len -= thislen;
555                 ofs = 0;
556                 chipnum++;
557         }
558 }
559
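/*
 * Read from one chip: claim it, switch it to array read mode (0xFF) if
 * it is not already readable, copy out the data, then release the chip.
 */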
560 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
561 {
562         unsigned long cmd_addr;
563         struct cfi_private *cfi = map->fldrv_priv;
564         int ret;
565
566         adr += chip->start;
567
568         /* Ensure cmd read/writes are aligned. */ 
569         cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1); 
570
571         spin_lock(chip->mutex);
572         ret = get_chip(map, chip, cmd_addr, FL_READY);
573         if (ret) {
574                 spin_unlock(chip->mutex);
575                 return ret;
576         }
577
578         if (chip->state != FL_POINT && chip->state != FL_READY) {
579                 cfi_write(map, CMD(0xff), cmd_addr);
580
581                 chip->state = FL_READY;
582         }
583
584         map_copy_from(map, buf, adr, len);
585
586         put_chip(map, chip, cmd_addr);
587
588         spin_unlock(chip->mutex);
589         return 0;
590 }
591
592 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
593 {
594         struct map_info *map = mtd->priv;
595         struct cfi_private *cfi = map->fldrv_priv;
596         unsigned long ofs;
597         int chipnum;
598         int ret = 0;
599
600         /* ofs: offset within the first chip at which the first read should start */
601         chipnum = (from >> cfi->chipshift);
602         ofs = from - (chipnum <<  cfi->chipshift);
603
604         *retlen = 0;
605
606         while (len) {
607                 unsigned long thislen;
608
609                 if (chipnum >= cfi->numchips)
610                         break;
611
612                 if ((len + ofs -1) >> cfi->chipshift)
613                         thislen = (1<<cfi->chipshift) - ofs;
614                 else
615                         thislen = len;
616
617                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
618                 if (ret)
619                         break;
620
621                 *retlen += thislen;
622                 len -= thislen;
623                 buf += thislen;
624                 
625                 ofs = 0;
626                 chipnum++;
627         }
628         return ret;
629 }
630
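/*
 * Read the Intel protection registers.  The chip is put into
 * read-identifier mode (0x90) and the register bytes are fetched
 * relative to ProtRegAddr from the extended query table; the factory
 * register sits first, followed by the user register.
 */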
631 static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
632 {
633         struct map_info *map = mtd->priv;
634         struct cfi_private *cfi = map->fldrv_priv;
635         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
636         struct flchip *chip;
637         int ofs_factor = cfi->interleave * cfi->device_type;
638         int count = len;
639         int chip_num, offst;
640         int ret;
641
642         chip_num = ((unsigned int)from/reg_sz);
643         offst = from - (reg_sz*chip_num)+base_offst;
644
645         while (count) {
646         /* Calculate which chip & protection register offset we need */
647
648                 if (chip_num >= cfi->numchips)
649                         goto out;
650
651                 chip = &cfi->chips[chip_num];
652                 
653                 spin_lock(chip->mutex);
654                 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
655                 if (ret) {
656                         spin_unlock(chip->mutex);
657                         return (len-count)?:ret;
658                 }
659
660                 if (chip->state != FL_JEDEC_QUERY) {
661                         cfi_write(map, CMD(0x90), chip->start);
662                         chip->state = FL_JEDEC_QUERY;
663                 }
664
665                 while (count && ((offst-base_offst) < reg_sz)) {
666                         *buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
667                         buf++;
668                         offst++;
669                         count--;
670                 }
671
672                 put_chip(map, chip, chip->start);
673                 spin_unlock(chip->mutex);
674
675                 /* Move on to the next chip */
676                 chip_num++;
677                 offst = base_offst;
678         }
679         
680  out:   
681         return len-count;
682 }
683         
684 static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
685 {
686         struct map_info *map = mtd->priv;
687         struct cfi_private *cfi = map->fldrv_priv;
688         struct cfi_pri_intelext *extp=cfi->cmdset_priv;
689         int base_offst,reg_sz;
690         
691         /* Check that we actually have some protection registers */
692         if(!(extp->FeatureSupport&64)){
693                 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
694                 return 0;
695         }
696
697         base_offst=(1<<extp->FactProtRegSize);
698         reg_sz=(1<<extp->UserProtRegSize);
699
700         return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
701 }
702
703 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
704 {
705         struct map_info *map = mtd->priv;
706         struct cfi_private *cfi = map->fldrv_priv;
707         struct cfi_pri_intelext *extp=cfi->cmdset_priv;
708         int base_offst,reg_sz;
709         
710         /* Check that we actually have some protection registers */
711         if(!(extp->FeatureSupport&64)){
712                 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
713                 return 0;
714         }
715
716         base_offst=0;
717         reg_sz=(1<<extp->FactProtRegSize);
718
719         return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
720 }
721
722
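/*
 * Program one bus-width word: Word Program setup (0x40), the data
 * itself, then poll the status register until SR.7 reports ready.
 * SR.1 set afterwards means the block was locked, so return -EROFS.
 * The number of polls needed feeds back into chip->word_write_time.
 */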
723 static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, cfi_word datum)
724 {
725         struct cfi_private *cfi = map->fldrv_priv;
726         cfi_word status, status_OK;
727         unsigned long timeo;
728         int z, ret=0;
729
730         adr += chip->start;
731
732         /* Let's determine this according to the interleave only once */
733         status_OK = CMD(0x80);
734
735         spin_lock(chip->mutex);
736         ret = get_chip(map, chip, adr, FL_WRITING);
737         if (ret) {
738                 spin_unlock(chip->mutex);
739                 return ret;
740         }
741
742         ENABLE_VPP(map);
743         cfi_write(map, CMD(0x40), adr);
744         cfi_write(map, datum, adr);
745         chip->state = FL_WRITING;
746
747         spin_unlock(chip->mutex);
748         cfi_udelay(chip->word_write_time);
749         spin_lock(chip->mutex);
750
751         timeo = jiffies + (HZ/2);
752         z = 0;
753         for (;;) {
754                 if (chip->state != FL_WRITING) {
755                         /* Someone's suspended the write. Sleep */
756                         DECLARE_WAITQUEUE(wait, current);
757
758                         set_current_state(TASK_UNINTERRUPTIBLE);
759                         add_wait_queue(&chip->wq, &wait);
760                         spin_unlock(chip->mutex);
761                         schedule();
762                         remove_wait_queue(&chip->wq, &wait);
763                         timeo = jiffies + (HZ / 2); /* FIXME */
764                         spin_lock(chip->mutex);
765                         continue;
766                 }
767
768                 status = cfi_read(map, adr);
769                 if ((status & status_OK) == status_OK)
770                         break;
771                 
772                 /* OK Still waiting */
773                 if (time_after(jiffies, timeo)) {
774                         chip->state = FL_STATUS;
775                         printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
776                         ret = -EIO;
777                         goto out;
778                 }
779
780                 /* Latency issues. Drop the lock, wait a while and retry */
781                 spin_unlock(chip->mutex);
782                 z++;
783                 cfi_udelay(1);
784                 spin_lock(chip->mutex);
785         }
786         if (!z) {
787                 chip->word_write_time--;
788                 if (!chip->word_write_time)
789                         chip->word_write_time++;
790         }
791         if (z > 1) 
792                 chip->word_write_time++;
793
794         /* Done and happy. */
795         chip->state = FL_STATUS;
796         /* check for lock bit */
797         if (status & CMD(0x02)) {
798                 /* clear status */
799                 cfi_write(map, CMD(0x50), adr);
800                 /* put back into read status register mode */
801                 cfi_write(map, CMD(0x70), adr);
802                 ret = -EROFS;
803         }
804  out:
805         put_chip(map, chip, adr);
806         spin_unlock(chip->mutex);
807
808         return ret;
809 }
810
811
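/*
 * Word-write entry point: an unaligned head and tail are padded out to
 * bus width with 0xFF (which leaves the neighbouring bytes unchanged),
 * and everything in between is written one bus-width word at a time.
 */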
812 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
813 {
814         struct map_info *map = mtd->priv;
815         struct cfi_private *cfi = map->fldrv_priv;
816         int ret = 0;
817         int chipnum;
818         unsigned long ofs;
819
820         *retlen = 0;
821         if (!len)
822                 return 0;
823
824         chipnum = to >> cfi->chipshift;
825         ofs = to  - (chipnum << cfi->chipshift);
826
827         /* If it's not bus-aligned, do the first byte write */
828         if (ofs & (CFIDEV_BUSWIDTH-1)) {
829                 unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
830                 int gap = ofs - bus_ofs;
831                 int i = 0, n = 0;
832                 u_char tmp_buf[8];
833                 cfi_word datum;
834
835                 while (gap--)
836                         tmp_buf[i++] = 0xff;
837                 while (len && i < CFIDEV_BUSWIDTH)
838                         tmp_buf[i++] = buf[n++], len--;
839                 while (i < CFIDEV_BUSWIDTH)
840                         tmp_buf[i++] = 0xff;
841
842                 if (cfi_buswidth_is_2()) {
843                         datum = *(__u16*)tmp_buf;
844                 } else if (cfi_buswidth_is_4()) {
845                         datum = *(__u32*)tmp_buf;
846                 } else if (cfi_buswidth_is_8()) {
847                         datum = *(__u64*)tmp_buf;
848                 } else {
849                         return -EINVAL;  /* should never happen, but be safe */
850                 }
851
852                 ret = do_write_oneword(map, &cfi->chips[chipnum],
853                                                bus_ofs, datum);
854                 if (ret) 
855                         return ret;
856                 
857                 ofs += n;
858                 buf += n;
859                 (*retlen) += n;
860
861                 if (ofs >> cfi->chipshift) {
862                         chipnum ++; 
863                         ofs = 0;
864                         if (chipnum == cfi->numchips)
865                                 return 0;
866                 }
867         }
868         
869         while(len >= CFIDEV_BUSWIDTH) {
870                 cfi_word datum;
871
872                 if (cfi_buswidth_is_1()) {
873                         datum = *(__u8*)buf;
874                 } else if (cfi_buswidth_is_2()) {
875                         datum = *(__u16*)buf;
876                 } else if (cfi_buswidth_is_4()) {
877                         datum = *(__u32*)buf;
878                 } else if (cfi_buswidth_is_8()) {
879                         datum = *(__u64*)buf;
880                 } else {
881                         return -EINVAL;
882                 }
883
884                 ret = do_write_oneword(map, &cfi->chips[chipnum],
885                                 ofs, datum);
886                 if (ret)
887                         return ret;
888
889                 ofs += CFIDEV_BUSWIDTH;
890                 buf += CFIDEV_BUSWIDTH;
891                 (*retlen) += CFIDEV_BUSWIDTH;
892                 len -= CFIDEV_BUSWIDTH;
893
894                 if (ofs >> cfi->chipshift) {
895                         chipnum ++; 
896                         ofs = 0;
897                         if (chipnum == cfi->numchips)
898                                 return 0;
899                 }
900         }
901
902         if (len & (CFIDEV_BUSWIDTH-1)) {
903                 int i = 0, n = 0;
904                 u_char tmp_buf[8];
905                 cfi_word datum;
906
907                 while (len--)
908                         tmp_buf[i++] = buf[n++];
909                 while (i < CFIDEV_BUSWIDTH)
910                         tmp_buf[i++] = 0xff;
911
912                 if (cfi_buswidth_is_2()) {
913                         datum = *(__u16*)tmp_buf;
914                 } else if (cfi_buswidth_is_4()) {
915                         datum = *(__u32*)tmp_buf;
916                 } else if (cfi_buswidth_is_8()) {
917                         datum = *(__u64*)tmp_buf;
918                 } else {
919                         return -EINVAL;  /* should never happen, but be safe */
920                 }
921
922                 ret = do_write_oneword(map, &cfi->chips[chipnum],
923                                                ofs, datum);
924                 if (ret) 
925                         return ret;
926                 
927                 (*retlen) += n;
928         }
929
930         return 0;
931 }
932
933
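/*
 * Buffer program sequence: issue Write to Buffer (0xE8) until XSR.7
 * says a buffer is available, write the word count and the data, then
 * Confirm (0xD0) and poll the status register.  The data must not
 * cross a write-buffer boundary; the caller guarantees that.
 */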
934 static inline int do_write_buffer(struct map_info *map, struct flchip *chip, 
935                                   unsigned long adr, const u_char *buf, int len)
936 {
937         struct cfi_private *cfi = map->fldrv_priv;
938         cfi_word status, status_OK;
939         unsigned long cmd_adr, timeo;
940         int wbufsize, z, ret=0, bytes, words;
941
942         wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
943         adr += chip->start;
944         cmd_adr = adr & ~(wbufsize-1);
945         
946         /* Let's determine this according to the interleave only once */
947         status_OK = CMD(0x80);
948
949         spin_lock(chip->mutex);
950         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
951         if (ret) {
952                 spin_unlock(chip->mutex);
953                 return ret;
954         }
955
956         if (chip->state != FL_STATUS)
957                 cfi_write(map, CMD(0x70), cmd_adr);
958
959         status = cfi_read(map, cmd_adr);
960
961         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
962            [...], the device will not accept any more Write to Buffer commands". 
963            So we must check here and reset those bits if they're set. Otherwise
964            we're just pissing in the wind */
965         if (status & CMD(0x30)) {
966                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %llx). Clearing.\n", (__u64)status);
967                 cfi_write(map, CMD(0x50), cmd_adr);
968                 cfi_write(map, CMD(0x70), cmd_adr);
969         }
970         ENABLE_VPP(map);
971         chip->state = FL_WRITING_TO_BUFFER;
972
973         z = 0;
974         for (;;) {
975                 cfi_write(map, CMD(0xe8), cmd_adr);
976
977                 status = cfi_read(map, cmd_adr);
978                 if ((status & status_OK) == status_OK)
979                         break;
980
981                 spin_unlock(chip->mutex);
982                 cfi_udelay(1);
983                 spin_lock(chip->mutex);
984
985                 if (++z > 20) {
986                         /* Argh. Not ready for write to buffer */
987                         cfi_write(map, CMD(0x70), cmd_adr);
988                         chip->state = FL_STATUS;
989                         printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %llx, status = %llx\n", (__u64)status, (__u64)cfi_read(map, cmd_adr));
990                         /* Odd. Clear status bits */
991                         cfi_write(map, CMD(0x50), cmd_adr);
992                         cfi_write(map, CMD(0x70), cmd_adr);
993                         ret = -EIO;
994                         goto out;
995                 }
996         }
997
998         /* Write length of data to come */
999         bytes = len & (CFIDEV_BUSWIDTH-1);
1000         words = len / CFIDEV_BUSWIDTH;
1001         cfi_write(map, CMD(words - !bytes), cmd_adr );
1002
1003         /* Write data */
1004         z = 0;
1005         while(z < words * CFIDEV_BUSWIDTH) {
1006                 if (cfi_buswidth_is_1()) {
1007                         u8 *b = (u8 *)buf;
1008
1009                         map_write8 (map, *b++, adr+z);
1010                         buf = (const u_char *)b;
1011                 } else if (cfi_buswidth_is_2()) {
1012                         u16 *b = (u16 *)buf;
1013
1014                         map_write16 (map, *b++, adr+z);
1015                         buf = (const u_char *)b;
1016                 } else if (cfi_buswidth_is_4()) {
1017                         u32 *b = (u32 *)buf;
1018
1019                         map_write32 (map, *b++, adr+z);
1020                         buf = (const u_char *)b;
1021                 } else if (cfi_buswidth_is_8()) {
1022                         u64 *b = (u64 *)buf;
1023
1024                         map_write64 (map, *b++, adr+z);
1025                         buf = (const u_char *)b;
1026                 } else {
1027                         ret = -EINVAL;
1028                         goto out;
1029                 }
1030                 z += CFIDEV_BUSWIDTH;
1031         }
1032         if (bytes) {
1033                 int i = 0, n = 0;
1034                 u_char tmp_buf[8], *tmp_p = tmp_buf;
1035
1036                 while (bytes--)
1037                         tmp_buf[i++] = buf[n++];
1038                 while (i < CFIDEV_BUSWIDTH)
1039                         tmp_buf[i++] = 0xff;
1040                 if (cfi_buswidth_is_2()) {
1041                         u16 *b = (u16 *)tmp_p;
1042
1043                         map_write16 (map, *b++, adr+z);
1044                         tmp_p = (u_char *)b;
1045                 } else if (cfi_buswidth_is_4()) {
1046                         u32 *b = (u32 *)tmp_p;
1047
1048                         map_write32 (map, *b++, adr+z);
1049                         tmp_p = (u_char *)b;
1050                 } else if (cfi_buswidth_is_8()) {
1051                         u64 *b = (u64 *)tmp_p;
1052
1053                         map_write64 (map, *b++, adr+z);
1054                         tmp_p = (u_char *)b;
1055                 } else {
1056                         ret = -EINVAL;
1057                         goto out;
1058                 }
1059         }
1060         /* GO GO GO */
1061         cfi_write(map, CMD(0xd0), cmd_adr);
1062         chip->state = FL_WRITING;
1063
1064         spin_unlock(chip->mutex);
1065         cfi_udelay(chip->buffer_write_time);
1066         spin_lock(chip->mutex);
1067
1068         timeo = jiffies + (HZ/2);
1069         z = 0;
1070         for (;;) {
1071                 if (chip->state != FL_WRITING) {
1072                         /* Someone's suspended the write. Sleep */
1073                         DECLARE_WAITQUEUE(wait, current);
1074                         set_current_state(TASK_UNINTERRUPTIBLE);
1075                         add_wait_queue(&chip->wq, &wait);
1076                         spin_unlock(chip->mutex);
1077                         schedule();
1078                         remove_wait_queue(&chip->wq, &wait);
1079                         timeo = jiffies + (HZ / 2); /* FIXME */
1080                         spin_lock(chip->mutex);
1081                         continue;
1082                 }
1083
1084                 status = cfi_read(map, cmd_adr);
1085                 if ((status & status_OK) == status_OK)
1086                         break;
1087
1088                 /* OK Still waiting */
1089                 if (time_after(jiffies, timeo)) {
1090                         chip->state = FL_STATUS;
1091                         printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1092                         ret = -EIO;
1093                         goto out;
1094                 }
1095                 
1096                 /* Latency issues. Drop the lock, wait a while and retry */
1097                 spin_unlock(chip->mutex);
1098                 cfi_udelay(1);
1099                 z++;
1100                 spin_lock(chip->mutex);
1101         }
1102         if (!z) {
1103                 chip->buffer_write_time--;
1104                 if (!chip->buffer_write_time)
1105                         chip->buffer_write_time++;
1106         }
1107         if (z > 1) 
1108                 chip->buffer_write_time++;
1109
1110         /* Done and happy. */
1111         chip->state = FL_STATUS;
1112
1113         /* check for lock bit */
1114         if (status & CMD(0x02)) {
1115                 /* clear status */
1116                 cfi_write(map, CMD(0x50), cmd_adr);
1117                 /* put back into read status register mode */
1118                 cfi_write(map, CMD(0x70), adr);
1119                 ret = -EROFS;
1120         }
1121
1122  out:
1123         put_chip(map, chip, cmd_adr);
1124         spin_unlock(chip->mutex);
1125         return ret;
1126 }
1127
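/*
 * Buffered write entry point: align the start with individual word
 * writes if necessary, then feed the rest to do_write_buffer() in
 * chunks that never cross a write-buffer boundary.
 */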
1128 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, 
1129                                        size_t len, size_t *retlen, const u_char *buf)
1130 {
1131         struct map_info *map = mtd->priv;
1132         struct cfi_private *cfi = map->fldrv_priv;
1133         int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
1134         int ret = 0;
1135         int chipnum;
1136         unsigned long ofs;
1137
1138         *retlen = 0;
1139         if (!len)
1140                 return 0;
1141
1142         chipnum = to >> cfi->chipshift;
1143         ofs = to  - (chipnum << cfi->chipshift);
1144
1145         /* If it's not bus-aligned, do the first word write */
1146         if (ofs & (CFIDEV_BUSWIDTH-1)) {
1147                 size_t local_len = (-ofs)&(CFIDEV_BUSWIDTH-1);
1148                 if (local_len > len)
1149                         local_len = len;
1150                 ret = cfi_intelext_write_words(mtd, to, local_len,
1151                                                retlen, buf);
1152                 if (ret)
1153                         return ret;
1154                 ofs += local_len;
1155                 buf += local_len;
1156                 len -= local_len;
1157
1158                 if (ofs >> cfi->chipshift) {
1159                         chipnum ++;
1160                         ofs = 0;
1161                         if (chipnum == cfi->numchips)
1162                                 return 0;
1163                 }
1164         }
1165
1166         /* Write buffer is worth it only if more than one word to write... */
1167         while(len) {
1168                 /* We must not cross write block boundaries */
1169                 int size = wbufsize - (ofs & (wbufsize-1));
1170
1171                 if (size > len)
1172                         size = len;
1173                 ret = do_write_buffer(map, &cfi->chips[chipnum], 
1174                                       ofs, buf, size);
1175                 if (ret)
1176                         return ret;
1177
1178                 ofs += size;
1179                 buf += size;
1180                 (*retlen) += size;
1181                 len -= size;
1182
1183                 if (ofs >> cfi->chipshift) {
1184                         chipnum ++; 
1185                         ofs = 0;
1186                         if (chipnum == cfi->numchips)
1187                                 return 0;
1188                 }
1189         }
1190         return 0;
1191 }
1192
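/*
 * Walk a [ofs, ofs+len) range, calling "frob" (erase, lock, unlock, ...)
 * once per erase block.  Both ends of the range must be aligned to the
 * erase size of the region they fall in; the region table built in
 * cfi_intelext_setup() is used to track the block size as we go.
 */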
1193 typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
1194                               unsigned long adr, void *thunk);
1195
1196 static int cfi_intelext_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
1197                                      loff_t ofs, size_t len, void *thunk)
1198 {
1199         struct map_info *map = mtd->priv;
1200         struct cfi_private *cfi = map->fldrv_priv;
1201         unsigned long adr;
1202         int chipnum, ret = 0;
1203         int i, first;
1204         struct mtd_erase_region_info *regions = mtd->eraseregions;
1205
1206         if (ofs > mtd->size)
1207                 return -EINVAL;
1208
1209         if ((len + ofs) > mtd->size)
1210                 return -EINVAL;
1211
1212         /* Check that both start and end of the requested erase are
1213          * aligned with the erasesize at the appropriate addresses.
1214          */
1215
1216         i = 0;
1217
1218         /* Skip all erase regions which are ended before the start of 
1219            the requested erase. Actually, to save on the calculations,
1220            we skip to the first erase region which starts after the
1221            start of the requested erase, and then go back one.
1222         */
1223         
1224         while (i < mtd->numeraseregions && ofs >= regions[i].offset)
1225                i++;
1226         i--;
1227
1228         /* OK, now i is pointing at the erase region in which this 
1229            erase request starts. Check the start of the requested
1230            erase range is aligned with the erase size which is in
1231            effect here.
1232         */
1233
1234         if (ofs & (regions[i].erasesize-1))
1235                 return -EINVAL;
1236
1237         /* Remember the erase region we start on */
1238         first = i;
1239
1240         /* Next, check that the end of the requested erase is aligned
1241          * with the erase region at that address.
1242          */
1243
1244         while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
1245                 i++;
1246
1247         /* As before, drop back one to point at the region in which
1248            the address actually falls
1249         */
1250         i--;
1251         
1252         if ((ofs + len) & (regions[i].erasesize-1))
1253                 return -EINVAL;
1254
1255         chipnum = ofs >> cfi->chipshift;
1256         adr = ofs - (chipnum << cfi->chipshift);
1257
1258         i=first;
1259
1260         while(len) {
1261                 ret = (*frob)(map, &cfi->chips[chipnum], adr, thunk);
1262                 
1263                 if (ret)
1264                         return ret;
1265
1266                 adr += regions[i].erasesize;
1267                 len -= regions[i].erasesize;
1268
1269                 if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
1270                         i++;
1271
1272                 if (adr >> cfi->chipshift) {
1273                         adr = 0;
1274                         chipnum++;
1275                         
1276                         if (chipnum >= cfi->numchips)
1277                         break;
1278                 }
1279         }
1280
1281         return 0;
1282 }
1283
1284
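/*
 * Erase a single block: clear the status register, issue Block Erase
 * (0x20) + Confirm (0xD0), then wait, honouring erase suspend.  On
 * completion the status error bits are decoded: command-sequence error,
 * lock bit (-EROFS), Vpp low, or erase failure (retried up to three
 * times).
 */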
1285 static int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1286 {
1287         struct cfi_private *cfi = map->fldrv_priv;
1288         cfi_word status, status_OK;
1289         unsigned long timeo;
1290         int retries = 3;
1291         DECLARE_WAITQUEUE(wait, current);
1292         int ret = 0;
1293
1294         adr += chip->start;
1295
1296         /* Let's determine this according to the interleave only once */
1297         status_OK = CMD(0x80);
1298
1299  retry:
1300         spin_lock(chip->mutex);
1301         ret = get_chip(map, chip, adr, FL_ERASING);
1302         if (ret) {
1303                 spin_unlock(chip->mutex);
1304                 return ret;
1305         }
1306
1307         ENABLE_VPP(map);
1308         /* Clear the status register first */
1309         cfi_write(map, CMD(0x50), adr);
1310
1311         /* Now erase */
1312         cfi_write(map, CMD(0x20), adr);
1313         cfi_write(map, CMD(0xD0), adr);
1314         chip->state = FL_ERASING;
1315         chip->erase_suspended = 0;
1316
1317         spin_unlock(chip->mutex);
1318         set_current_state(TASK_UNINTERRUPTIBLE);
1319         schedule_timeout((chip->erase_time*HZ)/(2*1000));
1320         spin_lock(chip->mutex);
1321
1322         /* FIXME. Use a timer to check this, and return immediately. */
1323         /* Once the state machine's known to be working I'll do that */
1324
1325         timeo = jiffies + (HZ*20);
1326         for (;;) {
1327                 if (chip->state != FL_ERASING) {
1328                         /* Someone's suspended the erase. Sleep */
1329                         set_current_state(TASK_UNINTERRUPTIBLE);
1330                         add_wait_queue(&chip->wq, &wait);
1331                         spin_unlock(chip->mutex);
1332                         schedule();
1333                         remove_wait_queue(&chip->wq, &wait);
1334                         spin_lock(chip->mutex);
1335                         continue;
1336                 }
1337                 if (chip->erase_suspended) {
1338                         /* This erase was suspended and resumed.
1339                            Adjust the timeout */
1340                         timeo = jiffies + (HZ*20); /* FIXME */
1341                         chip->erase_suspended = 0;
1342                 }
1343
1344                 status = cfi_read(map, adr);
1345                 if ((status & status_OK) == status_OK)
1346                         break;
1347                 
1348                 /* OK Still waiting */
1349                 if (time_after(jiffies, timeo)) {
1350                         cfi_write(map, CMD(0x70), adr);
1351                         chip->state = FL_STATUS;
1352                         printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %llx, status = %llx.\n",
1353                                adr, (__u64)status, (__u64)cfi_read(map, adr));
1354                         /* Clear status bits */
1355                         cfi_write(map, CMD(0x50), adr);
1356                         cfi_write(map, CMD(0x70), adr);
1357                         DISABLE_VPP(map);
1358                         spin_unlock(chip->mutex);
1359                         return -EIO;
1360                 }
1361                 
1362                 /* Latency issues. Drop the lock, wait a while and retry */
1363                 spin_unlock(chip->mutex);
1364                 set_current_state(TASK_UNINTERRUPTIBLE);
1365                 schedule_timeout(1);
1366                 spin_lock(chip->mutex);
1367         }
1368         
1369         DISABLE_VPP(map);
1370         ret = 0;
1371
1372         /* We've broken this before. It doesn't hurt to be safe */
1373         cfi_write(map, CMD(0x70), adr);
1374         chip->state = FL_STATUS;
1375         status = cfi_read(map, adr);
1376
1377         /* check for lock bit */
1378         if (status & CMD(0x3a)) {
1379                 unsigned char chipstatus = status;
1380                 if (status != CMD(status & 0xff)) {
1381                         int i;
1382                         for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
1383                                 chipstatus |= status >> (i * cfi->device_type * 8);
1384                         }
1385                         printk(KERN_WARNING "Status is not identical for all chips: 0x%llx. Merging to give 0x%02x\n", (__u64)status, chipstatus);
1386                 }
1387                 /* Reset the error bits */
1388                 cfi_write(map, CMD(0x50), adr);
1389                 cfi_write(map, CMD(0x70), adr);
1390                 
1391                 if ((chipstatus & 0x30) == 0x30) {
1392                         printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%llx\n", (__u64)status);
1393                         ret = -EIO;
1394                 } else if (chipstatus & 0x02) {
1395                         /* Protection bit set */
1396                         ret = -EROFS;
1397                 } else if (chipstatus & 0x8) {
1398                         /* Voltage */
1399                         printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%llx\n", (__u64)status);
1400                         ret = -EIO;
1401                 } else if (chipstatus & 0x20) {
1402                         if (retries--) {
1403                                 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx. Retrying...\n", adr, (__u64)status);
1404                                 timeo = jiffies + HZ;
1405                                 chip->state = FL_STATUS;
1406                                 spin_unlock(chip->mutex);
1407                                 goto retry;
1408                         }
1409                         printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx\n", adr, (__u64)status);
1410                         ret = -EIO;
1411                 }
1412         }
1413
1414         wake_up(&chip->wq);
1415         spin_unlock(chip->mutex);
1416         return ret;
1417 }
1418
1419 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1420 {
1421         unsigned long ofs, len;
1422         int ret;
1423
1424         ofs = instr->addr;
1425         len = instr->len;
1426
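             /* run do_erase_oneblock over every block touched by the requested range */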
1427         ret = cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, 0);
1428         if (ret)
1429                 return ret;
1430
1431         instr->state = MTD_ERASE_DONE;
1432         if (instr->callback)
1433                 instr->callback(instr);
1434         
1435         return 0;
1436 }
1437
1438 static void cfi_intelext_sync (struct mtd_info *mtd)
1439 {
1440         struct map_info *map = mtd->priv;
1441         struct cfi_private *cfi = map->fldrv_priv;
1442         int i;
1443         struct flchip *chip;
1444         int ret = 0;
1445
1446         for (i=0; !ret && i<cfi->numchips; i++) {
1447                 chip = &cfi->chips[i];
1448
1449                 spin_lock(chip->mutex);
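                     /* get_chip() waits until the chip is idle before we park it in FL_SYNCING */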
1450                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1451
1452                 if (!ret) {
1453                         chip->oldstate = chip->state;
1454                         chip->state = FL_SYNCING;
1455                         /* No need to wake_up() on this state change - 
1456                          * as the whole point is that nobody can do anything
1457                          * with the chip now anyway.
1458                          */
1459                 }
1460                 spin_unlock(chip->mutex);
1461         }
1462
1463         /* Unlock the chips again */
1464
1465         for (i--; i >=0; i--) {
1466                 chip = &cfi->chips[i];
1467
1468                 spin_lock(chip->mutex);
1469                 
1470                 if (chip->state == FL_SYNCING) {
1471                         chip->state = chip->oldstate;
1472                         wake_up(&chip->wq);
1473                 }
1474                 spin_unlock(chip->mutex);
1475         }
1476 }
1477
1478 #ifdef DEBUG_LOCK_BITS
1479 static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1480 {
1481         struct cfi_private *cfi = map->fldrv_priv;
1482         int ofs_factor = cfi->interleave * cfi->device_type;
1483
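             /* 0x90 = Read Identifier: the block lock status lives at offset 2 in the block; 0xFF below returns the chip to array mode */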
1484         cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1485         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1486                adr, cfi_read_query(map, adr+(2*ofs_factor)));
1487         cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1488         
1489         return 0;
1490 }
1491 #endif
1492
1493 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1494 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1495
1496 static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1497 {
1498         struct cfi_private *cfi = map->fldrv_priv;
1499         cfi_word status, status_OK;
1500         unsigned long timeo = jiffies + HZ;
1501         int ret;
1502
1503         adr += chip->start;
1504
1505         /* Work out the interleave-dependent 'ready' status pattern only once */
1506         status_OK = CMD(0x80);
1507
1508         spin_lock(chip->mutex);
1509         ret = get_chip(map, chip, adr, FL_LOCKING);
1510         if (ret) {
1511                 spin_unlock(chip->mutex);
1512                 return ret;
1513         }
1514
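             /* block lock commands: 0x60 then 0x01 sets the lock bit, 0x60 then 0xD0 clears it */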
1515         ENABLE_VPP(map);
1516         cfi_write(map, CMD(0x60), adr);
1517
1518         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1519                 cfi_write(map, CMD(0x01), adr);
1520                 chip->state = FL_LOCKING;
1521         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1522                 cfi_write(map, CMD(0xD0), adr);
1523                 chip->state = FL_UNLOCKING;
1524         } else
1525                 BUG();
1526
1527         spin_unlock(chip->mutex);
             set_current_state(TASK_UNINTERRUPTIBLE);
1528         schedule_timeout(HZ);
1529         spin_lock(chip->mutex);
1530
1531         /* FIXME. Use a timer to check this, and return immediately. */
1532         /* Once the state machine's known to be working I'll do that */
1533
1534         timeo = jiffies + (HZ*20);
1535         for (;;) {
1536
1537                 status = cfi_read(map, adr);
1538                 if ((status & status_OK) == status_OK)
1539                         break;
1540                 
1541                 /* OK, still waiting */
1542                 if (time_after(jiffies, timeo)) {
1543                         cfi_write(map, CMD(0x70), adr);
1544                         chip->state = FL_STATUS;
1545                         printk(KERN_ERR "waiting for lock/unlock operation to complete timed out. Xstatus = %llx, status = %llx.\n", (__u64)status, (__u64)cfi_read(map, adr));
1546                         DISABLE_VPP(map);
1547                         spin_unlock(chip->mutex);
1548                         return -EIO;
1549                 }
1550                 
1551                 /* Latency issues. Drop the lock, wait a while and retry */
1552                 spin_unlock(chip->mutex);
1553                 cfi_udelay(1);
1554                 spin_lock(chip->mutex);
1555         }
1556         
1557         /* Done and happy. */
1558         chip->state = FL_STATUS;
1559         put_chip(map, chip, adr);
1560         spin_unlock(chip->mutex);
1561         return 0;
1562 }
1563
1564 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1565 {
1566         int ret;
1567
1568 #ifdef DEBUG_LOCK_BITS
1569         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08lx\n",
1570                __FUNCTION__, (unsigned long long)ofs, (unsigned long)len);
1571         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1572                                   ofs, len, 0);
1573 #endif
1574
1575         ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock, 
1576                                         ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1577         
1578 #ifdef DEBUG_LOCK_BITS
1579         printk(KERN_DEBUG
1580                "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
1581         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1582                                   ofs, len, 0);
1583 #endif
1584
1585         return ret;
1586 }
1587
1588 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1589 {
1590         int ret;
1591
1592 #ifdef DEBUG_LOCK_BITS
1593         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08lx\n",
1594                __FUNCTION__, (unsigned long long)ofs, (unsigned long)len);
1595         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1596                                   ofs, len, 0);
1597 #endif
1598
1599         ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1600                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1601         
1602 #ifdef DEBUG_LOCK_BITS
1603         printk(KERN_DEBUG "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
1604         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock, 
1605                                   ofs, len, 0);
1606 #endif
1607         
1608         return ret;
1609 }
1610
1611 static int cfi_intelext_suspend(struct mtd_info *mtd)
1612 {
1613         struct map_info *map = mtd->priv;
1614         struct cfi_private *cfi = map->fldrv_priv;
1615         int i;
1616         struct flchip *chip;
1617         int ret = 0;
1618
1619         for (i=0; !ret && i<cfi->numchips; i++) {
1620                 chip = &cfi->chips[i];
1621
1622                 spin_lock(chip->mutex);
1623
1624                 switch (chip->state) {
1625                 case FL_READY:
1626                 case FL_STATUS:
1627                 case FL_CFI_QUERY:
1628                 case FL_JEDEC_QUERY:
1629                         if (chip->oldstate == FL_READY) {
1630                                 chip->oldstate = chip->state;
1631                                 chip->state = FL_PM_SUSPENDED;
1632                                 /* No need to wake_up() on this state change - 
1633                                  * as the whole point is that nobody can do anything
1634                                  * with the chip now anyway.
1635                                  */
1636                         }
1637                         break;
1638                 default:
1639                         ret = -EAGAIN;
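                                     /* deliberate fall-through: an already-suspended chip is not an error */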
1640                 case FL_PM_SUSPENDED:
1641                         break;
1642                 }
1643                 spin_unlock(chip->mutex);
1644         }
1645
1646         /* Unlock the chips again */
1647
1648         if (ret) {
1649                 for (i--; i >=0; i--) {
1650                         chip = &cfi->chips[i];
1651                         
1652                         spin_lock(chip->mutex);
1653                         
1654                         if (chip->state == FL_PM_SUSPENDED) {
1655                                 /* No need to force it into a known state here,
1656                                    because we're returning failure, and it didn't
1657                                    get power cycled */
1658                                 chip->state = chip->oldstate;
1659                                 wake_up(&chip->wq);
1660                         }
1661                         spin_unlock(chip->mutex);
1662                 }
1663         } 
1664         
1665         return ret;
1666 }
1667
1668 static void cfi_intelext_resume(struct mtd_info *mtd)
1669 {
1670         struct map_info *map = mtd->priv;
1671         struct cfi_private *cfi = map->fldrv_priv;
1672         int i;
1673         struct flchip *chip;
1674
1675         for (i=0; i<cfi->numchips; i++) {
1676         
1677                 chip = &cfi->chips[i];
1678
1679                 spin_lock(chip->mutex);
1680                 
1681                 /* Go to known state. Chip may have been power cycled */
1682                 if (chip->state == FL_PM_SUSPENDED) {
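                                     /* 0xFF = Read Array: return the (possibly power-cycled) chip to normal read mode */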
1683                         cfi_write(map, CMD(0xFF), 0);
1684                         chip->state = FL_READY;
1685                         wake_up(&chip->wq);
1686                 }
1687
1688                 spin_unlock(chip->mutex);
1689         }
1690 }
1691
1692 static void cfi_intelext_destroy(struct mtd_info *mtd)
1693 {
1694         struct map_info *map = mtd->priv;
1695         struct cfi_private *cfi = map->fldrv_priv;
1696         kfree(cfi->cmdset_priv);
1697         kfree(cfi->cfiq);
1698         kfree(cfi);
1699         kfree(mtd->eraseregions);
1700 }
1701
1702 static char im_name_1[]="cfi_cmdset_0001";
1703 static char im_name_3[]="cfi_cmdset_0003";
1704
1705 int __init cfi_intelext_init(void)
1706 {
1707         inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
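             /* chips reporting command set 0003 are handled by the same 0001 code */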
1708         inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
1709         return 0;
1710 }
1711
1712 static void __exit cfi_intelext_exit(void)
1713 {
1714         inter_module_unregister(im_name_1);
1715         inter_module_unregister(im_name_3);
1716 }
1717
1718 module_init(cfi_intelext_init);
1719 module_exit(cfi_intelext_exit);
1720
1721 MODULE_LICENSE("GPL");
1722 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
1723 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");