/*
 * VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
 * [linux-2.6.git] / drivers / mtd / chips / cfi_cmdset_0002.c
 */
1 /*
2  * Common Flash Interface support:
3  *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4  *
5  * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6  * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7  *
8  * 2_by_8 routines added by Simon Munton
9  *
10  * 4_by_16 work by Carolyn J. Smith
11  *
12  * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
13  *
14  * This code is GPL
15  *
16  * $Id: cfi_cmdset_0002.c,v 1.106 2004/08/09 14:02:32 dwmw2 Exp $
17  *
18  */
19
20 #include <linux/config.h>
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/init.h>
26 #include <asm/io.h>
27 #include <asm/byteorder.h>
28
29 #include <linux/errno.h>
30 #include <linux/slab.h>
31 #include <linux/delay.h>
32 #include <linux/interrupt.h>
33 #include <linux/mtd/compatmac.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/cfi.h>
37
38 #define AMD_BOOTLOC_BUG
39 #define FORCE_WORD_WRITE 0
40
41 #define MAX_WORD_RETRIES 3
42
43 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
44 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
45 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
46 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
47 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
48 static int cfi_amdstd_lock_varsize(struct mtd_info *, loff_t, size_t);
49 static int cfi_amdstd_unlock_varsize(struct mtd_info *, loff_t, size_t);
50 static void cfi_amdstd_sync (struct mtd_info *);
51 static int cfi_amdstd_suspend (struct mtd_info *);
52 static void cfi_amdstd_resume (struct mtd_info *);
53 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
54
55 static void cfi_amdstd_destroy(struct mtd_info *);
56
57 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
58 static struct mtd_info *cfi_amdstd_setup (struct map_info *);
59
60
/* Chip-driver registration record.  .probe is NULL because this
 * command-set driver is only entered via cfi_cmdset_0002(), never by a
 * direct probe. */
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
67
68
69 /* #define DEBUG_LOCK_BITS */
70 /* #define DEBUG_CFI_FEATURES */
71
72
#ifdef DEBUG_CFI_FEATURES
/*
 * Dump the AMD/Fujitsu extended (PRI) query table in human-readable
 * form.  Pure debug aid, only built when DEBUG_CFI_FEATURES is
 * defined above.
 */
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	/* Bit 0 of SiliconRevision flags address-insensitive unlock;
	 * the remaining bits hold the revision number. */
	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	/* Vpp fields: high nibble = volts, low nibble = tenths of a volt. */
	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
122
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct map_info *map, void* param)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u16 version = (extp->MajorVersion << 8) | extp->MinorVersion;

	/* Extended-table versions older than "1.1" (ASCII 0x31,0x31)
	 * can't be trusted about the boot-block location; derive it
	 * from the JEDEC device ID instead. */
	if (version < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif
143
/* Per-manufacturer fixups applied through cfi_fixup(); the table is
 * terminated by an all-NULL entry.  0x0001 is the AMD manufacturer ID. */
static struct cfi_fixup fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
	{
		0x0001,		/* AMD */
		CFI_ID_ANY,
		fixup_amd_bootblock, NULL
	},
#endif
	{ 0, 0, NULL, NULL }
};
154
155
156 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
157 {
158         struct cfi_private *cfi = map->fldrv_priv;
159         unsigned char bootloc;
160         int i;
161
162         if (cfi->cfi_mode==CFI_MODE_CFI){
163                 /* 
164                  * It's a real CFI chip, not one for which the probe
165                  * routine faked a CFI structure. So we read the feature
166                  * table from it.
167                  */
168                 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
169                 struct cfi_pri_amdstd *extp;
170
171                 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
172                 if (!extp)
173                         return NULL;
174
175                 /* Install our own private info structure */
176                 cfi->cmdset_priv = extp;        
177
178                 cfi_fixup(map, fixup_table);
179
180 #ifdef DEBUG_CFI_FEATURES
181                 /* Tell the user about it in lots of lovely detail */
182                 cfi_tell_features(extp);
183 #endif  
184
185                 bootloc = extp->TopBottom;
186                 if ((bootloc != 2) && (bootloc != 3)) {
187                         printk(KERN_WARNING "%s: CFI does not contain boot "
188                                "bank location. Assuming top.\n", map->name);
189                         bootloc = 2;
190                 }
191
192                 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
193                         printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
194                         
195                         for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
196                                 int j = (cfi->cfiq->NumEraseRegions-1)-i;
197                                 __u32 swap;
198                                 
199                                 swap = cfi->cfiq->EraseRegionInfo[i];
200                                 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
201                                 cfi->cfiq->EraseRegionInfo[j] = swap;
202                         }
203                 }
204                 /*
205                  * These might already be setup (more correctly) by
206                  * jedec_probe.c - still need it for cfi_probe.c path.
207                  */
208                 if ( ! (cfi->addr_unlock1 && cfi->addr_unlock2) ) {
209                         switch (cfi->device_type) {
210                         case CFI_DEVICETYPE_X8:
211                                 cfi->addr_unlock1 = 0x555; 
212                                 cfi->addr_unlock2 = 0x2aa; 
213                                 break;
214                         case CFI_DEVICETYPE_X16:
215                                 cfi->addr_unlock1 = 0xaaa;
216                                 if (map_bankwidth(map) == cfi_interleave(cfi)) {
217                                         /* X16 chip(s) in X8 mode */
218                                         cfi->addr_unlock2 = 0x555;
219                                 } else {
220                                         cfi->addr_unlock2 = 0x554;
221                                 }
222                                 break;
223                         case CFI_DEVICETYPE_X32:
224                                 cfi->addr_unlock1 = 0x1554;
225                                 if (map_bankwidth(map) == cfi_interleave(cfi)*2) {
226                                         /* X32 chip(s) in X16 mode */
227                                         cfi->addr_unlock1 = 0xaaa;
228                                 } else {
229                                         cfi->addr_unlock2 = 0xaa8; 
230                                 }
231                                 break;
232                         default:
233                                 printk(KERN_WARNING
234                                        "MTD %s(): Unsupported device type %d\n",
235                                        __func__, cfi->device_type);
236                                 return NULL;
237                         }
238                 }
239
240         } /* CFI mode */
241
242         for (i=0; i< cfi->numchips; i++) {
243                 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
244                 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
245                 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
246         }               
247         
248         map->fldrv = &cfi_amdstd_chipdrv;
249
250         return cfi_amdstd_setup(map);
251 }
252
253
/*
 * Build the mtd_info for the probed chip set: total size, per-region
 * erase geometry, and the read/write/erase/lock entry points.  On any
 * failure everything allocated here plus cfi->cmdset_priv and
 * cfi->cfiq is freed and NULL is returned.
 */
static struct mtd_info *cfi_amdstd_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	/* DevSize is the log2 of the per-chip size in bytes. */
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);

	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		goto setup_err;
	}

	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	/* Also select the correct geometry setup too */
	mtd->size = devsize * cfi->numchips;

	/* One mtd_erase_region_info per CFI erase region per chip. */
	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo packs (block size / 256) in its high 16
		 * bits and (block count - 1) in its low 16 bits, so
		 * (x >> 8) & ~0xff yields the block size in bytes. */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize reports the largest block size present. */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		/* Replicate this region's layout across every chip. */
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	/* Sanity check: the regions must exactly tile one chip. */
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* A single region containing a single block means the device can
	 * only be erased as a whole chip. */
	if (mtd->numeraseregions == 1
	    && ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) + 1) == 1) {
		mtd->erase = cfi_amdstd_erase_chip;
	} else {
		mtd->erase = cfi_amdstd_erase_varsize;
		mtd->lock = cfi_amdstd_lock_varsize;
		mtd->unlock = cfi_amdstd_unlock_varsize;
	}

	/* A non-zero buffer-write timeout implies write-buffer support. */
	if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
		mtd->write = cfi_amdstd_write_buffers;
	} else {
		DEBUG(MTD_DEBUG_LEVEL1, "Using word write method\n" );
		mtd->write = cfi_amdstd_write_words;
	}

	mtd->read = cfi_amdstd_read;

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	/* does this chip have a secsi area? */
	/* mfr 1 is AMD (see fixup_table); the IDs listed get the SecSi
	 * read hooks, everything else falls through to the empty default. */
	if(cfi->mfr==1){

		switch(cfi->id){
		case 0x50:
		case 0x53:
		case 0x55:
		case 0x56:
		case 0x5C:
		case 0x5F:
			/* Yes */
			mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
			mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
		default:
			;
		}
	}


	mtd->sync = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume = cfi_amdstd_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	map->fldrv = &cfi_amdstd_chipdrv;
	mtd->name = map->name;
	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}
376
377 /*
378  * Return true if the chip is ready.
379  *
380  * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
381  * non-suspended sector) and is indicated by no toggle bits toggling.
382  *
383  * Note that anything more complicated than checking if no bits are toggling
384  * (including checking DQ5 for an error status) is tricky to get working
385  * correctly and is therefore not done  (particulary with interleaved chips
386  * as each chip must be checked independantly of the others).
387  */
388 static int chip_ready(struct map_info *map, unsigned long addr)
389 {
390         map_word d, t;
391
392         d = map_read(map, addr);
393         t = map_read(map, addr);
394
395         return map_word_equal(map, d, t);
396 }
397
398 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
399 {
400         DECLARE_WAITQUEUE(wait, current);
401         struct cfi_private *cfi = map->fldrv_priv;
402         unsigned long timeo;
403         struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
404
405  resettime:
406         timeo = jiffies + HZ;
407  retry:
408         switch (chip->state) {
409
410         case FL_STATUS:
411                 for (;;) {
412                         if (chip_ready(map, adr))
413                                 break;
414
415                         if (time_after(jiffies, timeo)) {
416                                 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
417                                 cfi_spin_unlock(chip->mutex);
418                                 return -EIO;
419                         }
420                         cfi_spin_unlock(chip->mutex);
421                         cfi_udelay(1);
422                         cfi_spin_lock(chip->mutex);
423                         /* Someone else might have been playing with it. */
424                         goto retry;
425                 }
426                                 
427         case FL_READY:
428         case FL_CFI_QUERY:
429         case FL_JEDEC_QUERY:
430                 return 0;
431
432         case FL_ERASING:
433                 if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
434                         goto sleep;
435
436                 if (!(mode == FL_READY || mode == FL_POINT
437                       || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
438                       || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
439                         goto sleep;
440
441                 /* We could check to see if we're trying to access the sector
442                  * that is currently being erased. However, no user will try
443                  * anything like that so we just wait for the timeout. */
444
445                 /* Erase suspend */
446                 /* It's harmless to issue the Erase-Suspend and Erase-Resume
447                  * commands when the erase algorithm isn't in progress. */
448                 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
449                 chip->oldstate = FL_ERASING;
450                 chip->state = FL_ERASE_SUSPENDING;
451                 chip->erase_suspended = 1;
452                 for (;;) {
453                         if (chip_ready(map, adr))
454                                 break;
455
456                         if (time_after(jiffies, timeo)) {
457                                 /* Should have suspended the erase by now.
458                                  * Send an Erase-Resume command as either
459                                  * there was an error (so leave the erase
460                                  * routine to recover from it) or we trying to
461                                  * use the erase-in-progress sector. */
462                                 map_write(map, CMD(0x30), chip->in_progress_block_addr);
463                                 chip->state = FL_ERASING;
464                                 chip->oldstate = FL_READY;
465                                 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
466                                 return -EIO;
467                         }
468                         
469                         cfi_spin_unlock(chip->mutex);
470                         cfi_udelay(1);
471                         cfi_spin_lock(chip->mutex);
472                         /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
473                            So we can just loop here. */
474                 }
475                 chip->state = FL_READY;
476                 return 0;
477
478         case FL_POINT:
479                 /* Only if there's no operation suspended... */
480                 if (mode == FL_READY && chip->oldstate == FL_READY)
481                         return 0;
482
483         default:
484         sleep:
485                 set_current_state(TASK_UNINTERRUPTIBLE);
486                 add_wait_queue(&chip->wq, &wait);
487                 cfi_spin_unlock(chip->mutex);
488                 schedule();
489                 remove_wait_queue(&chip->wq, &wait);
490                 cfi_spin_lock(chip->mutex);
491                 goto resettime;
492         }
493 }
494
495
/*
 * Release a chip claimed with get_chip(): resume a suspended erase
 * (0x30 = Erase-Resume) or drop Vpp, then wake anyone sleeping on
 * chip->wq.  Called with chip->mutex held.
 *
 * NOTE(review): 'cfi' looks unused, but is presumably referenced
 * inside the CMD() macro - verify before removing.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* Resume the erase at the block it was suspended in. */
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
518
519
/*
 * Read 'len' bytes at chip-relative offset 'adr' from a single chip
 * into 'buf'.  Claims the chip via get_chip(), forces it back into
 * array-read mode with a reset (0xF0) if necessary, copies the data
 * and releases the chip.  Returns 0 or the get_chip() error.
 *
 * NOTE(review): 'cfi' looks unused, but is presumably referenced
 * inside the CMD() macro - verify before removing.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		/* 0xF0 = read/reset: return the chip to array-read mode. */
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	cfi_spin_unlock(chip->mutex);
	return 0;
}
550
551
552 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
553 {
554         struct map_info *map = mtd->priv;
555         struct cfi_private *cfi = map->fldrv_priv;
556         unsigned long ofs;
557         int chipnum;
558         int ret = 0;
559
560         /* ofs: offset within the first chip that the first read should start */
561
562         chipnum = (from >> cfi->chipshift);
563         ofs = from - (chipnum <<  cfi->chipshift);
564
565
566         *retlen = 0;
567
568         while (len) {
569                 unsigned long thislen;
570
571                 if (chipnum >= cfi->numchips)
572                         break;
573
574                 if ((len + ofs -1) >> cfi->chipshift)
575                         thislen = (1<<cfi->chipshift) - ofs;
576                 else
577                         thislen = len;
578
579                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
580                 if (ret)
581                         break;
582
583                 *retlen += thislen;
584                 len -= thislen;
585                 buf += thislen;
586
587                 ofs = 0;
588                 chipnum++;
589         }
590         return ret;
591 }
592
593
/*
 * Read 'len' bytes from the SecSi (security silicon) area of one chip.
 * Sleeps on chip->wq until the chip is FL_READY, then enters SecSi
 * sector mode (unlock cycles + 0x88), copies the data, and exits again
 * (unlock cycles + 0x90 / 0x00).  Always returns 0.
 *
 * NOTE(review): 'timeo' is assigned but never checked in this
 * function.
 */
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	/* Enter SecSi sector mode: two unlock cycles then 0x88. */
	/* should these be CFI_DEVICETYPE_X8 instead of cfi->device_type? */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	/* Exit SecSi sector mode: unlock cycles then 0x90 / 0x00. */
	/* should these be CFI_DEVICETYPE_X8 instead of cfi->device_type? */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return 0;
}
645
/*
 * MTD read_{user,fact}_prot_reg hook: read from the chips' SecSi
 * areas.  Each chip contributes 8 SecSi bytes, so the chip index is
 * from >> 3 and the in-chip offset is from & 7; reads crossing a chip
 * boundary are split across consecutive chips.
 */
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;


	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* Clip this chunk at the 8-byte per-chip boundary. */
		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
688
689
/*
 * Program a single bus-width word 'datum' at chip-relative address
 * 'adr'.  The write is skipped when the location already holds the
 * requested value.  Completion is detected by reading the location
 * twice and waiting for the DQ toggle bits to stop changing; on
 * failure the chip is reset (0xF0) and the whole sequence retried up
 * to MAX_WORD_RETRIES times.  Returns 0 on success, -EIO otherwise.
 */
static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does have a field for
	 * maximum timeout, only for typical which can be far too short
	 * depending of the conditions.  The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd, curd;
	int retry_cnt = 0;

	adr += chip->start;

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	ENABLE_VPP(map);
 retry:
	/*
	 * The CFI_DEVICETYPE_X8 argument is needed even when
	 * cfi->device_type != CFI_DEVICETYPE_X8.  The addresses for
	 * command sequences don't scale even when the device is
	 * wider.  This is the case for many of the cfi_send_gen_cmd()
	 * below.  I'm not sure, however, why some use
	 * cfi->device_type.
	 */
	/* AMD three-cycle program sequence (AA / 55 / A0), then the data. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	/* Give the chip its typical program time before polling. */
	cfi_spin_unlock(chip->mutex);
	cfi_udelay(chip->word_write_time);
	cfi_spin_lock(chip->mutex);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			cfi_spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			cfi_spin_lock(chip->mutex);
			continue;
		}

		/* Test to see if toggling has stopped. */
		oldd = map_read(map, adr);
		curd = map_read(map, adr);
		if (map_word_equal(map, curd, oldd)) {
			/* Do we have the correct value? */
			if (map_word_equal(map, curd, datum)) {
				goto op_done;
			}
			/* Nope something has gone wrong. */
			break;
		}

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
				__func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		cfi_spin_unlock(chip->mutex);
		cfi_udelay(1);
		cfi_spin_lock(chip->mutex);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	/* FIXME - should have reset delay before continuing */
	if (++retry_cnt <= MAX_WORD_RETRIES)
		goto retry;

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	cfi_spin_unlock(chip->mutex);

	return ret;
}
808
809
810 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
811                                   size_t *retlen, const u_char *buf)
812 {
813         struct map_info *map = mtd->priv;
814         struct cfi_private *cfi = map->fldrv_priv;
815         int ret = 0;
816         int chipnum;
817         unsigned long ofs, chipstart;
818         DECLARE_WAITQUEUE(wait, current);
819
820         *retlen = 0;
821         if (!len)
822                 return 0;
823
824         chipnum = to >> cfi->chipshift;
825         ofs = to  - (chipnum << cfi->chipshift);
826         chipstart = cfi->chips[chipnum].start;
827
828         /* If it's not bus-aligned, do the first byte write */
829         if (ofs & (map_bankwidth(map)-1)) {
830                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
831                 int i = ofs - bus_ofs;
832                 int n = 0;
833                 map_word tmp_buf;
834
835  retry:
836                 cfi_spin_lock(cfi->chips[chipnum].mutex);
837
838                 if (cfi->chips[chipnum].state != FL_READY) {
839 #if 0
840                         printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
841 #endif
842                         set_current_state(TASK_UNINTERRUPTIBLE);
843                         add_wait_queue(&cfi->chips[chipnum].wq, &wait);
844
845                         cfi_spin_unlock(cfi->chips[chipnum].mutex);
846
847                         schedule();
848                         remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
849 #if 0
850                         if(signal_pending(current))
851                                 return -EINTR;
852 #endif
853                         goto retry;
854                 }
855
856                 /* Load 'tmp_buf' with old contents of flash */
857                 tmp_buf = map_read(map, bus_ofs+chipstart);
858
859                 cfi_spin_unlock(cfi->chips[chipnum].mutex);
860
861                 /* Number of bytes to copy from buffer */
862                 n = min_t(int, len, map_bankwidth(map)-i);
863                 
864                 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
865
866                 ret = do_write_oneword(map, &cfi->chips[chipnum], 
867                                        bus_ofs, tmp_buf);
868                 if (ret) 
869                         return ret;
870                 
871                 ofs += n;
872                 buf += n;
873                 (*retlen) += n;
874                 len -= n;
875
876                 if (ofs >> cfi->chipshift) {
877                         chipnum ++; 
878                         ofs = 0;
879                         if (chipnum == cfi->numchips)
880                                 return 0;
881                 }
882         }
883         
884         /* We are now aligned, write as much as possible */
885         while(len >= map_bankwidth(map)) {
886                 map_word datum;
887
888                 datum = map_word_load(map, buf);
889
890                 ret = do_write_oneword(map, &cfi->chips[chipnum],
891                                        ofs, datum);
892                 if (ret)
893                         return ret;
894
895                 ofs += map_bankwidth(map);
896                 buf += map_bankwidth(map);
897                 (*retlen) += map_bankwidth(map);
898                 len -= map_bankwidth(map);
899
900                 if (ofs >> cfi->chipshift) {
901                         chipnum ++; 
902                         ofs = 0;
903                         if (chipnum == cfi->numchips)
904                                 return 0;
905                         chipstart = cfi->chips[chipnum].start;
906                 }
907         }
908
909         /* Write the trailing bytes if any */
910         if (len & (map_bankwidth(map)-1)) {
911                 map_word tmp_buf;
912
913  retry1:
914                 cfi_spin_lock(cfi->chips[chipnum].mutex);
915
916                 if (cfi->chips[chipnum].state != FL_READY) {
917 #if 0
918                         printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
919 #endif
920                         set_current_state(TASK_UNINTERRUPTIBLE);
921                         add_wait_queue(&cfi->chips[chipnum].wq, &wait);
922
923                         cfi_spin_unlock(cfi->chips[chipnum].mutex);
924
925                         schedule();
926                         remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
927 #if 0
928                         if(signal_pending(current))
929                                 return -EINTR;
930 #endif
931                         goto retry1;
932                 }
933
934                 tmp_buf = map_read(map, ofs + chipstart);
935
936                 cfi_spin_unlock(cfi->chips[chipnum].mutex);
937
938                 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
939         
940                 ret = do_write_oneword(map, &cfi->chips[chipnum], 
941                                 ofs, tmp_buf);
942                 if (ret) 
943                         return ret;
944                 
945                 (*retlen) += len;
946         }
947
948         return 0;
949 }
950
951
952 /*
953  * FIXME: interleaved mode not tested, and probably not supported!
954  */
/* Program up to one write-buffer's worth of data ('len' bytes, a
 * multiple of the bus width) starting at device offset 'adr', using
 * the AMD "Write to Buffer" (0x25 .. 0x29) command sequence.
 * Returns 0 on success or -EIO on software timeout. */
static inline int do_write_buffer(struct map_info *map, struct flchip *chip, 
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeo. */
	static unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	ENABLE_VPP(map);
	/* Unlock cycles; the single-word program cycle (0xA0) is replaced
	 * by the buffered-write command below, hence it stays disabled. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	/* 'adr' now points at the last word written: completion is
	 * polled there via chip_ready() below. */
	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	/* Drop the lock and give the operation its typical duration
	 * before starting to poll. */
	cfi_spin_unlock(chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	cfi_spin_lock(chip->mutex);

	timeo = jiffies + uWriteTimeout; 
		
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			cfi_spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			cfi_spin_lock(chip->mutex);
			continue;
		}

		if (chip_ready(map, adr))
			goto op_done;
		    
		if( time_after(jiffies, timeo))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		cfi_spin_unlock(chip->mutex);
		cfi_udelay(1);
		cfi_spin_lock(chip->mutex);
	}

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	/* FIXME - should have reset delay before continuing */

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	cfi_spin_unlock(chip->mutex);

	return ret;
}
1060
1061
1062 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1063                                     size_t *retlen, const u_char *buf)
1064 {
1065         struct map_info *map = mtd->priv;
1066         struct cfi_private *cfi = map->fldrv_priv;
1067         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1068         int ret = 0;
1069         int chipnum;
1070         unsigned long ofs;
1071
1072         *retlen = 0;
1073         if (!len)
1074                 return 0;
1075
1076         chipnum = to >> cfi->chipshift;
1077         ofs = to  - (chipnum << cfi->chipshift);
1078
1079         /* If it's not bus-aligned, do the first word write */
1080         if (ofs & (map_bankwidth(map)-1)) {
1081                 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1082                 if (local_len > len)
1083                         local_len = len;
1084                 ret = cfi_amdstd_write_words(mtd, to, local_len,
1085                                                retlen, buf);
1086                 if (ret)
1087                         return ret;
1088                 ofs += local_len;
1089                 buf += local_len;
1090                 len -= local_len;
1091
1092                 if (ofs >> cfi->chipshift) {
1093                         chipnum ++;
1094                         ofs = 0;
1095                         if (chipnum == cfi->numchips)
1096                                 return 0;
1097                 }
1098         }
1099
1100         /* Write buffer is worth it only if more than one word to write... */
1101         while (len >= map_bankwidth(map) * 2) {
1102                 /* We must not cross write block boundaries */
1103                 int size = wbufsize - (ofs & (wbufsize-1));
1104
1105                 if (size > len)
1106                         size = len;
1107                 if (size % map_bankwidth(map))
1108                         size -= size % map_bankwidth(map);
1109
1110                 ret = do_write_buffer(map, &cfi->chips[chipnum], 
1111                                       ofs, buf, size);
1112                 if (ret)
1113                         return ret;
1114
1115                 ofs += size;
1116                 buf += size;
1117                 (*retlen) += size;
1118                 len -= size;
1119
1120                 if (ofs >> cfi->chipshift) {
1121                         chipnum ++; 
1122                         ofs = 0;
1123                         if (chipnum == cfi->numchips)
1124                                 return 0;
1125                 }
1126         }
1127
1128         if (len) {
1129                 size_t retlen_dregs = 0;
1130
1131                 ret = cfi_amdstd_write_words(mtd, to, len, &retlen_dregs, buf);
1132
1133                 *retlen += retlen_dregs;
1134                 return ret;
1135         }
1136
1137         return 0;
1138 }
1139
1140
1141 /*
1142  * Handle devices with one erase region, that only implement
1143  * the chip erase command.
1144  */
/* Erase one whole chip using the six-cycle chip-erase command
 * sequence (final cycle 0x10).  Only used for devices with a single
 * erase region.  Returns 0 on success or -EIO on software timeout. */
static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	ENABLE_VPP(map);
	/* Unlock, erase setup (0x80), unlock again, chip erase (0x10). */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	/* Sleep for roughly half the typical erase time before polling. */
	cfi_spin_unlock(chip->mutex);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout((chip->erase_time*HZ)/(2*1000));
	cfi_spin_lock(chip->mutex);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			cfi_spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			cfi_spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			goto op_done;

		if (time_after(jiffies, timeo))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		cfi_spin_unlock(chip->mutex);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
		cfi_spin_lock(chip->mutex);
	}

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	/* FIXME - should have reset delay before continuing */

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	cfi_spin_unlock(chip->mutex);

	return ret;
}
1230
1231
/* Per-block operation (erase, lock, unlock, print status, ...)
 * applied to each erase block in a range by cfi_amdstd_varsize_frob().
 * 'thunk' carries operation-specific data; NULL where unused. */
typedef int (*frob_t)(struct map_info *map, struct flchip *chip,
		      unsigned long adr, void *thunk);
1234
1235
/* Apply 'frob' to every erase block in [ofs, ofs+len), after checking
 * that both ends of the range are aligned to the erase size in effect
 * at those addresses (devices may have several erase-region sizes).
 * Returns -EINVAL on a bad range, the first error from 'frob', or 0. */
static int cfi_amdstd_varsize_frob(struct mtd_info *mtd, frob_t frob,
				   loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (ofs > mtd->size)
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of 
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/
	
	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this 
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;
	
	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i=first;

	while (len) {
		ret = (*frob)(map, &cfi->chips[chipnum], adr, thunk);
		
		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		/* Step to the next erase region once 'adr' reaches the
		 * end of the current one (compared modulo chip size,
		 * since 'adr' is a per-chip offset). */
		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;
			
			if (chipnum >= cfi->numchips)
			break;
		}
	}

	return 0;
}
1323
1324
/* Erase the single erase block at offset 'adr' within 'chip' using
 * the six-cycle sector-erase sequence (final cycle 0x30 to the block
 * address).  Driven over a range by cfi_amdstd_varsize_frob();
 * 'thunk' is unused.  Returns 0 on success or -EIO on timeout. */
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	ENABLE_VPP(map);
	/* Unlock, erase setup (0x80), unlock again, sector erase (0x30). */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;
	
	/* Sleep for roughly half the typical erase time before polling. */
	cfi_spin_unlock(chip->mutex);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout((chip->erase_time*HZ)/(2*1000));
	cfi_spin_lock(chip->mutex);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			cfi_spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			cfi_spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			goto op_done;

		if (time_after(jiffies, timeo))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		cfi_spin_unlock(chip->mutex);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
		cfi_spin_lock(chip->mutex);
	}
	
	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );
	
	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	/* FIXME - should have reset delay before continuing */

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	cfi_spin_unlock(chip->mutex);
	return ret;
}
1408
1409
1410 int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1411 {
1412         unsigned long ofs, len;
1413         int ret;
1414
1415         ofs = instr->addr;
1416         len = instr->len;
1417
1418         ret = cfi_amdstd_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1419         if (ret)
1420                 return ret;
1421
1422         instr->state = MTD_ERASE_DONE;
1423         mtd_erase_callback(instr);
1424         
1425         return 0;
1426 }
1427
1428
1429 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1430 {
1431         struct map_info *map = mtd->priv;
1432         struct cfi_private *cfi = map->fldrv_priv;
1433         int ret = 0;
1434
1435         if (instr->addr != 0)
1436                 return -EINVAL;
1437
1438         if (instr->len != mtd->size)
1439                 return -EINVAL;
1440
1441         ret = do_erase_chip(map, &cfi->chips[0]);
1442         if (ret)
1443                 return ret;
1444
1445         instr->state = MTD_ERASE_DONE;
1446         mtd_erase_callback(instr);
1447         
1448         return 0;
1449 }
1450
1451
/* Quiesce the device: wait for every chip to reach an idle state and
 * park it in FL_SYNCING so no new operation can start, then release
 * all chips back to their previous states. */
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		cfi_spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change - 
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through - chip is now syncing */
		case FL_SYNCING:
			cfi_spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);
			
			cfi_spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);
			
			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);
		
		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		cfi_spin_unlock(chip->mutex);
	}
}
1510
1511
/* Power-management suspend hook: park every idle chip in
 * FL_PM_SUSPENDED.  If any chip is busy, roll back the chips already
 * suspended and return -EAGAIN so the caller can retry later. */
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change - 
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through - chip is now suspended */
		case FL_PM_SUSPENDED:
			break;

		default:
			/* Chip busy: abort and undo below. */
			ret = -EAGAIN;
			break;
		}
		cfi_spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			cfi_spin_lock(chip->mutex);
		
			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			cfi_spin_unlock(chip->mutex);
		}
	}
	
	return ret;
}
1564
1565
1566 static void cfi_amdstd_resume(struct mtd_info *mtd)
1567 {
1568         struct map_info *map = mtd->priv;
1569         struct cfi_private *cfi = map->fldrv_priv;
1570         int i;
1571         struct flchip *chip;
1572
1573         for (i=0; i<cfi->numchips; i++) {
1574         
1575                 chip = &cfi->chips[i];
1576
1577                 cfi_spin_lock(chip->mutex);
1578                 
1579                 if (chip->state == FL_PM_SUSPENDED) {
1580                         chip->state = FL_READY;
1581                         map_write(map, CMD(0xF0), chip->start);
1582                         wake_up(&chip->wq);
1583                 }
1584                 else
1585                         printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1586
1587                 cfi_spin_unlock(chip->mutex);
1588         }
1589 }
1590
1591
1592 #ifdef DEBUG_LOCK_BITS
1593
/* Debug helper (DEBUG_LOCK_BITS only): read and print the lock status
 * for the erase block at 'adr' via a 0x90 query, then leave query mode
 * with 0xff.  Always returns 0 so cfi_amdstd_varsize_frob() can drive
 * it across a whole range.  'thunk' is unused. */
static int do_printlockstatus_oneblock(struct map_info *map,
				       struct flchip *chip,
				       unsigned long adr,
				       void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* Byte stride of one query location on the interleaved bus. */
	int ofs_factor = cfi->interleave * cfi->device_type;

	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	
	return 0;
}
1609
1610
1611 #define debug_dump_locks(mtd, frob, ofs, len, thunk) \
1612         cfi_amdstd_varsize_frob((mtd), (frob), (ofs), (len), (thunk))
1613
1614 #else
1615
1616 #define debug_dump_locks(...)
1617
1618 #endif /* DEBUG_LOCK_BITS */
1619
1620
/* Argument bundle for do_xxlock_oneblock(): the value written to the
 * block lock register and the chip state to hold while writing it. */
struct xxlock_thunk {
	uint8_t val;
	flstate_t state;
};


/* Compound-literal thunks for the two supported operations. */
#define DO_XXLOCK_ONEBLOCK_LOCK   ((struct xxlock_thunk){0x01, FL_LOCKING})
#define DO_XXLOCK_ONEBLOCK_UNLOCK ((struct xxlock_thunk){0x00, FL_UNLOCKING})
1629
1630
1631 /*
1632  * FIXME - this is *very* specific to a particular chip.  It likely won't
1633  * work for all chips that require unlock.  It also hasn't been tested
1634  * with interleaved chips.
1635  */
/* Lock or unlock a single erase block by writing to its lock-block
 * register.  'thunk' is a struct xxlock_thunk selecting the value and
 * the chip state to hold.  Driven by cfi_amdstd_varsize_frob(). */
static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct xxlock_thunk *xxlt = (struct xxlock_thunk *)thunk;
	int ret;

	/*
	 * This is easy because these are writes to registers and not writes
	 * to flash memory - that means that we don't have to check status
	 * and timeout.
	 */

	adr += chip->start;
	/*
	 * lock block registers:
	 * - on 64k boundaries and
	 * - bit 1 set high
	 * - block lock registers are 4MiB lower - overflow subtract (danger)
	 *
	 * NOTE(review): this address math is chip-specific (see FIXME
	 * above) - confirm against the target part's datasheet.
	 */
	adr = ((adr & ~0xffff) | 0x2) + ~0x3fffff;

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	/* A single register write performs the (un)lock; no status poll. */
	chip->state = xxlt->state;
	map_write(map, CMD(xxlt->val), adr);
	
	/* Done and happy. */
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	cfi_spin_unlock(chip->mutex);
	return 0;
}
1673
1674
1675 static int cfi_amdstd_lock_varsize(struct mtd_info *mtd,
1676                                    loff_t ofs,
1677                                    size_t len)
1678 {
1679         int ret;
1680
1681         DEBUG(MTD_DEBUG_LEVEL3,
1682               "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
1683               __func__, ofs, len);
1684         debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
1685
1686         ret = cfi_amdstd_varsize_frob(mtd, do_xxlock_oneblock, ofs, len,
1687                                       (void *)&DO_XXLOCK_ONEBLOCK_LOCK);
1688         
1689         DEBUG(MTD_DEBUG_LEVEL3,
1690               "%s: lock status after, ret=%d\n",
1691               __func__, ret);
1692
1693         debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
1694
1695         return ret;
1696 }
1697
1698
1699 static int cfi_amdstd_unlock_varsize(struct mtd_info *mtd,
1700                                      loff_t ofs,
1701                                      size_t len)
1702 {
1703         int ret;
1704
1705         DEBUG(MTD_DEBUG_LEVEL3,
1706               "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
1707               __func__, ofs, len);
1708         debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
1709
1710         ret = cfi_amdstd_varsize_frob(mtd, do_xxlock_oneblock, ofs, len,
1711                                       (void *)&DO_XXLOCK_ONEBLOCK_UNLOCK);
1712         
1713         DEBUG(MTD_DEBUG_LEVEL3,
1714               "%s: lock status after, ret=%d\n",
1715               __func__, ret);
1716         debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
1717         
1718         return ret;
1719 }
1720
1721
1722 static void cfi_amdstd_destroy(struct mtd_info *mtd)
1723 {
1724         struct map_info *map = mtd->priv;
1725         struct cfi_private *cfi = map->fldrv_priv;
1726         kfree(cfi->cmdset_priv);
1727         kfree(cfi->cfiq);
1728         kfree(cfi);
1729         kfree(mtd->eraseregions);
1730 }
1731
1732 static char im_name[]="cfi_cmdset_0002";
1733
1734
/*
 * Module init: publish the 0x0002 command-set probe function under
 * "cfi_cmdset_0002" so the generic CFI probe code can find it via the
 * inter_module mechanism.  Always succeeds.
 */
int __init cfi_amdstd_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
	return 0;
}
1740
1741
/* Module exit: withdraw the "cfi_cmdset_0002" inter_module registration. */
static void __exit cfi_amdstd_exit(void)
{
	inter_module_unregister(im_name);
}
1746
1747
1748 module_init(cfi_amdstd_init);
1749 module_exit(cfi_amdstd_exit);
1750
1751 MODULE_LICENSE("GPL");
1752 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1753 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");