vserver 1.9.3
[linux-2.6.git] / drivers / mtd / chips / cfi_cmdset_0001.c
1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * $Id: cfi_cmdset_0001.c,v 1.154 2004/08/09 13:19:43 dwmw2 Exp $
8  *
9  * 
10  * 10/10/2000   Nicolas Pitre <nico@cam.org>
11  *      - completely revamped method functions so they are aware and
12  *        independent of the flash geometry (buswidth, interleave, etc.)
13  *      - scalability vs code size is completely set at compile-time
14  *        (see include/linux/mtd/cfi.h for selection)
15  *      - optimized write buffer method
16  * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17  *      - reworked lock/unlock/erase support for var size flash
18  */
19
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
27
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/mtd/map.h>
33 #include <linux/mtd/mtd.h>
34 #include <linux/mtd/compatmac.h>
35 #include <linux/mtd/cfi.h>
36
37 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
38
39 // debugging, turns off buffer write mode if set to 1
40 #define FORCE_WORD_WRITE 0
41
42 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
43 //static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
44 //static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
45 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
46 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
47 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
48 static void cfi_intelext_sync (struct mtd_info *);
49 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
50 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
51 static int cfi_intelext_suspend (struct mtd_info *);
52 static void cfi_intelext_resume (struct mtd_info *);
53
54 static void cfi_intelext_destroy(struct mtd_info *);
55
56 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
57
58 static struct mtd_info *cfi_intelext_setup (struct map_info *);
59 static int cfi_intelext_partition_fixup(struct map_info *, struct cfi_private **);
60
61 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
62                      size_t *retlen, u_char **mtdbuf);
63 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
64                         size_t len);
65
66
67 /*
68  *  *********** SETUP AND PROBE BITS  ***********
69  */
70
/* Chip-driver registration record for the Intel/Sharp extended command
 * set.  .probe is NULL because this driver is never probed directly:
 * the generic CFI probe identifies the chip and then calls
 * cfi_cmdset_0001() to attach us. */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};
77
78 /* #define DEBUG_LOCK_BITS */
79 /* #define DEBUG_CFI_FEATURES */
80
81 #ifdef DEBUG_CFI_FEATURES
/* Debug helper: decode and print the Intel extended query (PRI) table
 * bit-by-bit.  Known feature bits get a name; any bit set beyond the
 * documented ones is reported as "Unknown Bit N".  Output only, no
 * side effects.  Compiled in only when DEBUG_CFI_FEATURES is defined. */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	/* Bits 0-9 decoded above; anything else is undocumented. */
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i)) 
			printk("     - Unknown Bit %X:      supported\n", i);
	}
	
	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	/* Only bit 0 is defined for SuspendCmdSupport. */
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}
	
	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	/* Bits 0-1 decoded above. */
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	
	/* Voltages are BCD-ish: high nibble = volts, low nibble = tenths. */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", 
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", 
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
122 #endif
123
124 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
125 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 
126 static void fixup_intel_strataflash(struct map_info *map, void* param)
127 {
128         struct cfi_private *cfi = map->fldrv_priv;
129         struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
130
131         printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
132                             "erase on write disabled.\n");
133         extp->SuspendCmdSupport &= ~1;
134 }
135 #endif
136
/* Chip quirk for ST M28W320CT: it advertises a buffer-write timeout in
 * its CFI table but buffered writes don't actually work.  Zeroing the
 * timeouts makes cfi_intelext_setup() fall back to word writes. */
static void fixup_st_m28w320ct(struct map_info *map, void* param)
{
	struct cfi_private *cfi = map->fldrv_priv;
	
	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}
144
145 static void fixup_st_m28w320cb(struct map_info *map, void* param)
146 {
147         struct cfi_private *cfi = map->fldrv_priv;
148         
149         /* Note this is done after the region info is endian swapped */
150         cfi->cfiq->EraseRegionInfo[1] =
151                 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
152 };
153
/* Table of per-chip quirk handlers, matched by (manufacturer, device)
 * ID and applied by cfi_fixup() during probe.  CFI_MFR_ANY/CFI_ID_ANY
 * entries match everything; the all-zero entry terminates the table. */
static struct cfi_fixup fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{
		CFI_MFR_ANY, CFI_ID_ANY,
		fixup_intel_strataflash, NULL
	}, 
#endif
	{
		0x0020, /* STMicroelectronics */
		0x00ba, /* M28W320CT */
		fixup_st_m28w320ct, NULL
	}, {
		0x0020, /* STMicroelectronics */
		0x00bb, /* M28W320CB */
		fixup_st_m28w320cb, NULL
	}, {
		0, 0, NULL, NULL	/* terminator */
	}
};
173
174 /* This routine is made available to other mtd code via
175  * inter_module_register.  It must only be accessed through
176  * inter_module_get which will bump the use count of this module.  The
177  * addresses passed back in cfi are valid as long as the use count of
178  * this module is non-zero, i.e. between inter_module_get and
179  * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
180  */
/* Entry point for the Intel/Sharp extended command set (0x0001).
 * Reads the extended query (PRI) table from a real CFI chip, applies
 * per-chip fixups, seeds per-chip timing values from the CFI timeout
 * fields, and hands off to cfi_intelext_setup() to build the mtd_info.
 * Returns the new mtd_info, or NULL if the PRI table can't be read. */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/* 
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "Intel/Sharp");
		if (!extp)
			return NULL;
		
		/* Do some byteswapping if necessary */
		extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
		extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;	

		/* Apply any (mfr, id)-matched quirks; must run after the
		   byteswap above so fixups see host-endian fields. */
		cfi_fixup(map, fixup_table);
			
#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif	

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}

	/* CFI timeout fields are log2(time); convert to linear values
	   used as wait budgets by the chip-access routines. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}		

	map->fldrv = &cfi_intelext_chipdrv;
	
	return cfi_intelext_setup(map);
}
230
/* Build and populate the mtd_info for the probed chip set: size and
 * erase-region geometry (per chip, per region), method pointers
 * (read/write/erase/lock/...), and finally the multi-partition fixup.
 * Returns the mtd_info, or NULL on any failure (allocations freed,
 * including cfi->cmdset_priv). */
static struct mtd_info *cfi_intelext_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	/* DevSize is log2(bytes per chip); scale by interleave factor. */
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		goto setup_err;
	}

	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) { 
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}
	
	/* Decode each CFI erase-region descriptor: high 16 bits encode
	   block size in units of 256 bytes, low 16 bits are count-1. */
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize reports the largest block size present. */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		/* Replicate the region layout for every chip in the set. */
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	/* Sanity check: the regions must exactly tile one chip. */
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Also select the correct geometry setup too */ 
	mtd->erase = cfi_intelext_erase_varsize;
	mtd->read = cfi_intelext_read;

	/* point/unpoint only make sense when the flash is directly
	   memory-mapped (linear map). */
	if (map_is_linear(map)) {
		mtd->point = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}

	/* A zero BufWriteTimeoutTyp means the chip has no write buffer
	   (or a fixup disabled it) -- use single-word programming. */
	if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
	} else {
		printk(KERN_INFO "Using word write method\n" );
		mtd->write = cfi_intelext_write_words;
	}
#if 0
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
#endif
	mtd->sync = cfi_intelext_sync;
	mtd->lock = cfi_intelext_lock;
	mtd->unlock = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume = cfi_intelext_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	map->fldrv = &cfi_intelext_chipdrv;
	mtd->name = map->name;

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(map, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}
335
/* If the chip supports simultaneous operations (FeatureSupport bit 9,
 * assumed to be an Intel L18), rebuild cfi_private so each 8-Mbit
 * hardware partition gets its own virtual flchip.  All partitions of
 * one physical chip share a flchip_shared for write/erase arbitration
 * (see get_chip).  On success *pcfi and map->fldrv_priv point at the
 * new structure and the old one is freed.  Returns 0 or -ENOMEM. */
static int cfi_intelext_partition_fixup(struct map_info *map,
                                        struct cfi_private **pcfi)
{
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash ships.
	 *
	 * This is extremely crude at the moment and should probably be
	 * extracted entirely from the Intel extended query data instead.
	 * Right now a L18 flash is assumed if multiple operations is
	 * detected.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatent code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int numparts, partshift, numvirtchips, i, j;

		/*
		 * The L18 flash memory array is divided
		 * into multiple 8-Mbit partitions.
		 */
		numparts = 1 << (cfi->cfiq->DevSize - 20);
		partshift = 20 + __ffs(cfi->interleave);
		numvirtchips = cfi->numchips * numparts;

		/* cfi_private has the flchip array tacked on the end. */
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		/* Clone each physical chip into one flchip per partition;
		   all clones of a chip point at the same shared struct. */
		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d sets of %d interleaved chips "
		                  "--> %d partitions of %#x bytes\n",
		                  map->name, cfi->numchips, cfi->interleave,
		                  newcfi->numchips, 1<<newcfi->chipshift);

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
413
414 /*
415  *  *********** CHIP ACCESS FUNCTIONS ***********
416  */
417
/* Acquire the chip (or virtual partition) for an operation of the given
 * mode (FL_READY, FL_POINT, FL_WRITING, FL_ERASING, ...).
 * Called and returns with chip->mutex held; may drop and retake it
 * while waiting.  Handles cross-partition arbitration via chip->priv
 * (flchip_shared) and suspends an in-progress erase when the extended
 * query table says the chip allows it.  Returns 0 when the caller may
 * proceed, or -EIO if the chip never becomes ready. */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	/* status_OK = "WSM ready" bit; status_PWS = partition write status. */
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
		/*
		 * OK. We have possibility for contension on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contension arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			/* Recurse on the current owner so any suspend happens
			   in its context; lock ordering: drop ours first. */
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n", 
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		/* fall through: chip is ready, treat like FL_READY */
				
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Only suspend the erase if the chip supports it (feature
		   bit 1) and the requested mode is compatible with an
		   erase-suspended state. */
		if (!(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.  */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* fall through: otherwise wait like any other busy state */

	default:
	sleep:
		/* Chip is busy with something we can't preempt: sleep on
		   its wait queue until put_chip() wakes us, then restart. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
578
/* Release the chip acquired by get_chip().  Called with chip->mutex
 * held.  Hands write/erase ownership back to a suspended partition if
 * one is waiting, resumes a suspended erase (oldstate == FL_ERASING),
 * and wakes anyone sleeping on the chip's wait queue. */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
			} else {
				/* Nobody to hand back to: clear ownership
				   unless our own suspended operation still
				   needs it. */
				if (chip->oldstate != FL_ERASING) {
					shared->erasing = NULL;
					if (chip->oldstate != FL_WRITING)
						shared->writing = NULL;
				}
				spin_unlock(&shared->lock);
			}
		} else {
			spin_unlock(&shared->lock);
		}
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the 
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS 
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow 
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we 
		   do. */
		map_write(map, CMD(0xd0), adr);	/* erase resume */
		map_write(map, CMD(0x70), adr);	/* read status */
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
640
641 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
642 {
643         unsigned long cmd_addr;
644         struct cfi_private *cfi = map->fldrv_priv;
645         int ret = 0;
646
647         adr += chip->start;
648
649         /* Ensure cmd read/writes are aligned. */ 
650         cmd_addr = adr & ~(map_bankwidth(map)-1); 
651
652         spin_lock(chip->mutex);
653
654         ret = get_chip(map, chip, cmd_addr, FL_POINT);
655
656         if (!ret) {
657                 if (chip->state != FL_POINT && chip->state != FL_READY)
658                         map_write(map, CMD(0xff), cmd_addr);
659
660                 chip->state = FL_POINT;
661                 chip->ref_point_counter++;
662         }
663         spin_unlock(chip->mutex);
664
665         return ret;
666 }
667
668 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
669 {
670         struct map_info *map = mtd->priv;
671         struct cfi_private *cfi = map->fldrv_priv;
672         unsigned long ofs;
673         int chipnum;
674         int ret = 0;
675
676         if (!map->virt || (from + len > mtd->size))
677                 return -EINVAL;
678         
679         *mtdbuf = (void *)map->virt + from;
680         *retlen = 0;
681
682         /* Now lock the chip(s) to POINT state */
683
684         /* ofs: offset within the first chip that the first read should start */
685         chipnum = (from >> cfi->chipshift);
686         ofs = from - (chipnum << cfi->chipshift);
687
688         while (len) {
689                 unsigned long thislen;
690
691                 if (chipnum >= cfi->numchips)
692                         break;
693
694                 if ((len + ofs -1) >> cfi->chipshift)
695                         thislen = (1<<cfi->chipshift) - ofs;
696                 else
697                         thislen = len;
698
699                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
700                 if (ret)
701                         break;
702
703                 *retlen += thislen;
704                 len -= thislen;
705                 
706                 ofs = 0;
707                 chipnum++;
708         }
709         return 0;
710 }
711
712 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
713 {
714         struct map_info *map = mtd->priv;
715         struct cfi_private *cfi = map->fldrv_priv;
716         unsigned long ofs;
717         int chipnum;
718
719         /* Now unlock the chip(s) POINT state */
720
721         /* ofs: offset within the first chip that the first read should start */
722         chipnum = (from >> cfi->chipshift);
723         ofs = from - (chipnum <<  cfi->chipshift);
724
725         while (len) {
726                 unsigned long thislen;
727                 struct flchip *chip;
728
729                 chip = &cfi->chips[chipnum];
730                 if (chipnum >= cfi->numchips)
731                         break;
732
733                 if ((len + ofs -1) >> cfi->chipshift)
734                         thislen = (1<<cfi->chipshift) - ofs;
735                 else
736                         thislen = len;
737
738                 spin_lock(chip->mutex);
739                 if (chip->state == FL_POINT) {
740                         chip->ref_point_counter--;
741                         if(chip->ref_point_counter == 0)
742                                 chip->state = FL_READY;
743                 } else
744                         printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
745
746                 put_chip(map, chip, chip->start);
747                 spin_unlock(chip->mutex);
748
749                 len -= thislen;
750                 ofs = 0;
751                 chipnum++;
752         }
753 }
754
/* Read `len` bytes at chip-relative offset `adr` into `buf` from one
 * chip, claiming the chip lock and forcing read-array mode first if
 * needed. Returns 0 on success or the error from get_chip(). */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;	/* referenced by the CMD() macro below */
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */ 
	cmd_addr = adr & ~(map_bankwidth(map)-1); 

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* If the chip is neither pointed nor already in read-array mode,
	   issue Read Array (0xff) so the copy below reads data, not status */
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}
786
787 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
788 {
789         struct map_info *map = mtd->priv;
790         struct cfi_private *cfi = map->fldrv_priv;
791         unsigned long ofs;
792         int chipnum;
793         int ret = 0;
794
795         /* ofs: offset within the first chip that the first read should start */
796         chipnum = (from >> cfi->chipshift);
797         ofs = from - (chipnum <<  cfi->chipshift);
798
799         *retlen = 0;
800
801         while (len) {
802                 unsigned long thislen;
803
804                 if (chipnum >= cfi->numchips)
805                         break;
806
807                 if ((len + ofs -1) >> cfi->chipshift)
808                         thislen = (1<<cfi->chipshift) - ofs;
809                 else
810                         thislen = len;
811
812                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
813                 if (ret)
814                         break;
815
816                 *retlen += thislen;
817                 len -= thislen;
818                 buf += thislen;
819                 
820                 ofs = 0;
821                 chipnum++;
822         }
823         return ret;
824 }
825 #if 0
/* (Dead code: this function sits inside an #if 0 block.)
 * Common worker for reading factory/user protection registers: copies up
 * to `len` bytes starting at `from` (offset by base_offst within each
 * chip's register file of reg_sz bytes) into `buf`, chip by chip.
 * Returns the number of bytes read, or a negative error if the first
 * chip could not be claimed.
 * NOTE(review): `retlen` is accepted but never written — fix before
 * re-enabling this code. */
static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int count = len;
	int chip_num, offst;
	int ret;

	chip_num = ((unsigned int)from/reg_sz);
	offst = from - (reg_sz*chip_num)+base_offst;

	while (count) {
	/* Calculate which chip & protection register offset we need */

		if (chip_num >= cfi->numchips)
			goto out;

		chip = &cfi->chips[chip_num];
		
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
		if (ret) {
			spin_unlock(chip->mutex);
			/* GNU `?:` extension: bytes-read-so-far if any, else
			   propagate the error */
			return (len-count)?:ret;
		}

		/* Read Identifier/OTP mode (0x90) exposes the registers */
		if (chip->state != FL_JEDEC_QUERY) {
			map_write(map, CMD(0x90), chip->start);
			chip->state = FL_JEDEC_QUERY;
		}

		/* Copy bytes until the request or this chip's register file
		   is exhausted */
		while (count && ((offst-base_offst) < reg_sz)) {
			*buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
			buf++;
			offst++;
			count--;
		}

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		/* Move on to the next chip */
		chip_num++;
		offst = base_offst;
	}
	
 out:	
	return len-count;
}
878         
/* (Dead code: inside #if 0.) Read the user-programmable protection
 * register: the user area starts right after the factory area, so
 * base_offst is the factory register size. Returns 0 (with a warning)
 * if the chip advertises no protection registers. */
static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
	int base_offst,reg_sz;
	
	/* Check that we actually have some protection registers */
	/* FeatureSupport bit 6 (64) = protection registers present */
	if(!(extp->FeatureSupport&64)){
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst=(1<<extp->FactProtRegSize);
	reg_sz=(1<<extp->UserProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}
897
/* (Dead code: inside #if 0.) Read the factory-programmed protection
 * register, which sits at offset 0 of the register file. Returns 0
 * (with a warning) if the chip advertises no protection registers. */
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
	int base_offst,reg_sz;
	
	/* Check that we actually have some protection registers */
	/* FeatureSupport bit 6 (64) = protection registers present */
	if(!(extp->FeatureSupport&64)){
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst=0;
	reg_sz=(1<<extp->FactProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}
916 #endif
917
/* Program one bus-width word `datum` at chip-relative address `adr`.
 * Claims the chip, issues Word Program (0x40), waits for SR.7 (handling
 * write suspend/resume), and adaptively tunes chip->word_write_time.
 * Returns 0, -EIO on timeout, or -EROFS if the target block is locked. */
static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);	/* SR.7: write state machine ready */

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x40), adr);	/* Word Program setup */
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	/* Drop the lock for the expected programming latency */
	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	cfi_udelay(chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		
		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		cfi_udelay(1);
		spin_lock(chip->mutex);
	}
	/* Adaptive tuning: done on the first poll -> initial delay too long,
	   shorten it (never below 1); more than one extra poll -> too short,
	   lengthen it */
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1) 
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;
	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {	/* SR.1: block locked */
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}
 out:
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
1006
1007
1008 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1009 {
1010         struct map_info *map = mtd->priv;
1011         struct cfi_private *cfi = map->fldrv_priv;
1012         int ret = 0;
1013         int chipnum;
1014         unsigned long ofs;
1015
1016         *retlen = 0;
1017         if (!len)
1018                 return 0;
1019
1020         chipnum = to >> cfi->chipshift;
1021         ofs = to  - (chipnum << cfi->chipshift);
1022
1023         /* If it's not bus-aligned, do the first byte write */
1024         if (ofs & (map_bankwidth(map)-1)) {
1025                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1026                 int gap = ofs - bus_ofs;
1027                 int n;
1028                 map_word datum;
1029
1030                 n = min_t(int, len, map_bankwidth(map)-gap);
1031                 datum = map_word_ff(map);
1032                 datum = map_word_load_partial(map, datum, buf, gap, n);
1033
1034                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1035                                                bus_ofs, datum);
1036                 if (ret) 
1037                         return ret;
1038
1039                 len -= n;
1040                 ofs += n;
1041                 buf += n;
1042                 (*retlen) += n;
1043
1044                 if (ofs >> cfi->chipshift) {
1045                         chipnum ++; 
1046                         ofs = 0;
1047                         if (chipnum == cfi->numchips)
1048                                 return 0;
1049                 }
1050         }
1051         
1052         while(len >= map_bankwidth(map)) {
1053                 map_word datum = map_word_load(map, buf);
1054
1055                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1056                                 ofs, datum);
1057                 if (ret)
1058                         return ret;
1059
1060                 ofs += map_bankwidth(map);
1061                 buf += map_bankwidth(map);
1062                 (*retlen) += map_bankwidth(map);
1063                 len -= map_bankwidth(map);
1064
1065                 if (ofs >> cfi->chipshift) {
1066                         chipnum ++; 
1067                         ofs = 0;
1068                         if (chipnum == cfi->numchips)
1069                                 return 0;
1070                 }
1071         }
1072
1073         if (len & (map_bankwidth(map)-1)) {
1074                 map_word datum;
1075
1076                 datum = map_word_ff(map);
1077                 datum = map_word_load_partial(map, datum, buf, 0, len);
1078
1079                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1080                                                ofs, datum);
1081                 if (ret) 
1082                         return ret;
1083                 
1084                 (*retlen) += len;
1085         }
1086
1087         return 0;
1088 }
1089
1090
/* Program `len` bytes at chip-relative `adr` using the chip's write
 * buffer. The caller guarantees the span does not cross a write-buffer
 * boundary. Handles suspend/resume during programming and adaptively
 * tunes chip->buffer_write_time. Returns 0, -EIO on timeout or a chip
 * that refuses Write-to-Buffer, or -EROFS if the block is locked. */
static inline int do_write_buffer(struct map_info *map, struct flchip *chip, 
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);	/* commands target the buffer-aligned address */
	
	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);	/* SR.7: write state machine ready */

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands". 
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		map_write(map, CMD(0x50), cmd_adr);	/* Clear Status Register */
		map_write(map, CMD(0x70), cmd_adr);	/* back to Read Status mode */
	}

	ENABLE_VPP(map);
	chip->state = FL_WRITING_TO_BUFFER;

	/* Issue Write-to-Buffer (0xe8) until the chip reports ready;
	   give up after 20 attempts */
	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock(chip->mutex);
		cfi_udelay(1);
		spin_lock(chip->mutex);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
	/* word count is encoded as N-1; `words - !bytes` accounts for a
	   trailing partial word */
	map_write(map, CMD(words - !bytes), cmd_adr );

	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	/* Trailing partial word is padded with 0xff (no-op for flash) */
	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);	/* confirm: start programming */
	chip->state = FL_WRITING;

	/* Drop the lock for the expected programming latency */
	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	cfi_udelay(chip->buffer_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}
		
		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		cfi_udelay(1);
		z++;
		spin_lock(chip->mutex);
	}
	/* Adaptive tuning of the initial delay: shorten if the first poll
	   succeeded (never below 1), lengthen if we had to poll repeatedly */
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1) 
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {	/* SR.1: block locked */
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		/* NOTE(review): this targets `adr` while every other command
		   in this function targets `cmd_adr` — looks like it should
		   be cmd_adr; confirm against the datasheet before changing */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

 out:
	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
1246
1247 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, 
1248                                        size_t len, size_t *retlen, const u_char *buf)
1249 {
1250         struct map_info *map = mtd->priv;
1251         struct cfi_private *cfi = map->fldrv_priv;
1252         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1253         int ret = 0;
1254         int chipnum;
1255         unsigned long ofs;
1256
1257         *retlen = 0;
1258         if (!len)
1259                 return 0;
1260
1261         chipnum = to >> cfi->chipshift;
1262         ofs = to  - (chipnum << cfi->chipshift);
1263
1264         /* If it's not bus-aligned, do the first word write */
1265         if (ofs & (map_bankwidth(map)-1)) {
1266                 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1267                 if (local_len > len)
1268                         local_len = len;
1269                 ret = cfi_intelext_write_words(mtd, to, local_len,
1270                                                retlen, buf);
1271                 if (ret)
1272                         return ret;
1273                 ofs += local_len;
1274                 buf += local_len;
1275                 len -= local_len;
1276
1277                 if (ofs >> cfi->chipshift) {
1278                         chipnum ++;
1279                         ofs = 0;
1280                         if (chipnum == cfi->numchips)
1281                                 return 0;
1282                 }
1283         }
1284
1285         while(len) {
1286                 /* We must not cross write block boundaries */
1287                 int size = wbufsize - (ofs & (wbufsize-1));
1288
1289                 if (size > len)
1290                         size = len;
1291                 ret = do_write_buffer(map, &cfi->chips[chipnum], 
1292                                       ofs, buf, size);
1293                 if (ret)
1294                         return ret;
1295
1296                 ofs += size;
1297                 buf += size;
1298                 (*retlen) += size;
1299                 len -= size;
1300
1301                 if (ofs >> cfi->chipshift) {
1302                         chipnum ++; 
1303                         ofs = 0;
1304                         if (chipnum == cfi->numchips)
1305                                 return 0;
1306                 }
1307         }
1308         return 0;
1309 }
1310
1311 typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
1312                               unsigned long adr, int len, void *thunk);
1313
/* Apply `frob` (erase, lock, ... — see varsize_frob_t) to every erase
 * block in [ofs, ofs+len), after validating that both ends of the range
 * are aligned to the erase size of the region each falls in. Returns 0
 * or -EINVAL for a bad range, or the first error from the callback. */
static int cfi_intelext_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
				     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (ofs > mtd->size)
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of 
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/
	
	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this 
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;
	
	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	/* adr is chip-relative from here on */
	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i=first;

	while(len) {
		unsigned long chipmask;
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
		
		if (ret)
			return ret;

		adr += size;
		len -= size;

		/* Advance to the next erase region when we hit its start;
		   the comparison is chip-relative, hence the mask */
		chipmask = (1 << cfi->chipshift) - 1;
		if ((adr & chipmask) == ((regions[i].offset + size * regions[i].numblocks) & chipmask))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;
			
			if (chipnum >= cfi->numchips)
			break;
		}
	}

	return 0;
}
1405
1406
/* Erase the single erase block of `len` bytes at chip-relative address
 * `adr` (varsize_frob callback; `thunk` unused). Waits for completion
 * while tolerating erase suspend/resume, decodes status-register errors,
 * and retries a failed erase (SR.5) up to 3 times.
 * Returns 0, -EIO, -EROFS (block locked), or the error from get_chip(). */
static int do_erase_oneblock(struct map_info *map, struct flchip *chip,
			     unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);	/* SR.7: write state machine ready */

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);	/* Block Erase setup */
	map_write(map, CMD(0xD0), adr);	/* confirm */
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	/* Sleep for roughly half the nominal erase time before polling */
	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	msleep(chip->erase_time / 2);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		
		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %lx, status = %lx.\n",
			       adr, status.x[0], map_read(map, adr).x[0]);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			DISABLE_VPP(map);
			spin_unlock(chip->mutex);
			return -EIO;
		}
		
		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
		spin_lock(chip->mutex);
	}
	
	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	/* 0x3a = SR.5|SR.4|SR.3|SR.1: erase/program error, VPP low, locked */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];	/* truncated to low 8 bits */
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			/* NOTE(review): the shift below depends on neither `i`
			   nor `w`, so every iteration ORs in the same value —
			   statuses of interleaved devices beyond the first are
			   not really merged. Confirm intent before fixing. */
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		
		if ((chipstatus & 0x30) == 0x30) {
			/* SR.4 and SR.5 together: bad command sequence */
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			/* SR.5: erase failed — retry the whole block erase
			   (the `retry` label is after adr += chip->start, so
			   the address is not re-biased) */
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				spin_unlock(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);
	return ret;
}
1544
1545 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1546 {
1547         unsigned long ofs, len;
1548         int ret;
1549
1550         ofs = instr->addr;
1551         len = instr->len;
1552
1553         ret = cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1554         if (ret)
1555                 return ret;
1556
1557         instr->state = MTD_ERASE_DONE;
1558         mtd_erase_callback(instr);
1559         
1560         return 0;
1561 }
1562
1563 static void cfi_intelext_sync (struct mtd_info *mtd)
1564 {
1565         struct map_info *map = mtd->priv;
1566         struct cfi_private *cfi = map->fldrv_priv;
1567         int i;
1568         struct flchip *chip;
1569         int ret = 0;
1570
1571         for (i=0; !ret && i<cfi->numchips; i++) {
1572                 chip = &cfi->chips[i];
1573
1574                 spin_lock(chip->mutex);
1575                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1576
1577                 if (!ret) {
1578                         chip->oldstate = chip->state;
1579                         chip->state = FL_SYNCING;
1580                         /* No need to wake_up() on this state change - 
1581                          * as the whole point is that nobody can do anything
1582                          * with the chip now anyway.
1583                          */
1584                 }
1585                 spin_unlock(chip->mutex);
1586         }
1587
1588         /* Unlock the chips again */
1589
1590         for (i--; i >=0; i--) {
1591                 chip = &cfi->chips[i];
1592
1593                 spin_lock(chip->mutex);
1594                 
1595                 if (chip->state == FL_SYNCING) {
1596                         chip->state = chip->oldstate;
1597                         wake_up(&chip->wq);
1598                 }
1599                 spin_unlock(chip->mutex);
1600         }
1601 }
1602
1603 #ifdef DEBUG_LOCK_BITS
1604 static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip,
1605                                        unsigned long adr, int len, void *thunk)
1606 {
1607         struct cfi_private *cfi = map->fldrv_priv;
1608         int ofs_factor = cfi->interleave * cfi->device_type;
1609
1610         cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1611         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1612                adr, cfi_read_query(map, adr+(2*ofs_factor)));
1613         chip->state = FL_JEDEC_QUERY;
1614         return 0;
1615 }
1616 #endif
1617
1618 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1619 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1620
1621 static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1622                               unsigned long adr, int len, void *thunk)
1623 {
1624         struct cfi_private *cfi = map->fldrv_priv;
1625         map_word status, status_OK;
1626         unsigned long timeo = jiffies + HZ;
1627         int ret;
1628
1629         adr += chip->start;
1630
1631         /* Let's determine this according to the interleave only once */
1632         status_OK = CMD(0x80);
1633
1634         spin_lock(chip->mutex);
1635         ret = get_chip(map, chip, adr, FL_LOCKING);
1636         if (ret) {
1637                 spin_unlock(chip->mutex);
1638                 return ret;
1639         }
1640
1641         ENABLE_VPP(map);
1642         map_write(map, CMD(0x60), adr);
1643
1644         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1645                 map_write(map, CMD(0x01), adr);
1646                 chip->state = FL_LOCKING;
1647         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1648                 map_write(map, CMD(0xD0), adr);
1649                 chip->state = FL_UNLOCKING;
1650         } else
1651                 BUG();
1652
1653         spin_unlock(chip->mutex);
1654         schedule_timeout(HZ);
1655         spin_lock(chip->mutex);
1656
1657         /* FIXME. Use a timer to check this, and return immediately. */
1658         /* Once the state machine's known to be working I'll do that */
1659
1660         timeo = jiffies + (HZ*20);
1661         for (;;) {
1662
1663                 status = map_read(map, adr);
1664                 if (map_word_andequal(map, status, status_OK, status_OK))
1665                         break;
1666                 
1667                 /* OK Still waiting */
1668                 if (time_after(jiffies, timeo)) {
1669                         map_write(map, CMD(0x70), adr);
1670                         chip->state = FL_STATUS;
1671                         printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n",
1672                                status.x[0], map_read(map, adr).x[0]);
1673                         DISABLE_VPP(map);
1674                         spin_unlock(chip->mutex);
1675                         return -EIO;
1676                 }
1677                 
1678                 /* Latency issues. Drop the lock, wait a while and retry */
1679                 spin_unlock(chip->mutex);
1680                 cfi_udelay(1);
1681                 spin_lock(chip->mutex);
1682         }
1683         
1684         /* Done and happy. */
1685         chip->state = FL_STATUS;
1686         put_chip(map, chip, adr);
1687         spin_unlock(chip->mutex);
1688         return 0;
1689 }
1690
1691 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1692 {
1693         int ret;
1694
1695 #ifdef DEBUG_LOCK_BITS
1696         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1697                __FUNCTION__, ofs, len);
1698         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1699                                   ofs, len, 0);
1700 #endif
1701
1702         ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock, 
1703                                         ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1704         
1705 #ifdef DEBUG_LOCK_BITS
1706         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1707                __FUNCTION__, ret);
1708         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1709                                   ofs, len, 0);
1710 #endif
1711
1712         return ret;
1713 }
1714
1715 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1716 {
1717         int ret;
1718
1719 #ifdef DEBUG_LOCK_BITS
1720         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1721                __FUNCTION__, ofs, len);
1722         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1723                                   ofs, len, 0);
1724 #endif
1725
1726         ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1727                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1728         
1729 #ifdef DEBUG_LOCK_BITS
1730         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1731                __FUNCTION__, ret);
1732         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock, 
1733                                   ofs, len, 0);
1734 #endif
1735         
1736         return ret;
1737 }
1738
/*
 * Power-management suspend hook: park every idle chip in
 * FL_PM_SUSPENDED.  If any chip is mid-operation, fail with -EAGAIN
 * and roll back the chips already suspended.
 */
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	/* First pass: stop at the first chip that cannot be suspended. */
	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			/* Only suspend if no operation is itself suspended
			   (oldstate tracks a suspended erase/write). */
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change - 
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			}
			break;
		default:
			/* Chip busy with an operation: refuse to suspend. */
			ret = -EAGAIN;
			/* fallthrough */
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		/* Roll back: wake every chip we already suspended. */
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];
			
			spin_lock(chip->mutex);
			
			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	} 
	
	return ret;
}
1795
1796 static void cfi_intelext_resume(struct mtd_info *mtd)
1797 {
1798         struct map_info *map = mtd->priv;
1799         struct cfi_private *cfi = map->fldrv_priv;
1800         int i;
1801         struct flchip *chip;
1802
1803         for (i=0; i<cfi->numchips; i++) {
1804         
1805                 chip = &cfi->chips[i];
1806
1807                 spin_lock(chip->mutex);
1808                 
1809                 /* Go to known state. Chip may have been power cycled */
1810                 if (chip->state == FL_PM_SUSPENDED) {
1811                         map_write(map, CMD(0xFF), cfi->chips[i].start);
1812                         chip->state = FL_READY;
1813                         wake_up(&chip->wq);
1814                 }
1815
1816                 spin_unlock(chip->mutex);
1817         }
1818 }
1819
/*
 * Free everything allocated at probe time for this device.
 * kfree(NULL) is a no-op, so optional members that were never
 * allocated are safe to pass.  cfi's members are freed before cfi
 * itself.
 */
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
1830
/* Names under which the probe entry point is published via the
   inter_module mechanism; see cfi_intelext_init() below. */
static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";
1833
/* Module init: publish the probe routine for both command set IDs.
 * Registering im_name_3 with &cfi_cmdset_0001 is deliberate: command
 * set 0x0003 chips are handled by the same 0x0001 implementation. */
int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}
1840
/* Module exit: drop both registrations made in cfi_intelext_init(). */
static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}
1846
1847 module_init(cfi_intelext_init);
1848 module_exit(cfi_intelext_exit);
1849
1850 MODULE_LICENSE("GPL");
1851 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
1852 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");