VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
[linux-2.6.git] / drivers / mtd / chips / cfi_cmdset_0001.c
1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * $Id: cfi_cmdset_0001.c,v 1.154 2004/08/09 13:19:43 dwmw2 Exp $
8  *
9  * 
10  * 10/10/2000   Nicolas Pitre <nico@cam.org>
11  *      - completely revamped method functions so they are aware and
12  *        independent of the flash geometry (buswidth, interleave, etc.)
13  *      - scalability vs code size is completely set at compile-time
14  *        (see include/linux/mtd/cfi.h for selection)
15  *      - optimized write buffer method
16  * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17  *      - reworked lock/unlock/erase support for var size flash
18  */
19
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
27
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/mtd/map.h>
33 #include <linux/mtd/mtd.h>
34 #include <linux/mtd/compatmac.h>
35 #include <linux/mtd/cfi.h>
36
37 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
38
39 // debugging, turns off buffer write mode if set to 1
40 #define FORCE_WORD_WRITE 0
41
/* Forward declarations: the mtd_info method implementations installed
 * by cfi_intelext_setup() below, plus internal setup helpers. */
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
//static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
//static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct map_info *);
static int cfi_intelext_partition_fixup(struct map_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);
66
67 /*
68  *  *********** SETUP AND PROBE BITS  ***********
69  */
70
/* Chip driver registration record for the Intel/Sharp extended command
 * set.  .probe is NULL because chips are attached via cfi_cmdset_0001()
 * rather than by direct probing through this structure. */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};
77
78 /* #define DEBUG_LOCK_BITS */
79 /* #define DEBUG_CFI_FEATURES */
80
#ifdef DEBUG_CFI_FEATURES
/*
 * Debug aid: pretty-print the Intel extended query structure read from
 * the chip.  Compiled in only when DEBUG_CFI_FEATURES is defined.
 */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        /* FeatureSupport: one capability per bit; bits 0-9 are known,
           anything above is reported as "Unknown Bit". */
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        for (i=10; i<32; i++) {
                if (extp->FeatureSupport & (1<<i)) 
                        printk("     - Unknown Bit %X:      supported\n", i);
        }
        
        /* SuspendCmdSupport: bit 0 known, bits 1-7 reported raw */
        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }
        
        /* BlkStatusRegMask: bits 0-1 known, bits 2-15 reported raw */
        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        
        /* Voltages are packed: high nibble = volts, low nibble = tenths */
        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", 
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", 
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
123
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 
static void fixup_intel_strataflash(struct map_info *map, void* param)
{
        struct cfi_private *cfi = map->fldrv_priv;
        /* BUG FIX: cmdset_priv is installed by cfi_cmdset_0001() as a
         * struct cfi_pri_intelext (the Intel extended query table), not
         * the AMD-cmdset cfi_pri_amdstd this code previously cast it to.
         * Both structs happen to have a SuspendCmdSupport member, which
         * is why the wrong cast compiled, but the field offsets differ,
         * so the old code could clear a bit in the wrong byte. */
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        /* Clear bit 0: "program allowed during erase suspend" */
        extp->SuspendCmdSupport &= ~1;
}
#endif
136
137 static void fixup_st_m28w320ct(struct map_info *map, void* param)
138 {
139         struct cfi_private *cfi = map->fldrv_priv;
140         
141         cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
142         cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
143 }
144
145 static void fixup_st_m28w320cb(struct map_info *map, void* param)
146 {
147         struct cfi_private *cfi = map->fldrv_priv;
148         
149         /* Note this is done after the region info is endian swapped */
150         cfi->cfiq->EraseRegionInfo[1] =
151                 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
152 };
153
/* Per-chip workarounds applied by cfi_fixup() during cfi_cmdset_0001().
 * Entries match on (manufacturer ID, device ID); CFI_MFR_ANY/CFI_ID_ANY
 * wildcards match everything.  The all-zero entry terminates the table. */
static struct cfi_fixup fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        {
                CFI_MFR_ANY, CFI_ID_ANY,
                fixup_intel_strataflash, NULL
        }, 
#endif
        {
                0x0020, /* STMicroelectronics */
                0x00ba, /* M28W320CT */
                fixup_st_m28w320ct, NULL
        }, {
                0x0020, /* STMicroelectronics */
                0x00bb, /* M28W320CB */
                fixup_st_m28w320cb, NULL
        }, {
                0, 0, NULL, NULL
        }
};
173
/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        int i;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /* 
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "Intel/Sharp");
                if (!extp)
                        return NULL;
                
                /* Do some byteswapping if necessary -- the query data
                 * comes off the chip little-endian. */
                extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
                extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
                extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;        

                /* Apply per-chip workarounds; see fixup_table above. */
                cfi_fixup(map, fixup_table);
                        
#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif  

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }

        /* Seed per-chip timing estimates from the CFI query.  The
         * *TimeoutTyp fields are power-of-two exponents, hence 1<<n
         * (units defined by the CFI spec -- not checked here). */
        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
        }               

        map->fldrv = &cfi_intelext_chipdrv;
        
        return cfi_intelext_setup(map);
}
230
/*
 * Allocate and populate the mtd_info for the probed chip set: size and
 * erase-region geometry, then the method pointers.  Returns NULL on any
 * failure, freeing everything allocated here (plus cfi->cmdset_priv).
 */
static struct mtd_info *cfi_intelext_setup(struct map_info *map)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        unsigned long offset = 0;
        int i,j;
        /* DevSize is a power-of-two exponent (bytes per chip) */
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                goto setup_err;
        }

        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;
        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) { 
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }
        
        /* EraseRegionInfo encoding: bits 31-16 = block size in 256-byte
         * units, bits 15-0 = number of blocks - 1.  Note that
         * ((x >> 8) & ~0xff) == (x >> 16) << 8, i.e. size in bytes. */
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                /* mtd->erasesize reports the largest region's block size */
                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                /* Replicate this region once per physical chip, at the
                 * same relative offset within each chip. */
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        /* Sanity check: the erase regions must tile the chip exactly */
        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

        /* Also select the correct geometry setup too */ 
        mtd->erase = cfi_intelext_erase_varsize;
        mtd->read = cfi_intelext_read;

        /* point/unpoint only make sense for linearly mapped flash */
        if (map_is_linear(map)) {
                mtd->point = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }

        /* A zero BufWriteTimeoutTyp means buffer writes are unsupported
         * (possibly zeroed by a fixup above). */
        if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
        } else {
                printk(KERN_INFO "Using word write method\n" );
                mtd->write = cfi_intelext_write_words;
        }
#if 0
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
#endif
        mtd->sync = cfi_intelext_sync;
        mtd->lock = cfi_intelext_lock;
        mtd->unlock = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume = cfi_intelext_resume;
        mtd->flags = MTD_CAP_NORFLASH;
        map->fldrv = &cfi_intelext_chipdrv;
        mtd->name = map->name;

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(map, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        return mtd;

 setup_err:
        /* mtd may be NULL if its allocation failed; cmdset_priv may be
           NULL in non-CFI mode -- kfree(NULL) is a no-op. */
        if(mtd) {
                if(mtd->eraseregions)
                        kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}
335
/*
 * If the chip supports simultaneous operations (FeatureSupport bit 9),
 * rebuild cfi_private so each hardware partition gets its own flchip,
 * tied together by a per-physical-chip flchip_shared for arbitration in
 * get_chip()/put_chip().  On success *pcfi (and map->fldrv_priv) may be
 * replaced with the new structure and the old one freed.
 * Returns 0 on success, -ENOMEM on allocation failure (old cfi intact).
 */
static int cfi_intelext_partition_fixup(struct map_info *map,
                                        struct cfi_private **pcfi)
{
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash ships.
         *
         * This is extremely crude at the moment and should probably be
         * extracted entirely from the Intel extended query data instead.
         * Right now a L18 flash is assumed if multiple operations is
         * detected.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatent code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int numparts, partshift, numvirtchips, i, j;

                /*
                 * The L18 flash memory array is divided
                 * into multiple 8-Mbit partitions.
                 */
                /* 8 Mbit = 1 MiB per partition, hence DevSize - 20.
                   NOTE(review): assumes DevSize >= 20, i.e. chips of at
                   least 1 MiB -- true for the L18 family targeted here. */
                numparts = 1 << (cfi->cfiq->DevSize - 20);
                partshift = 20 + __ffs(cfi->interleave);
                numvirtchips = cfi->numchips * numparts;

                /* cfi_private has its chips[] array allocated inline */
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                /* Clone each physical chip into one flchip per partition;
                   all partitions of a physical chip share shared[i]. */
                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d sets of %d interleaved chips "
                                  "--> %d partitions of %#x bytes\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<newcfi->chipshift);

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}
413
414 /*
415  *  *********** CHIP ACCESS FUNCTIONS ***********
416  */
417
/*
 * Bring the chip (or chip partition) to a state in which the operation
 * indicated by 'mode' (FL_READY, FL_POINT, FL_WRITING, FL_ERASING, ...)
 * may begin.  Must be called with chip->mutex held; the lock may be
 * dropped and retaken while waiting.  Returns 0 on success or -EIO if
 * the chip never reports ready.  Each successful call must eventually
 * be balanced by put_chip().
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        /* 0x80: status-register "ready" bit; 0x01: partition-write status */
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        /* chip->priv is only non-NULL on multi-partition chips (set up
           by cfi_intelext_partition_fixup). */
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
                /*
                 * OK. We have possibility for contension on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contension arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read when its lock is taken.
                 * However any writes to it can only be made when the current
                 * owner's lock is also held.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        /* trylock avoids an ABBA deadlock against a holder
                           of contender->mutex who wants ours; on failure
                           we simply spin around via 'retry'. */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        /* Recurse on the current owner so arbitration runs
                           in its context; our mutex is dropped meanwhile. */
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                if (contender && contender != chip)
                        spin_unlock(contender->mutex);
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                /* Poll the status register until the chip is ready */
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n", 
                                       status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }
                /* fall through: chip reported ready */
                                
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                /* Only interrupt the running erase if the chip supports
                   erase suspend (FeatureSupport bit 1) and the requested
                   mode may legally run during a suspended erase. */
                if (!(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* fall through to sleep otherwise */

        default:
        sleep:
                /* Busy with something we cannot interrupt: sleep until
                   put_chip() wakes us, then re-evaluate from scratch. */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}
578
/*
 * Counterpart of get_chip(): give up ownership of the chip's write/erase
 * engine (handing it back to a suspended owner if there is one), resume
 * an erase we suspended, drop Vpp where appropriate, and wake waiters.
 * Must be called with chip->mutex held.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                /* Lock order: loaner->mutex before releasing
                                   shared->lock, then drop our own mutex so the
                                   recursive put_chip runs in loaner's context. */
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                        } else {
                                /* No loaner: clear ownership unless we are
                                   ourselves still mid-suspend. */
                                if (chip->oldstate != FL_ERASING) {
                                        shared->erasing = NULL;
                                        if (chip->oldstate != FL_WRITING)
                                                shared->writing = NULL;
                                }
                                spin_unlock(&shared->lock);
                        }
                }
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the 
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS 
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow 
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we 
                   do. */
                /* 0xd0 = erase resume, 0x70 = read status register */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        /* Let anyone sleeping in get_chip() re-evaluate the chip state */
        wake_up(&chip->wq);
}
638
639 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
640 {
641         unsigned long cmd_addr;
642         struct cfi_private *cfi = map->fldrv_priv;
643         int ret = 0;
644
645         adr += chip->start;
646
647         /* Ensure cmd read/writes are aligned. */ 
648         cmd_addr = adr & ~(map_bankwidth(map)-1); 
649
650         spin_lock(chip->mutex);
651
652         ret = get_chip(map, chip, cmd_addr, FL_POINT);
653
654         if (!ret) {
655                 if (chip->state != FL_POINT && chip->state != FL_READY)
656                         map_write(map, CMD(0xff), cmd_addr);
657
658                 chip->state = FL_POINT;
659                 chip->ref_point_counter++;
660         }
661         spin_unlock(chip->mutex);
662
663         return ret;
664 }
665
/*
 * mtd->point implementation: hand the caller a direct pointer into the
 * linearly mapped flash at 'from', after pinning every chip the range
 * touches into FL_POINT state.  *retlen reports how many bytes of the
 * request are covered.  Only installed when map_is_linear(map).
 */
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;
        
        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                /* Clamp this chunk to the end of the current chip */
                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                
                ofs = 0;
                chipnum++;
        }
        /* NOTE(review): a failure from do_point_onechip is discarded
           here -- the caller sees success with a short *retlen.  Confirm
           this partial-success contract is what mtd->point users expect. */
        return 0;
}
709
710 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
711 {
712         struct map_info *map = mtd->priv;
713         struct cfi_private *cfi = map->fldrv_priv;
714         unsigned long ofs;
715         int chipnum;
716
717         /* Now unlock the chip(s) POINT state */
718
719         /* ofs: offset within the first chip that the first read should start */
720         chipnum = (from >> cfi->chipshift);
721         ofs = from - (chipnum <<  cfi->chipshift);
722
723         while (len) {
724                 unsigned long thislen;
725                 struct flchip *chip;
726
727                 chip = &cfi->chips[chipnum];
728                 if (chipnum >= cfi->numchips)
729                         break;
730
731                 if ((len + ofs -1) >> cfi->chipshift)
732                         thislen = (1<<cfi->chipshift) - ofs;
733                 else
734                         thislen = len;
735
736                 spin_lock(chip->mutex);
737                 if (chip->state == FL_POINT) {
738                         chip->ref_point_counter--;
739                         if(chip->ref_point_counter == 0)
740                                 chip->state = FL_READY;
741                 } else
742                         printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
743
744                 put_chip(map, chip, chip->start);
745                 spin_unlock(chip->mutex);
746
747                 len -= thislen;
748                 ofs = 0;
749                 chipnum++;
750         }
751 }
752
/* Read 'len' bytes from a single chip, starting at chip-relative
 * offset 'adr'.  Claims the chip via get_chip(), switches it to array
 * read mode if necessary, copies the data out of the mapped window,
 * and releases the chip again.  Returns 0 or the get_chip() error.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */ 
	cmd_addr = adr & ~(map_bankwidth(map)-1); 

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* FL_POINT already implies array-read mode; any other state needs
	   the Read Array command (0xff) issued first. */
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}
784
785 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
786 {
787         struct map_info *map = mtd->priv;
788         struct cfi_private *cfi = map->fldrv_priv;
789         unsigned long ofs;
790         int chipnum;
791         int ret = 0;
792
793         /* ofs: offset within the first chip that the first read should start */
794         chipnum = (from >> cfi->chipshift);
795         ofs = from - (chipnum <<  cfi->chipshift);
796
797         *retlen = 0;
798
799         while (len) {
800                 unsigned long thislen;
801
802                 if (chipnum >= cfi->numchips)
803                         break;
804
805                 if ((len + ofs -1) >> cfi->chipshift)
806                         thislen = (1<<cfi->chipshift) - ofs;
807                 else
808                         thislen = len;
809
810                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
811                 if (ret)
812                         break;
813
814                 *retlen += thislen;
815                 len -= thislen;
816                 buf += thislen;
817                 
818                 ofs = 0;
819                 chipnum++;
820         }
821         return ret;
822 }
#if 0
/* Protection-register read support, currently compiled out (matching
 * the commented-out prototypes near the top of the file).
 */

/* Copy 'len' bytes of a protection register into 'buf', spanning
   interleaved chips as needed.  Returns the number of bytes read, or
   the get_chip() error if nothing could be read at all.
   NOTE(review): *retlen is never updated by this helper. */
static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int count = len;
	int chip_num, offst;
	int ret;

	chip_num = ((unsigned int)from/reg_sz);
	offst = from - (reg_sz*chip_num)+base_offst;

	while (count) {
	/* Calculate which chip & protection register offset we need */

		if (chip_num >= cfi->numchips)
			goto out;

		chip = &cfi->chips[chip_num];
		
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
		if (ret) {
			spin_unlock(chip->mutex);
			/* GNU "a ?: b": bytes read so far, else the error */
			return (len-count)?:ret;
		}

		/* Enter read-identifier/query mode (0x90) if not there yet */
		if (chip->state != FL_JEDEC_QUERY) {
			map_write(map, CMD(0x90), chip->start);
			chip->state = FL_JEDEC_QUERY;
		}

		while (count && ((offst-base_offst) < reg_sz)) {
			*buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
			buf++;
			offst++;
			count--;
		}

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		/* Move on to the next chip */
		chip_num++;
		offst = base_offst;
	}
	
 out:	
	return len-count;
}
	
/* Read the user-programmable protection register; it sits immediately
   after the factory register, hence the base offset. */
static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
	int base_offst,reg_sz;
	
	/* Check that we actually have some protection registers */
	if(!(extp->FeatureSupport&64)){
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst=(1<<extp->FactProtRegSize);
	reg_sz=(1<<extp->UserProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}

/* Read the factory-programmed protection register (base offset 0). */
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
	int base_offst,reg_sz;
	
	/* Check that we actually have some protection registers */
	if(!(extp->FeatureSupport&64)){
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst=0;
	reg_sz=(1<<extp->FactProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}
#endif
915
/*
 * Program one bus-width word at chip-relative address 'adr'.
 *
 * Claims the chip for FL_WRITING, issues Word Program (0x40) followed
 * by the data, then polls the status register until SR.7 (ready) is
 * set.  A suspended write is waited out on the chip's wait queue, and
 * the typical programming delay is tuned adaptively.
 *
 * Returns 0 on success, -EIO on timeout, -EROFS if the block was
 * locked (SR.1 set), or an error from get_chip().
 */
static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x40), adr);	/* Word Program setup */
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	/* Drop the lock for the duration of the typical programming time */
	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	cfi_udelay(chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		
		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		cfi_udelay(1);
		spin_lock(chip->mutex);
	}
	/* Adapt the programming delay: speed up when we never had to
	   poll, back off when we polled more than once.  Never let the
	   delay reach zero. */
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1) 
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;
	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}
 out:
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
1004
1005
1006 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1007 {
1008         struct map_info *map = mtd->priv;
1009         struct cfi_private *cfi = map->fldrv_priv;
1010         int ret = 0;
1011         int chipnum;
1012         unsigned long ofs;
1013
1014         *retlen = 0;
1015         if (!len)
1016                 return 0;
1017
1018         chipnum = to >> cfi->chipshift;
1019         ofs = to  - (chipnum << cfi->chipshift);
1020
1021         /* If it's not bus-aligned, do the first byte write */
1022         if (ofs & (map_bankwidth(map)-1)) {
1023                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1024                 int gap = ofs - bus_ofs;
1025                 int n;
1026                 map_word datum;
1027
1028                 n = min_t(int, len, map_bankwidth(map)-gap);
1029                 datum = map_word_ff(map);
1030                 datum = map_word_load_partial(map, datum, buf, gap, n);
1031
1032                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1033                                                bus_ofs, datum);
1034                 if (ret) 
1035                         return ret;
1036
1037                 len -= n;
1038                 ofs += n;
1039                 buf += n;
1040                 (*retlen) += n;
1041
1042                 if (ofs >> cfi->chipshift) {
1043                         chipnum ++; 
1044                         ofs = 0;
1045                         if (chipnum == cfi->numchips)
1046                                 return 0;
1047                 }
1048         }
1049         
1050         while(len >= map_bankwidth(map)) {
1051                 map_word datum = map_word_load(map, buf);
1052
1053                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1054                                 ofs, datum);
1055                 if (ret)
1056                         return ret;
1057
1058                 ofs += map_bankwidth(map);
1059                 buf += map_bankwidth(map);
1060                 (*retlen) += map_bankwidth(map);
1061                 len -= map_bankwidth(map);
1062
1063                 if (ofs >> cfi->chipshift) {
1064                         chipnum ++; 
1065                         ofs = 0;
1066                         if (chipnum == cfi->numchips)
1067                                 return 0;
1068                 }
1069         }
1070
1071         if (len & (map_bankwidth(map)-1)) {
1072                 map_word datum;
1073
1074                 datum = map_word_ff(map);
1075                 datum = map_word_load_partial(map, datum, buf, 0, len);
1076
1077                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1078                                                ofs, datum);
1079                 if (ret) 
1080                         return ret;
1081                 
1082                 (*retlen) += len;
1083         }
1084
1085         return 0;
1086 }
1087
1088
1089 static inline int do_write_buffer(struct map_info *map, struct flchip *chip, 
1090                                   unsigned long adr, const u_char *buf, int len)
1091 {
1092         struct cfi_private *cfi = map->fldrv_priv;
1093         map_word status, status_OK;
1094         unsigned long cmd_adr, timeo;
1095         int wbufsize, z, ret=0, bytes, words;
1096
1097         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1098         adr += chip->start;
1099         cmd_adr = adr & ~(wbufsize-1);
1100         
1101         /* Let's determine this according to the interleave only once */
1102         status_OK = CMD(0x80);
1103
1104         spin_lock(chip->mutex);
1105         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1106         if (ret) {
1107                 spin_unlock(chip->mutex);
1108                 return ret;
1109         }
1110
1111         /* Â§4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1112            [...], the device will not accept any more Write to Buffer commands". 
1113            So we must check here and reset those bits if they're set. Otherwise
1114            we're just pissing in the wind */
1115         if (chip->state != FL_STATUS)
1116                 map_write(map, CMD(0x70), cmd_adr);
1117         status = map_read(map, cmd_adr);
1118         if (map_word_bitsset(map, status, CMD(0x30))) {
1119                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1120                 map_write(map, CMD(0x50), cmd_adr);
1121                 map_write(map, CMD(0x70), cmd_adr);
1122         }
1123
1124         ENABLE_VPP(map);
1125         chip->state = FL_WRITING_TO_BUFFER;
1126
1127         z = 0;
1128         for (;;) {
1129                 map_write(map, CMD(0xe8), cmd_adr);
1130
1131                 status = map_read(map, cmd_adr);
1132                 if (map_word_andequal(map, status, status_OK, status_OK))
1133                         break;
1134
1135                 spin_unlock(chip->mutex);
1136                 cfi_udelay(1);
1137                 spin_lock(chip->mutex);
1138
1139                 if (++z > 20) {
1140                         /* Argh. Not ready for write to buffer */
1141                         map_write(map, CMD(0x70), cmd_adr);
1142                         chip->state = FL_STATUS;
1143                         printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1144                                status.x[0], map_read(map, cmd_adr).x[0]);
1145                         /* Odd. Clear status bits */
1146                         map_write(map, CMD(0x50), cmd_adr);
1147                         map_write(map, CMD(0x70), cmd_adr);
1148                         ret = -EIO;
1149                         goto out;
1150                 }
1151         }
1152
1153         /* Write length of data to come */
1154         bytes = len & (map_bankwidth(map)-1);
1155         words = len / map_bankwidth(map);
1156         map_write(map, CMD(words - !bytes), cmd_adr );
1157
1158         /* Write data */
1159         z = 0;
1160         while(z < words * map_bankwidth(map)) {
1161                 map_word datum = map_word_load(map, buf);
1162                 map_write(map, datum, adr+z);
1163
1164                 z += map_bankwidth(map);
1165                 buf += map_bankwidth(map);
1166         }
1167
1168         if (bytes) {
1169                 map_word datum;
1170
1171                 datum = map_word_ff(map);
1172                 datum = map_word_load_partial(map, datum, buf, 0, bytes);
1173                 map_write(map, datum, adr+z);
1174         }
1175
1176         /* GO GO GO */
1177         map_write(map, CMD(0xd0), cmd_adr);
1178         chip->state = FL_WRITING;
1179
1180         spin_unlock(chip->mutex);
1181         INVALIDATE_CACHED_RANGE(map, adr, len);
1182         cfi_udelay(chip->buffer_write_time);
1183         spin_lock(chip->mutex);
1184
1185         timeo = jiffies + (HZ/2);
1186         z = 0;
1187         for (;;) {
1188                 if (chip->state != FL_WRITING) {
1189                         /* Someone's suspended the write. Sleep */
1190                         DECLARE_WAITQUEUE(wait, current);
1191                         set_current_state(TASK_UNINTERRUPTIBLE);
1192                         add_wait_queue(&chip->wq, &wait);
1193                         spin_unlock(chip->mutex);
1194                         schedule();
1195                         remove_wait_queue(&chip->wq, &wait);
1196                         timeo = jiffies + (HZ / 2); /* FIXME */
1197                         spin_lock(chip->mutex);
1198                         continue;
1199                 }
1200
1201                 status = map_read(map, cmd_adr);
1202                 if (map_word_andequal(map, status, status_OK, status_OK))
1203                         break;
1204
1205                 /* OK Still waiting */
1206                 if (time_after(jiffies, timeo)) {
1207                         chip->state = FL_STATUS;
1208                         printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1209                         ret = -EIO;
1210                         goto out;
1211                 }
1212                 
1213                 /* Latency issues. Drop the lock, wait a while and retry */
1214                 spin_unlock(chip->mutex);
1215                 cfi_udelay(1);
1216                 z++;
1217                 spin_lock(chip->mutex);
1218         }
1219         if (!z) {
1220                 chip->buffer_write_time--;
1221                 if (!chip->buffer_write_time)
1222                         chip->buffer_write_time++;
1223         }
1224         if (z > 1) 
1225                 chip->buffer_write_time++;
1226
1227         /* Done and happy. */
1228         chip->state = FL_STATUS;
1229
1230         /* check for lock bit */
1231         if (map_word_bitsset(map, status, CMD(0x02))) {
1232                 /* clear status */
1233                 map_write(map, CMD(0x50), cmd_adr);
1234                 /* put back into read status register mode */
1235                 map_write(map, CMD(0x70), adr);
1236                 ret = -EROFS;
1237         }
1238
1239  out:
1240         put_chip(map, chip, cmd_adr);
1241         spin_unlock(chip->mutex);
1242         return ret;
1243 }
1244
1245 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, 
1246                                        size_t len, size_t *retlen, const u_char *buf)
1247 {
1248         struct map_info *map = mtd->priv;
1249         struct cfi_private *cfi = map->fldrv_priv;
1250         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1251         int ret = 0;
1252         int chipnum;
1253         unsigned long ofs;
1254
1255         *retlen = 0;
1256         if (!len)
1257                 return 0;
1258
1259         chipnum = to >> cfi->chipshift;
1260         ofs = to  - (chipnum << cfi->chipshift);
1261
1262         /* If it's not bus-aligned, do the first word write */
1263         if (ofs & (map_bankwidth(map)-1)) {
1264                 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1265                 if (local_len > len)
1266                         local_len = len;
1267                 ret = cfi_intelext_write_words(mtd, to, local_len,
1268                                                retlen, buf);
1269                 if (ret)
1270                         return ret;
1271                 ofs += local_len;
1272                 buf += local_len;
1273                 len -= local_len;
1274
1275                 if (ofs >> cfi->chipshift) {
1276                         chipnum ++;
1277                         ofs = 0;
1278                         if (chipnum == cfi->numchips)
1279                                 return 0;
1280                 }
1281         }
1282
1283         while(len) {
1284                 /* We must not cross write block boundaries */
1285                 int size = wbufsize - (ofs & (wbufsize-1));
1286
1287                 if (size > len)
1288                         size = len;
1289                 ret = do_write_buffer(map, &cfi->chips[chipnum], 
1290                                       ofs, buf, size);
1291                 if (ret)
1292                         return ret;
1293
1294                 ofs += size;
1295                 buf += size;
1296                 (*retlen) += size;
1297                 len -= size;
1298
1299                 if (ofs >> cfi->chipshift) {
1300                         chipnum ++; 
1301                         ofs = 0;
1302                         if (chipnum == cfi->numchips)
1303                                 return 0;
1304                 }
1305         }
1306         return 0;
1307 }
1308
/* Per-block callback applied by cfi_intelext_varsize_frob(); 'thunk'
   is an opaque argument passed through unchanged. */
typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);
1311
/*
 * Apply 'frob' (erase, lock, unlock, ...) to every erase block in the
 * range [ofs, ofs+len) of a device with variable-size erase regions.
 *
 * Both ends of the range must be aligned to the erase size in effect
 * at the respective address; otherwise -EINVAL is returned.  Returns
 * the first non-zero value from the callback, or 0 on success.
 */
static int cfi_intelext_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
				     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (ofs > mtd->size)
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of 
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/
	
	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this 
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;
	
	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	/* Translate the start address into (chip, offset-within-chip) */
	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i=first;

	while(len) {
		unsigned long chipmask;
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
		
		if (ret)
			return ret;

		adr += size;
		len -= size;

		/* Advance to the next erase region once we step past the
		   end of the current one (compared within the chip). */
		chipmask = (1 << cfi->chipshift) - 1;
		if ((adr & chipmask) == ((regions[i].offset + size * regions[i].numblocks) & chipmask))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;
			
			if (chipnum >= cfi->numchips)
			break;
		}
	}

	return 0;
}
1403
1404
/*
 * Erase one block at chip-relative address 'adr' ('len' is the block
 * size, used only for cache invalidation).
 *
 * Issues Block Erase (0x20/0xD0), sleeps for roughly half the typical
 * erase time, then polls the status register until SR.7 (ready) is
 * set, honouring erase suspend/resume.  If SR.5 alone reports an
 * erase error, the erase is retried up to three times.
 *
 * Returns 0 on success, -EIO on timeout or command-sequence/VPP
 * errors, -EROFS if the block is locked, or an error from get_chip().
 */
static int do_erase_oneblock(struct map_info *map, struct flchip *chip,
			     unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	/* Sleep for about half the typical erase time before polling */
	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout((chip->erase_time*HZ)/(2*1000));
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		
		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %lx, status = %lx.\n",
			       adr, status.x[0], map_read(map, adr).x[0]);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			/* NOTE(review): this error path returns without
			   calling wake_up(&chip->wq), unlike the normal
			   exit below — verify whether waiters can stall. */
			DISABLE_VPP(map);
			spin_unlock(chip->mutex);
			return -EIO;
		}
		
		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
		spin_lock(chip->mutex);
	}
	
	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			/* NOTE(review): the shift below does not depend on
			   'i' or 'w', so every iteration ORs in the same
			   value; it looks like it was meant to extract each
			   interleaved chip's status byte.  Confirm against
			   later kernel versions before relying on the
			   merged value. */
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		
		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			/* Erase error (SR.5): retry a few times */
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				spin_unlock(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);
	return ret;
}
1543
1544 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1545 {
1546         unsigned long ofs, len;
1547         int ret;
1548
1549         ofs = instr->addr;
1550         len = instr->len;
1551
1552         ret = cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1553         if (ret)
1554                 return ret;
1555
1556         instr->state = MTD_ERASE_DONE;
1557         mtd_erase_callback(instr);
1558         
1559         return 0;
1560 }
1561
1562 static void cfi_intelext_sync (struct mtd_info *mtd)
1563 {
1564         struct map_info *map = mtd->priv;
1565         struct cfi_private *cfi = map->fldrv_priv;
1566         int i;
1567         struct flchip *chip;
1568         int ret = 0;
1569
1570         for (i=0; !ret && i<cfi->numchips; i++) {
1571                 chip = &cfi->chips[i];
1572
1573                 spin_lock(chip->mutex);
1574                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1575
1576                 if (!ret) {
1577                         chip->oldstate = chip->state;
1578                         chip->state = FL_SYNCING;
1579                         /* No need to wake_up() on this state change - 
1580                          * as the whole point is that nobody can do anything
1581                          * with the chip now anyway.
1582                          */
1583                 }
1584                 spin_unlock(chip->mutex);
1585         }
1586
1587         /* Unlock the chips again */
1588
1589         for (i--; i >=0; i--) {
1590                 chip = &cfi->chips[i];
1591
1592                 spin_lock(chip->mutex);
1593                 
1594                 if (chip->state == FL_SYNCING) {
1595                         chip->state = chip->oldstate;
1596                         wake_up(&chip->wq);
1597                 }
1598                 spin_unlock(chip->mutex);
1599         }
1600 }
1601
1602 #ifdef DEBUG_LOCK_BITS
1603 static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip,
1604                                        unsigned long adr, int len, void *thunk)
1605 {
1606         struct cfi_private *cfi = map->fldrv_priv;
1607         int ofs_factor = cfi->interleave * cfi->device_type;
1608
1609         cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1610         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1611                adr, cfi_read_query(map, adr+(2*ofs_factor)));
1612         chip->state = FL_JEDEC_QUERY;
1613         return 0;
1614 }
1615 #endif
1616
/* Thunk values passed through cfi_intelext_varsize_frob() to select
 * whether do_xxlock_oneblock() sets or clears the block lock bit. */
#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)

/*
 * Lock or unlock a single erase block, selected by @thunk.
 *
 * Issues the 0x60 (set/clear block lock setup) command followed by
 * 0x01 (set lock bit) or 0xD0 (clear lock bits), then polls the status
 * register until the Write State Machine reports ready (SR.7).
 *
 * Returns 0 on success, the get_chip() error if the chip could not be
 * claimed, or -EIO if the operation did not complete within ~20s.
 */
static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
                              unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long timeo = jiffies + HZ;
        int ret;

        adr += chip->start;

        /* Let's determine this according to the interleave only once.
         * 0x80 is the WSM-ready bit (SR.7), replicated per interleaved chip. */
        status_OK = CMD(0x80);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, FL_LOCKING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        ENABLE_VPP(map);
        /* 0x60: set/clear block lock bits setup command */
        map_write(map, CMD(0x60), adr);

        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
                /* 0x01: set the lock bit for this block */
                map_write(map, CMD(0x01), adr);
                chip->state = FL_LOCKING;
        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
                /* 0xD0: clear lock bits */
                map_write(map, CMD(0xD0), adr);
                chip->state = FL_UNLOCKING;
        } else
                BUG();

        /* NOTE(review): schedule_timeout() is called without setting the
         * task state first; in TASK_RUNNING it returns without sleeping
         * the full interval — confirm whether a real delay was intended. */
        spin_unlock(chip->mutex);
        schedule_timeout(HZ);
        spin_lock(chip->mutex);

        /* FIXME. Use a timer to check this, and return immediately. */
        /* Once the state machine's known to be working I'll do that */

        timeo = jiffies + (HZ*20);
        for (;;) {

                status = map_read(map, adr);
                /* Done when every interleaved chip shows SR.7 set */
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;
                
                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        /* 0x70: read status register, to leave a known state */
                        map_write(map, CMD(0x70), adr);
                        chip->state = FL_STATUS;
                        /* NOTE(review): message says "unlock" even on the
                         * lock path — confirm whether it should be generic. */
                        printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n",
                               status.x[0], map_read(map, adr).x[0]);
                        DISABLE_VPP(map);
                        spin_unlock(chip->mutex);
                        return -EIO;
                }
                
                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                cfi_udelay(1);
                spin_lock(chip->mutex);
        }
        
        /* Done and happy. */
        chip->state = FL_STATUS;
        put_chip(map, chip, adr);
        spin_unlock(chip->mutex);
        return 0;
}
1689
1690 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1691 {
1692         int ret;
1693
1694 #ifdef DEBUG_LOCK_BITS
1695         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1696                __FUNCTION__, ofs, len);
1697         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1698                                   ofs, len, 0);
1699 #endif
1700
1701         ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock, 
1702                                         ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1703         
1704 #ifdef DEBUG_LOCK_BITS
1705         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1706                __FUNCTION__, ret);
1707         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1708                                   ofs, len, 0);
1709 #endif
1710
1711         return ret;
1712 }
1713
1714 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1715 {
1716         int ret;
1717
1718 #ifdef DEBUG_LOCK_BITS
1719         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1720                __FUNCTION__, ofs, len);
1721         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1722                                   ofs, len, 0);
1723 #endif
1724
1725         ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1726                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1727         
1728 #ifdef DEBUG_LOCK_BITS
1729         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1730                __FUNCTION__, ret);
1731         cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock, 
1732                                   ofs, len, 0);
1733 #endif
1734         
1735         return ret;
1736 }
1737
/*
 * Power-management suspend hook: put every chip into FL_PM_SUSPENDED.
 *
 * Only chips that are idle (FL_READY / FL_STATUS / FL_CFI_QUERY /
 * FL_JEDEC_QUERY, with no saved oldstate pending) can be suspended;
 * any chip in another state makes the whole suspend fail with -EAGAIN,
 * in which case all chips already suspended are rolled back to their
 * previous state.  Returns 0 on success, -EAGAIN otherwise.
 */
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			/* Only suspend if no interrupted operation is parked
			 * in oldstate (oldstate == FL_READY means "nothing
			 * pending") */
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change - 
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			}
			break;
		default:
			/* Chip is busy (erasing/writing/...): refuse */
			ret = -EAGAIN;
			/* fall through */
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		/* Roll back: wake every chip we already suspended (reverse
		 * order), since the suspend as a whole failed */
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];
			
			spin_lock(chip->mutex);
			
			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	} 
	
	return ret;
}
1794
1795 static void cfi_intelext_resume(struct mtd_info *mtd)
1796 {
1797         struct map_info *map = mtd->priv;
1798         struct cfi_private *cfi = map->fldrv_priv;
1799         int i;
1800         struct flchip *chip;
1801
1802         for (i=0; i<cfi->numchips; i++) {
1803         
1804                 chip = &cfi->chips[i];
1805
1806                 spin_lock(chip->mutex);
1807                 
1808                 /* Go to known state. Chip may have been power cycled */
1809                 if (chip->state == FL_PM_SUSPENDED) {
1810                         map_write(map, CMD(0xFF), cfi->chips[i].start);
1811                         chip->state = FL_READY;
1812                         wake_up(&chip->wq);
1813                 }
1814
1815                 spin_unlock(chip->mutex);
1816         }
1817 }
1818
1819 static void cfi_intelext_destroy(struct mtd_info *mtd)
1820 {
1821         struct map_info *map = mtd->priv;
1822         struct cfi_private *cfi = map->fldrv_priv;
1823         kfree(cfi->cmdset_priv);
1824         kfree(cfi->cfiq);
1825         kfree(cfi->chips[0].priv);
1826         kfree(cfi);
1827         kfree(mtd->eraseregions);
1828 }
1829
/* Names under which this command-set implementation is published via
 * the inter_module registry for map drivers to look up. */
static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";

int __init cfi_intelext_init(void)
{
	/* Both names deliberately resolve to cfi_cmdset_0001: command set
	 * 0x0003 is presumably served by the same implementation — not a
	 * copy-paste error (TODO confirm against users of im_name_3). */
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}

static void __exit cfi_intelext_exit(void)
{
	/* Withdraw both registrations made in cfi_intelext_init() */
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");