upgrade to linux 2.6.10-1.12_FC2
[linux-2.6.git] / drivers / mtd / chips / cfi_cmdset_0001.c
1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * $Id: cfi_cmdset_0001.c,v 1.160 2004/11/01 06:02:24 nico Exp $
8  * (+ suspend fix from v1.162)
9  * (+ partition detection fix from v1.163)
10  * 
11  * 10/10/2000   Nicolas Pitre <nico@cam.org>
12  *      - completely revamped method functions so they are aware and
13  *        independent of the flash geometry (buswidth, interleave, etc.)
14  *      - scalability vs code size is completely set at compile-time
15  *        (see include/linux/mtd/cfi.h for selection)
16  *      - optimized write buffer method
17  * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
18  *      - reworked lock/unlock/erase support for var size flash
19  */
20
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/init.h>
26 #include <asm/io.h>
27 #include <asm/byteorder.h>
28
29 #include <linux/errno.h>
30 #include <linux/slab.h>
31 #include <linux/delay.h>
32 #include <linux/interrupt.h>
33 #include <linux/mtd/map.h>
34 #include <linux/mtd/mtd.h>
35 #include <linux/mtd/compatmac.h>
36 #include <linux/mtd/cfi.h>
37
38 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39
40 // debugging, turns off buffer write mode if set to 1
41 #define FORCE_WORD_WRITE 0
42
43 #define MANUFACTURER_INTEL      0x0089
44 #define I82802AB        0x00ad
45 #define I82802AC        0x00ac
46 #define MANUFACTURER_ST         0x0020
47 #define M50LPW080       0x002F
48
49 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
50 //static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
51 //static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
52 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
53 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
55 static void cfi_intelext_sync (struct mtd_info *);
56 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
57 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
58 static int cfi_intelext_suspend (struct mtd_info *);
59 static void cfi_intelext_resume (struct mtd_info *);
60
61 static void cfi_intelext_destroy(struct mtd_info *);
62
63 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
64
65 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
66 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
67
68 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
69                      size_t *retlen, u_char **mtdbuf);
70 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
71                         size_t len);
72
73 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
74 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
75 #include "fwh_lock.h"
76
77
78
79 /*
80  *  *********** SETUP AND PROBE BITS  ***********
81  */
82
/* Chip driver registration record for the Intel/Sharp extended command
 * set.  .probe is NULL because these chips are only reached through the
 * generic CFI/JEDEC probe, never instantiated directly by name. */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};
89
90 /* #define DEBUG_LOCK_BITS */
91 /* #define DEBUG_CFI_FEATURES */
92
#ifdef DEBUG_CFI_FEATURES
/* Dump the Intel/Sharp extended query (PRI) table in human-readable
 * form: the feature-support bitmask, the commands allowed while an
 * erase is suspended, the block status register mask and the optimum
 * programming voltages.  Debug-only; compiled in when
 * DEBUG_CFI_FEATURES is defined above. */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        /* Bits 0-9 are decoded above; report any remaining set bits. */
        for (i=10; i<32; i++) {
                if (extp->FeatureSupport & (1<<i)) 
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        /* Only bit 0 is decoded above; report any remaining set bits. */
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        /* Bits 0-1 are decoded above; report any remaining set bits. */
        for (i=2; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        /* Voltages are BCD-ish: high nibble = volts, low nibble = tenths. */
        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", 
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", 
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
135
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        /* cmdset_priv holds the Intel/Sharp extended query table in this
         * driver.  The previous cast to struct cfi_pri_amdstd was a
         * copy-paste error from cfi_cmdset_0002.c: cfi_pri_amdstd has no
         * SuspendCmdSupport member, so this function could not even
         * compile whenever the #ifdef above was enabled. */
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        /* Clear bit 0 so get_chip() never attempts a write during an
         * erase suspend on these buggy parts. */
        extp->SuspendCmdSupport &= ~1;
}
#endif
149
150 static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
151 {
152         struct map_info *map = mtd->priv;
153         struct cfi_private *cfi = map->fldrv_priv;
154         
155         cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
156         cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
157 }
158
159 static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
160 {
161         struct map_info *map = mtd->priv;
162         struct cfi_private *cfi = map->fldrv_priv;
163         
164         /* Note this is done after the region info is endian swapped */
165         cfi->cfiq->EraseRegionInfo[1] =
166                 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
167 };
168
169 static void fixup_use_point(struct mtd_info *mtd, void *param)
170 {
171         struct map_info *map = mtd->priv;
172         if (!mtd->point && map_is_linear(map)) {
173                 mtd->point   = cfi_intelext_point;
174                 mtd->unpoint = cfi_intelext_unpoint;
175         }
176 }
177
178 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
179 {
180         struct map_info *map = mtd->priv;
181         struct cfi_private *cfi = map->fldrv_priv;
182         if (cfi->cfiq->BufWriteTimeoutTyp) {
183                 printk(KERN_INFO "Using buffer write method\n" );
184                 mtd->write = cfi_intelext_write_buffers;
185         }
186 }
187
/* Fixups applied to chips detected via a real CFI query (see
 * cfi_fixup()); the all-NULL entry terminates the table. */
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, 
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};
199
/* Fixups applied only to chips probed in JEDEC mode: these firmware-hub
 * parts need the FWH-specific lock/unlock methods from fwh_lock.h. */
static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
/* Generic fixups applied after both the CFI and JEDEC tables above. */
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are as
         * well.  This table picks up all cases where
         * we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};
215
/* Read and byteswap the Intel/Sharp extended query (PRI) table found at
 * CFI address 'adr'.  Version 1.3 tables carry a variable-length tail
 * (protection register info, burst read info, hardware-partition region
 * descriptors), whose total size is only known once part of it has been
 * read; the again/need_more retry below re-reads the table with a larger
 * buffer until the whole tail fits.  Returns a kmalloc'd structure the
 * caller must free, or NULL on failure. */
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
                /* extra_size counts the bytes of variable tail consumed so
                   far; every step checks that the current buffer actually
                   contains that much before dereferencing it. */
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) * (4 + 6);

                /* Burst Read info */
                extra_size += 6;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        /* rinfo already contains one blockinfo, hence -1 */
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        /* Buffer too small for the tail seen so far: free
                           it and retry with the size known to be needed,
                           bounded at 4KiB as a sanity limit. */
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}
275
/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 *
 * Entry point for the Intel/Sharp (command set 0x0001) driver:
 * allocates and populates the mtd_info, reads the extended query table
 * when a real CFI chip is present, applies the fixup tables, seeds the
 * per-chip timing values, and hands off to cfi_intelext_setup().
 * Returns the ready mtd_info, or NULL on any failure.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /* 
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;        

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif  

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        /* Seed each chip's timing values from the CFI "typical" timeout
           exponents (powers of two per the CFI spec).  Done after the
           fixups so e.g. a zeroed BufWriteTimeoutTyp takes effect. */
        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
        }               

        map->fldrv = &cfi_intelext_chipdrv;
        
        return cfi_intelext_setup(mtd);
}
357
358 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
359 {
360         struct map_info *map = mtd->priv;
361         struct cfi_private *cfi = map->fldrv_priv;
362         unsigned long offset = 0;
363         int i,j;
364         unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
365
366         //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
367
368         mtd->size = devsize * cfi->numchips;
369
370         mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
371         mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 
372                         * mtd->numeraseregions, GFP_KERNEL);
373         if (!mtd->eraseregions) { 
374                 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
375                 goto setup_err;
376         }
377         
378         for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
379                 unsigned long ernum, ersize;
380                 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
381                 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
382
383                 if (mtd->erasesize < ersize) {
384                         mtd->erasesize = ersize;
385                 }
386                 for (j=0; j<cfi->numchips; j++) {
387                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
388                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
389                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
390                 }
391                 offset += (ersize * ernum);
392         }
393
394         if (offset != devsize) {
395                 /* Argh */
396                 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
397                 goto setup_err;
398         }
399
400         for (i=0; i<mtd->numeraseregions;i++){
401                 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
402                        i,mtd->eraseregions[i].offset,
403                        mtd->eraseregions[i].erasesize,
404                        mtd->eraseregions[i].numblocks);
405         }
406
407 #if 0
408         mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
409         mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
410 #endif
411
412         /* This function has the potential to distort the reality
413            a bit and therefore should be called last. */
414         if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
415                 goto setup_err;
416
417         __module_get(THIS_MODULE);
418         return mtd;
419
420  setup_err:
421         if(mtd) {
422                 if(mtd->eraseregions)
423                         kfree(mtd->eraseregions);
424                 kfree(mtd);
425         }
426         kfree(cfi->cmdset_priv);
427         return NULL;
428 }
429
/*
 * Re-shape the cfi_private structure for chips that advertise multiple
 * hardware partitions (extended table v1.3 with FeatureSupport bit 9,
 * "simultaneous operations"): each partition gets its own flchip so
 * operations in different partitions can proceed concurrently.
 * On success *pcfi may be replaced by a freshly allocated cfi_private
 * (the old one is freed).  Returns 0 on success, -errno on failure.
 */
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Walk the variable tail of the extended table (same
                   layout read_pri_intelext validated) to find the
                   partition region descriptors. */

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) * (4 + 6);

                /* Burst Read info */
                offs += 6;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                /* A partition smaller than the erase block size means the
                   reported partition count cannot be right. */
                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                /* One flchip_shared per physical chip arbitrates the
                   chip-global write/erase engine among its partitions. */
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}
532
533 /*
534  *  *********** CHIP ACCESS FUNCTIONS ***********
535  */
536
/*
 * Wait for (and claim) a chip — or a hardware partition, which has its
 * own flchip — so the caller may begin an operation of type 'mode'
 * (FL_READY, FL_POINT, FL_WRITING, FL_ERASING, ...) at address 'adr'.
 * Must be called with chip->mutex held; the mutex may be dropped and
 * re-acquired while waiting.  Returns 0 once the caller owns the chip,
 * or -EIO if the chip never became ready.
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        /* status_OK = status register ready bit; status_PWS is presumably
           the partition-status bit used on multi-partition chips — see
           the check below.  TODO(review): confirm against the datasheet. */
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read when its lock is taken.
                 * However any writes to it can only be made when the current
                 * owner's lock is also held.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        /* Recurse on the contender with both mutexes held in
                           a fixed order; drop our own lock across the call
                           to avoid deadlock. */
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                if (contender && contender != chip)
                        spin_unlock(contender->mutex);
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n", 
                                       status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }
                /* fall through: chip is now ready */

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                /* We may only interrupt the erase if the chip supports
                   erase suspend (FeatureSupport bit 1) and the requested
                   mode is allowed during a suspend. */
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* fall through and sleep otherwise */

        default:
        sleep:
                /* Chip is busy with something we can't interrupt: sleep on
                   its wait queue until put_chip() wakes us, then restart. */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}
698
/*
 * put_chip - release a chip previously acquired with get_chip()
 *
 * Undoes what get_chip() did to gain exclusive access at @adr: hands
 * shared write ownership back (for chips sharing state via chip->priv),
 * resumes a suspended erase, or drops Vpp, then wakes any waiters on
 * chip->wq.  Caller must hold chip->mutex.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		/* chip->priv is set when this chip shares operation state
		   (write/erase ownership) with a partner chip */
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				/* recurse to let the loaner finish its own release */
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
			} else {
				/* Nobody to hand back to: clear ownership unless
				   an operation on this chip is still suspended */
				if (chip->oldstate != FL_ERASING) {
					shared->erasing = NULL;
					if (chip->oldstate != FL_WRITING)
						shared->writing = NULL;
				}
				spin_unlock(&shared->lock);
			}
		} else {
			spin_unlock(&shared->lock);
		}
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the 
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS 
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow 
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we 
		   do. */
		map_write(map, CMD(0xd0), adr);	/* Erase Resume */
		map_write(map, CMD(0x70), adr);	/* Read Status Register */
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
760
/*
 * Put one chip into FL_POINT state so its array contents can be read
 * directly through the mapping: issue Read Array (0xff) if the chip is
 * not already presenting array data, then bump the point refcount.
 * Returns 0 on success or the error from get_chip().
 */
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */ 
	cmd_addr = adr & ~(map_bankwidth(map)-1); 

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		/* 0xff = Read Array: only needed if the chip is in some
		   other (status/query) read mode */
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;	/* balanced by cfi_intelext_unpoint() */
	}
	spin_unlock(chip->mutex);

	return ret;
}
787
788 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
789 {
790         struct map_info *map = mtd->priv;
791         struct cfi_private *cfi = map->fldrv_priv;
792         unsigned long ofs;
793         int chipnum;
794         int ret = 0;
795
796         if (!map->virt || (from + len > mtd->size))
797                 return -EINVAL;
798         
799         *mtdbuf = (void *)map->virt + from;
800         *retlen = 0;
801
802         /* Now lock the chip(s) to POINT state */
803
804         /* ofs: offset within the first chip that the first read should start */
805         chipnum = (from >> cfi->chipshift);
806         ofs = from - (chipnum << cfi->chipshift);
807
808         while (len) {
809                 unsigned long thislen;
810
811                 if (chipnum >= cfi->numchips)
812                         break;
813
814                 if ((len + ofs -1) >> cfi->chipshift)
815                         thislen = (1<<cfi->chipshift) - ofs;
816                 else
817                         thislen = len;
818
819                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
820                 if (ret)
821                         break;
822
823                 *retlen += thislen;
824                 len -= thislen;
825                 
826                 ofs = 0;
827                 chipnum++;
828         }
829         return 0;
830 }
831
832 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
833 {
834         struct map_info *map = mtd->priv;
835         struct cfi_private *cfi = map->fldrv_priv;
836         unsigned long ofs;
837         int chipnum;
838
839         /* Now unlock the chip(s) POINT state */
840
841         /* ofs: offset within the first chip that the first read should start */
842         chipnum = (from >> cfi->chipshift);
843         ofs = from - (chipnum <<  cfi->chipshift);
844
845         while (len) {
846                 unsigned long thislen;
847                 struct flchip *chip;
848
849                 chip = &cfi->chips[chipnum];
850                 if (chipnum >= cfi->numchips)
851                         break;
852
853                 if ((len + ofs -1) >> cfi->chipshift)
854                         thislen = (1<<cfi->chipshift) - ofs;
855                 else
856                         thislen = len;
857
858                 spin_lock(chip->mutex);
859                 if (chip->state == FL_POINT) {
860                         chip->ref_point_counter--;
861                         if(chip->ref_point_counter == 0)
862                                 chip->state = FL_READY;
863                 } else
864                         printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
865
866                 put_chip(map, chip, chip->start);
867                 spin_unlock(chip->mutex);
868
869                 len -= thislen;
870                 ofs = 0;
871                 chipnum++;
872         }
873 }
874
/*
 * Read @len bytes at @adr from a single chip into @buf.  Acquires the
 * chip, switches it to Read Array mode (0xff) if it is presenting
 * status/query data, copies from the mapping, and releases the chip.
 * Returns 0 on success or the error from get_chip().
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */ 
	cmd_addr = adr & ~(map_bankwidth(map)-1); 

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* FL_POINT already implies array-read mode, so leave it alone */
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}
906
907 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
908 {
909         struct map_info *map = mtd->priv;
910         struct cfi_private *cfi = map->fldrv_priv;
911         unsigned long ofs;
912         int chipnum;
913         int ret = 0;
914
915         /* ofs: offset within the first chip that the first read should start */
916         chipnum = (from >> cfi->chipshift);
917         ofs = from - (chipnum <<  cfi->chipshift);
918
919         *retlen = 0;
920
921         while (len) {
922                 unsigned long thislen;
923
924                 if (chipnum >= cfi->numchips)
925                         break;
926
927                 if ((len + ofs -1) >> cfi->chipshift)
928                         thislen = (1<<cfi->chipshift) - ofs;
929                 else
930                         thislen = len;
931
932                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
933                 if (ret)
934                         break;
935
936                 *retlen += thislen;
937                 len -= thislen;
938                 buf += thislen;
939                 
940                 ofs = 0;
941                 chipnum++;
942         }
943         return ret;
944 }
945
/*
 * Protection-register read support (user and factory OTP areas),
 * currently compiled out -- see the commented-out prototypes at the
 * top of the file.  Kept for reference.
 */
#if 0
static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int count = len;
	int chip_num, offst;
	int ret;

	chip_num = ((unsigned int)from/reg_sz);
	offst = from - (reg_sz*chip_num)+base_offst;

	while (count) {
	/* Calculate which chip & protection register offset we need */

		if (chip_num >= cfi->numchips)
			goto out;

		chip = &cfi->chips[chip_num];
		
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
		if (ret) {
			spin_unlock(chip->mutex);
			return (len-count)?:ret;
		}

		if (chip->state != FL_JEDEC_QUERY) {
			map_write(map, CMD(0x90), chip->start);
			chip->state = FL_JEDEC_QUERY;
		}

		while (count && ((offst-base_offst) < reg_sz)) {
			*buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
			buf++;
			offst++;
			count--;
		}

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		/* Move on to the next chip */
		chip_num++;
		offst = base_offst;
	}
	
 out:	
	return len-count;
}
	
static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
	int base_offst,reg_sz;
	
	/* Check that we actually have some protection registers */
	if(!extp || !(extp->FeatureSupport&64)){
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst=(1<<extp->FactProtRegSize);
	reg_sz=(1<<extp->UserProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
	int base_offst,reg_sz;
	
	/* Check that we actually have some protection registers */
	if(!extp || !(extp->FeatureSupport&64)){
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst=0;
	reg_sz=(1<<extp->FactProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}
#endif
1038
/*
 * Program one bus-width word of @datum at @adr on a single chip.
 * Sleeps if the write gets suspended, adapts chip->word_write_time to
 * the observed programming latency, and returns -EROFS if the status
 * register reports the block locked, -EIO on timeout, 0 on success.
 */
static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);	/* SR.7: write state machine ready */

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x40), adr);	/* Word Program setup */
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	/* Give the chip its typical programming time before polling */
	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	cfi_udelay(chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		
		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		cfi_udelay(1);
		spin_lock(chip->mutex);
	}
	/* Adaptive timing: shorten the initial delay if the chip was done
	   on the first poll, lengthen it if we had to poll repeatedly
	   (never let it drop to zero) */
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1) 
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;	/* SR.1: block lock detected */
	}
 out:
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
1128
1129
/*
 * MTD write() method using single-word programming.  Splits the
 * request into an unaligned head, whole bus-width words, and an
 * unaligned tail; partial words are padded with 0xff (which leaves
 * the neighbouring flash bits unprogrammed).  Crosses chip boundaries
 * as needed.  *retlen reports bytes written; returns 0 or the first
 * error from do_write_oneword().
 */
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;	/* bytes to skip inside the word */
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);	/* pad unused bytes with 0xff */
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum);
		if (ret) 
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		/* Advance to the next chip if the head filled this one */
		if (ofs >> cfi->chipshift) {
			chipnum ++; 
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	
	/* Aligned body: one full bus word at a time */
	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++; 
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Unaligned tail, again padded with 0xff */
	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       ofs, datum);
		if (ret) 
			return ret;
		
		(*retlen) += len;
	}

	return 0;
}
1211
1212
/*
 * Program up to one write-buffer's worth of data (@len bytes at @adr,
 * never crossing a write-buffer boundary -- the caller guarantees
 * this).  Sequence: clear stale SR.4/SR.5 errors, poll Write-to-Buffer
 * (0xe8) until the buffer is available, fill it, confirm with 0xd0,
 * then poll for completion with adaptive timing.  Returns 0, -EIO on
 * timeout, or -EROFS if the block is locked.
 */
static inline int do_write_buffer(struct map_info *map, struct flchip *chip, 
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	/* Commands go to the write-buffer-aligned address */
	cmd_adr = adr & ~(wbufsize-1);
	
	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);	/* SR.7: write state machine ready */

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* Section 4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands". 
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		map_write(map, CMD(0x50), cmd_adr);	/* Clear Status Register */
		map_write(map, CMD(0x70), cmd_adr);	/* back to status mode */
	}

	ENABLE_VPP(map);
	chip->state = FL_WRITING_TO_BUFFER;

	/* Issue Write-to-Buffer until the chip signals buffer available,
	   giving up after 20 attempts */
	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock(chip->mutex);
		cfi_udelay(1);
		spin_lock(chip->mutex);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
	/* The chip takes the word count minus one; "words - !bytes"
	   yields N-1 whether or not a partial trailing word follows */
	map_write(map, CMD(words - !bytes), cmd_adr );

	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	/* Partial trailing word, padded with 0xff */
	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);	/* confirm: start programming */
	chip->state = FL_WRITING;

	/* Give the chip its typical buffer-program time before polling */
	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	cfi_udelay(chip->buffer_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}
		
		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		cfi_udelay(1);
		z++;
		spin_lock(chip->mutex);
	}
	/* Adaptive timing, same scheme as do_write_oneword() */
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1) 
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		/* NOTE(review): this 0x70 goes to adr while the 0x50 above
		   went to cmd_adr -- every other command in this function
		   uses cmd_adr; looks like it should too. Confirm. */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;	/* SR.1: block lock detected */
	}

 out:
	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
1370
1371 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, 
1372                                        size_t len, size_t *retlen, const u_char *buf)
1373 {
1374         struct map_info *map = mtd->priv;
1375         struct cfi_private *cfi = map->fldrv_priv;
1376         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1377         int ret = 0;
1378         int chipnum;
1379         unsigned long ofs;
1380
1381         *retlen = 0;
1382         if (!len)
1383                 return 0;
1384
1385         chipnum = to >> cfi->chipshift;
1386         ofs = to  - (chipnum << cfi->chipshift);
1387
1388         /* If it's not bus-aligned, do the first word write */
1389         if (ofs & (map_bankwidth(map)-1)) {
1390                 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1391                 if (local_len > len)
1392                         local_len = len;
1393                 ret = cfi_intelext_write_words(mtd, to, local_len,
1394                                                retlen, buf);
1395                 if (ret)
1396                         return ret;
1397                 ofs += local_len;
1398                 buf += local_len;
1399                 len -= local_len;
1400
1401                 if (ofs >> cfi->chipshift) {
1402                         chipnum ++;
1403                         ofs = 0;
1404                         if (chipnum == cfi->numchips)
1405                                 return 0;
1406                 }
1407         }
1408
1409         while(len) {
1410                 /* We must not cross write block boundaries */
1411                 int size = wbufsize - (ofs & (wbufsize-1));
1412
1413                 if (size > len)
1414                         size = len;
1415                 ret = do_write_buffer(map, &cfi->chips[chipnum], 
1416                                       ofs, buf, size);
1417                 if (ret)
1418                         return ret;
1419
1420                 ofs += size;
1421                 buf += size;
1422                 (*retlen) += size;
1423                 len -= size;
1424
1425                 if (ofs >> cfi->chipshift) {
1426                         chipnum ++; 
1427                         ofs = 0;
1428                         if (chipnum == cfi->numchips)
1429                                 return 0;
1430                 }
1431         }
1432         return 0;
1433 }
1434
1435 static int do_erase_oneblock(struct map_info *map, struct flchip *chip,
1436                              unsigned long adr, int len, void *thunk)
1437 {
1438         struct cfi_private *cfi = map->fldrv_priv;
1439         map_word status, status_OK;
1440         unsigned long timeo;
1441         int retries = 3;
1442         DECLARE_WAITQUEUE(wait, current);
1443         int ret = 0;
1444
1445         adr += chip->start;
1446
1447         /* Let's determine this according to the interleave only once */
1448         status_OK = CMD(0x80);
1449
1450  retry:
1451         spin_lock(chip->mutex);
1452         ret = get_chip(map, chip, adr, FL_ERASING);
1453         if (ret) {
1454                 spin_unlock(chip->mutex);
1455                 return ret;
1456         }
1457
1458         ENABLE_VPP(map);
1459         /* Clear the status register first */
1460         map_write(map, CMD(0x50), adr);
1461
1462         /* Now erase */
1463         map_write(map, CMD(0x20), adr);
1464         map_write(map, CMD(0xD0), adr);
1465         chip->state = FL_ERASING;
1466         chip->erase_suspended = 0;
1467
1468         spin_unlock(chip->mutex);
1469         INVALIDATE_CACHED_RANGE(map, adr, len);
1470         msleep(chip->erase_time / 2);
1471         spin_lock(chip->mutex);
1472
1473         /* FIXME. Use a timer to check this, and return immediately. */
1474         /* Once the state machine's known to be working I'll do that */
1475
1476         timeo = jiffies + (HZ*20);
1477         for (;;) {
1478                 if (chip->state != FL_ERASING) {
1479                         /* Someone's suspended the erase. Sleep */
1480                         set_current_state(TASK_UNINTERRUPTIBLE);
1481                         add_wait_queue(&chip->wq, &wait);
1482                         spin_unlock(chip->mutex);
1483                         schedule();
1484                         remove_wait_queue(&chip->wq, &wait);
1485                         spin_lock(chip->mutex);
1486                         continue;
1487                 }
1488                 if (chip->erase_suspended) {
1489                         /* This erase was suspended and resumed.
1490                            Adjust the timeout */
1491                         timeo = jiffies + (HZ*20); /* FIXME */
1492                         chip->erase_suspended = 0;
1493                 }
1494
1495                 status = map_read(map, adr);
1496                 if (map_word_andequal(map, status, status_OK, status_OK))
1497                         break;
1498                 
1499                 /* OK Still waiting */
1500                 if (time_after(jiffies, timeo)) {
1501                         map_word Xstatus;
1502                         map_write(map, CMD(0x70), adr);
1503                         chip->state = FL_STATUS;
1504                         Xstatus = map_read(map, adr);
1505                         /* Clear status bits */
1506                         map_write(map, CMD(0x50), adr);
1507                         map_write(map, CMD(0x70), adr);
1508                         printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
1509                                adr, status.x[0], Xstatus.x[0]);
1510                         ret = -EIO;
1511                         goto out;
1512                 }
1513                 
1514                 /* Latency issues. Drop the lock, wait a while and retry */
1515                 spin_unlock(chip->mutex);
1516                 set_current_state(TASK_UNINTERRUPTIBLE);
1517                 schedule_timeout(1);
1518                 spin_lock(chip->mutex);
1519         }
1520
1521         /* We've broken this before. It doesn't hurt to be safe */
1522         map_write(map, CMD(0x70), adr);
1523         chip->state = FL_STATUS;
1524         status = map_read(map, adr);
1525
1526         /* check for lock bit */
1527         if (map_word_bitsset(map, status, CMD(0x3a))) {
1528                 unsigned char chipstatus;
1529
1530                 /* Reset the error bits */
1531                 map_write(map, CMD(0x50), adr);
1532                 map_write(map, CMD(0x70), adr);
1533
1534                 chipstatus = status.x[0];
1535                 if (!map_word_equal(map, status, CMD(chipstatus))) {
1536                         int i, w;
1537                         for (w=0; w<map_words(map); w++) {
1538                                 for (i = 0; i<cfi_interleave(cfi); i++) {
1539                                         chipstatus |= status.x[w] >> (cfi->device_type * 8);
1540                                 }
1541                         }
1542                         printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
1543                                status.x[0], chipstatus);
1544                 }
1545
1546                 if ((chipstatus & 0x30) == 0x30) {
1547                         printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
1548                         ret = -EIO;
1549                 } else if (chipstatus & 0x02) {
1550                         /* Protection bit set */
1551                         ret = -EROFS;
1552                 } else if (chipstatus & 0x8) {
1553                         /* Voltage */
1554                         printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
1555                         ret = -EIO;
1556                 } else if (chipstatus & 0x20) {
1557                         if (retries--) {
1558                                 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
1559                                 timeo = jiffies + HZ;
1560                                 put_chip(map, chip, adr);
1561                                 spin_unlock(chip->mutex);
1562                                 goto retry;
1563                         }
1564                         printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
1565                         ret = -EIO;
1566                 }
1567         } else {
1568                 ret = 0;
1569         }
1570
1571  out:   put_chip(map, chip, adr);
1572         spin_unlock(chip->mutex);
1573         return ret;
1574 }
1575
1576 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1577 {
1578         unsigned long ofs, len;
1579         int ret;
1580
1581         ofs = instr->addr;
1582         len = instr->len;
1583
1584         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1585         if (ret)
1586                 return ret;
1587
1588         instr->state = MTD_ERASE_DONE;
1589         mtd_erase_callback(instr);
1590         
1591         return 0;
1592 }
1593
1594 static void cfi_intelext_sync (struct mtd_info *mtd)
1595 {
1596         struct map_info *map = mtd->priv;
1597         struct cfi_private *cfi = map->fldrv_priv;
1598         int i;
1599         struct flchip *chip;
1600         int ret = 0;
1601
1602         for (i=0; !ret && i<cfi->numchips; i++) {
1603                 chip = &cfi->chips[i];
1604
1605                 spin_lock(chip->mutex);
1606                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1607
1608                 if (!ret) {
1609                         chip->oldstate = chip->state;
1610                         chip->state = FL_SYNCING;
1611                         /* No need to wake_up() on this state change - 
1612                          * as the whole point is that nobody can do anything
1613                          * with the chip now anyway.
1614                          */
1615                 }
1616                 spin_unlock(chip->mutex);
1617         }
1618
1619         /* Unlock the chips again */
1620
1621         for (i--; i >=0; i--) {
1622                 chip = &cfi->chips[i];
1623
1624                 spin_lock(chip->mutex);
1625                 
1626                 if (chip->state == FL_SYNCING) {
1627                         chip->state = chip->oldstate;
1628                         wake_up(&chip->wq);
1629                 }
1630                 spin_unlock(chip->mutex);
1631         }
1632 }
1633
1634 #ifdef DEBUG_LOCK_BITS
1635 static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip,
1636                                        unsigned long adr, int len, void *thunk)
1637 {
1638         struct cfi_private *cfi = map->fldrv_priv;
1639         int status, ofs_factor = cfi->interleave * cfi->device_type;
1640
1641         cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1642         chip->state = FL_JEDEC_QUERY;
1643         status = cfi_read_query(map, adr+(2*ofs_factor));
1644         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1645                adr, status);
1646         return 0;
1647 }
1648 #endif
1649
/* Thunk selector values passed through cfi_varsize_frob() to
 * do_xxlock_oneblock() to choose which operation to perform. */
#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1652
1653 static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1654                               unsigned long adr, int len, void *thunk)
1655 {
1656         struct cfi_private *cfi = map->fldrv_priv;
1657         map_word status, status_OK;
1658         unsigned long timeo = jiffies + HZ;
1659         int ret;
1660
1661         adr += chip->start;
1662
1663         /* Let's determine this according to the interleave only once */
1664         status_OK = CMD(0x80);
1665
1666         spin_lock(chip->mutex);
1667         ret = get_chip(map, chip, adr, FL_LOCKING);
1668         if (ret) {
1669                 spin_unlock(chip->mutex);
1670                 return ret;
1671         }
1672
1673         ENABLE_VPP(map);
1674         map_write(map, CMD(0x60), adr);
1675
1676         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1677                 map_write(map, CMD(0x01), adr);
1678                 chip->state = FL_LOCKING;
1679         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1680                 map_write(map, CMD(0xD0), adr);
1681                 chip->state = FL_UNLOCKING;
1682         } else
1683                 BUG();
1684
1685         spin_unlock(chip->mutex);
1686         schedule_timeout(HZ);
1687         spin_lock(chip->mutex);
1688
1689         /* FIXME. Use a timer to check this, and return immediately. */
1690         /* Once the state machine's known to be working I'll do that */
1691
1692         timeo = jiffies + (HZ*20);
1693         for (;;) {
1694
1695                 status = map_read(map, adr);
1696                 if (map_word_andequal(map, status, status_OK, status_OK))
1697                         break;
1698                 
1699                 /* OK Still waiting */
1700                 if (time_after(jiffies, timeo)) {
1701                         map_word Xstatus;
1702                         map_write(map, CMD(0x70), adr);
1703                         chip->state = FL_STATUS;
1704                         Xstatus = map_read(map, adr);
1705                         printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
1706                                status.x[0], Xstatus.x[0]);
1707                         put_chip(map, chip, adr);
1708                         spin_unlock(chip->mutex);
1709                         return -EIO;
1710                 }
1711                 
1712                 /* Latency issues. Drop the lock, wait a while and retry */
1713                 spin_unlock(chip->mutex);
1714                 cfi_udelay(1);
1715                 spin_lock(chip->mutex);
1716         }
1717         
1718         /* Done and happy. */
1719         chip->state = FL_STATUS;
1720         put_chip(map, chip, adr);
1721         spin_unlock(chip->mutex);
1722         return 0;
1723 }
1724
1725 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1726 {
1727         int ret;
1728
1729 #ifdef DEBUG_LOCK_BITS
1730         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1731                __FUNCTION__, ofs, len);
1732         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1733                 ofs, len, 0);
1734 #endif
1735
1736         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, 
1737                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1738         
1739 #ifdef DEBUG_LOCK_BITS
1740         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1741                __FUNCTION__, ret);
1742         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1743                 ofs, len, 0);
1744 #endif
1745
1746         return ret;
1747 }
1748
1749 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1750 {
1751         int ret;
1752
1753 #ifdef DEBUG_LOCK_BITS
1754         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1755                __FUNCTION__, ofs, len);
1756         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1757                 ofs, len, 0);
1758 #endif
1759
1760         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1761                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1762         
1763 #ifdef DEBUG_LOCK_BITS
1764         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1765                __FUNCTION__, ret);
1766         cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 
1767                 ofs, len, 0);
1768 #endif
1769         
1770         return ret;
1771 }
1772
1773 static int cfi_intelext_suspend(struct mtd_info *mtd)
1774 {
1775         struct map_info *map = mtd->priv;
1776         struct cfi_private *cfi = map->fldrv_priv;
1777         int i;
1778         struct flchip *chip;
1779         int ret = 0;
1780
1781         for (i=0; !ret && i<cfi->numchips; i++) {
1782                 chip = &cfi->chips[i];
1783
1784                 spin_lock(chip->mutex);
1785
1786                 switch (chip->state) {
1787                 case FL_READY:
1788                 case FL_STATUS:
1789                 case FL_CFI_QUERY:
1790                 case FL_JEDEC_QUERY:
1791                         if (chip->oldstate == FL_READY) {
1792                                 chip->oldstate = chip->state;
1793                                 chip->state = FL_PM_SUSPENDED;
1794                                 /* No need to wake_up() on this state change - 
1795                                  * as the whole point is that nobody can do anything
1796                                  * with the chip now anyway.
1797                                  */
1798                         } else {
1799                                 /* There seems to be an operation pending. We must wait for it. */
1800                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
1801                                 ret = -EAGAIN;
1802                         }
1803                         break;
1804                 default:
1805                         /* Should we actually wait? Once upon a time these routines weren't
1806                            allowed to. Or should we return -EAGAIN, because the upper layers
1807                            ought to have already shut down anything which was using the device
1808                            anyway? The latter for now. */
1809                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
1810                         ret = -EAGAIN;
1811                 case FL_PM_SUSPENDED:
1812                         break;
1813                 }
1814                 spin_unlock(chip->mutex);
1815         }
1816
1817         /* Unlock the chips again */
1818
1819         if (ret) {
1820                 for (i--; i >=0; i--) {
1821                         chip = &cfi->chips[i];
1822                         
1823                         spin_lock(chip->mutex);
1824                         
1825                         if (chip->state == FL_PM_SUSPENDED) {
1826                                 /* No need to force it into a known state here,
1827                                    because we're returning failure, and it didn't
1828                                    get power cycled */
1829                                 chip->state = chip->oldstate;
1830                                 chip->oldstate = FL_READY;
1831                                 wake_up(&chip->wq);
1832                         }
1833                         spin_unlock(chip->mutex);
1834                 }
1835         } 
1836         
1837         return ret;
1838 }
1839
1840 static void cfi_intelext_resume(struct mtd_info *mtd)
1841 {
1842         struct map_info *map = mtd->priv;
1843         struct cfi_private *cfi = map->fldrv_priv;
1844         int i;
1845         struct flchip *chip;
1846
1847         for (i=0; i<cfi->numchips; i++) {
1848         
1849                 chip = &cfi->chips[i];
1850
1851                 spin_lock(chip->mutex);
1852                 
1853                 /* Go to known state. Chip may have been power cycled */
1854                 if (chip->state == FL_PM_SUSPENDED) {
1855                         map_write(map, CMD(0xFF), cfi->chips[i].start);
1856                         chip->oldstate = chip->state = FL_READY;
1857                         wake_up(&chip->wq);
1858                 }
1859
1860                 spin_unlock(chip->mutex);
1861         }
1862 }
1863
1864 static void cfi_intelext_destroy(struct mtd_info *mtd)
1865 {
1866         struct map_info *map = mtd->priv;
1867         struct cfi_private *cfi = map->fldrv_priv;
1868         kfree(cfi->cmdset_priv);
1869         kfree(cfi->cfiq);
1870         kfree(cfi->chips[0].priv);
1871         kfree(cfi);
1872         kfree(mtd->eraseregions);
1873 }
1874
/* Inter-module names this driver is published under; both resolve to
 * the same cfi_cmdset_0001 entry point (see cfi_intelext_init). */
static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";
1877
/* Module init: register the command-set probe under both names.
 * NOTE(review): the 0003 name is deliberately bound to the 0001
 * implementation - presumably the 0x0003 command set is handled as an
 * alias of the Intel extended set; confirm against the probe callers. */
int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}
1884
/* Module exit: drop both inter-module registrations made at init. */
static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}
1890
/* Standard module entry/exit hookup and metadata. */
module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");