/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.154 2004/08/09 13:19:43 dwmw2 Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 */
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
26 #include <asm/byteorder.h>
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/mtd/map.h>
33 #include <linux/mtd/mtd.h>
34 #include <linux/mtd/compatmac.h>
35 #include <linux/mtd/cfi.h>
37 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39 // debugging, turns off buffer write mode if set to 1
40 #define FORCE_WORD_WRITE 0
42 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
43 //static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
44 //static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
45 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
46 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
47 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
48 static void cfi_intelext_sync (struct mtd_info *);
49 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
50 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
51 static int cfi_intelext_suspend (struct mtd_info *);
52 static void cfi_intelext_resume (struct mtd_info *);
54 static void cfi_intelext_destroy(struct mtd_info *);
56 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
58 static struct mtd_info *cfi_intelext_setup (struct map_info *);
59 static int cfi_intelext_partition_fixup(struct map_info *, struct cfi_private **);
61 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
62 size_t *retlen, u_char **mtdbuf);
63 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
/*
 *  *********** SETUP AND PROBE BITS  ***********
 */
71 static struct mtd_chip_driver cfi_intelext_chipdrv = {
72 .probe = NULL, /* Not usable directly */
73 .destroy = cfi_intelext_destroy,
74 .name = "cfi_cmdset_0001",
78 /* #define DEBUG_LOCK_BITS */
79 /* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
/* Dump the Intel extended query feature/command-support bits in
 * human-readable form.  Debug-only; compiled out by default. */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	/* Bits 10..31 are reserved/unknown to this driver. */
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	/* Voltages are encoded BCD-style: high nibble = volts, low = tenths. */
	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct map_info *map, void* param)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* cmdset_priv holds the Intel extended query table here; the
	 * original listing declared it as struct cfi_pri_amdstd, which
	 * is the wrong vendor's layout for the member touched below. */
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	/* Clear bit 0: "program allowed during erase suspend". */
	extp->SuspendCmdSupport &= ~1;
}
#endif
137 static void fixup_st_m28w320ct(struct map_info *map, void* param)
139 struct cfi_private *cfi = map->fldrv_priv;
141 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
142 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
145 static void fixup_st_m28w320cb(struct map_info *map, void* param)
147 struct cfi_private *cfi = map->fldrv_priv;
149 /* Note this is done after the region info is endian swapped */
150 cfi->cfiq->EraseRegionInfo[1] =
151 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
154 static struct cfi_fixup fixup_table[] = {
155 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
157 CFI_MFR_ANY, CFI_ID_ANY,
158 fixup_intel_strataflash, NULL
162 0x0020, /* STMicroelectronics */
163 0x00ba, /* M28W320CT */
164 fixup_st_m28w320ct, NULL
166 0x0020, /* STMicroelectronics */
167 0x00bb, /* M28W320CB */
168 fixup_st_m28w320cb, NULL
/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
181 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
183 struct cfi_private *cfi = map->fldrv_priv;
186 if (cfi->cfi_mode == CFI_MODE_CFI) {
188 * It's a real CFI chip, not one for which the probe
189 * routine faked a CFI structure. So we read the feature
192 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
193 struct cfi_pri_intelext *extp;
195 extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "Intel/Sharp");
199 /* Do some byteswapping if necessary */
200 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
201 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
202 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
204 /* Install our own private info structure */
205 cfi->cmdset_priv = extp;
207 cfi_fixup(map, fixup_table);
209 #ifdef DEBUG_CFI_FEATURES
210 /* Tell the user about it in lots of lovely detail */
211 cfi_tell_features(extp);
214 if(extp->SuspendCmdSupport & 1) {
215 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
219 for (i=0; i< cfi->numchips; i++) {
220 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
221 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
222 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
223 cfi->chips[i].ref_point_counter = 0;
226 map->fldrv = &cfi_intelext_chipdrv;
228 return cfi_intelext_setup(map);
231 static struct mtd_info *cfi_intelext_setup(struct map_info *map)
233 struct cfi_private *cfi = map->fldrv_priv;
234 struct mtd_info *mtd;
235 unsigned long offset = 0;
237 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
239 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
240 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
243 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
247 memset(mtd, 0, sizeof(*mtd));
249 mtd->type = MTD_NORFLASH;
250 mtd->size = devsize * cfi->numchips;
252 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
253 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
254 * mtd->numeraseregions, GFP_KERNEL);
255 if (!mtd->eraseregions) {
256 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
260 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
261 unsigned long ernum, ersize;
262 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
263 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
265 if (mtd->erasesize < ersize) {
266 mtd->erasesize = ersize;
268 for (j=0; j<cfi->numchips; j++) {
269 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
270 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
271 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
273 offset += (ersize * ernum);
276 if (offset != devsize) {
278 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
282 for (i=0; i<mtd->numeraseregions;i++){
283 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
284 i,mtd->eraseregions[i].offset,
285 mtd->eraseregions[i].erasesize,
286 mtd->eraseregions[i].numblocks);
289 /* Also select the correct geometry setup too */
290 mtd->erase = cfi_intelext_erase_varsize;
291 mtd->read = cfi_intelext_read;
293 if (map_is_linear(map)) {
294 mtd->point = cfi_intelext_point;
295 mtd->unpoint = cfi_intelext_unpoint;
298 if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) {
299 printk(KERN_INFO "Using buffer write method\n" );
300 mtd->write = cfi_intelext_write_buffers;
302 printk(KERN_INFO "Using word write method\n" );
303 mtd->write = cfi_intelext_write_words;
306 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
307 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
309 mtd->sync = cfi_intelext_sync;
310 mtd->lock = cfi_intelext_lock;
311 mtd->unlock = cfi_intelext_unlock;
312 mtd->suspend = cfi_intelext_suspend;
313 mtd->resume = cfi_intelext_resume;
314 mtd->flags = MTD_CAP_NORFLASH;
315 map->fldrv = &cfi_intelext_chipdrv;
316 mtd->name = map->name;
318 /* This function has the potential to distort the reality
319 a bit and therefore should be called last. */
320 if (cfi_intelext_partition_fixup(map, &cfi) != 0)
323 __module_get(THIS_MODULE);
328 if(mtd->eraseregions)
329 kfree(mtd->eraseregions);
332 kfree(cfi->cmdset_priv);
336 static int cfi_intelext_partition_fixup(struct map_info *map,
337 struct cfi_private **pcfi)
339 struct cfi_private *cfi = *pcfi;
340 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
343 * Probing of multi-partition flash ships.
345 * This is extremely crude at the moment and should probably be
346 * extracted entirely from the Intel extended query data instead.
347 * Right now a L18 flash is assumed if multiple operations is
350 * To support multiple partitions when available, we simply arrange
351 * for each of them to have their own flchip structure even if they
352 * are on the same physical chip. This means completely recreating
353 * a new cfi_private structure right here which is a blatent code
354 * layering violation, but this is still the least intrusive
355 * arrangement at this point. This can be rearranged in the future
356 * if someone feels motivated enough. --nico
358 if (extp && extp->FeatureSupport & (1 << 9)) {
359 struct cfi_private *newcfi;
361 struct flchip_shared *shared;
362 int numparts, partshift, numvirtchips, i, j;
365 * The L18 flash memory array is divided
366 * into multiple 8-Mbit partitions.
368 numparts = 1 << (cfi->cfiq->DevSize - 20);
369 partshift = 20 + __ffs(cfi->interleave);
370 numvirtchips = cfi->numchips * numparts;
372 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
375 shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
380 memcpy(newcfi, cfi, sizeof(struct cfi_private));
381 newcfi->numchips = numvirtchips;
382 newcfi->chipshift = partshift;
384 chip = &newcfi->chips[0];
385 for (i = 0; i < cfi->numchips; i++) {
386 shared[i].writing = shared[i].erasing = NULL;
387 spin_lock_init(&shared[i].lock);
388 for (j = 0; j < numparts; j++) {
389 *chip = cfi->chips[i];
390 chip->start += j << partshift;
391 chip->priv = &shared[i];
392 /* those should be reset too since
393 they create memory references. */
394 init_waitqueue_head(&chip->wq);
395 spin_lock_init(&chip->_spinlock);
396 chip->mutex = &chip->_spinlock;
401 printk(KERN_DEBUG "%s: %d sets of %d interleaved chips "
402 "--> %d partitions of %#x bytes\n",
403 map->name, cfi->numchips, cfi->interleave,
404 newcfi->numchips, 1<<newcfi->chipshift);
406 map->fldrv_priv = newcfi;
/*
 *  *********** CHIP ACCESS FUNCTIONS  ***********
 */
418 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
420 DECLARE_WAITQUEUE(wait, current);
421 struct cfi_private *cfi = map->fldrv_priv;
422 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
424 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
427 timeo = jiffies + HZ;
429 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
431 * OK. We have possibility for contension on the write/erase
432 * operations which are global to the real chip and not per
433 * partition. So let's fight it over in the partition which
434 * currently has authority on the operation.
436 * The rules are as follows:
438 * - any write operation must own shared->writing.
440 * - any erase operation must own _both_ shared->writing and
443 * - contension arbitration is handled in the owner's context.
445 * The 'shared' struct can be read when its lock is taken.
446 * However any writes to it can only be made when the current
447 * owner's lock is also held.
449 struct flchip_shared *shared = chip->priv;
450 struct flchip *contender;
451 spin_lock(&shared->lock);
452 contender = shared->writing;
453 if (contender && contender != chip) {
455 * The engine to perform desired operation on this
456 * partition is already in use by someone else.
457 * Let's fight over it in the context of the chip
458 * currently using it. If it is possible to suspend,
459 * that other partition will do just that, otherwise
460 * it'll happily send us to sleep. In any case, when
461 * get_chip returns success we're clear to go ahead.
463 int ret = spin_trylock(contender->mutex);
464 spin_unlock(&shared->lock);
467 spin_unlock(chip->mutex);
468 ret = get_chip(map, contender, contender->start, mode);
469 spin_lock(chip->mutex);
471 spin_unlock(contender->mutex);
474 timeo = jiffies + HZ;
475 spin_lock(&shared->lock);
479 shared->writing = chip;
480 if (mode == FL_ERASING)
481 shared->erasing = chip;
482 if (contender && contender != chip)
483 spin_unlock(contender->mutex);
484 spin_unlock(&shared->lock);
487 switch (chip->state) {
491 status = map_read(map, adr);
492 if (map_word_andequal(map, status, status_OK, status_OK))
495 /* At this point we're fine with write operations
496 in other partitions as they don't conflict. */
497 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
500 if (time_after(jiffies, timeo)) {
501 printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
505 spin_unlock(chip->mutex);
507 spin_lock(chip->mutex);
508 /* Someone else might have been playing with it. */
518 if (!(cfip->FeatureSupport & 2) ||
519 !(mode == FL_READY || mode == FL_POINT ||
520 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
525 map_write(map, CMD(0xB0), adr);
527 /* If the flash has finished erasing, then 'erase suspend'
528 * appears to make some (28F320) flash devices switch to
529 * 'read' mode. Make sure that we switch to 'read status'
530 * mode so we get the right data. --rmk
532 map_write(map, CMD(0x70), adr);
533 chip->oldstate = FL_ERASING;
534 chip->state = FL_ERASE_SUSPENDING;
535 chip->erase_suspended = 1;
537 status = map_read(map, adr);
538 if (map_word_andequal(map, status, status_OK, status_OK))
541 if (time_after(jiffies, timeo)) {
542 /* Urgh. Resume and pretend we weren't here. */
543 map_write(map, CMD(0xd0), adr);
544 /* Make sure we're in 'read status' mode if it had finished */
545 map_write(map, CMD(0x70), adr);
546 chip->state = FL_ERASING;
547 chip->oldstate = FL_READY;
548 printk(KERN_ERR "Chip not ready after erase "
549 "suspended: status = 0x%lx\n", status.x[0]);
553 spin_unlock(chip->mutex);
555 spin_lock(chip->mutex);
556 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
557 So we can just loop here. */
559 chip->state = FL_STATUS;
563 /* Only if there's no operation suspended... */
564 if (mode == FL_READY && chip->oldstate == FL_READY)
569 set_current_state(TASK_UNINTERRUPTIBLE);
570 add_wait_queue(&chip->wq, &wait);
571 spin_unlock(chip->mutex);
573 remove_wait_queue(&chip->wq, &wait);
574 spin_lock(chip->mutex);
579 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
581 struct cfi_private *cfi = map->fldrv_priv;
584 struct flchip_shared *shared = chip->priv;
585 spin_lock(&shared->lock);
586 if (shared->writing == chip) {
587 /* We own the ability to write, but we're done */
588 shared->writing = shared->erasing;
589 if (shared->writing && shared->writing != chip) {
590 /* give back ownership to who we loaned it from */
591 struct flchip *loaner = shared->writing;
592 spin_lock(loaner->mutex);
593 spin_unlock(&shared->lock);
594 spin_unlock(chip->mutex);
595 put_chip(map, loaner, loaner->start);
596 spin_lock(chip->mutex);
597 spin_unlock(loaner->mutex);
599 if (chip->oldstate != FL_ERASING) {
600 shared->erasing = NULL;
601 if (chip->oldstate != FL_WRITING)
602 shared->writing = NULL;
604 spin_unlock(&shared->lock);
607 spin_unlock(&shared->lock);
611 switch(chip->oldstate) {
613 chip->state = chip->oldstate;
614 /* What if one interleaved chip has finished and the
615 other hasn't? The old code would leave the finished
616 one in READY mode. That's bad, and caused -EROFS
617 errors to be returned from do_erase_oneblock because
618 that's the only bit it checked for at the time.
619 As the state machine appears to explicitly allow
620 sending the 0x70 (Read Status) command to an erasing
621 chip and expecting it to be ignored, that's what we
623 map_write(map, CMD(0xd0), adr);
624 map_write(map, CMD(0x70), adr);
625 chip->oldstate = FL_READY;
626 chip->state = FL_ERASING;
632 /* We should really make set_vpp() count, rather than doing this */
636 printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
641 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
643 unsigned long cmd_addr;
644 struct cfi_private *cfi = map->fldrv_priv;
649 /* Ensure cmd read/writes are aligned. */
650 cmd_addr = adr & ~(map_bankwidth(map)-1);
652 spin_lock(chip->mutex);
654 ret = get_chip(map, chip, cmd_addr, FL_POINT);
657 if (chip->state != FL_POINT && chip->state != FL_READY)
658 map_write(map, CMD(0xff), cmd_addr);
660 chip->state = FL_POINT;
661 chip->ref_point_counter++;
663 spin_unlock(chip->mutex);
668 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
670 struct map_info *map = mtd->priv;
671 struct cfi_private *cfi = map->fldrv_priv;
676 if (!map->virt || (from + len > mtd->size))
679 *mtdbuf = (void *)map->virt + from;
682 /* Now lock the chip(s) to POINT state */
684 /* ofs: offset within the first chip that the first read should start */
685 chipnum = (from >> cfi->chipshift);
686 ofs = from - (chipnum << cfi->chipshift);
689 unsigned long thislen;
691 if (chipnum >= cfi->numchips)
694 if ((len + ofs -1) >> cfi->chipshift)
695 thislen = (1<<cfi->chipshift) - ofs;
699 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
712 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
714 struct map_info *map = mtd->priv;
715 struct cfi_private *cfi = map->fldrv_priv;
719 /* Now unlock the chip(s) POINT state */
721 /* ofs: offset within the first chip that the first read should start */
722 chipnum = (from >> cfi->chipshift);
723 ofs = from - (chipnum << cfi->chipshift);
726 unsigned long thislen;
729 chip = &cfi->chips[chipnum];
730 if (chipnum >= cfi->numchips)
733 if ((len + ofs -1) >> cfi->chipshift)
734 thislen = (1<<cfi->chipshift) - ofs;
738 spin_lock(chip->mutex);
739 if (chip->state == FL_POINT) {
740 chip->ref_point_counter--;
741 if(chip->ref_point_counter == 0)
742 chip->state = FL_READY;
744 printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
746 put_chip(map, chip, chip->start);
747 spin_unlock(chip->mutex);
755 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
757 unsigned long cmd_addr;
758 struct cfi_private *cfi = map->fldrv_priv;
763 /* Ensure cmd read/writes are aligned. */
764 cmd_addr = adr & ~(map_bankwidth(map)-1);
766 spin_lock(chip->mutex);
767 ret = get_chip(map, chip, cmd_addr, FL_READY);
769 spin_unlock(chip->mutex);
773 if (chip->state != FL_POINT && chip->state != FL_READY) {
774 map_write(map, CMD(0xff), cmd_addr);
776 chip->state = FL_READY;
779 map_copy_from(map, buf, adr, len);
781 put_chip(map, chip, cmd_addr);
783 spin_unlock(chip->mutex);
787 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
789 struct map_info *map = mtd->priv;
790 struct cfi_private *cfi = map->fldrv_priv;
795 /* ofs: offset within the first chip that the first read should start */
796 chipnum = (from >> cfi->chipshift);
797 ofs = from - (chipnum << cfi->chipshift);
802 unsigned long thislen;
804 if (chipnum >= cfi->numchips)
807 if ((len + ofs -1) >> cfi->chipshift)
808 thislen = (1<<cfi->chipshift) - ofs;
812 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
826 static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
828 struct map_info *map = mtd->priv;
829 struct cfi_private *cfi = map->fldrv_priv;
830 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
832 int ofs_factor = cfi->interleave * cfi->device_type;
837 chip_num = ((unsigned int)from/reg_sz);
838 offst = from - (reg_sz*chip_num)+base_offst;
841 /* Calculate which chip & protection register offset we need */
843 if (chip_num >= cfi->numchips)
846 chip = &cfi->chips[chip_num];
848 spin_lock(chip->mutex);
849 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
851 spin_unlock(chip->mutex);
852 return (len-count)?:ret;
855 if (chip->state != FL_JEDEC_QUERY) {
856 map_write(map, CMD(0x90), chip->start);
857 chip->state = FL_JEDEC_QUERY;
860 while (count && ((offst-base_offst) < reg_sz)) {
861 *buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
867 put_chip(map, chip, chip->start);
868 spin_unlock(chip->mutex);
870 /* Move on to the next chip */
879 static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
881 struct map_info *map = mtd->priv;
882 struct cfi_private *cfi = map->fldrv_priv;
883 struct cfi_pri_intelext *extp=cfi->cmdset_priv;
884 int base_offst,reg_sz;
886 /* Check that we actually have some protection registers */
887 if(!(extp->FeatureSupport&64)){
888 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
892 base_offst=(1<<extp->FactProtRegSize);
893 reg_sz=(1<<extp->UserProtRegSize);
895 return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
898 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
900 struct map_info *map = mtd->priv;
901 struct cfi_private *cfi = map->fldrv_priv;
902 struct cfi_pri_intelext *extp=cfi->cmdset_priv;
903 int base_offst,reg_sz;
905 /* Check that we actually have some protection registers */
906 if(!(extp->FeatureSupport&64)){
907 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
912 reg_sz=(1<<extp->FactProtRegSize);
914 return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
918 static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
920 struct cfi_private *cfi = map->fldrv_priv;
921 map_word status, status_OK;
927 /* Let's determine this according to the interleave only once */
928 status_OK = CMD(0x80);
930 spin_lock(chip->mutex);
931 ret = get_chip(map, chip, adr, FL_WRITING);
933 spin_unlock(chip->mutex);
938 map_write(map, CMD(0x40), adr);
939 map_write(map, datum, adr);
940 chip->state = FL_WRITING;
942 spin_unlock(chip->mutex);
943 INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
944 cfi_udelay(chip->word_write_time);
945 spin_lock(chip->mutex);
947 timeo = jiffies + (HZ/2);
950 if (chip->state != FL_WRITING) {
951 /* Someone's suspended the write. Sleep */
952 DECLARE_WAITQUEUE(wait, current);
954 set_current_state(TASK_UNINTERRUPTIBLE);
955 add_wait_queue(&chip->wq, &wait);
956 spin_unlock(chip->mutex);
958 remove_wait_queue(&chip->wq, &wait);
959 timeo = jiffies + (HZ / 2); /* FIXME */
960 spin_lock(chip->mutex);
964 status = map_read(map, adr);
965 if (map_word_andequal(map, status, status_OK, status_OK))
968 /* OK Still waiting */
969 if (time_after(jiffies, timeo)) {
970 chip->state = FL_STATUS;
971 printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
976 /* Latency issues. Drop the lock, wait a while and retry */
977 spin_unlock(chip->mutex);
980 spin_lock(chip->mutex);
983 chip->word_write_time--;
984 if (!chip->word_write_time)
985 chip->word_write_time++;
988 chip->word_write_time++;
990 /* Done and happy. */
991 chip->state = FL_STATUS;
992 /* check for lock bit */
993 if (map_word_bitsset(map, status, CMD(0x02))) {
995 map_write(map, CMD(0x50), adr);
996 /* put back into read status register mode */
997 map_write(map, CMD(0x70), adr);
1001 put_chip(map, chip, adr);
1002 spin_unlock(chip->mutex);
1008 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1010 struct map_info *map = mtd->priv;
1011 struct cfi_private *cfi = map->fldrv_priv;
1020 chipnum = to >> cfi->chipshift;
1021 ofs = to - (chipnum << cfi->chipshift);
1023 /* If it's not bus-aligned, do the first byte write */
1024 if (ofs & (map_bankwidth(map)-1)) {
1025 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1026 int gap = ofs - bus_ofs;
1030 n = min_t(int, len, map_bankwidth(map)-gap);
1031 datum = map_word_ff(map);
1032 datum = map_word_load_partial(map, datum, buf, gap, n);
1034 ret = do_write_oneword(map, &cfi->chips[chipnum],
1044 if (ofs >> cfi->chipshift) {
1047 if (chipnum == cfi->numchips)
1052 while(len >= map_bankwidth(map)) {
1053 map_word datum = map_word_load(map, buf);
1055 ret = do_write_oneword(map, &cfi->chips[chipnum],
1060 ofs += map_bankwidth(map);
1061 buf += map_bankwidth(map);
1062 (*retlen) += map_bankwidth(map);
1063 len -= map_bankwidth(map);
1065 if (ofs >> cfi->chipshift) {
1068 if (chipnum == cfi->numchips)
1073 if (len & (map_bankwidth(map)-1)) {
1076 datum = map_word_ff(map);
1077 datum = map_word_load_partial(map, datum, buf, 0, len);
1079 ret = do_write_oneword(map, &cfi->chips[chipnum],
1091 static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
1092 unsigned long adr, const u_char *buf, int len)
1094 struct cfi_private *cfi = map->fldrv_priv;
1095 map_word status, status_OK;
1096 unsigned long cmd_adr, timeo;
1097 int wbufsize, z, ret=0, bytes, words;
1099 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1101 cmd_adr = adr & ~(wbufsize-1);
1103 /* Let's determine this according to the interleave only once */
1104 status_OK = CMD(0x80);
1106 spin_lock(chip->mutex);
1107 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1109 spin_unlock(chip->mutex);
1113 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1114 [...], the device will not accept any more Write to Buffer commands".
1115 So we must check here and reset those bits if they're set. Otherwise
1116 we're just pissing in the wind */
1117 if (chip->state != FL_STATUS)
1118 map_write(map, CMD(0x70), cmd_adr);
1119 status = map_read(map, cmd_adr);
1120 if (map_word_bitsset(map, status, CMD(0x30))) {
1121 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1122 map_write(map, CMD(0x50), cmd_adr);
1123 map_write(map, CMD(0x70), cmd_adr);
1127 chip->state = FL_WRITING_TO_BUFFER;
1131 map_write(map, CMD(0xe8), cmd_adr);
1133 status = map_read(map, cmd_adr);
1134 if (map_word_andequal(map, status, status_OK, status_OK))
1137 spin_unlock(chip->mutex);
1139 spin_lock(chip->mutex);
1142 /* Argh. Not ready for write to buffer */
1143 map_write(map, CMD(0x70), cmd_adr);
1144 chip->state = FL_STATUS;
1145 printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1146 status.x[0], map_read(map, cmd_adr).x[0]);
1147 /* Odd. Clear status bits */
1148 map_write(map, CMD(0x50), cmd_adr);
1149 map_write(map, CMD(0x70), cmd_adr);
1155 /* Write length of data to come */
1156 bytes = len & (map_bankwidth(map)-1);
1157 words = len / map_bankwidth(map);
1158 map_write(map, CMD(words - !bytes), cmd_adr );
1162 while(z < words * map_bankwidth(map)) {
1163 map_word datum = map_word_load(map, buf);
1164 map_write(map, datum, adr+z);
1166 z += map_bankwidth(map);
1167 buf += map_bankwidth(map);
1173 datum = map_word_ff(map);
1174 datum = map_word_load_partial(map, datum, buf, 0, bytes);
1175 map_write(map, datum, adr+z);
1179 map_write(map, CMD(0xd0), cmd_adr);
1180 chip->state = FL_WRITING;
1182 spin_unlock(chip->mutex);
1183 INVALIDATE_CACHED_RANGE(map, adr, len);
1184 cfi_udelay(chip->buffer_write_time);
1185 spin_lock(chip->mutex);
1187 timeo = jiffies + (HZ/2);
1190 if (chip->state != FL_WRITING) {
1191 /* Someone's suspended the write. Sleep */
1192 DECLARE_WAITQUEUE(wait, current);
1193 set_current_state(TASK_UNINTERRUPTIBLE);
1194 add_wait_queue(&chip->wq, &wait);
1195 spin_unlock(chip->mutex);
1197 remove_wait_queue(&chip->wq, &wait);
1198 timeo = jiffies + (HZ / 2); /* FIXME */
1199 spin_lock(chip->mutex);
1203 status = map_read(map, cmd_adr);
1204 if (map_word_andequal(map, status, status_OK, status_OK))
1207 /* OK Still waiting */
1208 if (time_after(jiffies, timeo)) {
1209 chip->state = FL_STATUS;
1210 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1215 /* Latency issues. Drop the lock, wait a while and retry */
1216 spin_unlock(chip->mutex);
1219 spin_lock(chip->mutex);
1222 chip->buffer_write_time--;
1223 if (!chip->buffer_write_time)
1224 chip->buffer_write_time++;
1227 chip->buffer_write_time++;
1229 /* Done and happy. */
1230 chip->state = FL_STATUS;
1232 /* check for lock bit */
1233 if (map_word_bitsset(map, status, CMD(0x02))) {
1235 map_write(map, CMD(0x50), cmd_adr);
1236 /* put back into read status register mode */
1237 map_write(map, CMD(0x70), adr);
1242 put_chip(map, chip, cmd_adr);
1243 spin_unlock(chip->mutex);
/*
 * cfi_intelext_write_buffers - MTD write entry point using the chip's
 * write-to-buffer command path.
 *
 * Splits the request at bus-width, write-buffer and chip boundaries and
 * hands each aligned chunk to do_write_buffer().
 *
 * NOTE(review): this listing is elided; declarations (chipnum, ofs, ret)
 * and parts of the loop framing are not visible here.
 */
1247 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1248 size_t len, size_t *retlen, const u_char *buf)
1250 struct map_info *map = mtd->priv;
1251 struct cfi_private *cfi = map->fldrv_priv;
/* Hardware write-buffer size in bytes: interleave * 2^MaxBufWriteSize */
1252 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
/* Locate the chip containing 'to' and the offset within that chip */
1261 chipnum = to >> cfi->chipshift;
1262 ofs = to - (chipnum << cfi->chipshift);
1264 /* If it's not bus-aligned, do the first word write */
1265 if (ofs & (map_bankwidth(map)-1)) {
/* Number of bytes needed to reach the next bus-width boundary */
1266 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1267 if (local_len > len)
/* Unaligned head is written with the plain word-write path */
1269 ret = cfi_intelext_write_words(mtd, to, local_len,
/* Advance to the next chip if the head write crossed a chip boundary */
1277 if (ofs >> cfi->chipshift) {
1280 if (chipnum == cfi->numchips)
1286 /* We must not cross write block boundaries */
1287 int size = wbufsize - (ofs & (wbufsize-1));
1291 ret = do_write_buffer(map, &cfi->chips[chipnum],
/* Move on to the next chip once this one's address space is exhausted */
1301 if (ofs >> cfi->chipshift) {
1304 if (chipnum == cfi->numchips)
/* Per-erase-block callback type applied by cfi_intelext_varsize_frob() */
1311 typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
1312 unsigned long adr, int len, void *thunk);
/*
 * cfi_intelext_varsize_frob - apply 'frob' to every erase block covering
 * [ofs, ofs+len) on a device with variable-size erase regions.
 *
 * Validates the range against the device size and checks that both ends
 * are aligned to the erase size of the region they fall in, then walks
 * block by block, switching erase regions and chips as the address
 * advances.  'thunk' is passed through to the callback unchanged.
 *
 * NOTE(review): listing is elided; some declarations (i, adr) and loop
 * framing are not visible here.
 */
1314 static int cfi_intelext_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
1315 loff_t ofs, size_t len, void *thunk)
1317 struct map_info *map = mtd->priv;
1318 struct cfi_private *cfi = map->fldrv_priv;
1320 int chipnum, ret = 0;
1322 struct mtd_erase_region_info *regions = mtd->eraseregions;
/* Reject ranges that start or end beyond the device */
1324 if (ofs > mtd->size)
1327 if ((len + ofs) > mtd->size)
1330 /* Check that both start and end of the requested erase are
1331 * aligned with the erasesize at the appropriate addresses.
1336 /* Skip all erase regions which are ended before the start of
1337 the requested erase. Actually, to save on the calculations,
1338 we skip to the first erase region which starts after the
1339 start of the requested erase, and then go back one.
1342 while (i < mtd->numeraseregions && ofs >= regions[i].offset)
1346 /* OK, now i is pointing at the erase region in which this
1347 erase request starts. Check the start of the requested
1348 erase range is aligned with the erase size which is in
1352 if (ofs & (regions[i].erasesize-1))
1355 /* Remember the erase region we start on */
1358 /* Next, check that the end of the requested erase is aligned
1359 * with the erase region at that address.
1362 while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
1365 /* As before, drop back one to point at the region in which
1366 the address actually falls
1370 if ((ofs + len) & (regions[i].erasesize-1))
/* Translate the absolute offset into (chip index, chip-relative addr) */
1373 chipnum = ofs >> cfi->chipshift;
1374 adr = ofs - (chipnum << cfi->chipshift);
1379 unsigned long chipmask;
1380 int size = regions[i].erasesize;
/* Invoke the callback on one erase block of the current region */
1382 ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
/* Detect (modulo chip size) when we step off the end of this region */
1390 chipmask = (1 << cfi->chipshift) - 1;
1391 if ((adr & chipmask) == ((regions[i].offset + size * regions[i].numblocks) & chipmask))
/* Wrap into the next chip when the chip-relative address overflows */
1394 if (adr >> cfi->chipshift) {
1398 if (chipnum >= cfi->numchips)
/*
 * do_erase_oneblock - erase a single erase block at chip-relative 'adr'.
 *
 * Issues the block-erase command sequence (0x20 setup, 0xD0 confirm),
 * then polls the status register until the per-chip ready bits (0x80)
 * are set, cooperating with erase-suspend and the chip mutex.  On
 * completion, decodes status error bits and maps them to errnos.
 *
 * Called via cfi_intelext_varsize_frob(); 'thunk' is unused here.
 * NOTE(review): listing is elided; some declarations, sleep calls and
 * error-return lines are not visible.
 */
1407 static int do_erase_oneblock(struct map_info *map, struct flchip *chip,
1408 unsigned long adr, int len, void *thunk)
1410 struct cfi_private *cfi = map->fldrv_priv;
1411 map_word status, status_OK;
1412 unsigned long timeo;
1414 DECLARE_WAITQUEUE(wait, current);
1419 /* Let's determine this according to the interleave only once */
1420 status_OK = CMD(0x80);
/* Serialize access to the chip and claim it for erasing */
1423 spin_lock(chip->mutex);
1424 ret = get_chip(map, chip, adr, FL_ERASING);
1426 spin_unlock(chip->mutex);
1431 /* Clear the status register first */
1432 map_write(map, CMD(0x50), adr);
/* Block-erase setup (0x20) followed by confirm (0xD0) */
1435 map_write(map, CMD(0x20), adr);
1436 map_write(map, CMD(0xD0), adr);
1437 chip->state = FL_ERASING;
1438 chip->erase_suspended = 0;
/* Drop the lock while the chip works; sleep half the typical erase time */
1440 spin_unlock(chip->mutex);
1441 INVALIDATE_CACHED_RANGE(map, adr, len);
1442 msleep(chip->erase_time / 2);
1443 spin_lock(chip->mutex);
1445 /* FIXME. Use a timer to check this, and return immediately. */
1446 /* Once the state machine's known to be working I'll do that */
1448 timeo = jiffies + (HZ*20);
1450 if (chip->state != FL_ERASING) {
1451 /* Someone's suspended the erase. Sleep */
1452 set_current_state(TASK_UNINTERRUPTIBLE);
1453 add_wait_queue(&chip->wq, &wait);
1454 spin_unlock(chip->mutex);
1456 remove_wait_queue(&chip->wq, &wait);
1457 spin_lock(chip->mutex);
1460 if (chip->erase_suspended) {
1461 /* This erase was suspended and resumed.
1462 Adjust the timeout */
1463 timeo = jiffies + (HZ*20); /* FIXME */
1464 chip->erase_suspended = 0;
/* Poll: done when every interleaved chip has its ready bit (0x80) set */
1467 status = map_read(map, adr);
1468 if (map_word_andequal(map, status, status_OK, status_OK))
1471 /* OK Still waiting */
1472 if (time_after(jiffies, timeo)) {
/* Timed out: force read-status mode (0x70) and report */
1473 map_write(map, CMD(0x70), adr);
1474 chip->state = FL_STATUS;
1475 printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %lx, status = %lx.\n",
1476 adr, status.x[0], map_read(map, adr).x[0]);
1477 /* Clear status bits */
1478 map_write(map, CMD(0x50), adr);
1479 map_write(map, CMD(0x70), adr);
1481 spin_unlock(chip->mutex);
1485 /* Latency issues. Drop the lock, wait a while and retry */
1486 spin_unlock(chip->mutex);
1487 set_current_state(TASK_UNINTERRUPTIBLE);
1488 schedule_timeout(1);
1489 spin_lock(chip->mutex);
1495 /* We've broken this before. It doesn't hurt to be safe */
1496 map_write(map, CMD(0x70), adr);
1497 chip->state = FL_STATUS;
1498 status = map_read(map, adr);
1500 /* check for lock bit */
/* 0x3a masks the status error bits checked below (erase/program/VPP/protect) */
1501 if (map_word_bitsset(map, status, CMD(0x3a))) {
1502 unsigned char chipstatus = status.x[0];
/* Interleaved chips may disagree: OR all per-chip status bytes together */
1503 if (!map_word_equal(map, status, CMD(chipstatus))) {
1505 for (w=0; w<map_words(map); w++) {
1506 for (i = 0; i<cfi_interleave(cfi); i++) {
1507 chipstatus |= status.x[w] >> (cfi->device_type * 8);
1510 printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
1511 status.x[0], chipstatus);
1513 /* Reset the error bits */
1514 map_write(map, CMD(0x50), adr);
1515 map_write(map, CMD(0x70), adr);
/* Decode the merged status byte into a specific failure cause */
1517 if ((chipstatus & 0x30) == 0x30) {
1518 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
1520 } else if (chipstatus & 0x02) {
1521 /* Protection bit set */
1523 } else if (chipstatus & 0x8) {
1525 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
1527 } else if (chipstatus & 0x20) {
/* Plain erase failure: retry once more with a fresh 1s timeout */
1529 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
1530 timeo = jiffies + HZ;
1531 chip->state = FL_STATUS;
1532 spin_unlock(chip->mutex);
1535 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
1541 spin_unlock(chip->mutex);
/*
 * cfi_intelext_erase_varsize - MTD erase entry point.
 *
 * Delegates per-block work to do_erase_oneblock() via the variable-size
 * region walker, then marks the request done and notifies the MTD core.
 * NOTE(review): elided listing; extraction of ofs/len from 'instr' and
 * the error-return path are not visible.
 */
1545 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1547 unsigned long ofs, len;
1553 ret = cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
/* Success: flag completion and run the caller-supplied callback */
1557 instr->state = MTD_ERASE_DONE;
1558 mtd_erase_callback(instr);
/*
 * cfi_intelext_sync - quiesce all chips (MTD sync hook).
 *
 * Puts every chip into FL_SYNCING so no further operations start; then
 * walks back over the chips already claimed and restores their previous
 * state.  NOTE(review): elided listing; the wake_up path after restoring
 * chip state is not visible.
 */
1563 static void cfi_intelext_sync (struct mtd_info *mtd)
1565 struct map_info *map = mtd->priv;
1566 struct cfi_private *cfi = map->fldrv_priv;
1568 struct flchip *chip;
/* First pass: claim each chip for syncing; stop on the first failure */
1571 for (i=0; !ret && i<cfi->numchips; i++) {
1572 chip = &cfi->chips[i];
1574 spin_lock(chip->mutex);
1575 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1578 chip->oldstate = chip->state;
1579 chip->state = FL_SYNCING;
1580 /* No need to wake_up() on this state change -
1581 * as the whole point is that nobody can do anything
1582 * with the chip now anyway.
1585 spin_unlock(chip->mutex);
1588 /* Unlock the chips again */
/* Second pass (reverse order): release every chip we claimed above */
1590 for (i--; i >=0; i--) {
1591 chip = &cfi->chips[i];
1593 spin_lock(chip->mutex);
1595 if (chip->state == FL_SYNCING) {
1596 chip->state = chip->oldstate;
1599 spin_unlock(chip->mutex);
1603 #ifdef DEBUG_LOCK_BITS
/*
 * do_printlockstatus_oneblock - debug-only frob callback: print the
 * lock-status byte of one erase block.
 *
 * Enters query mode (0x90), reads the block status at adr + 2*ofs_factor
 * and leaves the chip in FL_JEDEC_QUERY.  Compiled only when
 * DEBUG_LOCK_BITS is defined.
 */
1604 static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip,
1605 unsigned long adr, int len, void *thunk)
1607 struct cfi_private *cfi = map->fldrv_priv;
/* Byte stride between query locations: interleave * device width */
1608 int ofs_factor = cfi->interleave * cfi->device_type;
1610 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1611 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1612 adr, cfi_read_query(map, adr+(2*ofs_factor)));
1613 chip->state = FL_JEDEC_QUERY;
/* Thunk values selecting lock vs unlock in do_xxlock_oneblock() */
1618 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1619 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
/*
 * do_xxlock_oneblock - lock or unlock one erase block.
 *
 * Issues the set/clear block-lock sequence: 0x60 setup, then 0x01
 * (lock) or 0xD0 (unlock) depending on 'thunk', and polls the status
 * register until the per-chip ready bits (0x80) are set.
 * NOTE(review): elided listing; the set_current_state() before
 * schedule_timeout() and some returns are not visible.
 */
1621 static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1622 unsigned long adr, int len, void *thunk)
1624 struct cfi_private *cfi = map->fldrv_priv;
1625 map_word status, status_OK;
1626 unsigned long timeo = jiffies + HZ;
1631 /* Let's determine this according to the interleave only once */
1632 status_OK = CMD(0x80);
/* Claim the chip for the (un)lock operation */
1634 spin_lock(chip->mutex);
1635 ret = get_chip(map, chip, adr, FL_LOCKING);
1637 spin_unlock(chip->mutex);
/* Block-lock setup command, then the lock/unlock confirm */
1642 map_write(map, CMD(0x60), adr);
1644 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1645 map_write(map, CMD(0x01), adr);
1646 chip->state = FL_LOCKING;
1647 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1648 map_write(map, CMD(0xD0), adr);
1649 chip->state = FL_UNLOCKING;
/* Give the chip time to complete before polling */
1653 spin_unlock(chip->mutex);
1654 schedule_timeout(HZ);
1655 spin_lock(chip->mutex);
1657 /* FIXME. Use a timer to check this, and return immediately. */
1658 /* Once the state machine's known to be working I'll do that */
1660 timeo = jiffies + (HZ*20);
/* Poll: done when every interleaved chip reports ready (0x80) */
1663 status = map_read(map, adr);
1664 if (map_word_andequal(map, status, status_OK, status_OK))
1667 /* OK Still waiting */
1668 if (time_after(jiffies, timeo)) {
1669 map_write(map, CMD(0x70), adr);
1670 chip->state = FL_STATUS;
1671 printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n",
1672 status.x[0], map_read(map, adr).x[0]);
1674 spin_unlock(chip->mutex);
1678 /* Latency issues. Drop the lock, wait a while and retry */
1679 spin_unlock(chip->mutex);
1681 spin_lock(chip->mutex);
1684 /* Done and happy. */
1685 chip->state = FL_STATUS;
1686 put_chip(map, chip, adr);
1687 spin_unlock(chip->mutex);
/*
 * cfi_intelext_lock - MTD lock hook: lock every erase block in
 * [ofs, ofs+len) via do_xxlock_oneblock(), optionally printing lock
 * status before and after when DEBUG_LOCK_BITS is defined.
 */
1691 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1695 #ifdef DEBUG_LOCK_BITS
1696 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1697 __FUNCTION__, ofs, len);
1698 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
/* The actual work: walk the range applying the LOCK thunk */
1702 ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1703 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1705 #ifdef DEBUG_LOCK_BITS
1706 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1708 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
/*
 * cfi_intelext_unlock - MTD unlock hook: mirror of cfi_intelext_lock()
 * using the UNLOCK thunk for every erase block in [ofs, ofs+len).
 */
1715 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1719 #ifdef DEBUG_LOCK_BITS
1720 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1721 __FUNCTION__, ofs, len);
1722 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
/* The actual work: walk the range applying the UNLOCK thunk */
1726 ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1727 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1729 #ifdef DEBUG_LOCK_BITS
1730 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1732 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
/*
 * cfi_intelext_suspend - power-management suspend hook.
 *
 * Tries to move every idle chip into FL_PM_SUSPENDED.  A chip that is
 * busy (or whose oldstate is not FL_READY) causes the whole suspend to
 * fail, after which already-suspended chips are rolled back in reverse
 * order.  NOTE(review): elided listing; the 'ret = -EAGAIN'-style
 * failure assignments and wake_up calls are not visible.
 */
1739 static int cfi_intelext_suspend(struct mtd_info *mtd)
1741 struct map_info *map = mtd->priv;
1742 struct cfi_private *cfi = map->fldrv_priv;
1744 struct flchip *chip;
/* First pass: suspend each chip; abort the loop on first failure */
1747 for (i=0; !ret && i<cfi->numchips; i++) {
1748 chip = &cfi->chips[i];
1750 spin_lock(chip->mutex);
1752 switch (chip->state) {
1756 case FL_JEDEC_QUERY:
/* Only suspend a chip with no pending background operation */
1757 if (chip->oldstate == FL_READY) {
1758 chip->oldstate = chip->state;
1759 chip->state = FL_PM_SUSPENDED;
1760 /* No need to wake_up() on this state change -
1761 * as the whole point is that nobody can do anything
1762 * with the chip now anyway.
1768 case FL_PM_SUSPENDED:
1771 spin_unlock(chip->mutex);
1774 /* Unlock the chips again */
/* Failure path: roll back, in reverse order, the chips we suspended */
1777 for (i--; i >=0; i--) {
1778 chip = &cfi->chips[i];
1780 spin_lock(chip->mutex);
1782 if (chip->state == FL_PM_SUSPENDED) {
1783 /* No need to force it into a known state here,
1784 because we're returning failure, and it didn't
1786 chip->state = chip->oldstate;
1789 spin_unlock(chip->mutex);
/*
 * cfi_intelext_resume - power-management resume hook.
 *
 * For every chip left in FL_PM_SUSPENDED, writes 0xFF (return to read
 * array mode) and marks it FL_READY — the chip may have been power
 * cycled, so force it back into a known state.
 */
1796 static void cfi_intelext_resume(struct mtd_info *mtd)
1798 struct map_info *map = mtd->priv;
1799 struct cfi_private *cfi = map->fldrv_priv;
1801 struct flchip *chip;
1803 for (i=0; i<cfi->numchips; i++) {
1805 chip = &cfi->chips[i];
1807 spin_lock(chip->mutex);
1809 /* Go to known state. Chip may have been power cycled */
1810 if (chip->state == FL_PM_SUSPENDED) {
1811 map_write(map, CMD(0xFF), cfi->chips[i].start);
1812 chip->state = FL_READY;
1816 spin_unlock(chip->mutex);
/*
 * cfi_intelext_destroy - free driver-private allocations when the MTD
 * device is torn down: command-set private data, per-chip private data
 * and the erase-region table.  (kfree(NULL) is a no-op, so elided
 * NULL checks, if any, are not required for safety.)
 */
1820 static void cfi_intelext_destroy(struct mtd_info *mtd)
1822 struct map_info *map = mtd->priv;
1823 struct cfi_private *cfi = map->fldrv_priv;
1824 kfree(cfi->cmdset_priv);
1826 kfree(cfi->chips[0].priv);
1828 kfree(mtd->eraseregions);
/* Inter-module names under which this command set is published */
1831 static char im_name_1[]="cfi_cmdset_0001";
1832 static char im_name_3[]="cfi_cmdset_0003";
/*
 * cfi_intelext_init - module init: register the probe entry point under
 * both the 0x0001 and 0x0003 vendor-ID names.  Note both names point at
 * &cfi_cmdset_0001 — apparently intentional aliasing (0x0003 handled by
 * the same code); confirm against the cfi_cmdset_0001() definition.
 */
1834 int __init cfi_intelext_init(void)
1836 inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
1837 inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
/* Module exit: unregister both inter-module names registered at init */
1841 static void __exit cfi_intelext_exit(void)
1843 inter_module_unregister(im_name_1);
1844 inter_module_unregister(im_name_3);
/* Standard kernel module entry/exit wiring and metadata */
1847 module_init(cfi_intelext_init);
1848 module_exit(cfi_intelext_exit);
1850 MODULE_LICENSE("GPL");
1851 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
1852 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");