2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
7 * $Id: cfi_cmdset_0001.c,v 1.153 2004/07/12 21:52:20 dwmw2 Exp $
10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.)
13 * - scalability vs code size is completely set at compile-time
14 * (see include/linux/mtd/cfi.h for selection)
15 * - optimized write buffer method
16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17 * - reworked lock/unlock/erase support for var size flash
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
26 #include <asm/byteorder.h>
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/mtd/map.h>
33 #include <linux/mtd/mtd.h>
34 #include <linux/mtd/compatmac.h>
35 #include <linux/mtd/cfi.h>
37 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39 // debugging, turns off buffer write mode if set to 1
40 #define FORCE_WORD_WRITE 0
42 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
43 //static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
44 //static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
45 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
46 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
47 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
48 static void cfi_intelext_sync (struct mtd_info *);
49 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
50 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
51 static int cfi_intelext_suspend (struct mtd_info *);
52 static void cfi_intelext_resume (struct mtd_info *);
54 static void cfi_intelext_destroy(struct mtd_info *);
56 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
58 static struct mtd_info *cfi_intelext_setup (struct map_info *);
59 static int cfi_intelext_partition_fixup(struct map_info *, struct cfi_private **);
61 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
62 size_t *retlen, u_char **mtdbuf);
63 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
68 * *********** SETUP AND PROBE BITS ***********
/* Chip-driver registration record for the Intel/Sharp extended command
 * set.  .probe is NULL because this driver is never probed directly:
 * the generic CFI probe identifies the chip and then calls
 * cfi_cmdset_0001() to install these methods. */
71 static struct mtd_chip_driver cfi_intelext_chipdrv = {
72 .probe = NULL, /* Not usable directly */
73 .destroy = cfi_intelext_destroy,
74 .name = "cfi_cmdset_0001",
78 /* #define DEBUG_LOCK_BITS */
79 /* #define DEBUG_CFI_FEATURES */
81 #ifdef DEBUG_CFI_FEATURES
/* Debug-only helper: dump the Intel extended query (PRI) feature,
 * suspend-command and block-status-register bitfields in human-readable
 * form.  Compiled in only when DEBUG_CFI_FEATURES is defined above. */
82 static void cfi_tell_features(struct cfi_pri_intelext *extp)
85 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
86 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
87 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
88 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
89 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
90 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
91 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
92 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
93 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
94 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
95 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
/* Bits 0-9 are decoded above; report any remaining set bits as unknown. */
96 for (i=10; i<32; i++) {
97 if (extp->FeatureSupport & (1<<i))
98 printk(" - Unknown Bit %X: supported\n", i);
101 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
102 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
103 for (i=1; i<8; i++) {
104 if (extp->SuspendCmdSupport & (1<<i))
105 printk(" - Unknown Bit %X: supported\n", i);
108 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
109 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
110 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
111 for (i=2; i<16; i++) {
112 if (extp->BlkStatusRegMask & (1<<i))
113 printk(" - Unknown Bit %X Active: yes\n",i);
/* Voltages are encoded BCD-style: high nibble = volts, low nibble = tenths. */
116 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
117 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
118 if (extp->VppOptimal)
119 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
120 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
124 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
125 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
126 static void fixup_intel_strataflash(struct map_info *map, void* param)
128 struct cfi_private *cfi = map->fldrv_priv;
129 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
131 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
132 "erase on write disabled.\n");
133 extp->SuspendCmdSupport &= ~1;
/* ST M28W320CT fixup: zero the buffered-write timeouts so the
 * BufWriteTimeoutTyp test in cfi_intelext_setup() selects the word
 * write method instead of the (unsupported) buffer write method. */
137 static void fixup_st_m28w320ct(struct map_info *map, void* param)
139 struct cfi_private *cfi = map->fldrv_priv;
141 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
142 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
/* ST M28W320CB fixup: force the block count field (low 16 bits,
 * stored as numblocks-1 per the CFI erase-region encoding used in
 * cfi_intelext_setup()) of the second erase region to 0x3e. */
145 static void fixup_st_m28w320cb(struct map_info *map, void* param)
147 struct cfi_private *cfi = map->fldrv_priv;
149 /* Note this is done after the region info is endian swapped */
150 cfi->cfiq->EraseRegionInfo[1] =
151 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
/* Table of per-chip quirk fixups, applied by cfi_fixup() from
 * cfi_cmdset_0001().  Each entry: manufacturer ID, device ID,
 * fixup function, optional parameter. */
154 static struct cfi_fixup fixup_table[] = {
155 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
157 CFI_MFR_ANY, CFI_ID_ANY,
158 fixup_intel_strataflash, NULL
162 0x0020, /* STMicroelectronics */
163 0x00ba, /* M28W320CT */
164 fixup_st_m28w320ct, NULL
166 0x0020, /* STMicroelectronics */
167 0x00bb, /* M28W320CB */
168 fixup_st_m28w320cb, NULL
174 /* This routine is made available to other mtd code via
175 * inter_module_register. It must only be accessed through
176 * inter_module_get which will bump the use count of this module. The
177 * addresses passed back in cfi are valid as long as the use count of
178 * this module is non-zero, i.e. between inter_module_get and
179 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
/* Entry point for the Intel/Sharp extended command set.  For real CFI
 * chips: read the extended (PRI) query table, byteswap its multi-byte
 * fields, install it as cmdset_priv and run the quirk fixup table.
 * Then initialise per-chip typical timings from the CFI query and hand
 * off to cfi_intelext_setup() to build the mtd_info. */
181 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
183 struct cfi_private *cfi = map->fldrv_priv;
186 if (cfi->cfi_mode == CFI_MODE_CFI) {
188 * It's a real CFI chip, not one for which the probe
189 * routine faked a CFI structure. So we read the feature
192 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
193 struct cfi_pri_intelext *extp;
195 extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "Intel/Sharp");
199 /* Do some byteswapping if necessary */
200 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
201 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
202 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
204 /* Install our own private info structure */
205 cfi->cmdset_priv = extp;
/* Apply per-chip quirks (may modify extp and cfi->cfiq). */
207 cfi_fixup(map, fixup_table);
209 #ifdef DEBUG_CFI_FEATURES
210 /* Tell the user about it in lots of lovely detail */
211 cfi_tell_features(extp);
214 if(extp->SuspendCmdSupport & 1) {
215 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
/* Typical timings are CFI-encoded as powers of two (microseconds). */
219 for (i=0; i< cfi->numchips; i++) {
220 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
221 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
222 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
223 cfi->chips[i].ref_point_counter = 0;
226 map->fldrv = &cfi_intelext_chipdrv;
228 return cfi_intelext_setup(map);
/* Allocate and populate the mtd_info for this map: total size, erase
 * region geometry (replicated per chip of the interleaved set), and the
 * method pointers.  Returns NULL on allocation or geometry failure,
 * freeing everything allocated here on the error paths at the bottom. */
231 static struct mtd_info *cfi_intelext_setup(struct map_info *map)
233 struct cfi_private *cfi = map->fldrv_priv;
234 struct mtd_info *mtd;
235 unsigned long offset = 0;
237 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
239 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
240 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
243 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
247 memset(mtd, 0, sizeof(*mtd));
249 mtd->type = MTD_NORFLASH;
250 mtd->size = devsize * cfi->numchips;
252 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
253 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
254 * mtd->numeraseregions, GFP_KERNEL);
255 if (!mtd->eraseregions) {
256 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
/* CFI encoding: bits 31..16 = block size / 256, bits 15..0 = numblocks-1. */
260 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
261 unsigned long ernum, ersize;
262 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
263 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
/* mtd->erasesize ends up as the largest region's block size. */
265 if (mtd->erasesize < ersize) {
266 mtd->erasesize = ersize;
/* Replicate this region's info once per physical chip in the set. */
268 for (j=0; j<cfi->numchips; j++) {
269 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
270 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
271 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
273 offset += (ersize * ernum);
/* Sanity check: the regions must exactly tile the device. */
276 if (offset != devsize) {
278 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
282 for (i=0; i<mtd->numeraseregions;i++){
283 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
284 i,mtd->eraseregions[i].offset,
285 mtd->eraseregions[i].erasesize,
286 mtd->eraseregions[i].numblocks);
289 /* Also select the correct geometry setup too */
290 mtd->erase = cfi_intelext_erase_varsize;
291 mtd->read = cfi_intelext_read;
/* point/unpoint only make sense for directly-mappable (linear) flash. */
293 if (map_is_linear(map)) {
294 mtd->point = cfi_intelext_point;
295 mtd->unpoint = cfi_intelext_unpoint;
/* Prefer buffered writes unless the chip lacks them or debugging forces
   word mode (see FORCE_WORD_WRITE above). */
298 if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) {
299 printk(KERN_INFO "Using buffer write method\n" );
300 mtd->write = cfi_intelext_write_buffers;
302 printk(KERN_INFO "Using word write method\n" );
303 mtd->write = cfi_intelext_write_words;
306 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
307 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
309 mtd->sync = cfi_intelext_sync;
310 mtd->lock = cfi_intelext_lock;
311 mtd->unlock = cfi_intelext_unlock;
312 mtd->suspend = cfi_intelext_suspend;
313 mtd->resume = cfi_intelext_resume;
314 mtd->flags = MTD_CAP_NORFLASH;
315 map->fldrv = &cfi_intelext_chipdrv;
316 mtd->name = map->name;
318 /* This function has the potential to distort the reality
319 a bit and therefore should be called last. */
320 if (cfi_intelext_partition_fixup(map, &cfi) != 0)
323 __module_get(THIS_MODULE);
/* Error path: unwind all allocations made above. */
328 if(mtd->eraseregions)
329 kfree(mtd->eraseregions);
332 kfree(cfi->cmdset_priv);
/* If the chip advertises simultaneous operations (FeatureSupport bit 9,
 * e.g. Intel L18), split each physical chip into per-partition virtual
 * flchip structures sharing one flchip_shared arbiter, and swap in a
 * rebuilt cfi_private via *pcfi / map->fldrv_priv.  Returns 0 on
 * success. */
336 static int cfi_intelext_partition_fixup(struct map_info *map,
337 struct cfi_private **pcfi)
339 struct cfi_private *cfi = *pcfi;
340 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
343 * Probing of multi-partition flash chips.
345 * This is extremely crude at the moment and should probably be
346 * extracted entirely from the Intel extended query data instead.
347 * Right now a L18 flash is assumed if multiple operations is
350 * To support multiple partitions when available, we simply arrange
351 * for each of them to have their own flchip structure even if they
352 * are on the same physical chip. This means completely recreating
353 * a new cfi_private structure right here which is a blatant code
354 * layering violation, but this is still the least intrusive
355 * arrangement at this point. This can be rearranged in the future
356 * if someone feels motivated enough. --nico
358 if (extp && extp->FeatureSupport & (1 << 9)) {
359 struct cfi_private *newcfi;
361 struct flchip_shared *shared;
362 int numparts, partshift, numvirtchips, i, j;
365 * The L18 flash memory array is divided
366 * into multiple 8-Mbit partitions.
/* DevSize is log2(bytes); 2^20 bytes = 8 Mbit per partition. */
368 numparts = 1 << (cfi->cfiq->DevSize - 20);
369 partshift = 20 + __ffs(cfi->interleave);
370 numvirtchips = cfi->numchips * numparts;
/* flchip array is allocated inline after the cfi_private. */
372 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
375 shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
380 memcpy(newcfi, cfi, sizeof(struct cfi_private));
381 newcfi->numchips = numvirtchips;
382 newcfi->chipshift = partshift;
/* Clone each physical chip into one flchip per partition; all
   partitions of a physical chip point at the same shared arbiter. */
384 chip = &newcfi->chips[0];
385 for (i = 0; i < cfi->numchips; i++) {
386 shared[i].writing = shared[i].erasing = NULL;
387 spin_lock_init(&shared[i].lock);
388 for (j = 0; j < numparts; j++) {
389 *chip = cfi->chips[i];
390 chip->start += j << partshift;
391 chip->priv = &shared[i];
392 /* those should be reset too since
393 they create memory references. */
394 init_waitqueue_head(&chip->wq);
395 spin_lock_init(&chip->_spinlock);
396 chip->mutex = &chip->_spinlock;
401 printk(KERN_DEBUG "%s: %d sets of %d interleaved chips "
402 "--> %d partitions of %#x bytes\n",
403 map->name, cfi->numchips, cfi->interleave,
404 newcfi->numchips, 1<<newcfi->chipshift);
406 map->fldrv_priv = newcfi;
415 * *********** CHIP ACCESS FUNCTIONS ***********
/* Acquire a (possibly virtual, per-partition) chip for the operation
 * 'mode', waiting, arbitrating against other partitions, or suspending
 * an in-progress erase as needed.  Called and returns with chip->mutex
 * held (it may be dropped and retaken internally).  Returns 0 when the
 * caller may proceed. */
418 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
420 DECLARE_WAITQUEUE(wait, current);
421 struct cfi_private *cfi = map->fldrv_priv;
/* SR.7 = WSM ready; SR.0 = partition write status (simultaneous ops). */
422 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
424 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
427 timeo = jiffies + HZ;
429 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
431 * OK. We have possibility for contention on the write/erase
432 * operations which are global to the real chip and not per
433 * partition. So let's fight it over in the partition which
434 * currently has authority on the operation.
436 * The rules are as follows:
438 * - any write operation must own shared->writing.
440 * - any erase operation must own _both_ shared->writing and
443 * - contention arbitration is handled in the owner's context.
445 * The 'shared' struct can be read when its lock is taken.
446 * However any writes to it can only be made when the current
447 * owner's lock is also held.
449 struct flchip_shared *shared = chip->priv;
450 struct flchip *contender;
451 spin_lock(&shared->lock);
452 contender = shared->writing;
453 if (contender && contender != chip) {
455 * The engine to perform desired operation on this
456 * partition is already in use by someone else.
457 * Let's fight over it in the context of the chip
458 * currently using it. If it is possible to suspend,
459 * that other partition will do just that, otherwise
460 * it'll happily send us to sleep. In any case, when
461 * get_chip returns success we're clear to go ahead.
463 int ret = spin_trylock(contender->mutex);
464 spin_unlock(&shared->lock);
/* Recurse on the current owner: this suspends its operation
   (or sleeps) in the owner's own context.  Lock ordering: we
   drop our mutex before taking the owner's path. */
467 spin_unlock(chip->mutex);
468 ret = get_chip(map, contender, contender->start, mode);
469 spin_lock(chip->mutex);
471 spin_unlock(contender->mutex);
474 timeo = jiffies + HZ;
475 spin_lock(&shared->lock);
/* We now own the write engine; erases additionally own 'erasing'. */
479 shared->writing = chip;
480 if (mode == FL_ERASING)
481 shared->erasing = chip;
482 if (contender && contender != chip)
483 spin_unlock(contender->mutex);
484 spin_unlock(&shared->lock);
487 switch (chip->state) {
/* Busy in status mode: poll SR.7 until the WSM is ready. */
491 status = map_read(map, adr);
492 if (map_word_andequal(map, status, status_OK, status_OK))
495 /* At this point we're fine with write operations
496 in other partitions as they don't conflict. */
497 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
500 if (time_after(jiffies, timeo)) {
501 printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
505 spin_unlock(chip->mutex);
507 spin_lock(chip->mutex);
508 /* Someone else might have been playing with it. */
/* Erase in progress: only proceed by suspending it, and only if the
   chip supports erase suspend (FeatureSupport bit 1) and, for writes,
   program-after-erase-suspend (SuspendCmdSupport bit 0). */
518 if (!(cfip->FeatureSupport & 2) ||
519 !(mode == FL_READY || mode == FL_POINT ||
520 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
/* 0xB0 = Erase Suspend command. */
525 map_write(map, CMD(0xB0), adr);
527 /* If the flash has finished erasing, then 'erase suspend'
528 * appears to make some (28F320) flash devices switch to
529 * 'read' mode. Make sure that we switch to 'read status'
530 * mode so we get the right data. --rmk
532 map_write(map, CMD(0x70), adr);
533 chip->oldstate = FL_ERASING;
534 chip->state = FL_ERASE_SUSPENDING;
535 chip->erase_suspended = 1;
537 status = map_read(map, adr);
538 if (map_word_andequal(map, status, status_OK, status_OK))
541 if (time_after(jiffies, timeo)) {
542 /* Urgh. Resume and pretend we weren't here. */
543 map_write(map, CMD(0xd0), adr);
544 /* Make sure we're in 'read status' mode if it had finished */
545 map_write(map, CMD(0x70), adr);
546 chip->state = FL_ERASING;
547 chip->oldstate = FL_READY;
548 printk(KERN_ERR "Chip not ready after erase "
549 "suspended: status = 0x%lx\n", status.x[0]);
553 spin_unlock(chip->mutex);
555 spin_lock(chip->mutex);
556 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
557 So we can just loop here. */
559 chip->state = FL_STATUS;
563 /* Only if there's no operation suspended... */
564 if (mode == FL_READY && chip->oldstate == FL_READY)
/* Default: sleep on the chip's waitqueue until woken, then retry. */
569 set_current_state(TASK_UNINTERRUPTIBLE);
570 add_wait_queue(&chip->wq, &wait);
571 spin_unlock(chip->mutex);
573 remove_wait_queue(&chip->wq, &wait);
574 spin_lock(chip->mutex);
/* Release a chip acquired by get_chip(): hand the shared write/erase
 * engine back to a loaner partition if we borrowed it, resume a
 * suspended erase, and wake anyone sleeping on chip->wq.  Called with
 * chip->mutex held. */
579 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
581 struct cfi_private *cfi = map->fldrv_priv;
584 struct flchip_shared *shared = chip->priv;
585 spin_lock(&shared->lock);
586 if (shared->writing == chip) {
587 /* We own the ability to write, but we're done */
588 shared->writing = shared->erasing;
589 if (shared->writing && shared->writing != chip) {
590 /* give back ownership to who we loaned it from */
591 struct flchip *loaner = shared->writing;
592 spin_lock(loaner->mutex);
593 spin_unlock(&shared->lock);
594 spin_unlock(chip->mutex);
595 put_chip(map, loaner, loaner->start);
596 spin_lock(chip->mutex);
597 spin_unlock(loaner->mutex);
/* No loaner: drop our claim(s) unless we are mid-suspend ourselves. */
599 if (chip->oldstate != FL_ERASING) {
600 shared->erasing = NULL;
601 if (chip->oldstate != FL_WRITING)
602 shared->writing = NULL;
604 spin_unlock(&shared->lock);
609 switch(chip->oldstate) {
/* oldstate == FL_ERASING: resume (0xD0) the suspended erase. */
611 chip->state = chip->oldstate;
612 /* What if one interleaved chip has finished and the
613 other hasn't? The old code would leave the finished
614 one in READY mode. That's bad, and caused -EROFS
615 errors to be returned from do_erase_oneblock because
616 that's the only bit it checked for at the time.
617 As the state machine appears to explicitly allow
618 sending the 0x70 (Read Status) command to an erasing
619 chip and expecting it to be ignored, that's what we
621 map_write(map, CMD(0xd0), adr);
622 map_write(map, CMD(0x70), adr);
623 chip->oldstate = FL_READY;
624 chip->state = FL_ERASING;
630 /* We should really make set_vpp() count, rather than doing this */
634 printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
/* Put one chip into FL_POINT state (read-array mode, 0xFF) so callers
 * may read the mapped flash directly; bumps ref_point_counter so nested
 * point/unpoint pairs balance. */
639 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
641 unsigned long cmd_addr;
642 struct cfi_private *cfi = map->fldrv_priv;
647 /* Ensure cmd read/writes are aligned. */
648 cmd_addr = adr & ~(map_bankwidth(map)-1);
650 spin_lock(chip->mutex);
652 ret = get_chip(map, chip, cmd_addr, FL_POINT);
/* 0xFF = Read Array: only needed if not already readable. */
655 if (chip->state != FL_POINT && chip->state != FL_READY)
656 map_write(map, CMD(0xff), cmd_addr);
658 chip->state = FL_POINT;
659 chip->ref_point_counter++;
661 spin_unlock(chip->mutex);
/* mtd->point implementation: return a direct pointer into the linear
 * mapping and lock every chip covered by [from, from+len) into
 * FL_POINT state via do_point_onechip(). */
666 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
668 struct map_info *map = mtd->priv;
669 struct cfi_private *cfi = map->fldrv_priv;
/* Only valid for mapped flash within the device bounds. */
674 if (!map->virt || (from + len > mtd->size))
677 *mtdbuf = (void *)map->virt + from;
680 /* Now lock the chip(s) to POINT state */
682 /* ofs: offset within the first chip that the first read should start */
683 chipnum = (from >> cfi->chipshift);
684 ofs = from - (chipnum << cfi->chipshift);
687 unsigned long thislen;
689 if (chipnum >= cfi->numchips)
/* Clamp this iteration's length to the end of the current chip. */
692 if ((len + ofs -1) >> cfi->chipshift)
693 thislen = (1<<cfi->chipshift) - ofs;
697 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
/* mtd->unpoint implementation: undo cfi_intelext_point() by dropping
 * each covered chip's ref_point_counter; the chip leaves FL_POINT only
 * when the counter reaches zero. */
710 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
712 struct map_info *map = mtd->priv;
713 struct cfi_private *cfi = map->fldrv_priv;
717 /* Now unlock the chip(s) POINT state */
719 /* ofs: offset within the first chip that the first read should start */
720 chipnum = (from >> cfi->chipshift);
721 ofs = from - (chipnum << cfi->chipshift);
724 unsigned long thislen;
/* NOTE(review): chip is taken before the chipnum bounds check below —
   looks like a possible out-of-range address computation; confirm
   against the loop's exit condition in the full source. */
727 chip = &cfi->chips[chipnum];
728 if (chipnum >= cfi->numchips)
731 if ((len + ofs -1) >> cfi->chipshift)
732 thislen = (1<<cfi->chipshift) - ofs;
736 spin_lock(chip->mutex);
737 if (chip->state == FL_POINT) {
738 chip->ref_point_counter--;
739 if(chip->ref_point_counter == 0)
740 chip->state = FL_READY;
742 printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
744 put_chip(map, chip, chip->start);
745 spin_unlock(chip->mutex);
/* Read 'len' bytes at chip-relative address 'adr' into buf: acquire the
 * chip, switch it to read-array mode (0xFF) if needed, copy, release. */
753 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
755 unsigned long cmd_addr;
756 struct cfi_private *cfi = map->fldrv_priv;
761 /* Ensure cmd read/writes are aligned. */
762 cmd_addr = adr & ~(map_bankwidth(map)-1);
764 spin_lock(chip->mutex);
765 ret = get_chip(map, chip, cmd_addr, FL_READY);
767 spin_unlock(chip->mutex);
/* 0xFF = Read Array command. */
771 if (chip->state != FL_POINT && chip->state != FL_READY) {
772 map_write(map, CMD(0xff), cmd_addr);
774 chip->state = FL_READY;
777 map_copy_from(map, buf, adr, len);
779 put_chip(map, chip, cmd_addr);
781 spin_unlock(chip->mutex);
/* mtd->read implementation: split [from, from+len) along chip
 * boundaries and delegate each piece to do_read_onechip(). */
785 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
787 struct map_info *map = mtd->priv;
788 struct cfi_private *cfi = map->fldrv_priv;
793 /* ofs: offset within the first chip that the first read should start */
794 chipnum = (from >> cfi->chipshift);
795 ofs = from - (chipnum << cfi->chipshift);
800 unsigned long thislen;
802 if (chipnum >= cfi->numchips)
/* Clamp this iteration's length to the end of the current chip. */
805 if ((len + ofs -1) >> cfi->chipshift)
806 thislen = (1<<cfi->chipshift) - ofs;
810 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/* Common worker for reading protection registers: per-chip, enter
 * JEDEC-query mode (0x90) and byte-read from the protection register
 * block located at (ProtRegAddr+1)*ofs_factor, windowed by base_offst
 * (skips factory regs for user reads) and reg_sz (register size per
 * chip). */
824 static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
826 struct map_info *map = mtd->priv;
827 struct cfi_private *cfi = map->fldrv_priv;
828 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
/* Interleave * device width scales query offsets to bus addresses. */
830 int ofs_factor = cfi->interleave * cfi->device_type;
835 chip_num = ((unsigned int)from/reg_sz);
836 offst = from - (reg_sz*chip_num)+base_offst;
839 /* Calculate which chip & protection register offset we need */
841 if (chip_num >= cfi->numchips)
844 chip = &cfi->chips[chip_num];
846 spin_lock(chip->mutex);
847 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
849 spin_unlock(chip->mutex);
/* Report partial progress if any, otherwise the get_chip error. */
850 return (len-count)?:ret;
/* 0x90 = Read Identifier/Query command. */
853 if (chip->state != FL_JEDEC_QUERY) {
854 map_write(map, CMD(0x90), chip->start);
855 chip->state = FL_JEDEC_QUERY;
858 while (count && ((offst-base_offst) < reg_sz)) {
859 *buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
865 put_chip(map, chip, chip->start);
866 spin_unlock(chip->mutex);
868 /* Move on to the next chip */
/* mtd->read_user_prot_reg: read the user-programmable protection
 * register.  base_offst skips past the factory register, whose size is
 * 2^FactProtRegSize bytes; reg_sz is the user register size. */
877 static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
879 struct map_info *map = mtd->priv;
880 struct cfi_private *cfi = map->fldrv_priv;
881 struct cfi_pri_intelext *extp=cfi->cmdset_priv;
882 int base_offst,reg_sz;
884 /* Check that we actually have some protection registers */
/* FeatureSupport bit 6 = Protection Bits (see cfi_tell_features). */
885 if(!(extp->FeatureSupport&64)){
886 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
890 base_offst=(1<<extp->FactProtRegSize);
891 reg_sz=(1<<extp->UserProtRegSize);
893 return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
/* mtd->read_fact_prot_reg: read the factory-programmed protection
 * register (located at offset 0 of the protection block, size
 * 2^FactProtRegSize bytes). */
896 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
898 struct map_info *map = mtd->priv;
899 struct cfi_private *cfi = map->fldrv_priv;
900 struct cfi_pri_intelext *extp=cfi->cmdset_priv;
901 int base_offst,reg_sz;
903 /* Check that we actually have some protection registers */
/* FeatureSupport bit 6 = Protection Bits (see cfi_tell_features). */
904 if(!(extp->FeatureSupport&64)){
905 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
910 reg_sz=(1<<extp->FactProtRegSize);
912 return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
/* Program one bus-width word at chip-relative address 'adr' using the
 * Word Program command (0x40), then poll SR.7 for completion, adapting
 * the per-chip typical write delay up or down.  Checks SR.1 for a
 * locked-block failure afterwards. */
916 static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
918 struct cfi_private *cfi = map->fldrv_priv;
919 map_word status, status_OK;
925 /* Let's determine this according to the interleave only once */
926 status_OK = CMD(0x80);
928 spin_lock(chip->mutex);
929 ret = get_chip(map, chip, adr, FL_WRITING);
931 spin_unlock(chip->mutex);
/* 0x40 = Word Program setup, followed by the data word. */
936 map_write(map, CMD(0x40), adr);
937 map_write(map, datum, adr);
938 chip->state = FL_WRITING;
/* Drop the lock for the expected programming time. */
940 spin_unlock(chip->mutex);
941 INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
942 cfi_udelay(chip->word_write_time);
943 spin_lock(chip->mutex);
945 timeo = jiffies + (HZ/2);
948 if (chip->state != FL_WRITING) {
949 /* Someone's suspended the write. Sleep */
950 DECLARE_WAITQUEUE(wait, current);
952 set_current_state(TASK_UNINTERRUPTIBLE);
953 add_wait_queue(&chip->wq, &wait);
954 spin_unlock(chip->mutex);
956 remove_wait_queue(&chip->wq, &wait);
957 timeo = jiffies + (HZ / 2); /* FIXME */
958 spin_lock(chip->mutex);
/* Poll SR.7 (WSM ready). */
962 status = map_read(map, adr);
963 if (map_word_andequal(map, status, status_OK, status_OK))
966 /* OK Still waiting */
967 if (time_after(jiffies, timeo)) {
968 chip->state = FL_STATUS;
969 printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
974 /* Latency issues. Drop the lock, wait a while and retry */
975 spin_unlock(chip->mutex);
978 spin_lock(chip->mutex);
/* Adaptive timing: finished on first check -> shorten the delay
   (never below 1); needed extra polls -> lengthen it. */
981 chip->word_write_time--;
982 if (!chip->word_write_time)
983 chip->word_write_time++;
986 chip->word_write_time++;
988 /* Done and happy. */
989 chip->state = FL_STATUS;
990 /* check for lock bit */
/* SR.1 set = program attempted on a locked block. */
991 if (map_word_bitsset(map, status, CMD(0x02))) {
993 map_write(map, CMD(0x50), adr);
994 /* put back into read status register mode */
995 map_write(map, CMD(0x70), adr);
999 put_chip(map, chip, adr);
1000 spin_unlock(chip->mutex);
/* mtd->write implementation (word mode): handle an unaligned head by
 * padding with 0xFF (programming a 1 bit is a no-op on NOR flash),
 * write aligned words in bulk, then a padded unaligned tail, crossing
 * chip boundaries as needed. */
1006 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1008 struct map_info *map = mtd->priv;
1009 struct cfi_private *cfi = map->fldrv_priv;
1018 chipnum = to >> cfi->chipshift;
1019 ofs = to - (chipnum << cfi->chipshift);
1021 /* If it's not bus-aligned, do the first byte write */
1022 if (ofs & (map_bankwidth(map)-1)) {
1023 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1024 int gap = ofs - bus_ofs;
1028 n = min_t(int, len, map_bankwidth(map)-gap);
/* Fill with 0xFF, then overlay the caller's bytes at 'gap'. */
1029 datum = map_word_ff(map);
1030 datum = map_word_load_partial(map, datum, buf, gap, n);
1032 ret = do_write_oneword(map, &cfi->chips[chipnum],
/* Advance to the next chip when we step past a chip boundary. */
1042 if (ofs >> cfi->chipshift) {
1045 if (chipnum == cfi->numchips)
/* Main loop: one full bus word per iteration. */
1050 while(len >= map_bankwidth(map)) {
1051 map_word datum = map_word_load(map, buf);
1053 ret = do_write_oneword(map, &cfi->chips[chipnum],
1058 ofs += map_bankwidth(map);
1059 buf += map_bankwidth(map);
1060 (*retlen) += map_bankwidth(map);
1061 len -= map_bankwidth(map);
1063 if (ofs >> cfi->chipshift) {
1066 if (chipnum == cfi->numchips)
/* Trailing partial word, padded with 0xFF. */
1071 if (len & (map_bankwidth(map)-1)) {
1074 datum = map_word_ff(map);
1075 datum = map_word_load_partial(map, datum, buf, 0, len);
1077 ret = do_write_oneword(map, &cfi->chips[chipnum],
/* Program up to one write buffer (wbufsize bytes, aligned on cmd_adr)
 * using the Write-to-Buffer command sequence (0xE8 setup, word count,
 * data, 0xD0 confirm), then poll SR.7 for completion with adaptive
 * timing.  'len' must fit within the buffer block containing 'adr'. */
1089 static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
1090 unsigned long adr, const u_char *buf, int len)
1092 struct cfi_private *cfi = map->fldrv_priv;
1093 map_word status, status_OK;
1094 unsigned long cmd_adr, timeo;
1095 int wbufsize, z, ret=0, bytes, words;
1097 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
/* Commands must go to the buffer-aligned address. */
1099 cmd_adr = adr & ~(wbufsize-1);
1101 /* Let's determine this according to the interleave only once */
1102 status_OK = CMD(0x80);
1104 spin_lock(chip->mutex);
1105 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1107 spin_unlock(chip->mutex);
1111 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1112 [...], the device will not accept any more Write to Buffer commands".
1113 So we must check here and reset those bits if they're set. Otherwise
1114 we're just pissing in the wind */
1115 if (chip->state != FL_STATUS)
1116 map_write(map, CMD(0x70), cmd_adr);
1117 status = map_read(map, cmd_adr);
1118 if (map_word_bitsset(map, status, CMD(0x30))) {
1119 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
/* 0x50 = Clear Status Register, 0x70 = Read Status Register. */
1120 map_write(map, CMD(0x50), cmd_adr);
1121 map_write(map, CMD(0x70), cmd_adr);
1125 chip->state = FL_WRITING_TO_BUFFER;
/* 0xE8 = Write to Buffer setup; chip answers with SR.7 when ready. */
1129 map_write(map, CMD(0xe8), cmd_adr);
1131 status = map_read(map, cmd_adr);
1132 if (map_word_andequal(map, status, status_OK, status_OK))
1135 spin_unlock(chip->mutex);
1137 spin_lock(chip->mutex);
1140 /* Argh. Not ready for write to buffer */
1141 map_write(map, CMD(0x70), cmd_adr);
1142 chip->state = FL_STATUS;
1143 printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1144 status.x[0], map_read(map, cmd_adr).x[0]);
1145 /* Odd. Clear status bits */
1146 map_write(map, CMD(0x50), cmd_adr);
1147 map_write(map, CMD(0x70), cmd_adr);
1153 /* Write length of data to come */
/* Word count is encoded as N-1; '- !bytes' accounts for a final
   partial word counting as a full one. */
1154 bytes = len & (map_bankwidth(map)-1);
1155 words = len / map_bankwidth(map);
1156 map_write(map, CMD(words - !bytes), cmd_adr );
/* Fill the buffer: whole words first... */
1160 while(z < words * map_bankwidth(map)) {
1161 map_word datum = map_word_load(map, buf);
1162 map_write(map, datum, adr+z);
1164 z += map_bankwidth(map);
1165 buf += map_bankwidth(map);
/* ...then any trailing partial word, padded with 0xFF. */
1171 datum = map_word_ff(map);
1172 datum = map_word_load_partial(map, datum, buf, 0, bytes);
1173 map_write(map, datum, adr+z);
/* 0xD0 = confirm: start the actual programming. */
1177 map_write(map, CMD(0xd0), cmd_adr);
1178 chip->state = FL_WRITING;
1180 spin_unlock(chip->mutex);
1181 INVALIDATE_CACHED_RANGE(map, adr, len);
1182 cfi_udelay(chip->buffer_write_time);
1183 spin_lock(chip->mutex);
1185 timeo = jiffies + (HZ/2);
1188 if (chip->state != FL_WRITING) {
1189 /* Someone's suspended the write. Sleep */
1190 DECLARE_WAITQUEUE(wait, current);
1191 set_current_state(TASK_UNINTERRUPTIBLE);
1192 add_wait_queue(&chip->wq, &wait);
1193 spin_unlock(chip->mutex);
1195 remove_wait_queue(&chip->wq, &wait);
1196 timeo = jiffies + (HZ / 2); /* FIXME */
1197 spin_lock(chip->mutex);
/* Poll SR.7 (WSM ready). */
1201 status = map_read(map, cmd_adr);
1202 if (map_word_andequal(map, status, status_OK, status_OK))
1205 /* OK Still waiting */
1206 if (time_after(jiffies, timeo)) {
1207 chip->state = FL_STATUS;
1208 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1213 /* Latency issues. Drop the lock, wait a while and retry */
1214 spin_unlock(chip->mutex);
1217 spin_lock(chip->mutex);
/* Adaptive timing, as in do_write_oneword(). */
1220 chip->buffer_write_time--;
1221 if (!chip->buffer_write_time)
1222 chip->buffer_write_time++;
1225 chip->buffer_write_time++;
1227 /* Done and happy. */
1228 chip->state = FL_STATUS;
1230 /* check for lock bit */
/* SR.1 set = program attempted on a locked block. */
1231 if (map_word_bitsset(map, status, CMD(0x02))) {
1233 map_write(map, CMD(0x50), cmd_adr);
1234 /* put back into read status register mode */
1235 map_write(map, CMD(0x70), adr);
1240 put_chip(map, chip, cmd_adr);
1241 spin_unlock(chip->mutex);
/* mtd->write implementation (buffered mode): align the head via the
 * word-write path, then feed wbufsize-bounded pieces (never crossing a
 * write-buffer boundary) to do_write_buffer(), advancing across chip
 * boundaries. */
1245 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1246 size_t len, size_t *retlen, const u_char *buf)
1248 struct map_info *map = mtd->priv;
1249 struct cfi_private *cfi = map->fldrv_priv;
1250 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1259 chipnum = to >> cfi->chipshift;
1260 ofs = to - (chipnum << cfi->chipshift);
1262 /* If it's not bus-aligned, do the first word write */
1263 if (ofs & (map_bankwidth(map)-1)) {
/* Number of bytes up to the next bus-width boundary. */
1264 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1265 if (local_len > len)
1267 ret = cfi_intelext_write_words(mtd, to, local_len,
1275 if (ofs >> cfi->chipshift) {
1278 if (chipnum == cfi->numchips)
1284 /* We must not cross write block boundaries */
1285 int size = wbufsize - (ofs & (wbufsize-1));
1289 ret = do_write_buffer(map, &cfi->chips[chipnum],
/* Advance to the next chip when we step past a chip boundary. */
1299 if (ofs >> cfi->chipshift) {
1302 if (chipnum == cfi->numchips)
/* Per-block worker signature: erase / lock / unlock / print-status.
 * 'thunk' carries an operation-specific argument (e.g. lock vs unlock). */
1309 typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
1310 unsigned long adr, int len, void *thunk);
/*
 * Apply 'frob' to every erase block in [ofs, ofs+len) on a device with
 * variable-size erase regions.  Validates that both ends of the range
 * are aligned to the erasesize of the region they fall in, then walks
 * block by block, switching erase region and chip as boundaries are
 * crossed.  Returns -EINVAL on a bad range (error returns elided in
 * this view) or the first non-zero result from 'frob'.
 */
1312 static int cfi_intelext_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
1313 loff_t ofs, size_t len, void *thunk)
1315 struct map_info *map = mtd->priv;
1316 struct cfi_private *cfi = map->fldrv_priv;
1318 int chipnum, ret = 0;
1320 struct mtd_erase_region_info *regions = mtd->eraseregions;
/* Reject ranges that start or extend past the end of the device. */
1322 if (ofs > mtd->size)
1325 if ((len + ofs) > mtd->size)
1328 /* Check that both start and end of the requested erase are
1329 * aligned with the erasesize at the appropriate addresses.
1334 /* Skip all erase regions which are ended before the start of
1335 the requested erase. Actually, to save on the calculations,
1336 we skip to the first erase region which starts after the
1337 start of the requested erase, and then go back one.
1340 while (i < mtd->numeraseregions && ofs >= regions[i].offset)
1344 /* OK, now i is pointing at the erase region in which this
1345 erase request starts. Check the start of the requested
1346 erase range is aligned with the erase size which is in
1350 if (ofs & (regions[i].erasesize-1))
1353 /* Remember the erase region we start on */
1356 /* Next, check that the end of the requested erase is aligned
1357 * with the erase region at that address.
1360 while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
1363 /* As before, drop back one to point at the region in which
1364 the address actually falls
1368 if ((ofs + len) & (regions[i].erasesize-1))
/* Translate the start offset into (chip index, address within chip). */
1371 chipnum = ofs >> cfi->chipshift;
1372 adr = ofs - (chipnum << cfi->chipshift);
1377 unsigned long chipmask;
/* Block size of the erase region we are currently in. */
1378 int size = regions[i].erasesize;
1380 ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
/* Compare within-chip addresses: advance to the next erase region
 * when we step past the end of the current one. */
1388 chipmask = (1 << cfi->chipshift) - 1;
1389 if ((adr & chipmask) == ((regions[i].offset + size * regions[i].numblocks) & chipmask))
/* Address wrapped past chipshift: move on to the next chip. */
1392 if (adr >> cfi->chipshift) {
1396 if (chipnum >= cfi->numchips)
/*
 * Erase one erase block at 'adr' on 'chip' (worker for
 * cfi_intelext_varsize_frob; 'thunk' is unused here).
 * Sequence: claim the chip, clear the status register, issue the
 * Block Erase command pair (0x20 then 0xD0 confirm), sleep for
 * roughly half the typical erase time, then poll the status register
 * until the ready bit (0x80) is set — honouring erase suspend/resume
 * in the middle.  On completion the status bits are decoded to
 * distinguish command-sequence errors, locked blocks, low VPP and
 * plain erase failures.  Lock/return lines are partly elided in this
 * view.
 */
1405 static int do_erase_oneblock(struct map_info *map, struct flchip *chip,
1406 unsigned long adr, int len, void *thunk)
1408 struct cfi_private *cfi = map->fldrv_priv;
1409 map_word status, status_OK;
1410 unsigned long timeo;
1412 DECLARE_WAITQUEUE(wait, current);
1417 /* Let's determine this according to the interleave only once */
/* 0x80 = "WSM ready" bit of the Intel status register, replicated
 * across the interleave by CMD(). */
1418 status_OK = CMD(0x80);
1421 spin_lock(chip->mutex);
1422 ret = get_chip(map, chip, adr, FL_ERASING);
1424 spin_unlock(chip->mutex);
1429 /* Clear the status register first */
1430 map_write(map, CMD(0x50), adr);
/* Block Erase setup + confirm. */
1433 map_write(map, CMD(0x20), adr);
1434 map_write(map, CMD(0xD0), adr);
1435 chip->state = FL_ERASING;
1436 chip->erase_suspended = 0;
/* Drop the lock while the chip works; sleep ~half the typical
 * erase time (erase_time is in ms, hence /1000). */
1438 spin_unlock(chip->mutex);
1439 INVALIDATE_CACHED_RANGE(map, adr, len);
1440 set_current_state(TASK_UNINTERRUPTIBLE);
1441 schedule_timeout((chip->erase_time*HZ)/(2*1000));
1442 spin_lock(chip->mutex);
1444 /* FIXME. Use a timer to check this, and return immediately. */
1445 /* Once the state machine's known to be working I'll do that */
1447 timeo = jiffies + (HZ*20);
1449 if (chip->state != FL_ERASING) {
1450 /* Someone's suspended the erase. Sleep */
1451 set_current_state(TASK_UNINTERRUPTIBLE);
1452 add_wait_queue(&chip->wq, &wait);
1453 spin_unlock(chip->mutex);
1455 remove_wait_queue(&chip->wq, &wait);
1456 spin_lock(chip->mutex);
1459 if (chip->erase_suspended) {
1460 /* This erase was suspended and resumed.
1461 Adjust the timeout */
1462 timeo = jiffies + (HZ*20); /* FIXME */
1463 chip->erase_suspended = 0;
/* Poll: done when every interleaved chip reports the ready bit. */
1466 status = map_read(map, adr);
1467 if (map_word_andequal(map, status, status_OK, status_OK))
1470 /* OK Still waiting */
1471 if (time_after(jiffies, timeo)) {
/* 0x70 = Read Status Register command. */
1472 map_write(map, CMD(0x70), adr);
1473 chip->state = FL_STATUS;
1474 printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %lx, status = %lx.\n",
1475 adr, status.x[0], map_read(map, adr).x[0]);
1476 /* Clear status bits */
1477 map_write(map, CMD(0x50), adr);
1478 map_write(map, CMD(0x70), adr);
1480 spin_unlock(chip->mutex);
1484 /* Latency issues. Drop the lock, wait a while and retry */
1485 spin_unlock(chip->mutex);
1486 set_current_state(TASK_UNINTERRUPTIBLE);
1487 schedule_timeout(1);
1488 spin_lock(chip->mutex);
1494 /* We've broken this before. It doesn't hurt to be safe */
1495 map_write(map, CMD(0x70), adr);
1496 chip->state = FL_STATUS;
1497 status = map_read(map, adr);
1499 /* check for lock bit */
/* 0x3a = union of the status error bits checked below. */
1500 if (map_word_bitsset(map, status, CMD(0x3a))) {
1501 unsigned char chipstatus = status.x[0];
/* Interleaved chips may disagree; OR all per-device status bytes
 * together so no error bit is lost. */
1502 if (!map_word_equal(map, status, CMD(chipstatus))) {
1504 for (w=0; w<map_words(map); w++) {
1505 for (i = 0; i<cfi_interleave(cfi); i++) {
1506 chipstatus |= status.x[w] >> (cfi->device_type * 8);
1509 printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
1510 status.x[0], chipstatus);
1512 /* Reset the error bits */
1513 map_write(map, CMD(0x50), adr);
1514 map_write(map, CMD(0x70), adr);
/* Both program-status and erase-status bits set: bogus command
 * sequence, not a real media error. */
1516 if ((chipstatus & 0x30) == 0x30) {
1517 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
1519 } else if (chipstatus & 0x02) {
1520 /* Protection bit set */
1522 } else if (chipstatus & 0x8) {
/* VPP low during erase. */
1524 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
1526 } else if (chipstatus & 0x20) {
/* Erase error bit: retry the block (retry count elided here). */
1528 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
1529 timeo = jiffies + HZ;
1530 chip->state = FL_STATUS;
1531 spin_unlock(chip->mutex);
1534 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
1540 spin_unlock(chip->mutex);
/*
 * MTD erase entry point: run do_erase_oneblock over the requested
 * range via the varsize walker, then mark the request done and
 * invoke the caller's completion callback, if any.
 */
1544 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1546 unsigned long ofs, len;
1552 ret = cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1556 instr->state = MTD_ERASE_DONE;
1557 if (instr->callback)
1558 instr->callback(instr);
/*
 * MTD sync: quiesce every chip by claiming it and parking it in
 * FL_SYNCING, then release them all again.  The second loop walks
 * backwards from the last chip successfully claimed, restoring the
 * state each chip had before syncing.
 */
1563 static void cfi_intelext_sync (struct mtd_info *mtd)
1565 struct map_info *map = mtd->priv;
1566 struct cfi_private *cfi = map->fldrv_priv;
1568 struct flchip *chip;
1571 for (i=0; !ret && i<cfi->numchips; i++) {
1572 chip = &cfi->chips[i];
1574 spin_lock(chip->mutex);
1575 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1578 chip->oldstate = chip->state;
1579 chip->state = FL_SYNCING;
1580 /* No need to wake_up() on this state change -
1581 * as the whole point is that nobody can do anything
1582 * with the chip now anyway.
1585 spin_unlock(chip->mutex);
1588 /* Unlock the chips again */
1590 for (i--; i >=0; i--) {
1591 chip = &cfi->chips[i];
1593 spin_lock(chip->mutex);
/* Only undo chips we actually put into FL_SYNCING. */
1595 if (chip->state == FL_SYNCING) {
1596 chip->state = chip->oldstate;
1599 spin_unlock(chip->mutex);
1603 #ifdef DEBUG_LOCK_BITS
/*
 * Debug-only frob worker: enter Read Identifier mode (0x90) and dump
 * the block lock status word for 'adr'.  Leaves the chip in
 * FL_JEDEC_QUERY; only compiled with DEBUG_LOCK_BITS.
 */
1604 static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip,
1605 unsigned long adr, int len, void *thunk)
1607 struct cfi_private *cfi = map->fldrv_priv;
/* Byte offset per query location, accounting for interleave/width. */
1608 int ofs_factor = cfi->interleave * cfi->device_type;
1610 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1611 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1612 adr, cfi_read_query(map, adr+(2*ofs_factor)));
1613 chip->state = FL_JEDEC_QUERY;
/* Thunk values selecting the operation inside do_xxlock_oneblock. */
1618 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1619 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
/*
 * Lock or unlock one block at 'adr', chosen by 'thunk'.
 * Issues 0x60 (lock setup) followed by 0x01 (set lock bit) or 0xD0
 * (clear lock bits), then polls the status register for the ready
 * bit (0x80) with a 20s timeout.
 */
1621 static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1622 unsigned long adr, int len, void *thunk)
1624 struct cfi_private *cfi = map->fldrv_priv;
1625 map_word status, status_OK;
1626 unsigned long timeo = jiffies + HZ;
1631 /* Let's determine this according to the interleave only once */
1632 status_OK = CMD(0x80);
1634 spin_lock(chip->mutex);
1635 ret = get_chip(map, chip, adr, FL_LOCKING);
1637 spin_unlock(chip->mutex);
/* Lock setup, then the operation selected by the thunk. */
1642 map_write(map, CMD(0x60), adr);
1644 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1645 map_write(map, CMD(0x01), adr);
1646 chip->state = FL_LOCKING;
1647 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1648 map_write(map, CMD(0xD0), adr);
1649 chip->state = FL_UNLOCKING;
/* Give the chip time to do the work before we start polling. */
1653 spin_unlock(chip->mutex);
1654 schedule_timeout(HZ);
1655 spin_lock(chip->mutex);
1657 /* FIXME. Use a timer to check this, and return immediately. */
1658 /* Once the state machine's known to be working I'll do that */
1660 timeo = jiffies + (HZ*20);
1663 status = map_read(map, adr);
1664 if (map_word_andequal(map, status, status_OK, status_OK))
1667 /* OK Still waiting */
1668 if (time_after(jiffies, timeo)) {
/* Switch to Read Status Register mode before reporting. */
1669 map_write(map, CMD(0x70), adr);
1670 chip->state = FL_STATUS;
1671 printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n",
1672 status.x[0], map_read(map, adr).x[0]);
1674 spin_unlock(chip->mutex);
1678 /* Latency issues. Drop the lock, wait a while and retry */
1679 spin_unlock(chip->mutex);
1681 spin_lock(chip->mutex);
1684 /* Done and happy. */
1685 chip->state = FL_STATUS;
1686 put_chip(map, chip, adr);
1687 spin_unlock(chip->mutex);
/*
 * MTD lock entry point: set the lock bit on every block in
 * [ofs, ofs+len) via the varsize walker.  With DEBUG_LOCK_BITS the
 * lock status of the range is dumped before and after.
 */
1691 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1695 #ifdef DEBUG_LOCK_BITS
1696 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1697 __FUNCTION__, ofs, len);
1698 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1702 ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1703 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1705 #ifdef DEBUG_LOCK_BITS
1706 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1708 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
/*
 * MTD unlock entry point: mirror of cfi_intelext_lock(), clearing the
 * lock bits on every block in [ofs, ofs+len).
 */
1715 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1719 #ifdef DEBUG_LOCK_BITS
1720 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1721 __FUNCTION__, ofs, len);
1722 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1726 ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1727 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1729 #ifdef DEBUG_LOCK_BITS
1730 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1732 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
/*
 * Power-management suspend: park every idle chip in FL_PM_SUSPENDED.
 * Chips that are busy (not in a state handled by the switch) cause
 * failure, after which the second loop walks back and restores the
 * chips already suspended.  Returns 0 on success (return statements
 * elided in this view).
 */
1739 static int cfi_intelext_suspend(struct mtd_info *mtd)
1741 struct map_info *map = mtd->priv;
1742 struct cfi_private *cfi = map->fldrv_priv;
1744 struct flchip *chip;
1747 for (i=0; !ret && i<cfi->numchips; i++) {
1748 chip = &cfi->chips[i];
1750 spin_lock(chip->mutex);
1752 switch (chip->state) {
1756 case FL_JEDEC_QUERY:
/* Only suspend if no operation is pending underneath. */
1757 if (chip->oldstate == FL_READY) {
1758 chip->oldstate = chip->state;
1759 chip->state = FL_PM_SUSPENDED;
1760 /* No need to wake_up() on this state change -
1761 * as the whole point is that nobody can do anything
1762 * with the chip now anyway.
1768 case FL_PM_SUSPENDED:
1771 spin_unlock(chip->mutex);
1774 /* Unlock the chips again */
/* On failure, undo the suspend on every chip we already parked. */
1777 for (i--; i >=0; i--) {
1778 chip = &cfi->chips[i];
1780 spin_lock(chip->mutex);
1782 if (chip->state == FL_PM_SUSPENDED) {
1783 /* No need to force it into a known state here,
1784 because we're returning failure, and it didn't
1786 chip->state = chip->oldstate;
1789 spin_unlock(chip->mutex);
/*
 * Power-management resume: for each chip that was suspended, issue
 * the Read Array command (0xFF) to put it in a known state — the
 * chip may have been power cycled while suspended — and mark it
 * FL_READY again.
 */
1796 static void cfi_intelext_resume(struct mtd_info *mtd)
1798 struct map_info *map = mtd->priv;
1799 struct cfi_private *cfi = map->fldrv_priv;
1801 struct flchip *chip;
1803 for (i=0; i<cfi->numchips; i++) {
1805 chip = &cfi->chips[i];
1807 spin_lock(chip->mutex);
1809 /* Go to known state. Chip may have been power cycled */
1810 if (chip->state == FL_PM_SUSPENDED) {
1811 map_write(map, CMD(0xFF), cfi->chips[i].start);
1812 chip->state = FL_READY;
1816 spin_unlock(chip->mutex);
/*
 * Driver teardown: free the command-set private data, the per-chip
 * private data (chips[0].priv; presumably shared/representative —
 * TODO confirm against the probe path) and the erase-region table.
 * kfree(NULL) is a no-op, so missing pieces are harmless.
 */
1820 static void cfi_intelext_destroy(struct mtd_info *mtd)
1822 struct map_info *map = mtd->priv;
1823 struct cfi_private *cfi = map->fldrv_priv;
1824 kfree(cfi->cmdset_priv);
1826 kfree(cfi->chips[0].priv);
1828 kfree(mtd->eraseregions);
/* Inter-module names under which this command set is looked up. */
1831 static char im_name_1[]="cfi_cmdset_0001";
1832 static char im_name_3[]="cfi_cmdset_0003";
/*
 * Module init: register the probe entry point under both the 0x0001
 * and 0x0003 command-set names.  Both deliberately map to
 * cfi_cmdset_0001 — the 0x0003 set is handled by the same code.
 */
1834 int __init cfi_intelext_init(void)
1836 inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
1837 inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
/* Module exit: drop both inter-module registrations. */
1841 static void __exit cfi_intelext_exit(void)
1843 inter_module_unregister(im_name_1);
1844 inter_module_unregister(im_name_3);
/* Standard module entry/exit hookup and metadata. */
1847 module_init(cfi_intelext_init);
1848 module_exit(cfi_intelext_exit);
1850 MODULE_LICENSE("GPL");
1851 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
1852 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");