2 * Common Flash Interface support:
3 * ST Advanced Architecture Command Set (ID 0x0020)
5 * (C) 2000 Red Hat. GPL'd
7 * $Id: cfi_cmdset_0020.c,v 1.13 2004/07/12 21:52:50 dwmw2 Exp $
9 * 10/10/2000 Nicolas Pitre <nico@cam.org>
10 * - completely revamped method functions so they are aware and
11 * independent of the flash geometry (buswidth, interleave, etc.)
12 * - scalability vs code size is completely set at compile-time
13 * (see include/linux/mtd/cfi.h for selection)
14 * - optimized write buffer method
15 * 06/21/2002 Joern Engel <joern@wh.fh-wedel.de> and others
16 * - modified Intel Command Set 0x0001 to support ST Advanced Architecture
17 * (command set 0x0020)
18 * - added a writev function
21 #include <linux/version.h>
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/init.h>
28 #include <asm/byteorder.h>
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/cfi.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/compatmac.h>
40 static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
41 static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
42 static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
43 unsigned long count, loff_t to, size_t *retlen);
44 static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
45 static void cfi_staa_sync (struct mtd_info *);
46 static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
47 static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
48 static int cfi_staa_suspend (struct mtd_info *);
49 static void cfi_staa_resume (struct mtd_info *);
51 static void cfi_staa_destroy(struct mtd_info *);
53 struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
55 static struct mtd_info *cfi_staa_setup (struct map_info *);
/*
 * Chip-driver descriptor registered with the MTD map layer.
 * .probe is NULL because this command set is only entered through
 * cfi_cmdset_0020(); .destroy releases the private extended-query data.
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this excerpt — source lines appear to have been elided.
 */
57 static struct mtd_chip_driver cfi_staa_chipdrv = {
58 .probe = NULL, /* Not usable directly */
59 .destroy = cfi_staa_destroy,
60 .name = "cfi_cmdset_0020",
64 /* #define DEBUG_LOCK_BITS */
65 //#define DEBUG_CFI_FEATURES
/*
 * Debug-only dump of the CFI primary-vendor extended query table:
 * feature-support bits, suspend-command support, block status register
 * mask, and optimum Vcc/Vpp voltages. Compiled only when
 * DEBUG_CFI_FEATURES is defined. Several structural lines (opening
 * brace, loop braces, the SuspendCmdSupport loop header) are missing
 * from this excerpt.
 */
67 #ifdef DEBUG_CFI_FEATURES
68 static void cfi_tell_features(struct cfi_pri_intelext *extp)
71 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
72 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
73 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
74 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
75 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
76 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
77 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
78 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
79 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
80 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
/* Bits 9..31 have no defined name here; report any that are set. */
81 for (i=9; i<32; i++) {
82 if (extp->FeatureSupport & (1<<i))
83 printk(" - Unknown Bit %X: supported\n", i);
86 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
87 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
89 if (extp->SuspendCmdSupport & (1<<i))
90 printk(" - Unknown Bit %X: supported\n", i);
93 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
94 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
95 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
96 for (i=2; i<16; i++) {
97 if (extp->BlkStatusRegMask & (1<<i))
98 printk(" - Unknown Bit %X Active: yes\n",i);
/* Voltages encode integer volts in the high byte, tenths in the low nibble. */
101 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
102 extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
103 if (extp->VppOptimal)
104 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
105 extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
109 /* This routine is made available to other mtd code via
110 * inter_module_register. It must only be accessed through
111 * inter_module_get which will bump the use count of this module. The
112 * addresses passed back in cfi are valid as long as the use count of
113 * this module is non-zero, i.e. between inter_module_get and
114 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
/*
 * Entry point for the 0x0020 command set: read the vendor extended
 * query table from the chip, byte-swap the multi-byte fields, install
 * it as cmdset_priv, seed per-chip timing defaults, and hand off to
 * cfi_staa_setup() to build the mtd_info.
 * NOTE(review): the NULL-check on cfi_read_pri()'s return (original
 * lines ~131-132) is elided in this excerpt — presumably present in
 * the full source; confirm before relying on extp below.
 */
116 struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
118 struct cfi_private *cfi = map->fldrv_priv;
123 * It's a real CFI chip, not one for which the probe
124 * routine faked a CFI structure. So we read the feature
/* Extended-table address comes from the primary or alternate query. */
127 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
128 struct cfi_pri_intelext *extp;
130 extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
134 /* Do some byteswapping if necessary */
135 extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
136 extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
138 #ifdef DEBUG_CFI_FEATURES
139 /* Tell the user about it in lots of lovely detail */
140 cfi_tell_features(extp);
143 /* Install our own private info structure */
144 cfi->cmdset_priv = extp;
/* Default timings (units per cfi_udelay usage elsewhere in this file). */
147 for (i=0; i< cfi->numchips; i++) {
148 cfi->chips[i].word_write_time = 128;
149 cfi->chips[i].buffer_write_time = 128;
150 cfi->chips[i].erase_time = 1024;
153 return cfi_staa_setup(map);
/*
 * Allocate and populate the mtd_info for this map: total size, erase
 * regions replicated per chip (scaled by interleave), and the method
 * table pointing at the cfi_staa_* operations. Returns the mtd_info,
 * or NULL on allocation/geometry failure (the "return NULL" lines are
 * elided in this excerpt).
 */
156 static struct mtd_info *cfi_staa_setup(struct map_info *map)
158 struct cfi_private *cfi = map->fldrv_priv;
159 struct mtd_info *mtd;
160 unsigned long offset = 0;
/* DevSize is log2 of one chip's size; multiply by interleave width. */
162 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
164 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
165 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
168 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
169 kfree(cfi->cmdset_priv);
173 memset(mtd, 0, sizeof(*mtd));
175 mtd->type = MTD_NORFLASH;
176 mtd->size = devsize * cfi->numchips;
178 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
179 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
180 * mtd->numeraseregions, GFP_KERNEL);
181 if (!mtd->eraseregions) {
182 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
183 kfree(cfi->cmdset_priv);
/* Decode each CFI erase-region descriptor: high 16 bits give block
 * size in 256-byte units, low 16 bits give block count minus one. */
188 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
189 unsigned long ernum, ersize;
190 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
191 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
/* mtd->erasesize ends up as the LARGEST region block size. */
193 if (mtd->erasesize < ersize) {
194 mtd->erasesize = ersize;
/* Replicate this region's entry once per chip in the set. */
196 for (j=0; j<cfi->numchips; j++) {
197 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
198 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
199 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
201 offset += (ersize * ernum);
/* Sanity: the regions must exactly tile one interleaved chip. */
204 if (offset != devsize) {
206 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
207 kfree(mtd->eraseregions);
208 kfree(cfi->cmdset_priv);
213 for (i=0; i<mtd->numeraseregions;i++){
214 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
215 i,mtd->eraseregions[i].offset,
216 mtd->eraseregions[i].erasesize,
217 mtd->eraseregions[i].numblocks);
220 /* Also select the correct geometry setup too */
221 mtd->erase = cfi_staa_erase_varsize;
222 mtd->read = cfi_staa_read;
223 mtd->write = cfi_staa_write_buffers;
224 mtd->writev = cfi_staa_writev;
225 mtd->sync = cfi_staa_sync;
226 mtd->lock = cfi_staa_lock;
227 mtd->unlock = cfi_staa_unlock;
228 mtd->suspend = cfi_staa_suspend;
229 mtd->resume = cfi_staa_resume;
230 mtd->flags = MTD_CAP_NORFLASH;
231 mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */
232 mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
233 map->fldrv = &cfi_staa_chipdrv;
234 __module_get(THIS_MODULE);
235 mtd->name = map->name;
/*
 * Read 'len' bytes at 'adr' from a single chip into 'buf'.
 * Takes chip->mutex; if the chip is mid-erase and the part supports
 * erase suspend (FeatureSupport bit 1), suspends the erase, does the
 * copy, then resumes. Otherwise waits (on chip->wq) for the chip to
 * go idle. Several case labels, gotos and the schedule() calls are
 * elided in this excerpt.
 */
240 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
242 map_word status, status_OK;
244 DECLARE_WAITQUEUE(wait, current);
246 unsigned long cmd_addr;
247 struct cfi_private *cfi = map->fldrv_priv;
251 /* Ensure cmd read/writes are aligned. */
252 cmd_addr = adr & ~(map_bankwidth(map)-1);
254 /* Let's determine this according to the interleave only once */
255 status_OK = CMD(0x80);
257 timeo = jiffies + HZ;
259 spin_lock_bh(chip->mutex);
261 /* Check that the chip's ready to talk to us.
262 * If it's in FL_ERASING state, suspend it and make it talk now.
264 switch (chip->state) {
266 if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
267 goto sleep; /* We don't support erase suspend */
/* 0xb0 = Erase Suspend. */
269 map_write (map, CMD(0xb0), cmd_addr);
270 /* If the flash has finished erasing, then 'erase suspend'
271 * appears to make some (28F320) flash devices switch to
272 * 'read' mode. Make sure that we switch to 'read status'
273 * mode so we get the right data. --rmk
275 map_write(map, CMD(0x70), cmd_addr);
276 chip->oldstate = FL_ERASING;
277 chip->state = FL_ERASE_SUSPENDING;
278 // printk("Erase suspending at 0x%lx\n", cmd_addr);
280 status = map_read(map, cmd_addr);
281 if (map_word_andequal(map, status, status_OK, status_OK))
/* Suspend did not complete in time: resume the erase and bail. */
284 if (time_after(jiffies, timeo)) {
286 map_write(map, CMD(0xd0), cmd_addr);
287 /* make sure we're in 'read status' mode */
288 map_write(map, CMD(0x70), cmd_addr);
289 chip->state = FL_ERASING;
290 spin_unlock_bh(chip->mutex);
291 printk(KERN_ERR "Chip not ready after erase "
292 "suspended: status = 0x%lx\n", status.x[0]);
296 spin_unlock_bh(chip->mutex);
298 spin_lock_bh(chip->mutex);
/* 0xff = Read Array: put the chip into normal read mode. */
302 map_write(map, CMD(0xff), cmd_addr);
303 chip->state = FL_READY;
316 map_write(map, CMD(0x70), cmd_addr);
317 chip->state = FL_STATUS;
320 status = map_read(map, cmd_addr);
321 if (map_word_andequal(map, status, status_OK, status_OK)) {
322 map_write(map, CMD(0xff), cmd_addr);
323 chip->state = FL_READY;
327 /* Urgh. Chip not yet ready to talk to us. */
328 if (time_after(jiffies, timeo)) {
329 spin_unlock_bh(chip->mutex);
330 printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
334 /* Latency issues. Drop the lock, wait a while and retry */
335 spin_unlock_bh(chip->mutex);
341 /* Stick ourselves on a wait queue to be woken when
342 someone changes the status */
343 set_current_state(TASK_UNINTERRUPTIBLE);
344 add_wait_queue(&chip->wq, &wait);
345 spin_unlock_bh(chip->mutex);
347 remove_wait_queue(&chip->wq, &wait);
348 timeo = jiffies + HZ;
/* Chip is in read-array mode: do the actual copy. */
352 map_copy_from(map, buf, adr, len);
/* If we suspended an erase above, resume it now. */
355 chip->state = chip->oldstate;
356 /* What if one interleaved chip has finished and the
357 other hasn't? The old code would leave the finished
358 one in READY mode. That's bad, and caused -EROFS
359 errors to be returned from do_erase_oneblock because
360 that's the only bit it checked for at the time.
361 As the state machine appears to explicitly allow
362 sending the 0x70 (Read Status) command to an erasing
363 chip and expecting it to be ignored, that's what we
365 map_write(map, CMD(0xd0), cmd_addr);
366 map_write(map, CMD(0x70), cmd_addr);
370 spin_unlock_bh(chip->mutex);
/*
 * mtd->read implementation: split a possibly multi-chip read at chip
 * boundaries (cfi->chipshift) and call do_read_onechip() per segment.
 * The loop tail (retlen/buf advance, next-chip step) is elided here.
 */
374 static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
376 struct map_info *map = mtd->priv;
377 struct cfi_private *cfi = map->fldrv_priv;
382 /* ofs: offset within the first chip that the first read should start */
383 chipnum = (from >> cfi->chipshift);
384 ofs = from - (chipnum << cfi->chipshift);
389 unsigned long thislen;
391 if (chipnum >= cfi->numchips)
/* Clamp this segment so it does not cross into the next chip. */
394 if ((len + ofs -1) >> cfi->chipshift)
395 thislen = (1<<cfi->chipshift) - ofs;
399 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/*
 * Program up to one write-buffer's worth ('len' bytes, bus-aligned)
 * at 'adr' on one chip using the 0xE8 Write-to-Buffer sequence:
 * wait ready -> 0xE8 -> word count -> data words -> 0xD0 confirm ->
 * poll status. Adapts chip->buffer_write_time up/down based on
 * whether the first poll succeeded. Returns 0, -EINVAL (misaligned),
 * -EIO/-EROFS on status errors, or -ETIME-style paths whose return
 * lines are elided in this excerpt.
 */
413 static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
414 unsigned long adr, const u_char *buf, int len)
416 struct cfi_private *cfi = map->fldrv_priv;
417 map_word status, status_OK;
418 unsigned long cmd_adr, timeo;
419 DECLARE_WAITQUEUE(wait, current);
422 /* M58LW064A requires bus alignment for buffer writes -- saw */
423 if (adr & (map_bankwidth(map)-1))
426 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
/* Commands go to the start of the write-buffer-aligned region. */
428 cmd_adr = adr & ~(wbufsize-1);
430 /* Let's determine this according to the interleave only once */
431 status_OK = CMD(0x80);
433 timeo = jiffies + HZ;
436 #ifdef DEBUG_CFI_FEATURES
437 printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
439 spin_lock_bh(chip->mutex);
441 /* Check that the chip's ready to talk to us.
442 * Later, we can actually think about interrupting it
443 * if it's in FL_ERASING state.
444 * Not just yet, though.
446 switch (chip->state) {
452 map_write(map, CMD(0x70), cmd_adr);
453 chip->state = FL_STATUS;
454 #ifdef DEBUG_CFI_FEATURES
455 printk("%s: 1 status[%x]\n", __FUNCTION__, map_read(map, cmd_adr));
459 status = map_read(map, cmd_adr);
460 if (map_word_andequal(map, status, status_OK, status_OK))
462 /* Urgh. Chip not yet ready to talk to us. */
463 if (time_after(jiffies, timeo)) {
464 spin_unlock_bh(chip->mutex);
465 printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
466 status.x[0], map_read(map, cmd_adr).x[0]);
470 /* Latency issues. Drop the lock, wait a while and retry */
471 spin_unlock_bh(chip->mutex);
476 /* Stick ourselves on a wait queue to be woken when
477 someone changes the status */
478 set_current_state(TASK_UNINTERRUPTIBLE);
479 add_wait_queue(&chip->wq, &wait);
480 spin_unlock_bh(chip->mutex);
482 remove_wait_queue(&chip->wq, &wait);
483 timeo = jiffies + HZ;
/* 0xe8 = Write to Buffer; chip answers with buffer-available status. */
488 map_write(map, CMD(0xe8), cmd_adr);
489 chip->state = FL_WRITING_TO_BUFFER;
493 status = map_read(map, cmd_adr);
494 if (map_word_andequal(map, status, status_OK, status_OK))
497 spin_unlock_bh(chip->mutex);
499 spin_lock_bh(chip->mutex);
502 /* Argh. Not ready for write to buffer */
504 map_write(map, CMD(0x70), cmd_adr);
505 chip->state = FL_STATUS;
506 spin_unlock_bh(chip->mutex);
507 printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
512 /* Write length of data to come */
513 map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );
/* Stream the data words (loop header partially elided here). */
517 z += map_bankwidth(map), buf += map_bankwidth(map)) {
519 d = map_word_load(map, buf);
520 map_write(map, d, adr+z);
523 map_write(map, CMD(0xd0), cmd_adr);
524 chip->state = FL_WRITING;
/* Give the program operation its expected time before first poll. */
526 spin_unlock_bh(chip->mutex);
527 cfi_udelay(chip->buffer_write_time);
528 spin_lock_bh(chip->mutex);
530 timeo = jiffies + (HZ/2);
533 if (chip->state != FL_WRITING) {
534 /* Someone's suspended the write. Sleep */
535 set_current_state(TASK_UNINTERRUPTIBLE);
536 add_wait_queue(&chip->wq, &wait);
537 spin_unlock_bh(chip->mutex);
539 remove_wait_queue(&chip->wq, &wait);
540 timeo = jiffies + (HZ / 2); /* FIXME */
541 spin_lock_bh(chip->mutex);
545 status = map_read(map, cmd_adr);
546 if (map_word_andequal(map, status, status_OK, status_OK))
549 /* OK Still waiting */
550 if (time_after(jiffies, timeo)) {
/* 0x50 = Clear Status Register. */
552 map_write(map, CMD(0x50), cmd_adr);
553 /* put back into read status register mode */
554 map_write(map, CMD(0x70), adr);
555 chip->state = FL_STATUS;
557 spin_unlock_bh(chip->mutex);
558 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
562 /* Latency issues. Drop the lock, wait a while and retry */
563 spin_unlock_bh(chip->mutex);
566 spin_lock_bh(chip->mutex);
/* Adaptive timing: shorten the delay if the first poll succeeded,
 * lengthen it if we had to poll again (branch structure elided). */
569 chip->buffer_write_time--;
570 if (!chip->buffer_write_time)
571 chip->buffer_write_time++;
574 chip->buffer_write_time++;
576 /* Done and happy. */
578 chip->state = FL_STATUS;
580 /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
581 if (map_word_bitsset(map, status, CMD(0x3a))) {
582 #ifdef DEBUG_CFI_FEATURES
583 printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]);
586 map_write(map, CMD(0x50), cmd_adr);
587 /* put back into read status register mode */
588 map_write(map, CMD(0x70), adr);
/* SR bit 1 (block locked) -> -EROFS, anything else -> -EIO. */
590 spin_unlock_bh(chip->mutex);
591 return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
594 spin_unlock_bh(chip->mutex);
/*
 * mtd->write implementation: chop the request into chunks that never
 * cross a write-buffer boundary (wbufsize) nor a chip boundary, and
 * feed each chunk to do_write_buffer(). The loop tail that advances
 * buf/retlen and re-checks len is elided in this excerpt.
 */
599 static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
600 size_t len, size_t *retlen, const u_char *buf)
602 struct map_info *map = mtd->priv;
603 struct cfi_private *cfi = map->fldrv_priv;
604 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
613 chipnum = to >> cfi->chipshift;
614 ofs = to - (chipnum << cfi->chipshift);
616 #ifdef DEBUG_CFI_FEATURES
617 printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map));
618 printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
619 printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len);
622 /* Write buffer is worth it only if more than one word to write... */
624 /* We must not cross write block boundaries */
625 int size = wbufsize - (ofs & (wbufsize-1));
630 ret = do_write_buffer(map, &cfi->chips[chipnum],
/* Move to the next chip when ofs has run past this chip's size. */
640 if (ofs >> cfi->chipshift) {
643 if (chipnum == cfi->numchips)
652 * Writev for ECC-Flashes is a little more complicated. We need to maintain
653 * a small buffer for this.
654 * XXX: If the buffer size is not a multiple of 2, this will break
/* ECCBUF_SIZE must be a power of two for DIV/MOD below to be valid. */
656 #define ECCBUF_SIZE (mtd->eccsize)
657 #define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
658 #define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
/*
 * mtd->writev implementation: gathers the iovecs into ECC-page-sized
 * writes via mtd->write. A partial head is accumulated in 'buffer'
 * until a full ECC page exists; aligned middles are written directly
 * from the iovec; a partial tail is 0xff-padded and flushed at the
 * end. Error-path labels and the kfree of 'buffer' are elided here.
 */
660 cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
661 unsigned long count, loff_t to, size_t *retlen)
664 size_t totlen = 0, thislen;
670 /* We should fall back to a general writev implementation.
671 * Until that is written, just break.
675 buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
679 for (i=0; i<count; i++) {
680 size_t elem_len = vecs[i].iov_len;
681 void *elem_base = vecs[i].iov_base;
682 if (!elem_len) /* FIXME: Might be unnecessary. Check that */
684 if (buflen) { /* cut off head */
685 if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
686 memcpy(buffer+buflen, elem_base, elem_len);
690 memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
691 ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
693 if (ret || thislen != ECCBUF_SIZE)
695 elem_len -= thislen-buflen;
696 elem_base += thislen-buflen;
699 if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
700 ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
702 if (ret || thislen != ECCBUF_DIV(elem_len))
706 buflen = ECCBUF_MOD(elem_len); /* cut off tail */
/* Pad the partial tail with 0xff (erased-state) before buffering. */
708 memset(buffer, 0xff, ECCBUF_SIZE);
709 memcpy(buffer, elem_base + thislen, buflen);
712 if (buflen) { /* flush last page, even if not full */
713 /* This is sometimes intended behaviour, really */
714 ret = mtd->write(mtd, to, buflen, &thislen, buffer);
/* NOTE(review): suspected bug — this flush writes 'buflen' bytes but
 * compares thislen against ECCBUF_SIZE; for a short final page the
 * comparison looks like it should be against buflen. Confirm against
 * mtd->write's retlen semantics before changing. */
716 if (ret || thislen != ECCBUF_SIZE)
/*
 * Erase one block at 'adr' on one chip: wait ready, clear status
 * (0x50), issue Block Erase (0x20 + 0xD0 confirm), then poll up to
 * 20s, tolerating erase-suspend by sleeping on chip->wq. On a bad
 * status, decode the sticky error bits and map them to -EIO/-EROFS,
 * retrying once on an erase-error bit. Many case labels, gotos and
 * return statements are elided in this excerpt.
 */
726 static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
728 struct cfi_private *cfi = map->fldrv_priv;
729 map_word status, status_OK;
732 DECLARE_WAITQUEUE(wait, current);
737 /* Let's determine this according to the interleave only once */
738 status_OK = CMD(0x80);
740 timeo = jiffies + HZ;
742 spin_lock_bh(chip->mutex);
744 /* Check that the chip's ready to talk to us. */
745 switch (chip->state) {
749 map_write(map, CMD(0x70), adr);
750 chip->state = FL_STATUS;
753 status = map_read(map, adr);
754 if (map_word_andequal(map, status, status_OK, status_OK))
757 /* Urgh. Chip not yet ready to talk to us. */
758 if (time_after(jiffies, timeo)) {
759 spin_unlock_bh(chip->mutex);
760 printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
764 /* Latency issues. Drop the lock, wait a while and retry */
765 spin_unlock_bh(chip->mutex);
770 /* Stick ourselves on a wait queue to be woken when
771 someone changes the status */
772 set_current_state(TASK_UNINTERRUPTIBLE);
773 add_wait_queue(&chip->wq, &wait);
774 spin_unlock_bh(chip->mutex);
776 remove_wait_queue(&chip->wq, &wait);
777 timeo = jiffies + HZ;
782 /* Clear the status register first */
783 map_write(map, CMD(0x50), adr);
/* 0x20 = Block Erase setup, 0xD0 = confirm. */
786 map_write(map, CMD(0x20), adr);
787 map_write(map, CMD(0xD0), adr);
788 chip->state = FL_ERASING;
/* Erase takes on the order of a second; sleep before polling. */
790 spin_unlock_bh(chip->mutex);
791 schedule_timeout(HZ);
792 spin_lock_bh(chip->mutex);
794 /* FIXME. Use a timer to check this, and return immediately. */
795 /* Once the state machine's known to be working I'll do that */
797 timeo = jiffies + (HZ*20);
799 if (chip->state != FL_ERASING) {
800 /* Someone's suspended the erase. Sleep */
801 set_current_state(TASK_UNINTERRUPTIBLE);
802 add_wait_queue(&chip->wq, &wait);
803 spin_unlock_bh(chip->mutex);
805 remove_wait_queue(&chip->wq, &wait);
806 timeo = jiffies + (HZ*20); /* FIXME */
807 spin_lock_bh(chip->mutex);
811 status = map_read(map, adr);
812 if (map_word_andequal(map, status, status_OK, status_OK))
815 /* OK Still waiting */
816 if (time_after(jiffies, timeo)) {
817 map_write(map, CMD(0x70), adr);
818 chip->state = FL_STATUS;
819 printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
821 spin_unlock_bh(chip->mutex);
825 /* Latency issues. Drop the lock, wait a while and retry */
826 spin_unlock_bh(chip->mutex);
828 spin_lock_bh(chip->mutex);
834 /* We've broken this before. It doesn't hurt to be safe */
835 map_write(map, CMD(0x70), adr);
836 chip->state = FL_STATUS;
837 status = map_read(map, adr);
839 /* check for lock bit */
840 if (map_word_bitsset(map, status, CMD(0x3a))) {
841 unsigned char chipstatus = status.x[0];
842 if (!map_word_equal(map, status, CMD(chipstatus))) {
/* NOTE(review): this OR-merge of per-chip status bytes shifts by a
 * constant (cfi->device_type * 8) rather than per-iteration — looks
 * like it only folds in one neighbour byte per word. Verify against
 * the interleave layout before relying on the merged value. */
844 for (w=0; w<map_words(map); w++) {
845 for (i = 0; i<cfi_interleave(cfi); i++) {
846 chipstatus |= status.x[w] >> (cfi->device_type * 8);
849 printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
850 status.x[0], chipstatus);
852 /* Reset the error bits */
853 map_write(map, CMD(0x50), adr);
854 map_write(map, CMD(0x70), adr);
/* Decode SR error bits: 0x30 both set = bad command sequence,
 * 0x02 = block locked, 0x08 = VPP low, 0x20 = erase failed. */
856 if ((chipstatus & 0x30) == 0x30) {
857 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
859 } else if (chipstatus & 0x02) {
860 /* Protection bit set */
862 } else if (chipstatus & 0x8) {
864 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
866 } else if (chipstatus & 0x20) {
868 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
869 timeo = jiffies + HZ;
870 chip->state = FL_STATUS;
871 spin_unlock_bh(chip->mutex);
874 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
880 spin_unlock_bh(chip->mutex);
/*
 * mtd->erase implementation for variable-size erase regions: validate
 * that the requested range lies within the device and that both ends
 * are aligned to the erase size of the region they fall in, then walk
 * the range calling do_erase_oneblock() per block, stepping 'i' into
 * the next region at region boundaries. Invokes instr->callback on
 * completion. Error returns and loop tails are elided in this excerpt.
 */
884 int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
885 { struct map_info *map = mtd->priv;
886 struct cfi_private *cfi = map->fldrv_priv;
887 unsigned long adr, len;
888 int chipnum, ret = 0;
890 struct mtd_erase_region_info *regions = mtd->eraseregions;
892 if (instr->addr > mtd->size)
895 if ((instr->len + instr->addr) > mtd->size)
898 /* Check that both start and end of the requested erase are
899 * aligned with the erasesize at the appropriate addresses.
904 /* Skip all erase regions which are ended before the start of
905 the requested erase. Actually, to save on the calculations,
906 we skip to the first erase region which starts after the
907 start of the requested erase, and then go back one.
910 while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
914 /* OK, now i is pointing at the erase region in which this
915 erase request starts. Check the start of the requested
916 erase range is aligned with the erase size which is in
920 if (instr->addr & (regions[i].erasesize-1))
923 /* Remember the erase region we start on */
926 /* Next, check that the end of the requested erase is aligned
927 * with the erase region at that address.
930 while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
933 /* As before, drop back one to point at the region in which
934 the address actually falls
938 if ((instr->addr + instr->len) & (regions[i].erasesize-1))
941 chipnum = instr->addr >> cfi->chipshift;
942 adr = instr->addr - (chipnum << cfi->chipshift);
948 ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
953 adr += regions[i].erasesize;
954 len -= regions[i].erasesize;
/* Advance to the next erase region when we cross its boundary
 * within the current chip. */
956 if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
959 if (adr >> cfi->chipshift) {
963 if (chipnum >= cfi->numchips)
968 instr->state = MTD_ERASE_DONE;
970 instr->callback(instr);
/*
 * mtd->sync implementation: wait for every chip to reach an idle
 * state, mark it FL_SYNCING so nothing else starts work, then restore
 * each chip's previous state and wake any waiters. The retry goto and
 * wake_up() lines are elided in this excerpt.
 */
975 static void cfi_staa_sync (struct mtd_info *mtd)
977 struct map_info *map = mtd->priv;
978 struct cfi_private *cfi = map->fldrv_priv;
982 DECLARE_WAITQUEUE(wait, current);
984 for (i=0; !ret && i<cfi->numchips; i++) {
985 chip = &cfi->chips[i];
988 spin_lock_bh(chip->mutex);
990 switch(chip->state) {
995 chip->oldstate = chip->state;
996 chip->state = FL_SYNCING;
997 /* No need to wake_up() on this state change -
998 * as the whole point is that nobody can do anything
999 * with the chip now anyway.
1002 spin_unlock_bh(chip->mutex);
1006 /* Not an idle state */
1007 add_wait_queue(&chip->wq, &wait);
1009 spin_unlock_bh(chip->mutex);
1011 remove_wait_queue(&chip->wq, &wait);
1017 /* Unlock the chips again */
/* Walk back over the chips we already marked, restoring state. */
1019 for (i--; i >=0; i--) {
1020 chip = &cfi->chips[i];
1022 spin_lock_bh(chip->mutex);
1024 if (chip->state == FL_SYNCING) {
1025 chip->state = chip->oldstate;
1028 spin_unlock_bh(chip->mutex);
/*
 * Lock one block at 'adr' on one chip: wait ready, then issue
 * Set Block Lock-Bit (0x60 + 0x01) and poll status for up to 2s.
 * Mirrors do_erase_oneblock's wait/retry structure; goto targets,
 * schedule() calls and return statements are elided in this excerpt.
 */
1032 static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1034 struct cfi_private *cfi = map->fldrv_priv;
1035 map_word status, status_OK;
1036 unsigned long timeo = jiffies + HZ;
1037 DECLARE_WAITQUEUE(wait, current);
1041 /* Let's determine this according to the interleave only once */
1042 status_OK = CMD(0x80);
1044 timeo = jiffies + HZ;
1046 spin_lock_bh(chip->mutex);
1048 /* Check that the chip's ready to talk to us. */
1049 switch (chip->state) {
1051 case FL_JEDEC_QUERY:
1053 map_write(map, CMD(0x70), adr);
1054 chip->state = FL_STATUS;
1057 status = map_read(map, adr);
1058 if (map_word_andequal(map, status, status_OK, status_OK))
1061 /* Urgh. Chip not yet ready to talk to us. */
1062 if (time_after(jiffies, timeo)) {
1063 spin_unlock_bh(chip->mutex);
1064 printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
1068 /* Latency issues. Drop the lock, wait a while and retry */
1069 spin_unlock_bh(chip->mutex);
1074 /* Stick ourselves on a wait queue to be woken when
1075 someone changes the status */
1076 set_current_state(TASK_UNINTERRUPTIBLE);
1077 add_wait_queue(&chip->wq, &wait);
1078 spin_unlock_bh(chip->mutex);
1080 remove_wait_queue(&chip->wq, &wait);
1081 timeo = jiffies + HZ;
/* 0x60 = lock setup, 0x01 = set lock bit confirm. */
1086 map_write(map, CMD(0x60), adr);
1087 map_write(map, CMD(0x01), adr);
1088 chip->state = FL_LOCKING;
1090 spin_unlock_bh(chip->mutex);
1091 schedule_timeout(HZ);
1092 spin_lock_bh(chip->mutex);
1094 /* FIXME. Use a timer to check this, and return immediately. */
1095 /* Once the state machine's known to be working I'll do that */
1097 timeo = jiffies + (HZ*2);
1100 status = map_read(map, adr);
1101 if (map_word_andequal(map, status, status_OK, status_OK))
1104 /* OK Still waiting */
1105 if (time_after(jiffies, timeo)) {
1106 map_write(map, CMD(0x70), adr);
1107 chip->state = FL_STATUS;
1108 printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1110 spin_unlock_bh(chip->mutex);
1114 /* Latency issues. Drop the lock, wait a while and retry */
1115 spin_unlock_bh(chip->mutex);
1117 spin_lock_bh(chip->mutex);
1120 /* Done and happy. */
1121 chip->state = FL_STATUS;
1124 spin_unlock_bh(chip->mutex);
/*
 * mtd->lock implementation: validate that ofs/len are erase-block
 * aligned and in range, then lock one block at a time via
 * do_lock_oneblock(), crossing chip boundaries as needed. Error
 * returns and the loop header are elided in this excerpt.
 */
1127 static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1129 struct map_info *map = mtd->priv;
1130 struct cfi_private *cfi = map->fldrv_priv;
1132 int chipnum, ret = 0;
1133 #ifdef DEBUG_LOCK_BITS
1134 int ofs_factor = cfi->interleave * cfi->device_type;
/* Both offset and length must be multiples of the erase size. */
1137 if (ofs & (mtd->erasesize - 1))
1140 if (len & (mtd->erasesize -1))
1143 if ((len + ofs) > mtd->size)
1146 chipnum = ofs >> cfi->chipshift;
1147 adr = ofs - (chipnum << cfi->chipshift);
1151 #ifdef DEBUG_LOCK_BITS
1152 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1153 printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1154 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1157 ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
1159 #ifdef DEBUG_LOCK_BITS
1160 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1161 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1162 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1168 adr += mtd->erasesize;
1169 len -= mtd->erasesize;
1171 if (adr >> cfi->chipshift) {
1175 if (chipnum >= cfi->numchips)
/*
 * Unlock one block at 'adr' on one chip: wait ready, then issue
 * Clear Block Lock-Bit (0x60 + 0xD0) and poll status for up to 2s.
 * Structurally identical to do_lock_oneblock except for the confirm
 * command; goto targets, schedule() calls and returns are elided in
 * this excerpt.
 */
1181 static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1183 struct cfi_private *cfi = map->fldrv_priv;
1184 map_word status, status_OK;
1185 unsigned long timeo = jiffies + HZ;
1186 DECLARE_WAITQUEUE(wait, current);
1190 /* Let's determine this according to the interleave only once */
1191 status_OK = CMD(0x80);
1193 timeo = jiffies + HZ;
1195 spin_lock_bh(chip->mutex);
1197 /* Check that the chip's ready to talk to us. */
1198 switch (chip->state) {
1200 case FL_JEDEC_QUERY:
1202 map_write(map, CMD(0x70), adr);
1203 chip->state = FL_STATUS;
1206 status = map_read(map, adr);
1207 if (map_word_andequal(map, status, status_OK, status_OK))
1210 /* Urgh. Chip not yet ready to talk to us. */
1211 if (time_after(jiffies, timeo)) {
1212 spin_unlock_bh(chip->mutex);
1213 printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
1217 /* Latency issues. Drop the lock, wait a while and retry */
1218 spin_unlock_bh(chip->mutex);
1223 /* Stick ourselves on a wait queue to be woken when
1224 someone changes the status */
1225 set_current_state(TASK_UNINTERRUPTIBLE);
1226 add_wait_queue(&chip->wq, &wait);
1227 spin_unlock_bh(chip->mutex);
1229 remove_wait_queue(&chip->wq, &wait);
1230 timeo = jiffies + HZ;
/* 0x60 = lock setup, 0xD0 = clear lock bit confirm. */
1235 map_write(map, CMD(0x60), adr);
1236 map_write(map, CMD(0xD0), adr);
1237 chip->state = FL_UNLOCKING;
1239 spin_unlock_bh(chip->mutex);
1240 schedule_timeout(HZ);
1241 spin_lock_bh(chip->mutex);
1243 /* FIXME. Use a timer to check this, and return immediately. */
1244 /* Once the state machine's known to be working I'll do that */
1246 timeo = jiffies + (HZ*2);
1249 status = map_read(map, adr);
1250 if (map_word_andequal(map, status, status_OK, status_OK))
1253 /* OK Still waiting */
1254 if (time_after(jiffies, timeo)) {
1255 map_write(map, CMD(0x70), adr);
1256 chip->state = FL_STATUS;
1257 printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1259 spin_unlock_bh(chip->mutex);
1263 /* Latency issues. Drop the unlock, wait a while and retry */
1264 spin_unlock_bh(chip->mutex);
1266 spin_lock_bh(chip->mutex);
1269 /* Done and happy. */
1270 chip->state = FL_STATUS;
1273 spin_unlock_bh(chip->mutex);
/*
 * mtd->unlock implementation: walk the range one erase block at a
 * time calling do_unlock_oneblock(). Unlike cfi_staa_lock() above, no
 * alignment/range validation is visible in this excerpt — the
 * corresponding lines may be elided; confirm against full source.
 */
1276 static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1278 struct map_info *map = mtd->priv;
1279 struct cfi_private *cfi = map->fldrv_priv;
1281 int chipnum, ret = 0;
1282 #ifdef DEBUG_LOCK_BITS
1283 int ofs_factor = cfi->interleave * cfi->device_type;
1286 chipnum = ofs >> cfi->chipshift;
1287 adr = ofs - (chipnum << cfi->chipshift);
1289 #ifdef DEBUG_LOCK_BITS
1291 unsigned long temp_adr = adr;
1292 unsigned long temp_len = len;
1294 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1296 printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
1297 temp_adr += mtd->erasesize;
1298 temp_len -= mtd->erasesize;
1300 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1304 ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
1306 #ifdef DEBUG_LOCK_BITS
1307 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1308 printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1309 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
/*
 * mtd->suspend implementation: mark every idle chip FL_PM_SUSPENDED;
 * if any chip is busy, abort and roll back the chips already marked,
 * waking their waiters. Returns 0 on success, nonzero (-EAGAIN in the
 * original driver — the assignment is elided here) on failure.
 */
1315 static int cfi_staa_suspend(struct mtd_info *mtd)
1317 struct map_info *map = mtd->priv;
1318 struct cfi_private *cfi = map->fldrv_priv;
1320 struct flchip *chip;
1323 for (i=0; !ret && i<cfi->numchips; i++) {
1324 chip = &cfi->chips[i];
1326 spin_lock_bh(chip->mutex);
1328 switch(chip->state) {
1332 case FL_JEDEC_QUERY:
1333 chip->oldstate = chip->state;
1334 chip->state = FL_PM_SUSPENDED;
1335 /* No need to wake_up() on this state change -
1336 * as the whole point is that nobody can do anything
1337 * with the chip now anyway.
1339 case FL_PM_SUSPENDED:
1346 spin_unlock_bh(chip->mutex);
1349 /* Unlock the chips again */
/* Failure path: undo the FL_PM_SUSPENDED marks already applied. */
1352 for (i--; i >=0; i--) {
1353 chip = &cfi->chips[i];
1355 spin_lock_bh(chip->mutex);
1357 if (chip->state == FL_PM_SUSPENDED) {
1358 /* No need to force it into a known state here,
1359 because we're returning failure, and it didn't
1361 chip->state = chip->oldstate;
1364 spin_unlock_bh(chip->mutex);
/*
 * mtd->resume implementation: for each suspended chip, force it back
 * into read-array mode (0xFF — the chip may have been power-cycled
 * while suspended) and mark it FL_READY; the wake_up() of waiters is
 * elided in this excerpt.
 */
1371 static void cfi_staa_resume(struct mtd_info *mtd)
1373 struct map_info *map = mtd->priv;
1374 struct cfi_private *cfi = map->fldrv_priv;
1376 struct flchip *chip;
1378 for (i=0; i<cfi->numchips; i++) {
1380 chip = &cfi->chips[i];
1382 spin_lock_bh(chip->mutex);
1384 /* Go to known state. Chip may have been power cycled */
1385 if (chip->state == FL_PM_SUSPENDED) {
1386 map_write(map, CMD(0xFF), 0);
1387 chip->state = FL_READY;
1391 spin_unlock_bh(chip->mutex);
/*
 * Chip-driver destroy hook: free the private extended-query table
 * installed by cfi_cmdset_0020(). (The original also frees
 * cfi->cfiq and cfi — those lines are elided in this excerpt.)
 */
1395 static void cfi_staa_destroy(struct mtd_info *mtd)
1397 struct map_info *map = mtd->priv;
1398 struct cfi_private *cfi = map->fldrv_priv;
1399 kfree(cfi->cmdset_priv);
/*
 * Module glue: on pre-2.2.18-style kernels the init/exit symbols must
 * be named init_module/cleanup_module, hence the #define aliases.
 * The command-set constructor is published via the (long-removed)
 * inter_module mechanism under the name "cfi_cmdset_0020".
 */
1403 #if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
1404 #define cfi_staa_init init_module
1405 #define cfi_staa_exit cleanup_module
1408 static char im_name[]="cfi_cmdset_0020";
1410 int __init cfi_staa_init(void)
1412 inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020);
1416 static void __exit cfi_staa_exit(void)
1418 inter_module_unregister(im_name);
1421 module_init(cfi_staa_init);
1422 module_exit(cfi_staa_exit);
1424 MODULE_LICENSE("GPL");