2 * Common Flash Interface support:
3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
7 * 2_by_8 routines added by Simon Munton
11 * $Id: cfi_cmdset_0002.c,v 1.74 2003/05/28 12:51:48 dwmw2 Exp $
15 #include <linux/module.h>
16 #include <linux/types.h>
17 #include <linux/kernel.h>
18 #include <linux/sched.h>
19 #include <linux/init.h>
21 #include <asm/byteorder.h>
23 #include <linux/errno.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/mtd/map.h>
28 #include <linux/mtd/mtd.h>
29 #include <linux/mtd/cfi.h>
30 #include <linux/mtd/compatmac.h>
32 #define AMD_BOOTLOC_BUG
34 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
35 static int cfi_amdstd_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
36 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
37 static int cfi_amdstd_erase_onesize(struct mtd_info *, struct erase_info *);
38 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
39 static void cfi_amdstd_sync (struct mtd_info *);
40 static int cfi_amdstd_suspend (struct mtd_info *);
41 static void cfi_amdstd_resume (struct mtd_info *);
42 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
44 static void cfi_amdstd_destroy(struct mtd_info *);
46 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
47 static struct mtd_info *cfi_amdstd_setup (struct map_info *);
/* Chip-driver registration record for the AMD/Fujitsu command set.
 * .probe is NULL: this driver is never probed directly — it is entered
 * through cfi_cmdset_0002() after generic CFI probing has identified
 * a chip advertising vendor command set ID 0x0002. */
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
.probe = NULL, /* Not usable directly */
.destroy = cfi_amdstd_destroy,
.name = "cfi_cmdset_0002",
/*
 * Entry point for chips speaking the AMD/Fujitsu standard command set.
 * In CFI mode it reads the extended query table version and the boot-block
 * location, works around known-broken CFI tables on early AMD parts
 * (AMD_BOOTLOC_BUG), derives the unlock cycle addresses from the device
 * type vs. bus width, records per-chip typical timings, and hands off to
 * cfi_amdstd_setup() to build the mtd_info.
 * NOTE(review): this excerpt is missing several lines (declarations of
 * i/major/minor, some closing braces, else arms) — the original file has
 * them; do not treat the visible text as complete.
 */
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
struct cfi_private *cfi = map->fldrv_priv;
unsigned char bootloc;
/* ofs_factor converts a CFI query word index into a byte offset on this
 * interleave/width combination. */
int ofs_factor = cfi->interleave * cfi->device_type;
__u32 base = cfi->chips[0].start;
if (cfi->cfi_mode==CFI_MODE_CFI){
/* 'primary' selects the primary (P_ADR) vs. alternate (A_ADR) extended
 * query table address from the CFI ident block. */
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
/* 0x98 = enter CFI Query mode; then read the extended table's
 * major/minor version ASCII digits at offsets +3/+4. */
cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
major = cfi_read_query(map, base + (adr+3)*ofs_factor);
minor = cfi_read_query(map, base + (adr+4)*ofs_factor);
printk(KERN_NOTICE " Amd/Fujitsu Extended Query Table v%c.%c at 0x%4.4X\n",
cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
/* Autoselect sequence (AA/55/90): read manufacturer and device ID. */
cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
/* FIXME - should have a delay before continuing */
cfi->mfr = cfi_read_query(map, base);
cfi->id = cfi_read_query(map, base + ofs_factor);
/* Wheee. Bring me the head of someone at AMD. */
#ifdef AMD_BOOTLOC_BUG
/* Versions before v1.1 ('1''1' = 0x3131) shipped bogus boot-location
 * info in the CFI table; guess it from the device ID instead. */
if (((major << 8) | minor) < 0x3131) {
/* CFI version 1.0 => don't trust bootloc */
printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
bootloc = 3; /* top boot */
bootloc = 2; /* bottom boot */
cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
bootloc = cfi_read_query(map, base + (adr+15)*ofs_factor);
/* Top-boot parts list erase regions in reverse order in broken
 * tables: mirror-swap the EraseRegionInfo array. */
if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
int j = (cfi->cfiq->NumEraseRegions-1)-i;
swap = cfi->cfiq->EraseRegionInfo[i];
cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
cfi->cfiq->EraseRegionInfo[j] = swap;
* FIXME - These might already be setup (more correctly)
/* Unlock addresses depend on the device data-bus width and whether an
 * x16 part is wired in x8 mode (buswidth == interleave). */
switch (cfi->device_type) {
case CFI_DEVICETYPE_X8:
cfi->addr_unlock1 = 0x555;
cfi->addr_unlock2 = 0x2aa;
case CFI_DEVICETYPE_X16:
cfi->addr_unlock1 = 0xaaa;
if (map->buswidth == cfi->interleave) {
/* X16 chip(s) in X8 mode */
cfi->addr_unlock2 = 0x555;
cfi->addr_unlock2 = 0x554;
case CFI_DEVICETYPE_X32:
cfi->addr_unlock1 = 0x1555;
cfi->addr_unlock2 = 0xaaa;
printk(KERN_NOTICE "Eep. Unknown cfi_cmdset_0002 device type %d\n", cfi->device_type);
/* CFI encodes typical timeouts as powers of two (2^n units). */
for (i=0; i< cfi->numchips; i++) {
cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
map->fldrv = &cfi_amdstd_chipdrv;
/* 0xF0 = reset: leave query/autoselect mode, back to read array. */
cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
return cfi_amdstd_setup(map);
/*
 * Allocate and populate the mtd_info for the probed chip set: total size,
 * erase geometry (single uniform region, or a per-chip table of variable
 * regions), and the read/write/erase/sync/suspend/resume method pointers.
 * Returns the mtd_info, or NULL after freeing partial allocations.
 * NOTE(review): excerpt is missing lines (declarations of i/j, else
 * branches, goto-style cleanup labels) — do not read it as complete.
 */
static struct mtd_info *cfi_amdstd_setup(struct map_info *map)
struct cfi_private *cfi = map->fldrv_priv;
struct mtd_info *mtd;
/* DevSize is log2(bytes per chip); scale by interleave for bus size. */
unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
printk(KERN_NOTICE "number of %s chips: %d\n",
(cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
memset(mtd, 0, sizeof(*mtd));
mtd->type = MTD_NORFLASH;
/* Also select the correct geometry setup too */
mtd->size = devsize * cfi->numchips;
if (cfi->cfiq->NumEraseRegions == 1) {
/* No need to muck about with multiple erase sizes */
/* EraseRegionInfo[31:8] is block size in units of 256 bytes;
 * (>>8 & ~0xff) recovers the byte size directly. */
mtd->erasesize = ((cfi->cfiq->EraseRegionInfo[0] >> 8) & ~0xff) * cfi->interleave;
unsigned long offset = 0;
mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL);
if (!mtd->eraseregions) {
printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
unsigned long ernum, ersize;
ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
/* Low 16 bits hold (number of blocks - 1). */
ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
/* mtd->erasesize reports the largest block size present. */
if (mtd->erasesize < ersize) {
mtd->erasesize = ersize;
/* Replicate this chip's region layout for every chip in the set. */
for (j=0; j<cfi->numchips; j++) {
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
offset += (ersize * ernum);
/* Sanity check: the regions must exactly tile the chip. */
if (offset != devsize) {
printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
for (i=0; i<mtd->numeraseregions;i++){
printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
i,mtd->eraseregions[i].offset,
mtd->eraseregions[i].erasesize,
mtd->eraseregions[i].numblocks);
switch (CFIDEV_BUSWIDTH)
if (mtd->numeraseregions > 1)
mtd->erase = cfi_amdstd_erase_varsize;
/* Single region with a single block => only chip erase works. */
if (((cfi->cfiq->EraseRegionInfo[0] & 0xffff) + 1) == 1)
mtd->erase = cfi_amdstd_erase_chip;
mtd->erase = cfi_amdstd_erase_onesize;
mtd->read = cfi_amdstd_read;
mtd->write = cfi_amdstd_write;
printk(KERN_WARNING "Unsupported buswidth\n");
if (cfi->fast_prog) {
/* In cfi_amdstd_write() we frob the protection stuff
without paying any attention to the state machine.
This upsets in-progress erases. So we turn this flag
off for now till the code gets fixed. */
printk(KERN_NOTICE "cfi_cmdset_0002: Disabling fast programming due to code brokenness.\n");
/* does this chip have a secsi area? */
mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
mtd->sync = cfi_amdstd_sync;
mtd->suspend = cfi_amdstd_suspend;
mtd->resume = cfi_amdstd_resume;
mtd->flags = MTD_CAP_NORFLASH;
map->fldrv = &cfi_amdstd_chipdrv;
mtd->name = map->name;
__module_get(THIS_MODULE);
/* Failure path: release anything we managed to allocate. */
if(mtd->eraseregions)
kfree(mtd->eraseregions);
kfree(cfi->cmdset_priv);
/*
 * Read 'len' bytes at chip-relative offset 'adr' into 'buf' from a single
 * chip.  Takes the chip mutex, sleeps on the chip waitqueue until the chip
 * is FL_READY (a pending write/erase finishes), then does a plain memory
 * copy — AMD-style chips read array data with no command needed.
 * NOTE(review): excerpt omits the retry loop braces, schedule() call and
 * return statements present in the original.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
DECLARE_WAITQUEUE(wait, current);
unsigned long timeo = jiffies + HZ;
cfi_spin_lock(chip->mutex);
if (chip->state != FL_READY){
printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
/* Drop the lock before sleeping so the owner can finish. */
cfi_spin_unlock(chip->mutex);
remove_wait_queue(&chip->wq, &wait);
if(signal_pending(current))
timeo = jiffies + HZ;
chip->state = FL_READY;
map_copy_from(map, buf, adr, len);
cfi_spin_unlock(chip->mutex);
/*
 * mtd->read entry point: split a read that may span several interleaved
 * chips into per-chip segments and hand each to do_read_onechip().
 * chipshift is log2 of the per-chip address span, so '>>' and '<<' by it
 * convert between absolute offsets and (chip number, chip offset) pairs.
 */
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
/* ofs: offset within the first chip that the first read should start */
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
unsigned long thislen;
if (chipnum >= cfi->numchips)
/* Clamp this segment at the end of the current chip. */
if ((len + ofs -1) >> cfi->chipshift)
thislen = (1<<cfi->chipshift) - ofs;
ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/*
 * Read from one chip's SecSi (Secured Silicon / protection register) area.
 * Same wait-for-FL_READY dance as do_read_onechip(), but the SecSi sector
 * must be explicitly entered (unlock + 0x88) before the copy and exited
 * (unlock + 0x90, then 0x00) afterwards so the chip returns to the normal
 * array.
 */
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
DECLARE_WAITQUEUE(wait, current);
unsigned long timeo = jiffies + HZ;
struct cfi_private *cfi = map->fldrv_priv;
cfi_spin_lock(chip->mutex);
if (chip->state != FL_READY){
printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
cfi_spin_unlock(chip->mutex);
remove_wait_queue(&chip->wq, &wait);
if(signal_pending(current))
timeo = jiffies + HZ;
chip->state = FL_READY;
/* Enter SecSi sector: standard two-cycle unlock, then 0x88. */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
map_copy_from(map, buf, adr, len);
/* Exit SecSi sector: unlock, 0x90, 0x00 -> back to read array. */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_spin_unlock(chip->mutex);
/*
 * mtd->read_{user,fact}_prot_reg entry point: split a SecSi read across
 * chips.  Each chip exposes an 8-byte SecSi window here, hence the '>> 3'
 * / '1<<3' arithmetic instead of chipshift.
 */
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
/* ofs: offset within the first chip that the first read should start */
/* 8 secsi bytes per chip */
unsigned long thislen;
if (chipnum >= cfi->numchips)
/* Clamp this segment at the 8-byte SecSi boundary. */
if ((len + ofs -1) >> 3)
thislen = (1<<3) - ofs;
ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/*
 * Program one bus-width word 'datum' at absolute address 'adr'.
 * 'fast' selects unlock-bypass mode (chip already unlocked by the caller,
 * only the 0xA0 program command is needed); otherwise the full
 * AA/55/A0 unlock+program sequence is issued.  Completion is detected by
 * DQ6 toggle-bit polling per JESD21-C, with two extra confirming reads
 * before declaring success or failure — see the long comment below; do
 * not "simplify" that logic.
 * NOTE(review): excerpt omits some lines (sleep/retry braces, schedule(),
 * returns, the dq6 initialisation) present in the original file.
 */
static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, cfi_word datum, int fast)
unsigned long timeo = jiffies + HZ;
unsigned int oldstatus, status, prev_oldstatus, prev_status;
struct cfi_private *cfi = map->fldrv_priv;
/* We use a 1ms + 1 jiffies generic timeout for writes (most devices have
a max write time of a few hundreds usec). However, we should use the
maximum timeout value given by the chip at probe time instead.
Unfortunately, struct flchip does have a field for maximum timeout,
only for typical which can be far too short depending of the conditions.
The ' + 1' is to avoid having a timeout of 0 jiffies if HZ is smaller
than 1000. Using a static variable allows makes us save the costly
divide operation at each word write.*/
static unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
DECLARE_WAITQUEUE(wait, current);
cfi_spin_lock(chip->mutex);
/* Wait until no other operation owns the chip. */
if (chip->state != FL_READY) {
printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", chip->state);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
cfi_spin_unlock(chip->mutex);
remove_wait_queue(&chip->wq, &wait);
printk(KERN_DEBUG "Wake up to write:\n");
if(signal_pending(current))
timeo = jiffies + HZ;
chip->state = FL_WRITING;
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8llx)\n",
__func__, adr, datum );
if (fast) { /* Unlock bypass */
cfi_send_gen_cmd(0xA0, 0, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_write(map, datum, adr);
/* Drop the lock during the typical programming delay. */
cfi_spin_unlock(chip->mutex);
cfi_udelay(chip->word_write_time);
cfi_spin_lock(chip->mutex);
* Polling toggle bits instead of reading back many times
* This ensures that write operation is really completed,
* or tells us why it failed.
* It appears tha the polling and decoding of error state might
* be simplified. Don't do it unless you really know what you
* are doing. You must remember that JESD21-C 3.5.3 states that
* the status must be read back an _additional_ two times before
* a failure is determined. This is because these devices have
* internal state machines that are asynchronous to the external
* data bus. During an erase or write the read-back status of the
* polling bits might be transitioning internaly when the external
* read-back occurs. This means that the bits aren't in the final
* state and they might appear to report an error as they transition
* and are in a weird state. This will produce infrequent errors
* that will usually disappear the next time an erase or write
* happens (Try tracking those errors down!). To ensure that
* the bits are not in transition the location must be read-back
* two more times and compared against what was written - BOTH reads
* MUST match what was written - don't think this can be simplified
* to only the last read matching. If the comparison fails, error
* state can then be decoded.
/* See comment above for timeout value. */
timeo = jiffies + uWriteTimeout;
oldstatus = cfi_read(map, adr);
status = cfi_read(map, adr);
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
__func__, oldstatus, status );
* This only checks if dq6 is still toggling and that our
* timer hasn't expired. We purposefully ignore the chips
* internal timer that will assert dq5 and leave dq6 toggling.
* This is done for a variety of reasons:
* 1) Not all chips support dq5.
* 2) Dealing with asynchronous status bit and data updates
* and reading a device two more times creates _messy_
* logic when trying to deal with interleaved devices -
* some may be changing while others are still busy.
* 3) Checking dq5 only helps to optimize an error case that
* should at worst be infrequent and at best non-existent.
* If our timeout occurs _then_ we will check dq5 to see
* if the device also had an internal timeout.
while( ( ( status ^ oldstatus ) & dq6 )
&& ! ( ta = time_after(jiffies, timeo) ) ) {
if (need_resched()) {
cfi_spin_unlock(chip->mutex);
cfi_spin_lock(chip->mutex);
oldstatus = cfi_read( map, adr );
status = cfi_read( map, adr );
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
__func__, oldstatus, status );
* Something kicked us out of the read-back loop. We'll
* check success befor checking failure.
* Even though dq6 might be true data, it is unkown if
* all of the other bits have changed to true data due to
* the asynchronous nature of the internal state machine.
* We will read two more times and use this to either
* verify that the write completed successfully or
* that something really went wrong. BOTH reads
* must match what was written - this certifies that
* bits aren't still changing and that the status
* bits erroneously match the datum that was written.
prev_oldstatus = oldstatus;
prev_status = status;
oldstatus = cfi_read(map, adr);
status = cfi_read(map, adr);
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
__func__, oldstatus, status );
if ( oldstatus == datum && status == datum ) {
/* success - do nothing */
/* DQ5 sits one bit below DQ6 in each interleaved lane, hence
 * the '>> 1' of the still-toggling-lane mask. */
int dq5mask = ( ( status ^ oldstatus ) & dq6 ) >> 1;
if ( status & dq5mask ) {
/* dq5 asserted - decode interleave chips */
"MTD %s(): FLASH internal timeout: 0x%.8x\n",
"MTD %s(): Software timed out during write.\n",
* If we get to here then it means that something
* is wrong and it's not a timeout. Something
* is seriously wacky! Dump some debug info.
"MTD %s(): Wacky! Unable to decode failure status\n",
"MTD %s(): 0x%.8lx(0x%.8llx): 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n",
__func__, adr, datum,
prev_oldstatus, prev_status,
/* reset on all failures. */
cfi_write( map, CMD(0xF0), chip->start );
/* FIXME - should have reset delay before continuing */
chip->state = FL_READY;
cfi_spin_unlock(chip->mutex);
/*
 * mtd->write entry point.  Three phases: (1) an unaligned head, handled by
 * read-modify-write of one bus word; (2) the aligned bulk, one bus word at
 * a time (entering AMD "unlock bypass" mode per chip when fast_prog is
 * set, so only one command cycle per word is needed); (3) an unaligned
 * tail, again via read-modify-write.  Chip-boundary crossings advance
 * chipnum/chipstart and re-enter bypass mode on the next chip.
 * NOTE(review): excerpt omits declarations (ret, i, n, chipnum, datum,
 * tmp_buf), several braces/else arms and the retlen bookkeeping for the
 * head/tail cases — present in the original file.
 */
static int cfi_amdstd_write (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs, chipstart;
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
chipstart = cfi->chips[chipnum].start;
/* If it's not bus-aligned, do the first byte write */
if (ofs & (CFIDEV_BUSWIDTH-1)) {
unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
int i = ofs - bus_ofs;
/* Read the whole bus word, overlay the new bytes, write it back. */
map_copy_from(map, tmp_buf, bus_ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
while (len && i < CFIDEV_BUSWIDTH)
tmp_buf[i++] = buf[n++], len--;
if (cfi_buswidth_is_2()) {
datum = *(__u16*)tmp_buf;
} else if (cfi_buswidth_is_4()) {
datum = *(__u32*)tmp_buf;
return -EINVAL; /* should never happen, but be safe */
ret = do_write_oneword(map, &cfi->chips[chipnum],
/* Head write may have pushed us onto the next chip. */
if (ofs >> cfi->chipshift) {
if (chipnum == cfi->numchips)
if (cfi->fast_prog) {
/* Go into unlock bypass mode */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
/* We are now aligned, write as much as possible */
while(len >= CFIDEV_BUSWIDTH) {
if (cfi_buswidth_is_1()) {
} else if (cfi_buswidth_is_2()) {
datum = *(__u16*)buf;
} else if (cfi_buswidth_is_4()) {
datum = *(__u32*)buf;
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, datum, cfi->fast_prog);
/* Get out of unlock bypass mode */
/* Error path: 0x90/0x00 leaves bypass mode before bailing out. */
cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
ofs += CFIDEV_BUSWIDTH;
buf += CFIDEV_BUSWIDTH;
(*retlen) += CFIDEV_BUSWIDTH;
len -= CFIDEV_BUSWIDTH;
/* Crossed a chip boundary: leave bypass on this chip, advance,
 * and re-enter bypass on the next one. */
if (ofs >> cfi->chipshift) {
/* Get out of unlock bypass mode */
cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
if (chipnum == cfi->numchips)
chipstart = cfi->chips[chipnum].start;
/* Go into unlock bypass mode for next set of chips */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
/* Get out of unlock bypass mode */
cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
/* Write the trailing bytes if any */
if (len & (CFIDEV_BUSWIDTH-1)) {
map_copy_from(map, tmp_buf, ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
tmp_buf[i++] = buf[n++];
if (cfi_buswidth_is_2()) {
datum = *(__u16*)tmp_buf;
} else if (cfi_buswidth_is_4()) {
datum = *(__u32*)tmp_buf;
return -EINVAL; /* should never happen, but be safe */
ret = do_write_oneword(map, &cfi->chips[chipnum],
/*
 * Erase an entire chip (six-cycle AA/55/80/AA/55/10 sequence).  Used for
 * parts that only implement chip erase.  Completion is detected by DQ6
 * toggle polling (same JESD21-C two-extra-reads protocol as
 * do_write_oneword); success is recognised when the erased location reads
 * back as all-ones twice in a row.  Supports erase-suspend: if another
 * thread changes chip->state away from FL_ERASING we sleep on the
 * waitqueue until resumed.
 * NOTE(review): excerpt omits declarations (dq6, ta, ones, i), several
 * braces, schedule()/udelay calls and return statements.
 */
static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
unsigned int oldstatus, status, prev_oldstatus, prev_status;
unsigned long timeo = jiffies + HZ;
unsigned long int adr;
struct cfi_private *cfi = map->fldrv_priv;
DECLARE_WAITQUEUE(wait, current);
cfi_spin_lock(chip->mutex);
if (chip->state != FL_READY){
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
cfi_spin_unlock(chip->mutex);
remove_wait_queue(&chip->wq, &wait);
if(signal_pending(current))
timeo = jiffies + HZ;
chip->state = FL_ERASING;
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
__func__, chip->start );
/* Handle devices with one erase region, that only implement
* the chip erase command.
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
/* Chip erase is slow: allow up to 20 seconds. */
timeo = jiffies + (HZ*20);
adr = cfi->addr_unlock1;
/* Wait for the end of programing/erasure by using the toggle method.
* As long as there is a programming procedure going on, bit 6
* is toggling it's state with each consecutive read.
* The toggling stops as soon as the procedure is completed.
* If the process has gone on for too long on the chip bit 5 gets.
* After bit5 is set you can kill the operation by sending a reset
* command to the chip.
/* see comments in do_write_oneword */
oldstatus = cfi_read(map, adr);
status = cfi_read(map, adr);
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
__func__, oldstatus, status );
while( ( ( status ^ oldstatus ) & dq6 )
&& ! ( ta = time_after(jiffies, timeo) ) ) {
/* an initial short sleep */
cfi_spin_unlock(chip->mutex);
schedule_timeout(HZ/100);
cfi_spin_lock(chip->mutex);
if (chip->state != FL_ERASING) {
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
cfi_spin_unlock(chip->mutex);
printk("erase suspended. Sleeping\n");
remove_wait_queue(&chip->wq, &wait);
if (signal_pending(current))
timeo = jiffies + (HZ*2); /* FIXME */
cfi_spin_lock(chip->mutex);
/* Busy wait for 1/10 of a milisecond */
&& ( ( status ^ oldstatus ) & dq6 );
/* Latency issues. Drop the lock, wait a while and retry */
cfi_spin_unlock(chip->mutex);
cfi_spin_lock(chip->mutex);
oldstatus = cfi_read(map, adr);
status = cfi_read(map, adr);
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
__func__, oldstatus, status );
oldstatus = cfi_read(map, adr);
status = cfi_read(map, adr);
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
__func__, oldstatus, status );
/* Two confirming reads before deciding success/failure
 * (JESD21-C — see the long comment in do_write_oneword). */
prev_oldstatus = oldstatus;
prev_status = status;
oldstatus = cfi_read(map, adr);
status = cfi_read(map, adr);
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
__func__, oldstatus, status );
/* Build the all-ones pattern for this bus width ('ones'). */
if ( cfi_buswidth_is_1() ) {
} else if ( cfi_buswidth_is_2() ) {
} else if ( cfi_buswidth_is_4() ) {
printk(KERN_WARNING "Unsupported buswidth\n");
if ( oldstatus == ones && status == ones ) {
/* success - do nothing */
int dq5mask = ( ( status ^ oldstatus ) & dq6 ) >> 1;
if ( status & dq5mask ) {
/* dq5 asserted - decode interleave chips */
"MTD %s(): FLASH internal timeout: 0x%.8x\n",
"MTD %s(): Software timed out during write.\n",
"MTD %s(): Wacky! Unable to decode failure status\n",
"MTD %s(): 0x%.8lx(0x%.8llx): 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n",
prev_oldstatus, prev_status,
/* reset on all failures. */
cfi_write( map, CMD(0xF0), chip->start );
/* FIXME - should have reset delay before continuing */
chip->state = FL_READY;
cfi_spin_unlock(chip->mutex);
/*
 * Erase a single sector at absolute address 'adr' (AA/55/80/AA/55 unlock
 * then 0x30 written to the sector address).  Identical DQ6 toggle-polling
 * and erase-suspend handling to do_erase_chip(), but polls at the sector
 * address itself.
 * NOTE(review): excerpt omits declarations (dq6, ta, ones), several
 * braces, schedule()/udelay calls and return statements.
 */
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
unsigned int oldstatus, status, prev_oldstatus, prev_status;
unsigned long timeo = jiffies + HZ;
struct cfi_private *cfi = map->fldrv_priv;
DECLARE_WAITQUEUE(wait, current);
cfi_spin_lock(chip->mutex);
if (chip->state != FL_READY){
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
cfi_spin_unlock(chip->mutex);
remove_wait_queue(&chip->wq, &wait);
if(signal_pending(current))
timeo = jiffies + HZ;
chip->state = FL_ERASING;
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
/* 0x30 at the sector address selects sector (not chip) erase. */
cfi_write(map, CMD(0x30), adr);
timeo = jiffies + (HZ*20);
/* Wait for the end of programing/erasure by using the toggle method.
* As long as there is a programming procedure going on, bit 6
* is toggling it's state with each consecutive read.
* The toggling stops as soon as the procedure is completed.
* If the process has gone on for too long on the chip bit 5 gets.
* After bit5 is set you can kill the operation by sending a reset
* command to the chip.
/* see comments in do_write_oneword */
oldstatus = cfi_read(map, adr);
status = cfi_read(map, adr);
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
__func__, oldstatus, status );
while( ( ( status ^ oldstatus ) & dq6 )
&& ! ( ta = time_after(jiffies, timeo) ) ) {
/* an initial short sleep */
cfi_spin_unlock(chip->mutex);
schedule_timeout(HZ/100);
cfi_spin_lock(chip->mutex);
if (chip->state != FL_ERASING) {
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
cfi_spin_unlock(chip->mutex);
printk(KERN_DEBUG "erase suspended. Sleeping\n");
remove_wait_queue(&chip->wq, &wait);
if (signal_pending(current))
timeo = jiffies + (HZ*2); /* FIXME */
cfi_spin_lock(chip->mutex);
/* Busy wait for 1/10 of a milisecond */
&& ( ( status ^ oldstatus ) & dq6 );
/* Latency issues. Drop the lock, wait a while and retry */
cfi_spin_unlock(chip->mutex);
cfi_spin_lock(chip->mutex);
oldstatus = cfi_read(map, adr);
status = cfi_read(map, adr);
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
__func__, oldstatus, status );
oldstatus = cfi_read(map, adr);
status = cfi_read(map, adr);
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
__func__, oldstatus, status );
/* Two confirming reads before deciding success/failure
 * (JESD21-C — see the long comment in do_write_oneword). */
prev_oldstatus = oldstatus;
prev_status = status;
oldstatus = cfi_read(map, adr);
status = cfi_read(map, adr);
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
__func__, oldstatus, status );
/* Build the all-ones pattern for this bus width ('ones'). */
if ( cfi_buswidth_is_1() ) {
} else if ( cfi_buswidth_is_2() ) {
} else if ( cfi_buswidth_is_4() ) {
printk(KERN_WARNING "Unsupported buswidth\n");
if ( oldstatus == ones && status == ones ) {
/* success - do nothing */
int dq5mask = ( ( status ^ oldstatus ) & dq6 ) >> 1;
if ( status & dq5mask ) {
/* dq5 asserted - decode interleave chips */
printk( KERN_WARNING
"MTD %s(): FLASH internal timeout: 0x%.8x\n",
printk( KERN_WARNING
"MTD %s(): Software timed out during write.\n",
"MTD %s(): Wacky! Unable to decode failure status\n",
"MTD %s(): 0x%.8lx(0x%.8llx): 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n",
__func__, adr, ones,
prev_oldstatus, prev_status,
/* reset on all failures. */
cfi_write( map, CMD(0xF0), chip->start );
/* FIXME - should have reset delay before continuing */
chip->state = FL_READY;
cfi_spin_unlock(chip->mutex);
/*
 * mtd->erase for chips with multiple erase-block sizes.  Validates that
 * both ends of the request are aligned to the erase size of whichever
 * region they fall in, then erases block by block via do_erase_oneblock(),
 * advancing through the region table (and across chips) as it goes.
 * Calls instr->callback on completion, per the MTD erase contract.
 * NOTE(review): excerpt omits declarations (i, first), loop braces and
 * some return statements.
 */
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr, len;
int chipnum, ret = 0;
struct mtd_erase_region_info *regions = mtd->eraseregions;
if (instr->addr > mtd->size)
if ((instr->len + instr->addr) > mtd->size)
/* Check that both start and end of the requested erase are
* aligned with the erasesize at the appropriate addresses.
/* Skip all erase regions which are ended before the start of
the requested erase. Actually, to save on the calculations,
we skip to the first erase region which starts after the
start of the requested erase, and then go back one.
while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
/* OK, now i is pointing at the erase region in which this
erase request starts. Check the start of the requested
erase range is aligned with the erase size which is in
if (instr->addr & (regions[i].erasesize-1))
/* Remember the erase region we start on */
/* Next, check that the end of the requested erase is aligned
* with the erase region at that address.
while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
/* As before, drop back one to point at the region in which
the address actually falls
if ((instr->addr + instr->len) & (regions[i].erasesize-1))
chipnum = instr->addr >> cfi->chipshift;
adr = instr->addr - (chipnum << cfi->chipshift);
ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
adr += regions[i].erasesize;
len -= regions[i].erasesize;
/* Advance to the next region table entry when we cross the end
 * of the current region (compared modulo the per-chip span). */
if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
if (adr >> cfi->chipshift) {
if (chipnum >= cfi->numchips)
instr->state = MTD_ERASE_DONE;
if (instr->callback)
instr->callback(instr);
/*
 * mtd->erase for chips with a single uniform erase-block size.  Checks
 * start/length alignment against mtd->erasesize and bounds, then erases
 * one block per iteration with do_erase_oneblock(), stepping across chip
 * boundaries as needed.  Calls instr->callback on completion.
 */
static int cfi_amdstd_erase_onesize(struct mtd_info *mtd, struct erase_info *instr)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr, len;
int chipnum, ret = 0;
if (instr->addr & (mtd->erasesize - 1))
if (instr->len & (mtd->erasesize -1))
if ((instr->len + instr->addr) > mtd->size)
chipnum = instr->addr >> cfi->chipshift;
adr = instr->addr - (chipnum << cfi->chipshift);
ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
adr += mtd->erasesize;
len -= mtd->erasesize;
/* Wrapped past the end of this chip: move to the next one. */
if (adr >> cfi->chipshift) {
if (chipnum >= cfi->numchips)
instr->state = MTD_ERASE_DONE;
if (instr->callback)
instr->callback(instr);
/*
 * mtd->erase for parts that only support whole-chip erase: the request
 * must cover exactly the entire device (addr 0, len == mtd->size).
 * Delegates to do_erase_chip() on chip 0 and fires the MTD callback.
 */
static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
if (instr->addr != 0)
if (instr->len != mtd->size)
ret = do_erase_chip(map, &cfi->chips[0]);
instr->state = MTD_ERASE_DONE;
if (instr->callback)
instr->callback(instr);
/*
 * mtd->sync: quiesce every chip.  Idle chips are flipped to FL_SYNCING so
 * nothing new can start; busy chips are waited for on their waitqueue and
 * retried.  Once all chips are synced, a second pass restores each chip's
 * previous state.
 * NOTE(review): excerpt omits the retry label/goto, schedule() and
 * wake_up() calls, and several case labels present in the original.
 */
static void cfi_amdstd_sync (struct mtd_info *mtd)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct flchip *chip;
DECLARE_WAITQUEUE(wait, current);
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
cfi_spin_lock(chip->mutex);
switch(chip->state) {
case FL_JEDEC_QUERY:
chip->oldstate = chip->state;
chip->state = FL_SYNCING;
/* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
cfi_spin_unlock(chip->mutex);
/* Not an idle state */
add_wait_queue(&chip->wq, &wait);
cfi_spin_unlock(chip->mutex);
remove_wait_queue(&chip->wq, &wait);
/* Unlock the chips again */
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
cfi_spin_lock(chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
cfi_spin_unlock(chip->mutex);
/*
 * mtd->suspend: move every idle chip to FL_PM_SUSPENDED.  Unlike sync,
 * a busy chip is not waited for — suspend fails (the missing default arm
 * sets ret) and the cleanup pass rolls back any chips already suspended.
 * Returns 0 on success, non-zero if any chip could not be suspended.
 * NOTE(review): excerpt omits the default case, ret declaration/return
 * and wake_up() calls present in the original.
 */
static int cfi_amdstd_suspend(struct mtd_info *mtd)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct flchip *chip;
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
cfi_spin_lock(chip->mutex);
switch(chip->state) {
case FL_JEDEC_QUERY:
chip->oldstate = chip->state;
chip->state = FL_PM_SUSPENDED;
/* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
case FL_PM_SUSPENDED:
cfi_spin_unlock(chip->mutex);
/* Unlock the chips again */
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
cfi_spin_lock(chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = chip->oldstate;
cfi_spin_unlock(chip->mutex);
/*
 * mtd->resume: bring every chip out of FL_PM_SUSPENDED back to FL_READY,
 * issuing a 0xF0 reset so the chip is guaranteed to be in read-array
 * mode after the power transition.  A chip found in any other state is
 * reported as an error.
 */
static void cfi_amdstd_resume(struct mtd_info *mtd)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct flchip *chip;
for (i=0; i<cfi->numchips; i++) {
chip = &cfi->chips[i];
cfi_spin_lock(chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = FL_READY;
/* 0xF0 = reset to read-array mode. */
cfi_write(map, CMD(0xF0), chip->start);
printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
cfi_spin_unlock(chip->mutex);
/*
 * chip-driver .destroy hook: free the command-set private data and the
 * erase-region table allocated in cfi_amdstd_setup().  kfree(NULL) is a
 * no-op, so both frees are safe on partially-initialised devices.
 */
static void cfi_amdstd_destroy(struct mtd_info *mtd)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
kfree(cfi->cmdset_priv);
kfree(mtd->eraseregions);
/* Module init/exit: publish/withdraw cfi_cmdset_0002 through the
 * inter_module registry so the generic CFI probe code can find this
 * command-set handler by name without a hard link-time dependency. */
static char im_name[]="cfi_cmdset_0002";
int __init cfi_amdstd_init(void)
inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
static void __exit cfi_amdstd_exit(void)
inter_module_unregister(im_name);
/* Standard kernel module registration and metadata. */
module_init(cfi_amdstd_init);
module_exit(cfi_amdstd_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");