2 * Common Flash Interface support:
3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
8 * 2_by_8 routines added by Simon Munton
10 * 4_by_16 work by Carolyn J. Smith
12 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
16 * $Id: cfi_cmdset_0002.c,v 1.106 2004/08/09 14:02:32 dwmw2 Exp $
20 #include <linux/config.h>
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/init.h>
27 #include <asm/byteorder.h>
29 #include <linux/errno.h>
30 #include <linux/slab.h>
31 #include <linux/delay.h>
32 #include <linux/interrupt.h>
33 #include <linux/mtd/compatmac.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/cfi.h>
38 #define AMD_BOOTLOC_BUG
39 #define FORCE_WORD_WRITE 0
41 #define MAX_WORD_RETRIES 3
43 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
44 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
45 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
46 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
47 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
48 static int cfi_amdstd_lock_varsize(struct mtd_info *, loff_t, size_t);
49 static int cfi_amdstd_unlock_varsize(struct mtd_info *, loff_t, size_t);
50 static void cfi_amdstd_sync (struct mtd_info *);
51 static int cfi_amdstd_suspend (struct mtd_info *);
52 static void cfi_amdstd_resume (struct mtd_info *);
53 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
55 static void cfi_amdstd_destroy(struct mtd_info *);
57 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
58 static struct mtd_info *cfi_amdstd_setup (struct map_info *);
/* MTD chip-driver descriptor for the AMD/Fujitsu standard command set.
 * This driver is attached by the CFI/JEDEC probe code rather than probed
 * directly, hence .probe is NULL. */
61 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
62 .probe = NULL, /* Not usable directly */
63 .destroy = cfi_amdstd_destroy,
64 .name = "cfi_cmdset_0002",
69 /* #define DEBUG_LOCK_BITS */
70 /* #define DEBUG_CFI_FEATURES */
73 #ifdef DEBUG_CFI_FEATURES
/* Dump the fields of the AMD/Fujitsu primary extended query table to the
 * kernel log in human-readable form: erase-suspend capability, block
 * protection, burst/page mode, Vpp range and boot-block location.
 * Debug-only; compiled in when DEBUG_CFI_FEATURES is defined. */
74 static void cfi_tell_features(struct cfi_pri_amdstd *extp)
76 const char* erase_suspend[3] = {
77 "Not supported", "Read only", "Read/write"
79 const char* top_bottom[6] = {
80 "No WP", "8x8KiB sectors at top & bottom, no WP",
81 "Bottom boot", "Top boot",
82 "Uniform, Bottom WP", "Uniform, Top WP"
/* Bit 0 of SiliconRevision flags address-sensitive unlock; the remaining
 * bits hold the revision number. */
85 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
86 printk(" Address sensitive unlock: %s\n",
87 (extp->SiliconRevision & 1) ? "Not required" : "Required");
89 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
90 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
92 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
94 if (extp->BlkProt == 0)
95 printk(" Block protection: Not supported\n");
97 printk(" Block protection: %d sectors per group\n", extp->BlkProt);
100 printk(" Temporary block unprotect: %s\n",
101 extp->TmpBlkUnprotect ? "Supported" : "Not supported");
102 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
103 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
104 printk(" Burst mode: %s\n",
105 extp->BurstMode ? "Supported" : "Not supported");
106 if (extp->PageMode == 0)
107 printk(" Page mode: Not supported\n");
/* PageMode encodes the page size as a power of four words. */
109 printk(" Page mode: %d word page\n", extp->PageMode << 2);
/* Vpp values are BCD-like: high nibble = volts, low nibble = tenths. */
111 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
112 extp->VppMin >> 4, extp->VppMin & 0xf);
113 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
114 extp->VppMax >> 4, extp->VppMax & 0xf);
116 if (extp->TopBottom < ARRAY_SIZE(top_bottom))
117 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
119 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
123 #ifdef AMD_BOOTLOC_BUG
124 /* Wheee. Bring me the head of someone at AMD. */
/* Work around AMD chips whose pre-1.1 CFI tables carry an untrustworthy
 * TopBottom (boot-block location) field: for CFI versions below "1.1" the
 * value is rederived from bit 7 of the JEDEC device ID instead.
 * Installed via fixup_table[] and run by cfi_fixup() at probe time. */
125 static void fixup_amd_bootblock(struct map_info *map, void* param)
127 struct cfi_private *cfi = map->fldrv_priv;
128 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
129 __u8 major = extp->MajorVersion;
130 __u8 minor = extp->MinorVersion;
/* Version bytes are ASCII; 0x3131 == "11", i.e. version 1.1. */
132 if (((major << 8) | minor) < 0x3131) {
133 /* CFI version 1.0 => don't trust bootloc */
134 if (cfi->id & 0x80) {
135 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
136 extp->TopBottom = 3; /* top boot */
138 extp->TopBottom = 2; /* bottom boot */
/* Probe-time fixups applied to the chip via cfi_fixup(); currently only
 * the AMD boot-block workaround (when AMD_BOOTLOC_BUG is defined). */
144 static struct cfi_fixup fixup_table[] = {
145 #ifdef AMD_BOOTLOC_BUG
149 fixup_amd_bootblock, NULL
/* Entry point for the AMD/Fujitsu (0x0002) command set.
 *
 * For a real CFI chip, read the primary (or alternate, per @primary)
 * vendor-specific extended query table, apply the fixup table, swap the
 * erase-region list when a broken bottom-boot CFI table reports them in
 * the wrong order, and fill in the standard unlock addresses for the
 * device type if the probe path has not already done so.  Per-chip
 * typical timeouts are then derived from the CFI query data before
 * handing off to cfi_amdstd_setup() to build the mtd_info.
 * Returns the mtd_info from cfi_amdstd_setup(), or NULL on failure
 * (error paths are outside this view). */
156 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
158 struct cfi_private *cfi = map->fldrv_priv;
159 unsigned char bootloc;
162 if (cfi->cfi_mode==CFI_MODE_CFI){
164 * It's a real CFI chip, not one for which the probe
165 * routine faked a CFI structure. So we read the feature
168 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
169 struct cfi_pri_amdstd *extp;
171 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
175 /* Install our own private info structure */
176 cfi->cmdset_priv = extp;
178 cfi_fixup(map, fixup_table);
180 #ifdef DEBUG_CFI_FEATURES
181 /* Tell the user about it in lots of lovely detail */
182 cfi_tell_features(extp);
185 bootloc = extp->TopBottom;
/* TopBottom: 2 = bottom boot, 3 = top boot; anything else is bogus. */
186 if ((bootloc != 2) && (bootloc != 3)) {
187 printk(KERN_WARNING "%s: CFI does not contain boot "
188 "bank location. Assuming top.\n", map->name);
/* Broken tables list regions bottom-up for top-boot parts: reverse them. */
192 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
193 printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
195 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
196 int j = (cfi->cfiq->NumEraseRegions-1)-i;
199 swap = cfi->cfiq->EraseRegionInfo[i];
200 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
201 cfi->cfiq->EraseRegionInfo[j] = swap;
205 * These might already be setup (more correctly) by
206 * jedec_probe.c - still need it for cfi_probe.c path.
208 if ( ! (cfi->addr_unlock1 && cfi->addr_unlock2) ) {
209 switch (cfi->device_type) {
210 case CFI_DEVICETYPE_X8:
211 cfi->addr_unlock1 = 0x555;
212 cfi->addr_unlock2 = 0x2aa;
214 case CFI_DEVICETYPE_X16:
215 cfi->addr_unlock1 = 0xaaa;
216 if (map_bankwidth(map) == cfi_interleave(cfi)) {
217 /* X16 chip(s) in X8 mode */
218 cfi->addr_unlock2 = 0x555;
220 cfi->addr_unlock2 = 0x554;
223 case CFI_DEVICETYPE_X32:
224 cfi->addr_unlock1 = 0x1554;
225 if (map_bankwidth(map) == cfi_interleave(cfi)*2) {
226 /* X32 chip(s) in X16 mode */
/* Bug fix: this branch must set the SECOND unlock address (as the
 * matching else does); it previously re-assigned addr_unlock1,
 * leaving addr_unlock2 zero in X16 mode. */
227 cfi->addr_unlock2 = 0xaaa;
229 cfi->addr_unlock2 = 0xaa8;
234 "MTD %s(): Unsupported device type %d\n",
235 __func__, cfi->device_type);
/* CFI timeouts are log2 of the typical time in us (writes) / ms (erase). */
242 for (i=0; i< cfi->numchips; i++) {
243 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
244 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
245 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
248 map->fldrv = &cfi_amdstd_chipdrv;
250 return cfi_amdstd_setup(map);
/* Allocate and populate the mtd_info for a probed chip set: total size,
 * the erase-region table (replicated once per interleaved chip), and the
 * erase/lock/write/read entry points chosen from the chip's capabilities.
 * Returns the new mtd_info, or NULL on allocation/consistency failure
 * (cleanup path frees eraseregions and cmdset_priv). */
254 static struct mtd_info *cfi_amdstd_setup(struct map_info *map)
256 struct cfi_private *cfi = map->fldrv_priv;
257 struct mtd_info *mtd;
258 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
259 unsigned long offset = 0;
262 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
263 printk(KERN_NOTICE "number of %s chips: %d\n",
264 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
267 printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
271 memset(mtd, 0, sizeof(*mtd));
273 mtd->type = MTD_NORFLASH;
274 /* Also select the correct geometry setup too */
275 mtd->size = devsize * cfi->numchips;
277 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
278 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
279 * mtd->numeraseregions, GFP_KERNEL);
280 if (!mtd->eraseregions) {
281 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
/* EraseRegionInfo packs block size (in 256-byte units) in the high bytes
 * and block count minus one in the low 16 bits. */
285 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
286 unsigned long ernum, ersize;
287 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
288 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
/* mtd->erasesize reports the largest region's block size. */
290 if (mtd->erasesize < ersize) {
291 mtd->erasesize = ersize;
293 for (j=0; j<cfi->numchips; j++) {
294 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
295 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
296 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
298 offset += (ersize * ernum);
/* Sanity check: the regions must exactly tile one chip. */
300 if (offset != devsize) {
302 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
307 for (i=0; i<mtd->numeraseregions;i++){
308 printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
309 i,mtd->eraseregions[i].offset,
310 mtd->eraseregions[i].erasesize,
311 mtd->eraseregions[i].numblocks);
/* A single one-block region means the part only supports chip erase. */
315 if (mtd->numeraseregions == 1
316 && ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) + 1) == 1) {
317 mtd->erase = cfi_amdstd_erase_chip;
319 mtd->erase = cfi_amdstd_erase_varsize;
320 mtd->lock = cfi_amdstd_lock_varsize;
321 mtd->unlock = cfi_amdstd_unlock_varsize;
/* Prefer buffered writes when the chip advertises a buffer-write timeout. */
324 if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) {
325 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
326 mtd->write = cfi_amdstd_write_buffers;
328 DEBUG(MTD_DEBUG_LEVEL1, "Using word write method\n" );
329 mtd->write = cfi_amdstd_write_words;
332 mtd->read = cfi_amdstd_read;
334 /* FIXME: erase-suspend-program is broken. See
335 http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
336 printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
338 /* does this chip have a secsi area? */
349 mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
350 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
357 mtd->sync = cfi_amdstd_sync;
358 mtd->suspend = cfi_amdstd_suspend;
359 mtd->resume = cfi_amdstd_resume;
360 mtd->flags = MTD_CAP_NORFLASH;
361 map->fldrv = &cfi_amdstd_chipdrv;
362 mtd->name = map->name;
363 __module_get(THIS_MODULE);
/* Error path: release partially-built state. */
368 if(mtd->eraseregions)
369 kfree(mtd->eraseregions);
372 kfree(cfi->cmdset_priv);
378 * Return true if the chip is ready.
380 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
381 * non-suspended sector) and is indicated by no toggle bits toggling.
383 * Note that anything more complicated than checking if no bits are toggling
384 * (including checking DQ5 for an error status) is tricky to get working
385 * correctly and is therefore not done (particularly with interleaved chips
386 * as each chip must be checked independently of the others).
/* Return non-zero when the chip is ready: two back-to-back reads of the
 * same location compare equal only when no status bits are toggling,
 * i.e. no program/erase operation is in progress. */
388 static int chip_ready(struct map_info *map, unsigned long addr)
392 d = map_read(map, addr);
393 t = map_read(map, addr);
395 return map_word_equal(map, d, t);
/* Obtain the chip for an operation of type @mode (FL_READY, FL_POINT,
 * FL_WRITING, FL_ERASING, ...).  Called with chip->mutex held; may drop
 * and retake it while waiting.  If an erase is in progress and @mode may
 * legally intrude, the erase is suspended (command 0xB0) and recorded in
 * chip->oldstate so put_chip() can resume it.  Sleeps on chip->wq when
 * the chip is busy with an incompatible operation.
 * Returns 0 on success, negative errno on timeout/failure. */
398 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
400 DECLARE_WAITQUEUE(wait, current);
401 struct cfi_private *cfi = map->fldrv_priv;
403 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
406 timeo = jiffies + HZ;
408 switch (chip->state) {
412 if (chip_ready(map, adr))
415 if (time_after(jiffies, timeo)) {
416 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
417 cfi_spin_unlock(chip->mutex);
/* Drop the lock while busy-waiting so others can make progress. */
420 cfi_spin_unlock(chip->mutex);
422 cfi_spin_lock(chip->mutex);
423 /* Someone else might have been playing with it. */
433 if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
/* Only proceed if the mode is allowed to interrupt an erase: reads always,
 * writes only when the chip advertises erase-suspend capability. */
436 if (!(mode == FL_READY || mode == FL_POINT
437 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
438 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
441 /* We could check to see if we're trying to access the sector
442 * that is currently being erased. However, no user will try
443 * anything like that so we just wait for the timeout. */
446 /* It's harmless to issue the Erase-Suspend and Erase-Resume
447 * commands when the erase algorithm isn't in progress. */
448 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
449 chip->oldstate = FL_ERASING;
450 chip->state = FL_ERASE_SUSPENDING;
451 chip->erase_suspended = 1;
453 if (chip_ready(map, adr))
456 if (time_after(jiffies, timeo)) {
457 /* Should have suspended the erase by now.
458 * Send an Erase-Resume command as either
459 * there was an error (so leave the erase
460 * routine to recover from it) or we are trying to
461 * use the erase-in-progress sector. */
462 map_write(map, CMD(0x30), chip->in_progress_block_addr);
463 chip->state = FL_ERASING;
464 chip->oldstate = FL_READY;
465 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
469 cfi_spin_unlock(chip->mutex);
471 cfi_spin_lock(chip->mutex);
472 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
473 So we can just loop here. */
475 chip->state = FL_READY;
479 /* Only if there's no operation suspended... */
480 if (mode == FL_READY && chip->oldstate == FL_READY)
/* Default: chip busy with something else entirely - sleep until woken. */
485 set_current_state(TASK_UNINTERRUPTIBLE);
486 add_wait_queue(&chip->wq, &wait);
487 cfi_spin_unlock(chip->mutex);
489 remove_wait_queue(&chip->wq, &wait);
490 cfi_spin_lock(chip->mutex);
/* Release the chip after an operation: if get_chip() suspended an erase
 * (oldstate == FL_ERASING), resume it with command 0x30; otherwise just
 * restore state and wake any waiters.  Called with chip->mutex held. */
496 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
498 struct cfi_private *cfi = map->fldrv_priv;
500 switch(chip->oldstate) {
502 chip->state = chip->oldstate;
/* Erase-Resume: restart the suspended erase at its recorded block. */
503 map_write(map, CMD(0x30), chip->in_progress_block_addr);
504 chip->oldstate = FL_READY;
505 chip->state = FL_ERASING;
510 /* We should really make set_vpp() count, rather than doing this */
514 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
/* Read @len bytes at @adr from a single chip into @buf.
 * Acquires the chip via get_chip(), forces it into read-array mode with
 * the 0xF0 reset command if needed, copies the data, then releases it.
 * Returns 0 on success or the error from get_chip(). */
520 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
522 unsigned long cmd_addr;
523 struct cfi_private *cfi = map->fldrv_priv;
528 /* Ensure cmd read/writes are aligned. */
529 cmd_addr = adr & ~(map_bankwidth(map)-1);
531 cfi_spin_lock(chip->mutex);
532 ret = get_chip(map, chip, cmd_addr, FL_READY);
534 cfi_spin_unlock(chip->mutex);
/* 0xF0 = read/reset: return the chip to read-array mode. */
538 if (chip->state != FL_POINT && chip->state != FL_READY) {
539 map_write(map, CMD(0xf0), cmd_addr);
540 chip->state = FL_READY;
543 map_copy_from(map, buf, adr, len);
545 put_chip(map, chip, cmd_addr);
547 cfi_spin_unlock(chip->mutex);
/* mtd->read entry point: split the request at chip boundaries and read
 * each piece via do_read_onechip().  *retlen accumulates bytes read. */
552 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
554 struct map_info *map = mtd->priv;
555 struct cfi_private *cfi = map->fldrv_priv;
560 /* ofs: offset within the first chip that the first read should start */
562 chipnum = (from >> cfi->chipshift);
563 ofs = from - (chipnum << cfi->chipshift);
569 unsigned long thislen;
571 if (chipnum >= cfi->numchips)
/* Clamp this piece to the end of the current chip. */
574 if ((len + ofs -1) >> cfi->chipshift)
575 thislen = (1<<cfi->chipshift) - ofs;
579 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/* Read from the chip's SecSi (secured silicon / OTP) sector.
 * Waits uninterruptibly for the chip to be FL_READY, enters SecSi mode
 * with the unlock + 0x88 sequence, copies the data, then exits with the
 * unlock + 0x90, 0x00 sequence.  Called without get_chip() - it does its
 * own wait loop on chip->wq under chip->mutex. */
594 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
596 DECLARE_WAITQUEUE(wait, current);
597 unsigned long timeo = jiffies + HZ;
598 struct cfi_private *cfi = map->fldrv_priv;
601 cfi_spin_lock(chip->mutex);
603 if (chip->state != FL_READY){
605 printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
607 set_current_state(TASK_UNINTERRUPTIBLE);
608 add_wait_queue(&chip->wq, &wait);
610 cfi_spin_unlock(chip->mutex);
613 remove_wait_queue(&chip->wq, &wait);
615 if(signal_pending(current))
618 timeo = jiffies + HZ;
625 chip->state = FL_READY;
/* Enter SecSi sector mode (unlock, unlock, 0x88). */
627 /* should these be CFI_DEVICETYPE_X8 instead of cfi->device_type? */
628 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
629 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
630 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
632 map_copy_from(map, buf, adr, len);
/* Exit SecSi sector mode (unlock, unlock, 0x90, 0x00). */
634 /* should these be CFI_DEVICETYPE_X8 instead of cfi->device_type? */
635 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
636 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
637 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
638 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
641 cfi_spin_unlock(chip->mutex);
/* mtd user/factory protection-register read entry point: iterate over the
 * chips, reading from each one's 8-byte SecSi area via
 * do_read_secsi_onechip(). */
646 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
648 struct map_info *map = mtd->priv;
649 struct cfi_private *cfi = map->fldrv_priv;
655 /* ofs: offset within the first chip that the first read should start */
657 /* 8 secsi bytes per chip */
665 unsigned long thislen;
667 if (chipnum >= cfi->numchips)
/* Clamp each piece to the 8-byte (1<<3) SecSi window of one chip. */
670 if ((len + ofs -1) >> 3)
671 thislen = (1<<3) - ofs;
675 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/* Program one bus-width word @datum at @adr on a single chip.
 * Issues the standard unlock + 0xA0 program sequence, then polls by
 * comparing successive reads (toggle-bit style) until the data matches
 * or the timeout expires; failed attempts are reset with 0xF0 and
 * retried up to MAX_WORD_RETRIES times.  Skips programming entirely if
 * the flash already contains @datum (NOP).  Returns 0 on success. */
690 static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
692 struct cfi_private *cfi = map->fldrv_priv;
693 unsigned long timeo = jiffies + HZ;
695 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
696 * have a max write time of a few hundreds usec). However, we should
697 * use the maximum timeout value given by the chip at probe time
698 * instead. Unfortunately, struct flchip does have a field for
699 * maximum timeout, only for typical which can be far too short
700 * depending of the conditions. The ' + 1' is to avoid having a
701 * timeout of 0 jiffies if HZ is smaller than 1000.
703 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
710 cfi_spin_lock(chip->mutex);
711 ret = get_chip(map, chip, adr, FL_WRITING);
713 cfi_spin_unlock(chip->mutex);
717 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
718 __func__, adr, datum.x[0] );
721 * Check for a NOP for the case when the datum to write is already
722 * present - it saves time and works around buggy chips that corrupt
723 * data at other locations when 0xff is written to a location that
724 * already contains 0xff.
726 oldd = map_read(map, adr);
727 if (map_word_equal(map, oldd, datum)) {
728 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
736 * The CFI_DEVICETYPE_X8 argument is needed even when
737 * cfi->device_type != CFI_DEVICETYPE_X8. The addresses for
738 * command sequences don't scale even when the device is
739 * wider. This is the case for many of the cfi_send_gen_cmd()
740 * below. I'm not sure, however, why some use
/* Unlock (0xAA @ addr1, 0x55 @ addr2) then 0xA0 = program command. */
743 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
744 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
745 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
746 map_write(map, datum, adr);
747 chip->state = FL_WRITING;
/* Give the chip its typical programming time before polling. */
749 cfi_spin_unlock(chip->mutex);
750 cfi_udelay(chip->word_write_time);
751 cfi_spin_lock(chip->mutex);
753 /* See comment above for timeout value. */
754 timeo = jiffies + uWriteTimeout;
756 if (chip->state != FL_WRITING) {
757 /* Someone's suspended the write. Sleep */
758 DECLARE_WAITQUEUE(wait, current);
760 set_current_state(TASK_UNINTERRUPTIBLE);
761 add_wait_queue(&chip->wq, &wait);
762 cfi_spin_unlock(chip->mutex);
764 remove_wait_queue(&chip->wq, &wait);
765 timeo = jiffies + (HZ / 2); /* FIXME */
766 cfi_spin_lock(chip->mutex);
770 /* Test to see if toggling has stopped. */
771 oldd = map_read(map, adr);
772 curd = map_read(map, adr);
773 if (map_word_equal(map, curd, oldd)) {
774 /* Do we have the correct value? */
775 if (map_word_equal(map, curd, datum)) {
778 /* Nope something has gone wrong. */
782 if (time_after(jiffies, timeo)) {
783 printk(KERN_WARNING "MTD %s(): software timeout\n",
788 /* Latency issues. Drop the lock, wait a while and retry */
789 cfi_spin_unlock(chip->mutex);
791 cfi_spin_lock(chip->mutex);
794 /* reset on all failures. */
795 map_write( map, CMD(0xF0), chip->start );
796 /* FIXME - should have reset delay before continuing */
797 if (++retry_cnt <= MAX_WORD_RETRIES)
802 chip->state = FL_READY;
803 put_chip(map, chip, adr);
804 cfi_spin_unlock(chip->mutex);
/* mtd->write entry point (word-at-a-time variant).
 * Handles an unaligned head and tail by read-modify-write of one bus word
 * (waiting for the chip to be FL_READY before the read), and writes the
 * aligned middle with one do_write_oneword() per bus word, crossing chip
 * boundaries as needed.  *retlen accumulates bytes written. */
810 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
811 size_t *retlen, const u_char *buf)
813 struct map_info *map = mtd->priv;
814 struct cfi_private *cfi = map->fldrv_priv;
817 unsigned long ofs, chipstart;
818 DECLARE_WAITQUEUE(wait, current);
824 chipnum = to >> cfi->chipshift;
825 ofs = to - (chipnum << cfi->chipshift);
826 chipstart = cfi->chips[chipnum].start;
828 /* If it's not bus-aligned, do the first byte write */
829 if (ofs & (map_bankwidth(map)-1)) {
830 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
831 int i = ofs - bus_ofs;
836 cfi_spin_lock(cfi->chips[chipnum].mutex);
838 if (cfi->chips[chipnum].state != FL_READY) {
840 printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
842 set_current_state(TASK_UNINTERRUPTIBLE);
843 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
845 cfi_spin_unlock(cfi->chips[chipnum].mutex);
848 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
850 if(signal_pending(current))
856 /* Load 'tmp_buf' with old contents of flash */
857 tmp_buf = map_read(map, bus_ofs+chipstart);
859 cfi_spin_unlock(cfi->chips[chipnum].mutex);
861 /* Number of bytes to copy from buffer */
862 n = min_t(int, len, map_bankwidth(map)-i);
/* Merge the new bytes into the old word, then program it. */
864 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
866 ret = do_write_oneword(map, &cfi->chips[chipnum],
/* Move to the next chip when the offset overflows this one. */
876 if (ofs >> cfi->chipshift) {
879 if (chipnum == cfi->numchips)
884 /* We are now aligned, write as much as possible */
885 while(len >= map_bankwidth(map)) {
888 datum = map_word_load(map, buf);
890 ret = do_write_oneword(map, &cfi->chips[chipnum],
895 ofs += map_bankwidth(map);
896 buf += map_bankwidth(map);
897 (*retlen) += map_bankwidth(map);
898 len -= map_bankwidth(map);
900 if (ofs >> cfi->chipshift) {
903 if (chipnum == cfi->numchips)
905 chipstart = cfi->chips[chipnum].start;
909 /* Write the trailing bytes if any */
910 if (len & (map_bankwidth(map)-1)) {
914 cfi_spin_lock(cfi->chips[chipnum].mutex);
916 if (cfi->chips[chipnum].state != FL_READY) {
918 printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
920 set_current_state(TASK_UNINTERRUPTIBLE);
921 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
923 cfi_spin_unlock(cfi->chips[chipnum].mutex);
926 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
928 if(signal_pending(current))
/* Read-modify-write the final partial word. */
934 tmp_buf = map_read(map, ofs + chipstart);
936 cfi_spin_unlock(cfi->chips[chipnum].mutex);
938 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
940 ret = do_write_oneword(map, &cfi->chips[chipnum],
953 * FIXME: interleaved mode not tested, and probably not supported!
/* Program up to one write-buffer's worth of data (@len bytes, a multiple
 * of the bus width) at @adr using the chip's write-buffer command:
 * unlock, 0x25 (Write Buffer Load), word count minus one, the data words,
 * then 0x29 (Program Buffer to Flash confirm).  Completion is polled via
 * chip_ready(); any failure resets the chip with 0xF0.
 * Returns 0 on success. */
955 static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
956 unsigned long adr, const u_char *buf, int len)
958 struct cfi_private *cfi = map->fldrv_priv;
959 unsigned long timeo = jiffies + HZ;
960 /* see comments in do_write_oneword() regarding uWriteTimeo. */
961 static unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
963 unsigned long cmd_adr;
970 cfi_spin_lock(chip->mutex);
971 ret = get_chip(map, chip, adr, FL_WRITING);
973 cfi_spin_unlock(chip->mutex);
977 datum = map_word_load(map, buf);
979 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
980 __func__, adr, datum.x[0] );
/* Unlock sequence; the 0xA0 word-program command is deliberately not
 * sent in buffered mode (kept below for reference). */
983 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
984 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
985 //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
987 /* Write Buffer Load */
988 map_write(map, CMD(0x25), cmd_adr);
990 chip->state = FL_WRITING_TO_BUFFER;
992 /* Write length of data to come */
993 words = len / map_bankwidth(map);
994 map_write(map, CMD(words - 1), cmd_adr);
/* Stream the data words into the buffer. */
997 while(z < words * map_bankwidth(map)) {
998 datum = map_word_load(map, buf);
999 map_write(map, datum, adr + z);
1001 z += map_bankwidth(map);
1002 buf += map_bankwidth(map);
1004 z -= map_bankwidth(map);
1008 /* Write Buffer Program Confirm: GO GO GO */
1009 map_write(map, CMD(0x29), cmd_adr);
1010 chip->state = FL_WRITING;
/* Give the chip its typical buffer-program time before polling. */
1012 cfi_spin_unlock(chip->mutex);
1013 cfi_udelay(chip->buffer_write_time);
1014 cfi_spin_lock(chip->mutex);
1016 timeo = jiffies + uWriteTimeout;
1019 if (chip->state != FL_WRITING) {
1020 /* Someone's suspended the write. Sleep */
1021 DECLARE_WAITQUEUE(wait, current);
1023 set_current_state(TASK_UNINTERRUPTIBLE);
1024 add_wait_queue(&chip->wq, &wait);
1025 cfi_spin_unlock(chip->mutex);
1027 remove_wait_queue(&chip->wq, &wait);
1028 timeo = jiffies + (HZ / 2); /* FIXME */
1029 cfi_spin_lock(chip->mutex);
1033 if (chip_ready(map, adr))
1036 if( time_after(jiffies, timeo))
1039 /* Latency issues. Drop the lock, wait a while and retry */
1040 cfi_spin_unlock(chip->mutex);
1042 cfi_spin_lock(chip->mutex);
1045 printk(KERN_WARNING "MTD %s(): software timeout\n",
1048 /* reset on all failures. */
1049 map_write( map, CMD(0xF0), chip->start );
1050 /* FIXME - should have reset delay before continuing */
1054 chip->state = FL_READY;
1055 put_chip(map, chip, adr);
1056 cfi_spin_unlock(chip->mutex);
/* mtd->write entry point (buffered variant).
 * Delegates an unaligned head (and any sub-word tail) to
 * cfi_amdstd_write_words(), then pushes the aligned middle through
 * do_write_buffer() in chunks that never cross a write-buffer boundary.
 * *retlen accumulates bytes written. */
1062 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1063 size_t *retlen, const u_char *buf)
1065 struct map_info *map = mtd->priv;
1066 struct cfi_private *cfi = map->fldrv_priv;
/* Write-buffer size in bytes, scaled by interleave (CFI gives log2). */
1067 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1076 chipnum = to >> cfi->chipshift;
1077 ofs = to - (chipnum << cfi->chipshift);
1079 /* If it's not bus-aligned, do the first word write */
1080 if (ofs & (map_bankwidth(map)-1)) {
1081 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1082 if (local_len > len)
1084 ret = cfi_amdstd_write_words(mtd, to, local_len,
1092 if (ofs >> cfi->chipshift) {
1095 if (chipnum == cfi->numchips)
1100 /* Write buffer is worth it only if more than one word to write... */
1101 while (len >= map_bankwidth(map) * 2) {
1102 /* We must not cross write block boundaries */
1103 int size = wbufsize - (ofs & (wbufsize-1));
/* Trim the chunk to a whole number of bus words. */
1107 if (size % map_bankwidth(map))
1108 size -= size % map_bankwidth(map);
1110 ret = do_write_buffer(map, &cfi->chips[chipnum],
1120 if (ofs >> cfi->chipshift) {
1123 if (chipnum == cfi->numchips)
/* Remaining sub-word tail goes through the word-write path. */
1129 size_t retlen_dregs = 0;
1131 ret = cfi_amdstd_write_words(mtd, to, len, &retlen_dregs, buf);
1133 *retlen += retlen_dregs;
1142 * Handle devices with one erase region, that only implement
1143 * the chip erase command.
/* Erase an entire chip using the chip-erase command sequence
 * (unlock, 0x80, unlock, 0x10).  Sleeps for half the typical erase time,
 * then polls chip_ready() with a 20-second hard timeout, tolerating
 * erase-suspend/resume in between; any failure resets the chip with 0xF0.
 * Returns 0 on success, -EIO on timeout. */
1145 static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
1147 struct cfi_private *cfi = map->fldrv_priv;
1148 unsigned long timeo = jiffies + HZ;
1149 unsigned long int adr;
1150 DECLARE_WAITQUEUE(wait, current);
1153 adr = cfi->addr_unlock1;
1155 cfi_spin_lock(chip->mutex);
1156 ret = get_chip(map, chip, adr, FL_WRITING);
1158 cfi_spin_unlock(chip->mutex);
1162 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1163 __func__, chip->start );
/* Six-cycle chip-erase command: AA/55/80/AA/55/10. */
1166 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
1167 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
1168 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
1169 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
1170 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
1171 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
1173 chip->state = FL_ERASING;
1174 chip->erase_suspended = 0;
1175 chip->in_progress_block_addr = adr;
/* Sleep for half the typical erase time before starting to poll. */
1177 cfi_spin_unlock(chip->mutex);
1178 set_current_state(TASK_UNINTERRUPTIBLE);
1179 schedule_timeout((chip->erase_time*HZ)/(2*1000));
1180 cfi_spin_lock(chip->mutex);
1182 timeo = jiffies + (HZ*20);
1185 if (chip->state != FL_ERASING) {
1186 /* Someone's suspended the erase. Sleep */
1187 set_current_state(TASK_UNINTERRUPTIBLE);
1188 add_wait_queue(&chip->wq, &wait);
1189 cfi_spin_unlock(chip->mutex);
1191 remove_wait_queue(&chip->wq, &wait);
1192 cfi_spin_lock(chip->mutex);
1195 if (chip->erase_suspended) {
1196 /* This erase was suspended and resumed.
1197 Adjust the timeout */
1198 timeo = jiffies + (HZ*20); /* FIXME */
1199 chip->erase_suspended = 0;
1202 if (chip_ready(map, adr))
1205 if (time_after(jiffies, timeo))
1208 /* Latency issues. Drop the lock, wait a while and retry */
1209 cfi_spin_unlock(chip->mutex);
1210 set_current_state(TASK_UNINTERRUPTIBLE);
1211 schedule_timeout(1);
1212 cfi_spin_lock(chip->mutex);
1215 printk(KERN_WARNING "MTD %s(): software timeout\n",
1218 /* reset on all failures. */
1219 map_write( map, CMD(0xF0), chip->start );
1220 /* FIXME - should have reset delay before continuing */
1224 chip->state = FL_READY;
1225 put_chip(map, chip, adr);
1226 cfi_spin_unlock(chip->mutex);
1232 typedef int (*frob_t)(struct map_info *map, struct flchip *chip,
1233 unsigned long adr, void *thunk);
/* Apply the per-block callback @frob over the range [@ofs, @ofs+@len) of
 * a variable-block-size device.  Validates that both ends of the range
 * are aligned to the erase size of the region they fall in, then walks
 * block by block, advancing across erase regions and chips.  @thunk is
 * passed through to the callback.  Returns 0 on success, -EINVAL on a
 * bad range, or the callback's error. */
1236 static int cfi_amdstd_varsize_frob(struct mtd_info *mtd, frob_t frob,
1237 loff_t ofs, size_t len, void *thunk)
1239 struct map_info *map = mtd->priv;
1240 struct cfi_private *cfi = map->fldrv_priv;
1242 int chipnum, ret = 0;
1244 struct mtd_erase_region_info *regions = mtd->eraseregions;
1246 if (ofs > mtd->size)
1249 if ((len + ofs) > mtd->size)
1252 /* Check that both start and end of the requested erase are
1253 * aligned with the erasesize at the appropriate addresses.
1258 /* Skip all erase regions which are ended before the start of
1259 the requested erase. Actually, to save on the calculations,
1260 we skip to the first erase region which starts after the
1261 start of the requested erase, and then go back one.
1264 while (i < mtd->numeraseregions && ofs >= regions[i].offset)
1268 /* OK, now i is pointing at the erase region in which this
1269 erase request starts. Check the start of the requested
1270 erase range is aligned with the erase size which is in
1274 if (ofs & (regions[i].erasesize-1))
1277 /* Remember the erase region we start on */
1280 /* Next, check that the end of the requested erase is aligned
1281 * with the erase region at that address.
1284 while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
1287 /* As before, drop back one to point at the region in which
1288 the address actually falls
1292 if ((ofs + len) & (regions[i].erasesize-1))
1295 chipnum = ofs >> cfi->chipshift;
1296 adr = ofs - (chipnum << cfi->chipshift);
1301 ret = (*frob)(map, &cfi->chips[chipnum], adr, thunk);
1306 adr += regions[i].erasesize;
1307 len -= regions[i].erasesize;
/* Step into the next erase region when this block was its last. */
1309 if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
/* Wrap to the next chip when the address overflows this one. */
1312 if (adr >> cfi->chipshift) {
1316 if (chipnum >= cfi->numchips)
/* Erase a single sector at @adr using the sector-erase sequence
 * (unlock, 0x80, unlock, 0x30 at the sector address).  Polling and
 * timeout handling mirror do_erase_chip(); suitable as the frob_t
 * callback of cfi_amdstd_varsize_frob().  @thunk is unused.
 * Returns 0 on success, -EIO on timeout. */
1325 static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1327 struct cfi_private *cfi = map->fldrv_priv;
1328 unsigned long timeo = jiffies + HZ;
1329 DECLARE_WAITQUEUE(wait, current);
1334 cfi_spin_lock(chip->mutex);
1335 ret = get_chip(map, chip, adr, FL_ERASING);
1337 cfi_spin_unlock(chip->mutex);
1341 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
/* Six-cycle sector-erase command: AA/55/80/AA/55 then 0x30 at the sector. */
1345 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
1346 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
1347 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
1348 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
1349 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
1350 map_write(map, CMD(0x30), adr);
1352 chip->state = FL_ERASING;
1353 chip->erase_suspended = 0;
1354 chip->in_progress_block_addr = adr;
/* Sleep for half the typical erase time before starting to poll. */
1356 cfi_spin_unlock(chip->mutex);
1357 set_current_state(TASK_UNINTERRUPTIBLE);
1358 schedule_timeout((chip->erase_time*HZ)/(2*1000));
1359 cfi_spin_lock(chip->mutex);
1361 timeo = jiffies + (HZ*20);
1364 if (chip->state != FL_ERASING) {
1365 /* Someone's suspended the erase. Sleep */
1366 set_current_state(TASK_UNINTERRUPTIBLE);
1367 add_wait_queue(&chip->wq, &wait);
1368 cfi_spin_unlock(chip->mutex);
1370 remove_wait_queue(&chip->wq, &wait);
1371 cfi_spin_lock(chip->mutex);
1374 if (chip->erase_suspended) {
1375 /* This erase was suspended and resumed.
1376 Adjust the timeout */
1377 timeo = jiffies + (HZ*20); /* FIXME */
1378 chip->erase_suspended = 0;
1381 if (chip_ready(map, adr))
1384 if (time_after(jiffies, timeo))
1387 /* Latency issues. Drop the lock, wait a while and retry */
1388 cfi_spin_unlock(chip->mutex);
1389 set_current_state(TASK_UNINTERRUPTIBLE);
1390 schedule_timeout(1);
1391 cfi_spin_lock(chip->mutex);
1394 printk(KERN_WARNING "MTD %s(): software timeout\n",
1397 /* reset on all failures. */
1398 map_write( map, CMD(0xF0), chip->start );
1399 /* FIXME - should have reset delay before continuing */
1403 chip->state = FL_READY;
1404 put_chip(map, chip, adr);
1405 cfi_spin_unlock(chip->mutex);
/*
 * MTD erase entry point for devices with variable-size erase regions.
 * Erases every block covered by the request via cfi_amdstd_varsize_frob()
 * and do_erase_oneblock(), then marks the request done and fires the
 * caller's completion callback.
 * (The ofs/len extraction and error-return lines are not visible in
 * this chunk.)
 */
int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
	unsigned long ofs, len;
	ret = cfi_amdstd_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);
/*
 * Whole-chip erase entry point: only accepts a request that covers the
 * entire device starting at offset 0, then issues a chip erase on the
 * first (only) chip.
 */
static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	/* Reject anything that is not "erase everything from offset 0". */
	if (instr->addr != 0)
	if (instr->len != mtd->size)
	ret = do_erase_chip(map, &cfi->chips[0]);
	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);
/*
 * Quiesce the device: wait for every chip to go idle, park each one in
 * FL_SYNCING so nothing new can start, then restore each chip's saved
 * state in a second pass.
 * (Several lines -- loop braces, the retry path, wake_up calls -- are
 * not visible in this chunk.)
 */
static void cfi_amdstd_sync (struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct flchip *chip;
	DECLARE_WAITQUEUE(wait, current);
	/* Pass 1: move each idle chip into FL_SYNCING; sleep on the
	 * chip's wait queue if it is busy. */
	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];
		cfi_spin_lock(chip->mutex);
		switch(chip->state) {
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			cfi_spin_unlock(chip->mutex);
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);
			cfi_spin_unlock(chip->mutex);
			remove_wait_queue(&chip->wq, &wait);
	/* Unlock the chips again */
	/* Pass 2: walk back over the chips we synced and restore their
	 * saved state. */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];
		cfi_spin_lock(chip->mutex);
		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
		cfi_spin_unlock(chip->mutex);
/*
 * Power-management suspend: park every idle chip in FL_PM_SUSPENDED.
 * If any chip cannot be suspended, the second loop rolls back the
 * chips already parked.  Returns 0 on success, non-zero otherwise
 * (the 'ret' assignment lines are not visible in this chunk).
 */
static int cfi_amdstd_suspend(struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct flchip *chip;
	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];
		cfi_spin_lock(chip->mutex);
		switch(chip->state) {
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
		cfi_spin_unlock(chip->mutex);
	/* Unlock the chips again */
	/* Roll back: restore every chip we already suspended. */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];
		cfi_spin_lock(chip->mutex);
		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = chip->oldstate;
		cfi_spin_unlock(chip->mutex);
/*
 * Power-management resume: wake every suspended chip by issuing the
 * AMD reset/read-array command (0xF0) and returning it to FL_READY;
 * complain about any chip not found in FL_PM_SUSPENDED.
 */
static void cfi_amdstd_resume(struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct flchip *chip;
	for (i=0; i<cfi->numchips; i++) {
		chip = &cfi->chips[i];
		cfi_spin_lock(chip->mutex);
		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			/* 0xF0 = AMD/Fujitsu reset / return-to-read-array. */
			map_write(map, CMD(0xF0), chip->start);
		printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
		cfi_spin_unlock(chip->mutex);
#ifdef DEBUG_LOCK_BITS
/*
 * Debug helper: enter autoselect mode (0x90), read and print the block
 * lock status word for one block, then reset to read-array mode (0xff).
 * (The 'adr' parameter line of the signature and the function braces
 * are not visible in this chunk.)
 */
static int do_printlockstatus_oneblock(struct map_info *map,
				       struct flchip *chip,
	struct cfi_private *cfi = map->fldrv_priv;
	/* Byte distance between consecutive CFI query locations for
	 * this interleave/device-width geometry. */
	int ofs_factor = cfi->interleave * cfi->device_type;
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
/* With DEBUG_LOCK_BITS: dump lock state for every block in the range. */
#define debug_dump_locks(mtd, frob, ofs, len, thunk) \
	cfi_amdstd_varsize_frob((mtd), (frob), (ofs), (len), (thunk))
/* Without DEBUG_LOCK_BITS: the dump compiles away to nothing. */
#define debug_dump_locks(...)
#endif /* DEBUG_LOCK_BITS */
/*
 * Thunk passed through cfi_amdstd_varsize_frob() to do_xxlock_oneblock():
 * carries the value written to the block lock register and the flchip
 * state to hold while writing it.
 * (The struct's member declaration lines are not visible in this chunk.)
 */
struct xxlock_thunk {
/* Write 0x01 while in FL_LOCKING state: lock the block. */
#define DO_XXLOCK_ONEBLOCK_LOCK ((struct xxlock_thunk){0x01, FL_LOCKING})
/* Write 0x00 while in FL_UNLOCKING state: unlock the block. */
#define DO_XXLOCK_ONEBLOCK_UNLOCK ((struct xxlock_thunk){0x00, FL_UNLOCKING})
/*
 * FIXME - this is *very* specific to a particular chip. It likely won't
 * work for all chips that require unlock. It also hasn't been tested
 * with interleaved chips.
 */
static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
	struct cfi_private *cfi = map->fldrv_priv;
	struct xxlock_thunk *xxlt = (struct xxlock_thunk *)thunk;
	/*
	 * This is easy because these are writes to registers and not writes
	 * to flash memory - that means that we don't have to check status.
	 *
	 * Lock block registers:
	 * - on 64k boundaries, and
	 * - block lock registers are 4MiB lower - overflow subtract (danger)
	 */
	/* Map the flash address onto its lock-register address: round
	 * down to the 64KiB boundary, set bit 1, then subtract 4MiB
	 * (adding ~0x3fffff deliberately relies on wraparound). */
	adr = ((adr & ~0xffff) | 0x2) + ~0x3fffff;
	/* Take the chip; 'ret' handling and return lines are elided
	 * from this chunk. */
	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	cfi_spin_unlock(chip->mutex);
	chip->state = xxlt->state;
	/* A single register write performs the lock or unlock. */
	map_write(map, CMD(xxlt->val), adr);
	/* Done and happy. */
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	cfi_spin_unlock(chip->mutex);
/*
 * MTD lock entry point: lock every block in [ofs, ofs+len) by writing
 * 0x01 to each block's lock register via do_xxlock_oneblock().
 * Dumps lock state before and after when DEBUG_LOCK_BITS is enabled.
 * (The remaining parameter lines of the signature and the return are
 * not visible in this chunk.)
 */
static int cfi_amdstd_lock_varsize(struct mtd_info *mtd,
	DEBUG(MTD_DEBUG_LEVEL3,
	      "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
	      __func__, ofs, len);
	debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
	ret = cfi_amdstd_varsize_frob(mtd, do_xxlock_oneblock, ofs, len,
				      (void *)&DO_XXLOCK_ONEBLOCK_LOCK);
	DEBUG(MTD_DEBUG_LEVEL3,
	      "%s: lock status after, ret=%d\n",
	debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
/*
 * MTD unlock entry point: unlock every block in [ofs, ofs+len) by
 * writing 0x00 to each block's lock register via do_xxlock_oneblock().
 * Mirrors cfi_amdstd_lock_varsize() with the UNLOCK thunk.
 * (The remaining parameter lines of the signature and the return are
 * not visible in this chunk.)
 */
static int cfi_amdstd_unlock_varsize(struct mtd_info *mtd,
	DEBUG(MTD_DEBUG_LEVEL3,
	      "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
	      __func__, ofs, len);
	debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
	ret = cfi_amdstd_varsize_frob(mtd, do_xxlock_oneblock, ofs, len,
				      (void *)&DO_XXLOCK_ONEBLOCK_UNLOCK);
	DEBUG(MTD_DEBUG_LEVEL3,
	      "%s: lock status after, ret=%d\n",
	debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
/*
 * Release driver-private allocations when the MTD device goes away.
 * NOTE(review): additional kfree() calls from the original file appear
 * to be elided from this chunk -- verify that cfi->cfiq and cfi itself
 * are also freed, or this leaks.
 */
static void cfi_amdstd_destroy(struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(mtd->eraseregions);
/* Name under which this command set is published via inter_module_register(). */
static char im_name[]="cfi_cmdset_0002";
/*
 * Module init: publish cfi_cmdset_0002() under im_name so map drivers
 * can look it up with inter_module_get().
 * (The function braces and return are not visible in this chunk.)
 */
int __init cfi_amdstd_init(void)
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
/*
 * Module exit: withdraw the inter_module registration made at init.
 * (The function braces are not visible in this chunk.)
 */
static void __exit cfi_amdstd_exit(void)
	inter_module_unregister(im_name);
/* Module entry/exit hooks and metadata. */
module_init(cfi_amdstd_init);
module_exit(cfi_amdstd_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");