X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fmtd%2Fchips%2Fcfi_cmdset_0002.c;h=5bd15f548f22e47f00103b14e6eb1245ca5d1b4a;hb=9bf4aaab3e101692164d49b7ca357651eb691cb6;hp=df3309048290e576b9863b6417c4206a764e09c8;hpb=db216c3d5e4c040e557a50f8f5d35d5c415e8c1c;p=linux-2.6.git diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index df3309048..5bd15f548 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -3,15 +3,21 @@ * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002) * * Copyright (C) 2000 Crossnet Co. + * Copyright (C) 2004 Arcom Control Systems Ltd * * 2_by_8 routines added by Simon Munton * + * 4_by_16 work by Carolyn J. Smith + * + * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com + * * This code is GPL * - * $Id: cfi_cmdset_0002.c,v 1.74 2003/05/28 12:51:48 dwmw2 Exp $ + * $Id: cfi_cmdset_0002.c,v 1.106 2004/08/09 14:02:32 dwmw2 Exp $ * */ +#include #include #include #include @@ -24,18 +30,23 @@ #include #include #include +#include #include #include #include -#include #define AMD_BOOTLOC_BUG +#define FORCE_WORD_WRITE 0 + +#define MAX_WORD_RETRIES 3 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); -static int cfi_amdstd_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); +static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); +static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); -static int cfi_amdstd_erase_onesize(struct mtd_info *, struct erase_info *); static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); +static int cfi_amdstd_lock_varsize(struct mtd_info *, loff_t, size_t); +static int cfi_amdstd_unlock_varsize(struct mtd_info *, loff_t, size_t); static void cfi_amdstd_sync (struct mtd_info *); static int cfi_amdstd_suspend (struct mtd_info *); static void cfi_amdstd_resume (struct mtd_info *); @@ -55,50 +66,129 @@ static struct mtd_chip_driver cfi_amdstd_chipdrv = { }; +/* #define DEBUG_LOCK_BITS */ +/* #define DEBUG_CFI_FEATURES */ + + +#ifdef DEBUG_CFI_FEATURES +static void cfi_tell_features(struct cfi_pri_amdstd *extp) +{ + const char* erase_suspend[3] = { + "Not supported", "Read only", "Read/write" + }; + const char* top_bottom[6] = { + "No WP", "8x8KiB sectors at top & bottom, no WP", + "Bottom boot", "Top boot", + "Uniform, Bottom WP", "Uniform, Top WP" + }; + + printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); + printk(" Address sensitive unlock: %s\n", + (extp->SiliconRevision & 1) ? "Not required" : "Required"); + + if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) + printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); + else + printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); + + if (extp->BlkProt == 0) + printk(" Block protection: Not supported\n"); + else + printk(" Block protection: %d sectors per group\n", extp->BlkProt); + + + printk(" Temporary block unprotect: %s\n", + extp->TmpBlkUnprotect ? "Supported" : "Not supported"); + printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); + printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); + printk(" Burst mode: %s\n", + extp->BurstMode ? 
"Supported" : "Not supported"); + if (extp->PageMode == 0) + printk(" Page mode: Not supported\n"); + else + printk(" Page mode: %d word page\n", extp->PageMode << 2); + + printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", + extp->VppMin >> 4, extp->VppMin & 0xf); + printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", + extp->VppMax >> 4, extp->VppMax & 0xf); + + if (extp->TopBottom < ARRAY_SIZE(top_bottom)) + printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); + else + printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); +} +#endif + +#ifdef AMD_BOOTLOC_BUG +/* Wheee. Bring me the head of someone at AMD. */ +static void fixup_amd_bootblock(struct map_info *map, void* param) +{ + struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_amdstd *extp = cfi->cmdset_priv; + __u8 major = extp->MajorVersion; + __u8 minor = extp->MinorVersion; + + if (((major << 8) | minor) < 0x3131) { + /* CFI version 1.0 => don't trust bootloc */ + if (cfi->id & 0x80) { + printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); + extp->TopBottom = 3; /* top boot */ + } else { + extp->TopBottom = 2; /* bottom boot */ + } + } +} +#endif + +static struct cfi_fixup fixup_table[] = { +#ifdef AMD_BOOTLOC_BUG + { + 0x0001, /* AMD */ + CFI_ID_ANY, + fixup_amd_bootblock, NULL + }, +#endif + { 0, 0, NULL, NULL } +}; + + struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) { struct cfi_private *cfi = map->fldrv_priv; unsigned char bootloc; - int ofs_factor = cfi->interleave * cfi->device_type; int i; - __u8 major, minor; - __u32 base = cfi->chips[0].start; if (cfi->cfi_mode==CFI_MODE_CFI){ + /* + * It's a real CFI chip, not one for which the probe + * routine faked a CFI structure. So we read the feature + * table from it. + */ __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; + struct cfi_pri_amdstd *extp; + + extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); + if (!extp) + return NULL; + + /* Install our own private info structure */ + cfi->cmdset_priv = extp; + + cfi_fixup(map, fixup_table); + +#ifdef DEBUG_CFI_FEATURES + /* Tell the user about it in lots of lovely detail */ + cfi_tell_features(extp); +#endif + + bootloc = extp->TopBottom; + if ((bootloc != 2) && (bootloc != 3)) { + printk(KERN_WARNING "%s: CFI does not contain boot " + "bank location. Assuming top.\n", map->name); + bootloc = 2; + } - cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); - - major = cfi_read_query(map, base + (adr+3)*ofs_factor); - minor = cfi_read_query(map, base + (adr+4)*ofs_factor); - - printk(KERN_NOTICE " Amd/Fujitsu Extended Query Table v%c.%c at 0x%4.4X\n", - major, minor, adr); - cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL); - - cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL); - /* FIXME - should have a delay before continuing */ - cfi->mfr = cfi_read_query(map, base); - cfi->id = cfi_read_query(map, base + ofs_factor); - - /* Wheee. Bring me the head of someone at AMD. */ -#ifdef AMD_BOOTLOC_BUG - if (((major << 8) | minor) < 0x3131) { - /* CFI version 1.0 => don't trust bootloc */ - if (cfi->id & 0x80) { - printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. 
Assuming broken CFI table.\n", map->name, cfi->id); - bootloc = 3; /* top boot */ - } else { - bootloc = 2; /* bottom boot */ - } - } else -#endif - { - cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); - bootloc = cfi_read_query(map, base + (adr+15)*ofs_factor); - } if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name); @@ -112,31 +202,41 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) } } /* - * FIXME - These might already be setup (more correctly) - * buy jedec_probe.c. + * These might already be setup (more correctly) by + * jedec_probe.c - still need it for cfi_probe.c path. */ - switch (cfi->device_type) { - case CFI_DEVICETYPE_X8: - cfi->addr_unlock1 = 0x555; - cfi->addr_unlock2 = 0x2aa; - break; - case CFI_DEVICETYPE_X16: - cfi->addr_unlock1 = 0xaaa; - if (map->buswidth == cfi->interleave) { - /* X16 chip(s) in X8 mode */ - cfi->addr_unlock2 = 0x555; - } else { - cfi->addr_unlock2 = 0x554; + if ( ! (cfi->addr_unlock1 && cfi->addr_unlock2) ) { + switch (cfi->device_type) { + case CFI_DEVICETYPE_X8: + cfi->addr_unlock1 = 0x555; + cfi->addr_unlock2 = 0x2aa; + break; + case CFI_DEVICETYPE_X16: + cfi->addr_unlock1 = 0xaaa; + if (map_bankwidth(map) == cfi_interleave(cfi)) { + /* X16 chip(s) in X8 mode */ + cfi->addr_unlock2 = 0x555; + } else { + cfi->addr_unlock2 = 0x554; + } + break; + case CFI_DEVICETYPE_X32: + cfi->addr_unlock1 = 0x1554; + if (map_bankwidth(map) == cfi_interleave(cfi)*2) { + /* X32 chip(s) in X16 mode */ + cfi->addr_unlock1 = 0xaaa; + } else { + cfi->addr_unlock2 = 0xaa8; + } + break; + default: + printk(KERN_WARNING + "MTD %s(): Unsupported device type %d\n", + __func__, cfi->device_type); + return NULL; } - break; - case CFI_DEVICETYPE_X32: - cfi->addr_unlock1 = 0x1555; - cfi->addr_unlock2 = 0xaaa; - break; - default: - printk(KERN_NOTICE "Eep. 
Unknown cfi_cmdset_0002 device type %d\n", cfi->device_type); - return NULL; } + } /* CFI mode */ for (i=0; i< cfi->numchips; i++) { @@ -147,23 +247,25 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) map->fldrv = &cfi_amdstd_chipdrv; - cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL); return cfi_amdstd_setup(map); } + static struct mtd_info *cfi_amdstd_setup(struct map_info *map) { struct cfi_private *cfi = map->fldrv_priv; struct mtd_info *mtd; unsigned long devsize = (1<cfiq->DevSize) * cfi->interleave; + unsigned long offset = 0; + int i,j; mtd = kmalloc(sizeof(*mtd), GFP_KERNEL); printk(KERN_NOTICE "number of %s chips: %d\n", - (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); + (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); if (!mtd) { - printk(KERN_WARNING "Failed to allocate memory for MTD device\n"); - goto setup_err; + printk(KERN_WARNING "Failed to allocate memory for MTD device\n"); + goto setup_err; } memset(mtd, 0, sizeof(*mtd)); @@ -171,86 +273,69 @@ static struct mtd_info *cfi_amdstd_setup(struct map_info *map) mtd->type = MTD_NORFLASH; /* Also select the correct geometry setup too */ mtd->size = devsize * cfi->numchips; - - if (cfi->cfiq->NumEraseRegions == 1) { - /* No need to muck about with multiple erase sizes */ - mtd->erasesize = ((cfi->cfiq->EraseRegionInfo[0] >> 8) & ~0xff) * cfi->interleave; - } else { - unsigned long offset = 0; - int i,j; - - mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; - mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL); - if (!mtd->eraseregions) { - printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n"); - goto setup_err; - } + + mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; + mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) + * mtd->numeraseregions, GFP_KERNEL); + if (!mtd->eraseregions) { + printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n"); + goto setup_err; + } - for (i=0; icfiq->NumEraseRegions; i++) { - unsigned long ernum, ersize; - ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; - ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; + for (i=0; icfiq->NumEraseRegions; i++) { + unsigned long ernum, ersize; + ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; + ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; - if (mtd->erasesize < ersize) { - mtd->erasesize = ersize; - } - for (j=0; jnumchips; j++) { - mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; - mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; - mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; - } - offset += (ersize * ernum); + if (mtd->erasesize < ersize) { + mtd->erasesize = ersize; } - if (offset != devsize) { - /* Argh */ - printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize); - goto setup_err; + for (j=0; jnumchips; j++) { + mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; + mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; + mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; } + offset += (ersize * ernum); + } + if (offset != devsize) { + /* Argh */ + printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize); + goto setup_err; + } #if 0 
- // debug - for (i=0; inumeraseregions;i++){ - printk("%d: offset=0x%x,size=0x%x,blocks=%d\n", - i,mtd->eraseregions[i].offset, - mtd->eraseregions[i].erasesize, - mtd->eraseregions[i].numblocks); - } -#endif + // debug + for (i=0; inumeraseregions;i++){ + printk("%d: offset=0x%x,size=0x%x,blocks=%d\n", + i,mtd->eraseregions[i].offset, + mtd->eraseregions[i].erasesize, + mtd->eraseregions[i].numblocks); } - - switch (CFIDEV_BUSWIDTH) - { - case 1: - case 2: - case 4: -#if 1 - if (mtd->numeraseregions > 1) - mtd->erase = cfi_amdstd_erase_varsize; - else #endif - if (((cfi->cfiq->EraseRegionInfo[0] & 0xffff) + 1) == 1) - mtd->erase = cfi_amdstd_erase_chip; - else - mtd->erase = cfi_amdstd_erase_onesize; - mtd->read = cfi_amdstd_read; - mtd->write = cfi_amdstd_write; - break; - default: - printk(KERN_WARNING "Unsupported buswidth\n"); - goto setup_err; - break; + if (mtd->numeraseregions == 1 + && ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) + 1) == 1) { + mtd->erase = cfi_amdstd_erase_chip; + } else { + mtd->erase = cfi_amdstd_erase_varsize; + mtd->lock = cfi_amdstd_lock_varsize; + mtd->unlock = cfi_amdstd_unlock_varsize; } - if (cfi->fast_prog) { - /* In cfi_amdstd_write() we frob the protection stuff - without paying any attention to the state machine. - This upsets in-progress erases. So we turn this flag - off for now till the code gets fixed. */ - printk(KERN_NOTICE "cfi_cmdset_0002: Disabling fast programming due to code brokenness.\n"); - cfi->fast_prog = 0; + + if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) { + DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" ); + mtd->write = cfi_amdstd_write_buffers; + } else { + DEBUG(MTD_DEBUG_LEVEL1, "Using word write method\n" ); + mtd->write = cfi_amdstd_write_words; } + mtd->read = cfi_amdstd_read; - /* does this chip have a secsi area? */ + /* FIXME: erase-suspend-program is broken. See + http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */ + printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n"); + + /* does this chip have a secsi area? */ if(cfi->mfr==1){ switch(cfi->id){ @@ -289,46 +374,181 @@ static struct mtd_info *cfi_amdstd_setup(struct map_info *map) return NULL; } -static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) +/* + * Return true if the chip is ready. + * + * Ready is one of: read mode, query mode, erase-suspend-read mode (in any + * non-suspended sector) and is indicated by no toggle bits toggling. + * + * Note that anything more complicated than checking if no bits are toggling + * (including checking DQ5 for an error status) is tricky to get working + * correctly and is therefore not done (particulary with interleaved chips + * as each chip must be checked independantly of the others). 
+ */ +static int chip_ready(struct map_info *map, unsigned long addr) +{ + map_word d, t; + + d = map_read(map, addr); + t = map_read(map, addr); + + return map_word_equal(map, d, t); +} + +static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) { DECLARE_WAITQUEUE(wait, current); - unsigned long timeo = jiffies + HZ; + struct cfi_private *cfi = map->fldrv_priv; + unsigned long timeo; + struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv; + resettime: + timeo = jiffies + HZ; retry: - cfi_spin_lock(chip->mutex); + switch (chip->state) { - if (chip->state != FL_READY){ -#if 0 - printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state); -#endif + case FL_STATUS: + for (;;) { + if (chip_ready(map, adr)) + break; + + if (time_after(jiffies, timeo)) { + printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); + cfi_spin_unlock(chip->mutex); + return -EIO; + } + cfi_spin_unlock(chip->mutex); + cfi_udelay(1); + cfi_spin_lock(chip->mutex); + /* Someone else might have been playing with it. */ + goto retry; + } + + case FL_READY: + case FL_CFI_QUERY: + case FL_JEDEC_QUERY: + return 0; + + case FL_ERASING: + if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */ + goto sleep; + + if (!(mode == FL_READY || mode == FL_POINT + || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)) + || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)))) + goto sleep; + + /* We could check to see if we're trying to access the sector + * that is currently being erased. However, no user will try + * anything like that so we just wait for the timeout. */ + + /* Erase suspend */ + /* It's harmless to issue the Erase-Suspend and Erase-Resume + * commands when the erase algorithm isn't in progress. */ + map_write(map, CMD(0xB0), chip->in_progress_block_addr); + chip->oldstate = FL_ERASING; + chip->state = FL_ERASE_SUSPENDING; + chip->erase_suspended = 1; + for (;;) { + if (chip_ready(map, adr)) + break; + + if (time_after(jiffies, timeo)) { + /* Should have suspended the erase by now. + * Send an Erase-Resume command as either + * there was an error (so leave the erase + * routine to recover from it) or we trying to + * use the erase-in-progress sector. */ + map_write(map, CMD(0x30), chip->in_progress_block_addr); + chip->state = FL_ERASING; + chip->oldstate = FL_READY; + printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); + return -EIO; + } + + cfi_spin_unlock(chip->mutex); + cfi_udelay(1); + cfi_spin_lock(chip->mutex); + /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. + So we can just loop here. */ + } + chip->state = FL_READY; + return 0; + + case FL_POINT: + /* Only if there's no operation suspended... 
*/ + if (mode == FL_READY && chip->oldstate == FL_READY) + return 0; + + default: + sleep: set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - cfi_spin_unlock(chip->mutex); - schedule(); remove_wait_queue(&chip->wq, &wait); -#if 0 - if(signal_pending(current)) - return -EINTR; -#endif - timeo = jiffies + HZ; + cfi_spin_lock(chip->mutex); + goto resettime; + } +} - goto retry; - } + +static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) +{ + struct cfi_private *cfi = map->fldrv_priv; + + switch(chip->oldstate) { + case FL_ERASING: + chip->state = chip->oldstate; + map_write(map, CMD(0x30), chip->in_progress_block_addr); + chip->oldstate = FL_READY; + chip->state = FL_ERASING; + break; + + case FL_READY: + case FL_STATUS: + /* We should really make set_vpp() count, rather than doing this */ + DISABLE_VPP(map); + break; + default: + printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); + } + wake_up(&chip->wq); +} + + +static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) +{ + unsigned long cmd_addr; + struct cfi_private *cfi = map->fldrv_priv; + int ret; adr += chip->start; - chip->state = FL_READY; + /* Ensure cmd read/writes are aligned. */ + cmd_addr = adr & ~(map_bankwidth(map)-1); + + cfi_spin_lock(chip->mutex); + ret = get_chip(map, chip, cmd_addr, FL_READY); + if (ret) { + cfi_spin_unlock(chip->mutex); + return ret; + } + + if (chip->state != FL_POINT && chip->state != FL_READY) { + map_write(map, CMD(0xf0), cmd_addr); + chip->state = FL_READY; + } map_copy_from(map, buf, adr, len); - wake_up(&chip->wq); - cfi_spin_unlock(chip->mutex); + put_chip(map, chip, cmd_addr); + cfi_spin_unlock(chip->mutex); return 0; } + static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct map_info *map = mtd->priv; @@ -370,6 +590,7 @@ static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_ return ret; } + static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) { DECLARE_WAITQUEUE(wait, current); @@ -381,11 +602,11 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi if (chip->state != FL_READY){ #if 0 - printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state); + printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state); #endif set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - + cfi_spin_unlock(chip->mutex); schedule(); @@ -402,13 +623,15 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi adr += chip->start; chip->state = FL_READY; - + + /* should these be CFI_DEVICETYPE_X8 instead of cfi->device_type? */ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map_copy_from(map, buf, adr, len); + /* should these be CFI_DEVICETYPE_X8 instead of cfi->device_type? 
*/ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); @@ -463,215 +686,136 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, return ret; } -static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, cfi_word datum, int fast) + +static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum) { - unsigned long timeo = jiffies + HZ; - unsigned int oldstatus, status, prev_oldstatus, prev_status; - unsigned int dq6; struct cfi_private *cfi = map->fldrv_priv; - /* We use a 1ms + 1 jiffies generic timeout for writes (most devices have - a max write time of a few hundreds usec). However, we should use the - maximum timeout value given by the chip at probe time instead. - Unfortunately, struct flchip does have a field for maximum timeout, - only for typical which can be far too short depending of the conditions. - The ' + 1' is to avoid having a timeout of 0 jiffies if HZ is smaller - than 1000. Using a static variable allows makes us save the costly - divide operation at each word write.*/ - static unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; - DECLARE_WAITQUEUE(wait, current); + unsigned long timeo = jiffies + HZ; + /* + * We use a 1ms + 1 jiffies generic timeout for writes (most devices + * have a max write time of a few hundreds usec). However, we should + * use the maximum timeout value given by the chip at probe time + * instead. Unfortunately, struct flchip does have a field for + * maximum timeout, only for typical which can be far too short + * depending of the conditions. The ' + 1' is to avoid having a + * timeout of 0 jiffies if HZ is smaller than 1000. + */ + unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; int ret = 0; - int ta = 0; + map_word oldd, curd; + int retry_cnt = 0; - retry: - cfi_spin_lock(chip->mutex); + adr += chip->start; - if (chip->state != FL_READY) { -#if 0 - printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", chip->state); -#endif - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - + cfi_spin_lock(chip->mutex); + ret = get_chip(map, chip, adr, FL_WRITING); + if (ret) { cfi_spin_unlock(chip->mutex); + return ret; + } - schedule(); - remove_wait_queue(&chip->wq, &wait); -#if 0 - printk(KERN_DEBUG "Wake up to write:\n"); - if(signal_pending(current)) - return -EINTR; -#endif - timeo = jiffies + HZ; - - goto retry; - } - - chip->state = FL_WRITING; + DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", + __func__, adr, datum.x[0] ); - adr += chip->start; - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8x)\n", - __func__, adr, datum ); + /* + * Check for a NOP for the case when the datum to write is already + * present - it saves time and works around buggy chips that corrupt + * data at other locations when 0xff is written to a location that + * already contains 0xff. 
+ */ + oldd = map_read(map, adr); + if (map_word_equal(map, oldd, datum)) { + DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n", + __func__); + goto op_done; + } ENABLE_VPP(map); - if (fast) { /* Unlock bypass */ - cfi_send_gen_cmd(0xA0, 0, chip->start, map, cfi, cfi->device_type, NULL); - } - else { - cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); - cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); - cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); - } - cfi_write(map, datum, adr); + retry: + /* + * The CFI_DEVICETYPE_X8 argument is needed even when + * cfi->device_type != CFI_DEVICETYPE_X8. The addresses for + * command sequences don't scale even when the device is + * wider. This is the case for many of the cfi_send_gen_cmd() + * below. I'm not sure, however, why some use + * cfi->device_type. + */ + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); + cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); + map_write(map, datum, adr); + chip->state = FL_WRITING; cfi_spin_unlock(chip->mutex); cfi_udelay(chip->word_write_time); cfi_spin_lock(chip->mutex); - /* - * Polling toggle bits instead of reading back many times - * This ensures that write operation is really completed, - * or tells us why it failed. - * - * It appears tha the polling and decoding of error state might - * be simplified. Don't do it unless you really know what you - * are doing. You must remember that JESD21-C 3.5.3 states that - * the status must be read back an _additional_ two times before - * a failure is determined. This is because these devices have - * internal state machines that are asynchronous to the external - * data bus. During an erase or write the read-back status of the - * polling bits might be transitioning internaly when the external - * read-back occurs. This means that the bits aren't in the final - * state and they might appear to report an error as they transition - * and are in a weird state. This will produce infrequent errors - * that will usually disappear the next time an erase or write - * happens (Try tracking those errors down!). To ensure that - * the bits are not in transition the location must be read-back - * two more times and compared against what was written - BOTH reads - * MUST match what was written - don't think this can be simplified - * to only the last read matching. If the comparison fails, error - * state can then be decoded. - * - * - Thayne Harbaugh - */ - dq6 = CMD(1<<6); /* See comment above for timeout value. */ timeo = jiffies + uWriteTimeout; - - oldstatus = cfi_read(map, adr); - status = cfi_read(map, adr); - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n", - __func__, oldstatus, status ); - - /* - * This only checks if dq6 is still toggling and that our - * timer hasn't expired. We purposefully ignore the chips - * internal timer that will assert dq5 and leave dq6 toggling. - * This is done for a variety of reasons: - * 1) Not all chips support dq5. - * 2) Dealing with asynchronous status bit and data updates - * and reading a device two more times creates _messy_ - * logic when trying to deal with interleaved devices - - * some may be changing while others are still busy. 
- * 3) Checking dq5 only helps to optimize an error case that - * should at worst be infrequent and at best non-existent. - * - * If our timeout occurs _then_ we will check dq5 to see - * if the device also had an internal timeout. - */ - while( ( ( status ^ oldstatus ) & dq6 ) - && ! ( ta = time_after(jiffies, timeo) ) ) { + for (;;) { + if (chip->state != FL_WRITING) { + /* Someone's suspended the write. Sleep */ + DECLARE_WAITQUEUE(wait, current); - if (need_resched()) { + set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&chip->wq, &wait); cfi_spin_unlock(chip->mutex); - yield(); + schedule(); + remove_wait_queue(&chip->wq, &wait); + timeo = jiffies + (HZ / 2); /* FIXME */ cfi_spin_lock(chip->mutex); - } else - udelay(1); - - oldstatus = cfi_read( map, adr ); - status = cfi_read( map, adr ); - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n", - __func__, oldstatus, status ); - } + continue; + } - /* - * Something kicked us out of the read-back loop. We'll - * check success befor checking failure. - * Even though dq6 might be true data, it is unkown if - * all of the other bits have changed to true data due to - * the asynchronous nature of the internal state machine. - * We will read two more times and use this to either - * verify that the write completed successfully or - * that something really went wrong. BOTH reads - * must match what was written - this certifies that - * bits aren't still changing and that the status - * bits erroneously match the datum that was written. - */ - prev_oldstatus = oldstatus; - prev_status = status; - oldstatus = cfi_read(map, adr); - status = cfi_read(map, adr); - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n", - __func__, oldstatus, status ); - - if ( oldstatus == datum && status == datum ) { - /* success - do nothing */ - goto write_done; - } + /* Test to see if toggling has stopped. */ + oldd = map_read(map, adr); + curd = map_read(map, adr); + if (map_word_equal(map, curd, oldd)) { + /* Do we have the correct value? */ + if (map_word_equal(map, curd, datum)) { + goto op_done; + } + /* Nope something has gone wrong. */ + break; + } - if ( ta ) { - int dq5mask = ( ( status ^ oldstatus ) & dq6 ) >> 1; - if ( status & dq5mask ) { - /* dq5 asserted - decode interleave chips */ - printk( KERN_WARNING - "MTD %s(): FLASH internal timeout: 0x%.8x\n", - __func__, - status & dq5mask ); - } else { - printk( KERN_WARNING - "MTD %s(): Software timed out during write.\n", + if (time_after(jiffies, timeo)) { + printk(KERN_WARNING "MTD %s(): software timeout\n", __func__ ); + break; } - goto write_failed; - } - - /* - * If we get to here then it means that something - * is wrong and it's not a timeout. Something - * is seriously wacky! Dump some debug info. - */ - printk(KERN_WARNING - "MTD %s(): Wacky! Unable to decode failure status\n", - __func__ ); - printk(KERN_WARNING - "MTD %s(): 0x%.8lx(0x%.8x): 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n", - __func__, adr, datum, - prev_oldstatus, prev_status, - oldstatus, status); + /* Latency issues. Drop the lock, wait a while and retry */ + cfi_spin_unlock(chip->mutex); + cfi_udelay(1); + cfi_spin_lock(chip->mutex); + } - write_failed: - ret = -EIO; /* reset on all failures. 
*/ - cfi_write( map, CMD(0xF0), chip->start ); + map_write( map, CMD(0xF0), chip->start ); /* FIXME - should have reset delay before continuing */ + if (++retry_cnt <= MAX_WORD_RETRIES) + goto retry; - write_done: - DISABLE_VPP(map); + ret = -EIO; + op_done: chip->state = FL_READY; - wake_up(&chip->wq); + put_chip(map, chip, adr); cfi_spin_unlock(chip->mutex); return ret; } -static int cfi_amdstd_write (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf) + +static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; int ret = 0; int chipnum; unsigned long ofs, chipstart; + DECLARE_WAITQUEUE(wait, current); *retlen = 0; if (!len) @@ -682,33 +826,52 @@ static int cfi_amdstd_write (struct mtd_info *mtd, loff_t to , size_t len, size_ chipstart = cfi->chips[chipnum].start; /* If it's not bus-aligned, do the first byte write */ - if (ofs & (CFIDEV_BUSWIDTH-1)) { - unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1); + if (ofs & (map_bankwidth(map)-1)) { + unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); int i = ofs - bus_ofs; int n = 0; - u_char tmp_buf[8]; - cfi_word datum; + map_word tmp_buf; - map_copy_from(map, tmp_buf, bus_ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH); - while (len && i < CFIDEV_BUSWIDTH) - tmp_buf[i++] = buf[n++], len--; + retry: + cfi_spin_lock(cfi->chips[chipnum].mutex); - if (cfi_buswidth_is_2()) { - datum = *(__u16*)tmp_buf; - } else if (cfi_buswidth_is_4()) { - datum = *(__u32*)tmp_buf; - } else { - return -EINVAL; /* should never happen, but be safe */ + if (cfi->chips[chipnum].state != FL_READY) { +#if 0 + printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state); +#endif + set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&cfi->chips[chipnum].wq, &wait); + + cfi_spin_unlock(cfi->chips[chipnum].mutex); + + schedule(); + remove_wait_queue(&cfi->chips[chipnum].wq, &wait); +#if 0 + if(signal_pending(current)) + return -EINTR; +#endif + goto retry; } + /* Load 'tmp_buf' with old contents of flash */ + tmp_buf = map_read(map, bus_ofs+chipstart); + + cfi_spin_unlock(cfi->chips[chipnum].mutex); + + /* Number of bytes to copy from buffer */ + n = min_t(int, len, map_bankwidth(map)-i); + + tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); + ret = do_write_oneword(map, &cfi->chips[chipnum], - bus_ofs, datum, 0); + bus_ofs, tmp_buf); if (ret) return ret; ofs += n; buf += n; (*retlen) += n; + len -= n; if (ofs >> cfi->chipshift) { chipnum ++; @@ -718,315 +881,286 @@ static int cfi_amdstd_write (struct mtd_info *mtd, loff_t to , size_t len, size_ } } - if (cfi->fast_prog) { - /* Go into unlock bypass mode */ - cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL); - cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL); - cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL); - } - /* We are now aligned, write as much as possible */ - while(len >= CFIDEV_BUSWIDTH) { - cfi_word datum; - - if (cfi_buswidth_is_1()) { - datum = *(__u8*)buf; - } else if (cfi_buswidth_is_2()) { - datum = *(__u16*)buf; - } else if (cfi_buswidth_is_4()) { - datum = *(__u32*)buf; - } else { - return -EINVAL; - } - ret = do_write_oneword(map, &cfi->chips[chipnum], - ofs, datum, cfi->fast_prog); - if (ret) { - if (cfi->fast_prog){ - /* Get out of unlock bypass mode */ - 
cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL); - } - return ret; - } + while(len >= map_bankwidth(map)) { + map_word datum; - ofs += CFIDEV_BUSWIDTH; - buf += CFIDEV_BUSWIDTH; - (*retlen) += CFIDEV_BUSWIDTH; - len -= CFIDEV_BUSWIDTH; + datum = map_word_load(map, buf); - if (ofs >> cfi->chipshift) { - if (cfi->fast_prog){ - /* Get out of unlock bypass mode */ - cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL); - } + ret = do_write_oneword(map, &cfi->chips[chipnum], + ofs, datum); + if (ret) + return ret; + + ofs += map_bankwidth(map); + buf += map_bankwidth(map); + (*retlen) += map_bankwidth(map); + len -= map_bankwidth(map); + if (ofs >> cfi->chipshift) { chipnum ++; ofs = 0; if (chipnum == cfi->numchips) return 0; chipstart = cfi->chips[chipnum].start; - if (cfi->fast_prog){ - /* Go into unlock bypass mode for next set of chips */ - cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL); - cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL); - cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL); - } } } - if (cfi->fast_prog){ - /* Get out of unlock bypass mode */ - cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL); - } - /* Write the trailing bytes if any */ - if (len & (CFIDEV_BUSWIDTH-1)) { - int i = 0, n = 0; - u_char tmp_buf[8]; - cfi_word datum; - - map_copy_from(map, tmp_buf, ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH); - while (len--) - tmp_buf[i++] = buf[n++]; - - if (cfi_buswidth_is_2()) { - datum = *(__u16*)tmp_buf; - } else if (cfi_buswidth_is_4()) { - datum = *(__u32*)tmp_buf; - } else { - return -EINVAL; /* should never happen, but be safe */ + if (len & (map_bankwidth(map)-1)) { + map_word tmp_buf; + + retry1: + cfi_spin_lock(cfi->chips[chipnum].mutex); + + if (cfi->chips[chipnum].state != FL_READY) { +#if 0 + printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state); +#endif + set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&cfi->chips[chipnum].wq, &wait); + + cfi_spin_unlock(cfi->chips[chipnum].mutex); + + schedule(); + remove_wait_queue(&cfi->chips[chipnum].wq, &wait); +#if 0 + if(signal_pending(current)) + return -EINTR; +#endif + goto retry1; } + tmp_buf = map_read(map, ofs + chipstart); + + cfi_spin_unlock(cfi->chips[chipnum].mutex); + + tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); + ret = do_write_oneword(map, &cfi->chips[chipnum], - ofs, datum, 0); + ofs, tmp_buf); if (ret) return ret; - (*retlen) += n; + (*retlen) += len; } return 0; } -static inline int do_erase_chip(struct map_info *map, struct flchip *chip) + +/* + * FIXME: interleaved mode not tested, and probably not supported! + */ +static inline int do_write_buffer(struct map_info *map, struct flchip *chip, + unsigned long adr, const u_char *buf, int len) { - unsigned int oldstatus, status, prev_oldstatus, prev_status; - unsigned int dq6; - unsigned long timeo = jiffies + HZ; - unsigned long int adr; struct cfi_private *cfi = map->fldrv_priv; - DECLARE_WAITQUEUE(wait, current); - int ret = 0; - int ta = 0; - cfi_word ones = 0; + unsigned long timeo = jiffies + HZ; + /* see comments in do_write_oneword() regarding uWriteTimeo. 
*/ + static unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; + int ret = -EIO; + unsigned long cmd_adr; + int z, words; + map_word datum; - retry: - cfi_spin_lock(chip->mutex); + adr += chip->start; + cmd_adr = adr; - if (chip->state != FL_READY){ - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - + cfi_spin_lock(chip->mutex); + ret = get_chip(map, chip, adr, FL_WRITING); + if (ret) { cfi_spin_unlock(chip->mutex); + return ret; + } - schedule(); - remove_wait_queue(&chip->wq, &wait); -#if 0 - if(signal_pending(current)) - return -EINTR; -#endif - timeo = jiffies + HZ; + datum = map_word_load(map, buf); - goto retry; - } + DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", + __func__, adr, datum.x[0] ); - chip->state = FL_ERASING; - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", - __func__, chip->start ); - - /* Handle devices with one erase region, that only implement - * the chip erase command. - */ ENABLE_VPP(map); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); - cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); - cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); - cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); - cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); - timeo = jiffies + (HZ*20); - adr = cfi->addr_unlock1; + //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); - /* Wait for the end of programing/erasure by using the toggle method. - * As long as there is a programming procedure going on, bit 6 - * is toggling it's state with each consecutive read. - * The toggling stops as soon as the procedure is completed. - * - * If the process has gone on for too long on the chip bit 5 gets. - * After bit5 is set you can kill the operation by sending a reset - * command to the chip. - */ - /* see comments in do_write_oneword */ - dq6 = CMD(1<<6); + /* Write Buffer Load */ + map_write(map, CMD(0x25), cmd_adr); - oldstatus = cfi_read(map, adr); - status = cfi_read(map, adr); - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n", - __func__, oldstatus, status ); + chip->state = FL_WRITING_TO_BUFFER; - while( ( ( status ^ oldstatus ) & dq6 ) - && ! ( ta = time_after(jiffies, timeo) ) ) { - int wait_reps; + /* Write length of data to come */ + words = len / map_bankwidth(map); + map_write(map, CMD(words - 1), cmd_adr); + /* Write data */ + z = 0; + while(z < words * map_bankwidth(map)) { + datum = map_word_load(map, buf); + map_write(map, datum, adr + z); - /* an initial short sleep */ - cfi_spin_unlock(chip->mutex); - schedule_timeout(HZ/100); - cfi_spin_lock(chip->mutex); + z += map_bankwidth(map); + buf += map_bankwidth(map); + } + z -= map_bankwidth(map); + + adr += z; + + /* Write Buffer Program Confirm: GO GO GO */ + map_write(map, CMD(0x29), cmd_adr); + chip->state = FL_WRITING; + + cfi_spin_unlock(chip->mutex); + cfi_udelay(chip->buffer_write_time); + cfi_spin_lock(chip->mutex); + + timeo = jiffies + uWriteTimeout; - if (chip->state != FL_ERASING) { - /* Someone's suspended the erase. Sleep */ + for (;;) { + if (chip->state != FL_WRITING) { + /* Someone's suspended the write. 
Sleep */ + DECLARE_WAITQUEUE(wait, current); + set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - cfi_spin_unlock(chip->mutex); - printk("erase suspended. Sleeping\n"); - schedule(); remove_wait_queue(&chip->wq, &wait); -#if 0 - if (signal_pending(current)) - return -EINTR; -#endif - timeo = jiffies + (HZ*2); /* FIXME */ + timeo = jiffies + (HZ / 2); /* FIXME */ cfi_spin_lock(chip->mutex); continue; } - /* Busy wait for 1/10 of a milisecond */ - for(wait_reps = 0; - (wait_reps < 100) - && ( ( status ^ oldstatus ) & dq6 ); - wait_reps++) { - - /* Latency issues. Drop the lock, wait a while and retry */ - cfi_spin_unlock(chip->mutex); - - cfi_udelay(1); - - cfi_spin_lock(chip->mutex); - oldstatus = cfi_read(map, adr); - status = cfi_read(map, adr); - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n", - __func__, oldstatus, status ); - } - oldstatus = cfi_read(map, adr); - status = cfi_read(map, adr); - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n", - __func__, oldstatus, status ); - } - - prev_oldstatus = oldstatus; - prev_status = status; - oldstatus = cfi_read(map, adr); - status = cfi_read(map, adr); - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n", - __func__, oldstatus, status ); - - if ( cfi_buswidth_is_1() ) { - ones = (__u8)~0; - } else if ( cfi_buswidth_is_2() ) { - ones = (__u16)~0; - } else if ( cfi_buswidth_is_4() ) { - ones = (__u32)~0; - } else { - printk(KERN_WARNING "Unsupported buswidth\n"); - goto erase_failed; - } - - if ( oldstatus == ones && status == ones ) { - /* success - do nothing */ - goto erase_done; - } + if (chip_ready(map, adr)) + goto op_done; + + if( time_after(jiffies, timeo)) + break; - if ( ta ) { - int dq5mask = ( ( status ^ oldstatus ) & dq6 ) >> 1; - if ( status & dq5mask ) { - /* dq5 asserted - decode interleave chips */ - printk( KERN_WARNING - "MTD %s(): FLASH internal timeout: 0x%.8x\n", - __func__, - status & dq5mask ); - } else { - printk( KERN_WARNING - "MTD %s(): Software timed out during write.\n", - __func__ ); - } - goto erase_failed; + /* Latency issues. Drop the lock, wait a while and retry */ + cfi_spin_unlock(chip->mutex); + cfi_udelay(1); + cfi_spin_lock(chip->mutex); } - printk(KERN_WARNING - "MTD %s(): Wacky! Unable to decode failure status\n", + printk(KERN_WARNING "MTD %s(): software timeout\n", __func__ ); - printk(KERN_WARNING - "MTD %s(): 0x%.8lx(0x%.8x): 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n", - __func__, adr, ones, - prev_oldstatus, prev_status, - oldstatus, status); - - erase_failed: - ret = -EIO; /* reset on all failures. 
*/ - cfi_write( map, CMD(0xF0), chip->start ); + map_write( map, CMD(0xF0), chip->start ); /* FIXME - should have reset delay before continuing */ - erase_done: - DISABLE_VPP(map); + ret = -EIO; + op_done: chip->state = FL_READY; - wake_up(&chip->wq); + put_chip(map, chip, adr); cfi_spin_unlock(chip->mutex); + return ret; } -static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr) +static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) { - unsigned int oldstatus, status, prev_oldstatus, prev_status; - unsigned int dq6; - unsigned long timeo = jiffies + HZ; + struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; - DECLARE_WAITQUEUE(wait, current); + int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; int ret = 0; - int ta = 0; - cfi_word ones = 0; + int chipnum; + unsigned long ofs; - retry: - cfi_spin_lock(chip->mutex); + *retlen = 0; + if (!len) + return 0; - if (chip->state != FL_READY){ - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - - cfi_spin_unlock(chip->mutex); + chipnum = to >> cfi->chipshift; + ofs = to - (chipnum << cfi->chipshift); - schedule(); - remove_wait_queue(&chip->wq, &wait); -#if 0 - if(signal_pending(current)) - return -EINTR; -#endif - timeo = jiffies + HZ; + /* If it's not bus-aligned, do the first word write */ + if (ofs & (map_bankwidth(map)-1)) { + size_t local_len = (-ofs)&(map_bankwidth(map)-1); + if (local_len > len) + local_len = len; + ret = cfi_amdstd_write_words(mtd, to, local_len, + retlen, buf); + if (ret) + return ret; + ofs += local_len; + buf += local_len; + len -= local_len; - goto retry; - } + if (ofs >> cfi->chipshift) { + chipnum ++; + ofs = 0; + if (chipnum == cfi->numchips) + return 0; + } + } - chip->state = FL_ERASING; + /* Write buffer is worth it only if more than one word to write... */ + while (len >= map_bankwidth(map) * 2) { + /* We must not cross write block boundaries */ + int size = wbufsize - (ofs & (wbufsize-1)); + + if (size > len) + size = len; + if (size % map_bankwidth(map)) + size -= size % map_bankwidth(map); + + ret = do_write_buffer(map, &cfi->chips[chipnum], + ofs, buf, size); + if (ret) + return ret; + + ofs += size; + buf += size; + (*retlen) += size; + len -= size; + + if (ofs >> cfi->chipshift) { + chipnum ++; + ofs = 0; + if (chipnum == cfi->numchips) + return 0; + } + } + + if (len) { + size_t retlen_dregs = 0; + + ret = cfi_amdstd_write_words(mtd, to, len, &retlen_dregs, buf); + + *retlen += retlen_dregs; + return ret; + } + + return 0; +} + + +/* + * Handle devices with one erase region, that only implement + * the chip erase command. 
+ */ +static inline int do_erase_chip(struct map_info *map, struct flchip *chip) +{ + struct cfi_private *cfi = map->fldrv_priv; + unsigned long timeo = jiffies + HZ; + unsigned long int adr; + DECLARE_WAITQUEUE(wait, current); + int ret = 0; + + adr = cfi->addr_unlock1; + + cfi_spin_lock(chip->mutex); + ret = get_chip(map, chip, adr, FL_WRITING); + if (ret) { + cfi_spin_unlock(chip->mutex); + return ret; + } - adr += chip->start; DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", - __func__, adr ); + __func__, chip->start ); ENABLE_VPP(map); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); @@ -1034,155 +1168,85 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); + cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); - cfi_write(map, CMD(0x30), adr); - - timeo = jiffies + (HZ*20); - - /* Wait for the end of programing/erasure by using the toggle method. - * As long as there is a programming procedure going on, bit 6 - * is toggling it's state with each consecutive read. - * The toggling stops as soon as the procedure is completed. - * - * If the process has gone on for too long on the chip bit 5 gets. - * After bit5 is set you can kill the operation by sending a reset - * command to the chip. - */ - /* see comments in do_write_oneword */ - dq6 = CMD(1<<6); + chip->state = FL_ERASING; + chip->erase_suspended = 0; + chip->in_progress_block_addr = adr; - oldstatus = cfi_read(map, adr); - status = cfi_read(map, adr); - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n", - __func__, oldstatus, status ); + cfi_spin_unlock(chip->mutex); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout((chip->erase_time*HZ)/(2*1000)); + cfi_spin_lock(chip->mutex); - while( ( ( status ^ oldstatus ) & dq6 ) - && ! ( ta = time_after(jiffies, timeo) ) ) { - int wait_reps; + timeo = jiffies + (HZ*20); - /* an initial short sleep */ - cfi_spin_unlock(chip->mutex); - schedule_timeout(HZ/100); - cfi_spin_lock(chip->mutex); - + for (;;) { if (chip->state != FL_ERASING) { /* Someone's suspended the erase. Sleep */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - cfi_spin_unlock(chip->mutex); - printk(KERN_DEBUG "erase suspended. Sleeping\n"); - schedule(); remove_wait_queue(&chip->wq, &wait); -#if 0 - if (signal_pending(current)) - return -EINTR; -#endif - timeo = jiffies + (HZ*2); /* FIXME */ cfi_spin_lock(chip->mutex); continue; } - - /* Busy wait for 1/10 of a milisecond */ - for(wait_reps = 0; - (wait_reps < 100) - && ( ( status ^ oldstatus ) & dq6 ); - wait_reps++) { - - /* Latency issues. Drop the lock, wait a while and retry */ - cfi_spin_unlock(chip->mutex); - - cfi_udelay(1); - - cfi_spin_lock(chip->mutex); - oldstatus = cfi_read(map, adr); - status = cfi_read(map, adr); - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n", - __func__, oldstatus, status ); + if (chip->erase_suspended) { + /* This erase was suspended and resumed. 
+ Adjust the timeout */ + timeo = jiffies + (HZ*20); /* FIXME */ + chip->erase_suspended = 0; } - oldstatus = cfi_read(map, adr); - status = cfi_read(map, adr); - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n", - __func__, oldstatus, status ); - } - prev_oldstatus = oldstatus; - prev_status = status; - oldstatus = cfi_read(map, adr); - status = cfi_read(map, adr); - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n", - __func__, oldstatus, status ); - - if ( cfi_buswidth_is_1() ) { - ones = (__u8)~0; - } else if ( cfi_buswidth_is_2() ) { - ones = (__u16)~0; - } else if ( cfi_buswidth_is_4() ) { - ones = (__u32)~0; - } else { - printk(KERN_WARNING "Unsupported buswidth\n"); - goto erase_failed; - } + if (chip_ready(map, adr)) + goto op_done; - if ( oldstatus == ones && status == ones ) { - /* success - do nothing */ - goto erase_done; - } + if (time_after(jiffies, timeo)) + break; - if ( ta ) { - int dq5mask = ( ( status ^ oldstatus ) & dq6 ) >> 1; - if ( status & dq5mask ) { - /* dq5 asserted - decode interleave chips */ - printk( KERN_WARNING - "MTD %s(): FLASH internal timeout: 0x%.8x\n", - __func__, - status & dq5mask ); - } else { - printk( KERN_WARNING - "MTD %s(): Software timed out during write.\n", - __func__ ); - } - goto erase_failed; + /* Latency issues. Drop the lock, wait a while and retry */ + cfi_spin_unlock(chip->mutex); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1); + cfi_spin_lock(chip->mutex); } - printk(KERN_WARNING - "MTD %s(): Wacky! Unable to decode failure status\n", + printk(KERN_WARNING "MTD %s(): software timeout\n", __func__ ); - printk(KERN_WARNING - "MTD %s(): 0x%.8lx(0x%.8x): 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n", - __func__, adr, ones, - prev_oldstatus, prev_status, - oldstatus, status); - - erase_failed: - ret = -EIO; /* reset on all failures. */ - cfi_write( map, CMD(0xF0), chip->start ); + map_write( map, CMD(0xF0), chip->start ); /* FIXME - should have reset delay before continuing */ - erase_done: - DISABLE_VPP(map); + ret = -EIO; + op_done: chip->state = FL_READY; - wake_up(&chip->wq); + put_chip(map, chip, adr); cfi_spin_unlock(chip->mutex); + return ret; } -static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) + +typedef int (*frob_t)(struct map_info *map, struct flchip *chip, + unsigned long adr, void *thunk); + + +static int cfi_amdstd_varsize_frob(struct mtd_info *mtd, frob_t frob, + loff_t ofs, size_t len, void *thunk) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; - unsigned long adr, len; + unsigned long adr; int chipnum, ret = 0; int i, first; struct mtd_erase_region_info *regions = mtd->eraseregions; - if (instr->addr > mtd->size) + if (ofs > mtd->size) return -EINVAL; - if ((instr->len + instr->addr) > mtd->size) + if ((len + ofs) > mtd->size) return -EINVAL; /* Check that both start and end of the requested erase are @@ -1197,7 +1261,7 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *ins start of the requested erase, and then go back one. */ - while (i < mtd->numeraseregions && instr->addr >= regions[i].offset) + while (i < mtd->numeraseregions && ofs >= regions[i].offset) i++; i--; @@ -1207,7 +1271,7 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *ins effect here. 
*/ - if (instr->addr & (regions[i].erasesize-1)) + if (ofs & (regions[i].erasesize-1)) return -EINVAL; /* Remember the erase region we start on */ @@ -1217,7 +1281,7 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *ins * with the erase region at that address. */ - while (inumeraseregions && (instr->addr + instr->len) >= regions[i].offset) + while (inumeraseregions && (ofs + len) >= regions[i].offset) i++; /* As before, drop back one to point at the region in which @@ -1225,18 +1289,17 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *ins */ i--; - if ((instr->addr + instr->len) & (regions[i].erasesize-1)) + if ((ofs + len) & (regions[i].erasesize-1)) return -EINVAL; - - chipnum = instr->addr >> cfi->chipshift; - adr = instr->addr - (chipnum << cfi->chipshift); - len = instr->len; - i=first; + chipnum = ofs >> cfi->chipshift; + adr = ofs - (chipnum << cfi->chipshift); - while(len) { - ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr); + i=first; + while (len) { + ret = (*frob)(map, &cfi->chips[chipnum], adr, thunk); + if (ret) return ret; @@ -1255,58 +1318,114 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *ins } } - instr->state = MTD_ERASE_DONE; - if (instr->callback) - instr->callback(instr); - return 0; } -static int cfi_amdstd_erase_onesize(struct mtd_info *mtd, struct erase_info *instr) + +static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk) { - struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; - unsigned long adr, len; - int chipnum, ret = 0; + unsigned long timeo = jiffies + HZ; + DECLARE_WAITQUEUE(wait, current); + int ret = 0; - if (instr->addr & (mtd->erasesize - 1)) - return -EINVAL; + adr += chip->start; - if (instr->len & (mtd->erasesize -1)) - return -EINVAL; + cfi_spin_lock(chip->mutex); + ret = get_chip(map, chip, adr, FL_ERASING); + if (ret) { + cfi_spin_unlock(chip->mutex); + return ret; + } - if ((instr->len + instr->addr) > mtd->size) - return -EINVAL; + DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", + __func__, adr ); - chipnum = instr->addr >> cfi->chipshift; - adr = instr->addr - (chipnum << cfi->chipshift); - len = instr->len; + ENABLE_VPP(map); + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); + cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL); + map_write(map, CMD(0x30), adr); - while(len) { - ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr); + chip->state = FL_ERASING; + chip->erase_suspended = 0; + chip->in_progress_block_addr = adr; + + cfi_spin_unlock(chip->mutex); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout((chip->erase_time*HZ)/(2*1000)); + cfi_spin_lock(chip->mutex); - if (ret) - return ret; + timeo = jiffies + (HZ*20); - adr += mtd->erasesize; - len -= mtd->erasesize; + for (;;) { + if (chip->state != FL_ERASING) { + /* Someone's suspended the erase. 
Sleep */ + set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&chip->wq, &wait); + cfi_spin_unlock(chip->mutex); + schedule(); + remove_wait_queue(&chip->wq, &wait); + cfi_spin_lock(chip->mutex); + continue; + } + if (chip->erase_suspended) { + /* This erase was suspended and resumed. + Adjust the timeout */ + timeo = jiffies + (HZ*20); /* FIXME */ + chip->erase_suspended = 0; + } - if (adr >> cfi->chipshift) { - adr = 0; - chipnum++; - - if (chipnum >= cfi->numchips) + if (chip_ready(map, adr)) + goto op_done; + + if (time_after(jiffies, timeo)) break; - } + + /* Latency issues. Drop the lock, wait a while and retry */ + cfi_spin_unlock(chip->mutex); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1); + cfi_spin_lock(chip->mutex); } - + + printk(KERN_WARNING "MTD %s(): software timeout\n", + __func__ ); + + /* reset on all failures. */ + map_write( map, CMD(0xF0), chip->start ); + /* FIXME - should have reset delay before continuing */ + + ret = -EIO; + op_done: + chip->state = FL_READY; + put_chip(map, chip, adr); + cfi_spin_unlock(chip->mutex); + return ret; +} + + +int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) +{ + unsigned long ofs, len; + int ret; + + ofs = instr->addr; + len = instr->len; + + ret = cfi_amdstd_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL); + if (ret) + return ret; + instr->state = MTD_ERASE_DONE; - if (instr->callback) - instr->callback(instr); + mtd_erase_callback(instr); return 0; } + static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) { struct map_info *map = mtd->priv; @@ -1324,12 +1443,12 @@ static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) return ret; instr->state = MTD_ERASE_DONE; - if (instr->callback) - instr->callback(instr); + mtd_erase_callback(instr); return 0; } + static void cfi_amdstd_sync (struct mtd_info *mtd) { struct map_info *map = mtd->priv; @@ -1368,7 +1487,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd) schedule(); - remove_wait_queue(&chip->wq, &wait); + remove_wait_queue(&chip->wq, &wait); goto retry; } @@ -1427,7 +1546,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd) /* Unlock the chips again */ if (ret) { - for (i--; i >=0; i--) { + for (i--; i >=0; i--) { chip = &cfi->chips[i]; cfi_spin_lock(chip->mutex); @@ -1443,6 +1562,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd) return ret; } + static void cfi_amdstd_resume(struct mtd_info *mtd) { struct map_info *map = mtd->priv; @@ -1458,7 +1578,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd) if (chip->state == FL_PM_SUSPENDED) { chip->state = FL_READY; - cfi_write(map, CMD(0xF0), chip->start); + map_write(map, CMD(0xF0), chip->start); wake_up(&chip->wq); } else @@ -1468,6 +1588,137 @@ static void cfi_amdstd_resume(struct mtd_info *mtd) } } + +#ifdef DEBUG_LOCK_BITS + +static int do_printlockstatus_oneblock(struct map_info *map, + struct flchip *chip, + unsigned long adr, + void *thunk) +{ + struct cfi_private *cfi = map->fldrv_priv; + int ofs_factor = cfi->interleave * cfi->device_type; + + cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL); + printk(KERN_DEBUG "block status register for 0x%08lx is %x\n", + adr, cfi_read_query(map, adr+(2*ofs_factor))); + cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL); + + return 0; +} + + +#define debug_dump_locks(mtd, frob, ofs, len, thunk) \ + cfi_amdstd_varsize_frob((mtd), (frob), (ofs), (len), (thunk)) + +#else + +#define debug_dump_locks(...) 
+ +#endif /* DEBUG_LOCK_BITS */ + + +struct xxlock_thunk { + uint8_t val; + flstate_t state; +}; + + +#define DO_XXLOCK_ONEBLOCK_LOCK ((struct xxlock_thunk){0x01, FL_LOCKING}) +#define DO_XXLOCK_ONEBLOCK_UNLOCK ((struct xxlock_thunk){0x00, FL_UNLOCKING}) + + +/* + * FIXME - this is *very* specific to a particular chip. It likely won't + * work for all chips that require unlock. It also hasn't been tested + * with interleaved chips. + */ +static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk) +{ + struct cfi_private *cfi = map->fldrv_priv; + struct xxlock_thunk *xxlt = (struct xxlock_thunk *)thunk; + int ret; + + /* + * This is easy because these are writes to registers and not writes + * to flash memory - that means that we don't have to check status + * and timeout. + */ + + adr += chip->start; + /* + * lock block registers: + * - on 64k boundariesand + * - bit 1 set high + * - block lock registers are 4MiB lower - overflow subtract (danger) + */ + adr = ((adr & ~0xffff) | 0x2) + ~0x3fffff; + + cfi_spin_lock(chip->mutex); + ret = get_chip(map, chip, adr, FL_LOCKING); + if (ret) { + cfi_spin_unlock(chip->mutex); + return ret; + } + + chip->state = xxlt->state; + map_write(map, CMD(xxlt->val), adr); + + /* Done and happy. */ + chip->state = FL_READY; + put_chip(map, chip, adr); + cfi_spin_unlock(chip->mutex); + return 0; +} + + +static int cfi_amdstd_lock_varsize(struct mtd_info *mtd, + loff_t ofs, + size_t len) +{ + int ret; + + DEBUG(MTD_DEBUG_LEVEL3, + "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n", + __func__, ofs, len); + debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0); + + ret = cfi_amdstd_varsize_frob(mtd, do_xxlock_oneblock, ofs, len, + (void *)&DO_XXLOCK_ONEBLOCK_LOCK); + + DEBUG(MTD_DEBUG_LEVEL3, + "%s: lock status after, ret=%d\n", + __func__, ret); + + debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0); + + return ret; +} + + +static int cfi_amdstd_unlock_varsize(struct mtd_info *mtd, + loff_t ofs, + size_t len) +{ + int ret; + + DEBUG(MTD_DEBUG_LEVEL3, + "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n", + __func__, ofs, len); + debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0); + + ret = cfi_amdstd_varsize_frob(mtd, do_xxlock_oneblock, ofs, len, + (void *)&DO_XXLOCK_ONEBLOCK_UNLOCK); + + DEBUG(MTD_DEBUG_LEVEL3, + "%s: lock status after, ret=%d\n", + __func__, ret); + debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0); + + return ret; +} + + static void cfi_amdstd_destroy(struct mtd_info *mtd) { struct map_info *map = mtd->priv; @@ -1480,21 +1731,23 @@ static void cfi_amdstd_destroy(struct mtd_info *mtd) static char im_name[]="cfi_cmdset_0002"; + int __init cfi_amdstd_init(void) { inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002); return 0; } + static void __exit cfi_amdstd_exit(void) { inter_module_unregister(im_name); } + module_init(cfi_amdstd_init); module_exit(cfi_amdstd_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Crossnet Co. et al."); MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); -
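
A minimal standalone sketch of the DQ6 toggle-bit completion check that the new chip_ready()/get_chip() code in this patch is built around: while an embedded program/erase algorithm is running, AMD-style chips drive status onto the data bus and DQ6 toggles on every read, so two consecutive reads of the same address differ; once two reads match, the chip is ready. The 16-bit bus width, pointer types and retry budget below are illustrative assumptions, not part of the driver:

#include <stdint.h>

/* Two back-to-back reads of any address inside the chip: they differ while
 * DQ6 is toggling (embedded algorithm busy) and match once it has finished. */
int flash_ready(volatile const uint16_t *addr)
{
	uint16_t a = *addr;
	uint16_t b = *addr;

	return a == b;
}

/* Busy-wait wrapper: poll until ready or the retry budget runs out. */
int wait_flash_ready(volatile const uint16_t *addr, unsigned long retries)
{
	while (retries--) {
		if (flash_ready(addr))
			return 0;	/* done */
	}
	return -1;			/* timed out - caller should issue the 0xF0 reset */
}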
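
Likewise, a hedged illustration of the three-cycle unlock plus program-setup sequence that do_write_oneword() issues through cfi_send_gen_cmd() before polling for completion. It assumes an X16 chip accessed in X8 mode, which is why the unlock addresses are 0xAAA/0x555 as set up earlier in the patch; the base pointer and offsets are hypothetical:

#include <stdint.h>

void amd_program_byte(volatile uint8_t *base, uint32_t off, uint8_t datum)
{
	base[0xAAA] = 0xAA;	/* unlock cycle 1 */
	base[0x555] = 0x55;	/* unlock cycle 2 */
	base[0xAAA] = 0xA0;	/* program setup  */
	base[off]   = datum;	/* write the datum to its own address */
	/* now poll DQ6 (see the previous sketch) before the next command */
}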
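
Finally, a sketch of the buffered-programming command flow that do_write_buffer() follows (unlock, 0x25 Write Buffer Load at the target sector, word count encoded as N-1, the data words, then 0x29 Write Buffer Program Confirm). It assumes a single x16 chip in word mode (unlock addresses 0x555/0x2AA) and that the caller has already checked the range does not cross a write-buffer boundary; names and widths are illustrative only:

#include <stddef.h>
#include <stdint.h>

void amd_buffer_program(volatile uint16_t *base, size_t word_off,
			const uint16_t *src, size_t nwords)
{
	volatile uint16_t *sector = base + word_off;	/* command address */
	size_t i;

	base[0x555] = 0xAA;			/* unlock cycle 1               */
	base[0x2AA] = 0x55;			/* unlock cycle 2               */
	*sector = 0x25;				/* Write Buffer Load            */
	*sector = (uint16_t)(nwords - 1);	/* word count, encoded as N-1   */
	for (i = 0; i < nwords; i++)		/* fill the data latches        */
		base[word_off + i] = src[i];
	*sector = 0x29;				/* Write Buffer Program Confirm */
	/* poll for completion as in the first sketch before the next command */
}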