2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
7 * $Id: cfi_cmdset_0001.c,v 1.126 2003/06/23 07:45:48 dwmw2 Exp $
10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.)
13 * - scalability vs code size is completely set at compile-time
14 * (see include/linux/mtd/cfi.h for selection)
15 * - optimized write buffer method
16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17 * - reworked lock/unlock/erase support for var size flash
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
26 #include <asm/byteorder.h>
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/mtd/map.h>
33 #include <linux/mtd/mtd.h>
34 #include <linux/mtd/compatmac.h>
35 #include <linux/mtd/cfi.h>
37 // debugging, turns off buffer write mode if set to 1
38 #define FORCE_WORD_WRITE 0
40 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
41 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
42 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
43 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
44 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
45 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
46 static void cfi_intelext_sync (struct mtd_info *);
47 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
48 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
49 static int cfi_intelext_suspend (struct mtd_info *);
50 static void cfi_intelext_resume (struct mtd_info *);
52 static void cfi_intelext_destroy(struct mtd_info *);
54 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
56 static struct mtd_info *cfi_intelext_setup (struct map_info *);
58 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
59 size_t *retlen, u_char **mtdbuf);
60 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
65 * *********** SETUP AND PROBE BITS ***********
/* Chip-driver descriptor registered with the MTD core for the Intel
 * Extended (0x0001) command set.  .probe is NULL because chips reach
 * this driver only via cfi_cmdset_0001(), never by direct probing.
 * NOTE(review): closing "};" not visible in this listing — lines appear
 * to have been dropped during extraction; confirm against upstream. */
68 static struct mtd_chip_driver cfi_intelext_chipdrv = {
69 .probe = NULL, /* Not usable directly */
70 .destroy = cfi_intelext_destroy,
71 .name = "cfi_cmdset_0001",
75 /* #define DEBUG_LOCK_BITS */
76 /* #define DEBUG_CFI_FEATURES */
78 #ifdef DEBUG_CFI_FEATURES
/* Debug helper (compiled only under DEBUG_CFI_FEATURES): dump the
 * decoded Intel extended query table — feature bits, suspend-command
 * support, block status register mask and Vcc/Vpp optima — to the log.
 * Unknown set bits are reported individually so new silicon features
 * show up in the output. */
79 static void cfi_tell_features(struct cfi_pri_intelext *extp)
82 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
83 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
84 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
85 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
86 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
87 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
88 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
89 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
90 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
91 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
/* Bits 0-8 were decoded above; report any remaining set bits as unknown. */
92 for (i=9; i<32; i++) {
93 if (extp->FeatureSupport & (1<<i))
94 printk(" - Unknown Bit %X: supported\n", i);
97 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
98 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
/* NOTE(review): the loop header for this unknown-bit scan is missing
 * from the listing (original lines 99-ish dropped). */
100 if (extp->SuspendCmdSupport & (1<<i))
101 printk(" - Unknown Bit %X: supported\n", i);
104 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
105 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
106 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
107 for (i=2; i<16; i++) {
108 if (extp->BlkStatusRegMask & (1<<i))
109 printk(" - Unknown Bit %X Active: yes\n",i);
/* Voltages are encoded BCD-style: high byte = volts, low nibble = tenths. */
112 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
113 extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
114 if (extp->VppOptimal)
115 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
116 extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
120 /* This routine is made available to other mtd code via
121 * inter_module_register. It must only be accessed through
122 * inter_module_get which will bump the use count of this module. The
123 * addresses passed back in cfi are valid as long as the use count of
124 * this module is non-zero, i.e. between inter_module_get and
125 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
/* Entry point for attaching an Intel Extended (0x0001) command-set chip.
 * For a genuine CFI chip: switch to Query mode, read and byteswap the
 * extended query table into a kmalloc'd cfi_pri_intelext, validate its
 * version, stash it in cfi->cmdset_priv, initialise per-chip timeouts
 * from the CFI query data, return the chip to read-array mode, and hand
 * off to cfi_intelext_setup() to build the mtd_info. */
127 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
129 struct cfi_private *cfi = map->fldrv_priv;
131 __u32 base = cfi->chips[0].start;
133 if (cfi->cfi_mode == CFI_MODE_CFI) {
135 * It's a real CFI chip, not one for which the probe
136 * routine faked a CFI structure. So we read the feature
/* 'primary' selects between the primary (P_ADR) and alternate (A_ADR)
 * extended query table addresses from the CFI ident block. */
139 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
140 struct cfi_pri_intelext *extp;
/* One query byte per device address step = interleave * device width. */
141 int ofs_factor = cfi->interleave * cfi->device_type;
143 //printk(" Intel/Sharp Extended Query Table at 0x%4.4X\n", adr);
147 /* Switch it into Query Mode */
148 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
150 extp = kmalloc(sizeof(*extp), GFP_KERNEL);
/* NOTE(review): the NULL check guarding this error message is missing
 * from the listing — lines appear dropped; confirm against upstream. */
152 printk(KERN_ERR "Failed to allocate memory\n");
156 /* Read in the Extended Query Table */
157 for (i=0; i<sizeof(*extp); i++) {
158 ((unsigned char *)extp)[i] =
159 cfi_read_query(map, (base+((adr+i)*ofs_factor)));
/* Only extended-query versions 1.0 through 1.3 are understood. */
162 if (extp->MajorVersion != '1' ||
163 (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
164 printk(KERN_WARNING " Unknown IntelExt Extended Query "
165 "version %c.%c.\n", extp->MajorVersion,
171 /* Do some byteswapping if necessary */
172 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
173 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
174 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
176 #ifdef DEBUG_CFI_FEATURES
177 /* Tell the user about it in lots of lovely detail */
178 cfi_tell_features(extp);
/* SuspendCmdSupport bit 0 = "program allowed during erase suspend". */
181 if(extp->SuspendCmdSupport & 1) {
182 //#define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
183 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
184 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
185 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
186 "erase on write disabled.\n");
187 extp->SuspendCmdSupport &= ~1;
189 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
192 /* Install our own private info structure */
193 cfi->cmdset_priv = extp;
/* Typical timeouts from the CFI query are stored as log2 values. */
196 for (i=0; i< cfi->numchips; i++) {
197 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
198 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
199 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
200 cfi->chips[i].ref_point_counter = 0;
203 map->fldrv = &cfi_intelext_chipdrv;
205 /* Make sure it's in read mode */
206 cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
207 return cfi_intelext_setup(map);
/* Build and populate the mtd_info for the probed chip set: compute the
 * total size, translate the CFI erase-region geometry into
 * mtd->eraseregions (replicated per interleaved chip), pick the largest
 * region erasesize as mtd->erasesize, and install the method pointers.
 * Returns the mtd_info, or NULL after freeing partial allocations on
 * failure (error-path labels not visible in this listing). */
210 static struct mtd_info *cfi_intelext_setup(struct map_info *map)
212 struct cfi_private *cfi = map->fldrv_priv;
213 struct mtd_info *mtd;
214 unsigned long offset = 0;
/* Per-chip size in bytes, scaled by interleave; DevSize is log2. */
216 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
218 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
219 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
/* NOTE(review): the NULL check guarding this error message is missing
 * from the listing; confirm against upstream. */
222 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
226 memset(mtd, 0, sizeof(*mtd));
228 mtd->type = MTD_NORFLASH;
229 mtd->size = devsize * cfi->numchips;
231 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
232 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
233 * mtd->numeraseregions, GFP_KERNEL);
234 if (!mtd->eraseregions) {
235 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
/* EraseRegionInfo packs block size (in units of 256 bytes) in the high
 * 16 bits and (block count - 1) in the low 16 bits. */
239 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
240 unsigned long ernum, ersize;
241 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
242 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
/* mtd->erasesize ends up as the largest region's block size. */
244 if (mtd->erasesize < ersize) {
245 mtd->erasesize = ersize;
/* Replicate this region's geometry once per interleaved chip. */
247 for (j=0; j<cfi->numchips; j++) {
248 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
249 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
250 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
252 offset += (ersize * ernum);
/* Sanity check: regions must exactly tile the device. */
255 if (offset != devsize) {
257 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
261 for (i=0; i<mtd->numeraseregions;i++){
262 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
263 i,mtd->eraseregions[i].offset,
264 mtd->eraseregions[i].erasesize,
265 mtd->eraseregions[i].numblocks);
268 /* Also select the correct geometry setup too */
269 mtd->erase = cfi_intelext_erase_varsize;
270 mtd->read = cfi_intelext_read;
/* point/unpoint (direct mapped access) only work on linear maps. */
272 if (map_is_linear(map)) {
273 mtd->point = cfi_intelext_point;
274 mtd->unpoint = cfi_intelext_unpoint;
/* Prefer buffered writes when the chip advertises a buffer-write
 * timeout, unless FORCE_WORD_WRITE debugging is enabled. */
277 if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) {
278 printk(KERN_INFO "Using buffer write method\n" );
279 mtd->write = cfi_intelext_write_buffers;
281 printk(KERN_INFO "Using word write method\n" );
282 mtd->write = cfi_intelext_write_words;
284 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
285 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
286 mtd->sync = cfi_intelext_sync;
287 mtd->lock = cfi_intelext_lock;
288 mtd->unlock = cfi_intelext_unlock;
289 mtd->suspend = cfi_intelext_suspend;
290 mtd->resume = cfi_intelext_resume;
291 mtd->flags = MTD_CAP_NORFLASH;
292 map->fldrv = &cfi_intelext_chipdrv;
293 mtd->name = map->name;
294 __module_get(THIS_MODULE);
/* Error unwind: free whatever was allocated before failing. */
299 if(mtd->eraseregions)
300 kfree(mtd->eraseregions);
303 kfree(cfi->cmdset_priv);
309 * *********** CHIP ACCESS FUNCTIONS ***********
/* Acquire exclusive use of one flash chip for operation 'mode'
 * (FL_READY, FL_POINT, FL_WRITING, ...).  Must be called with
 * chip->mutex held; may drop and retake it while waiting.  If the chip
 * is mid-erase and the hardware supports erase suspend (and, for
 * writes, program-during-suspend), the erase is suspended so the new
 * operation can proceed; otherwise the caller sleeps on chip->wq until
 * the chip is free.  Returns 0 on success, negative error on timeout. */
312 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
314 DECLARE_WAITQUEUE(wait, current);
315 struct cfi_private *cfi = map->fldrv_priv;
/* status_OK = "WSM ready" bit (SR.7), replicated across interleave. */
316 cfi_word status, status_OK = CMD(0x80);
318 struct cfi_pri_intelext *cfip = (struct cfi_pri_intelext *)cfi->cmdset_priv;
321 timeo = jiffies + HZ;
323 switch (chip->state) {
/* Chip busy in status mode: poll SR.7 until ready or timeout. */
327 status = cfi_read(map, adr);
328 if ((status & status_OK) == status_OK)
331 if (time_after(jiffies, timeo)) {
332 printk(KERN_ERR "Waiting for chip to be ready timed out. Status %llx\n",
334 spin_unlock(chip->mutex);
337 spin_unlock(chip->mutex);
339 spin_lock(chip->mutex);
340 /* Someone else might have been playing with it. */
/* Erase in progress: only suspend it if the chip supports erase
 * suspend (FeatureSupport bit 1) and the requested mode is read/point,
 * or write when program-during-erase-suspend is advertised. */
350 if (!(cfip->FeatureSupport & 2) ||
351 !(mode == FL_READY || mode == FL_POINT ||
352 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
/* 0xB0 = Erase Suspend command. */
357 cfi_write(map, CMD(0xB0), adr);
359 /* If the flash has finished erasing, then 'erase suspend'
360 * appears to make some (28F320) flash devices switch to
361 * 'read' mode. Make sure that we switch to 'read status'
362 * mode so we get the right data. --rmk
364 cfi_write(map, CMD(0x70), adr);
365 chip->oldstate = FL_ERASING;
366 chip->state = FL_ERASE_SUSPENDING;
367 chip->erase_suspended = 1;
/* Poll for the suspend to take effect (SR.7 set). */
369 status = cfi_read(map, adr);
370 if ((status & status_OK) == status_OK)
373 if (time_after(jiffies, timeo)) {
374 /* Urgh. Resume and pretend we weren't here. */
375 cfi_write(map, CMD(0xd0), adr);
376 /* Make sure we're in 'read status' mode if it had finished */
377 cfi_write(map, CMD(0x70), adr);
378 chip->state = FL_ERASING;
379 chip->oldstate = FL_READY;
380 printk(KERN_ERR "Chip not ready after erase "
381 "suspended: status = 0x%x\n", status);
385 spin_unlock(chip->mutex);
387 spin_lock(chip->mutex);
388 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
389 So we can just loop here. */
391 chip->state = FL_STATUS;
395 /* Only if there's no operation suspended... */
396 if (mode == FL_READY && chip->oldstate == FL_READY)
/* Chip busy with an incompatible operation: sleep on the chip's wait
 * queue (dropping the mutex) and retry from the top when woken. */
401 set_current_state(TASK_UNINTERRUPTIBLE);
402 add_wait_queue(&chip->wq, &wait);
403 spin_unlock(chip->mutex);
405 remove_wait_queue(&chip->wq, &wait);
406 spin_lock(chip->mutex);
/* Release a chip acquired by get_chip().  Called with chip->mutex held.
 * If an erase was suspended on behalf of the caller, resume it (0xD0)
 * and re-enter read-status mode; then wake any waiters. */
411 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
413 struct cfi_private *cfi = map->fldrv_priv;
415 switch(chip->oldstate) {
417 chip->state = chip->oldstate;
418 /* What if one interleaved chip has finished and the
419 other hasn't? The old code would leave the finished
420 one in READY mode. That's bad, and caused -EROFS
421 errors to be returned from do_erase_oneblock because
422 that's the only bit it checked for at the time.
423 As the state machine appears to explicitly allow
424 sending the 0x70 (Read Status) command to an erasing
425 chip and expecting it to be ignored, that's what we
/* 0xD0 = Erase Resume, 0x70 = Read Status Register. */
427 cfi_write(map, CMD(0xd0), adr);
428 cfi_write(map, CMD(0x70), adr);
429 chip->oldstate = FL_READY;
430 chip->state = FL_ERASING;
435 /* We should really make set_vpp() count, rather than doing this */
439 printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
/* Put one chip into read-array mode and pin it in FL_POINT state so the
 * caller may access the mapped flash directly.  A reference count
 * (ref_point_counter) tracks nested point/unpoint pairs per chip. */
444 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
446 unsigned long cmd_addr;
447 struct cfi_private *cfi = map->fldrv_priv;
452 /* Ensure cmd read/writes are aligned. */
453 cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1);
455 spin_lock(chip->mutex);
457 ret = get_chip(map, chip, cmd_addr, FL_POINT);
/* 0xFF = Read Array command, only needed if not already reading. */
460 if (chip->state != FL_POINT && chip->state != FL_READY)
461 cfi_write(map, CMD(0xff), cmd_addr);
463 chip->state = FL_POINT;
464 chip->ref_point_counter++;
466 spin_unlock(chip->mutex);
/* MTD point() method: hand back a direct pointer into the linearly
 * mapped flash for [from, from+len) and lock each covered chip into
 * FL_POINT state.  Requires a linear map (map->virt); fails with
 * -EINVAL otherwise. */
471 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
473 struct map_info *map = mtd->priv;
474 struct cfi_private *cfi = map->fldrv_priv;
479 if (from + len > mtd->size)
482 *mtdbuf = (void *)map->virt + from;
484 return -EINVAL; /* can not point this region */
487 /* Now lock the chip(s) to POINT state */
489 /* ofs: offset within the first chip that the first read should start */
490 chipnum = (from >> cfi->chipshift);
491 ofs = from - (chipnum << cfi->chipshift);
494 unsigned long thislen;
496 if (chipnum >= cfi->numchips)
/* Clamp this iteration's length to the end of the current chip. */
499 if ((len + ofs -1) >> cfi->chipshift)
500 thislen = (1<<cfi->chipshift) - ofs;
504 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
/* MTD unpoint() method: drop the FL_POINT reference taken by point()
 * on each chip covering [from, from+len); when a chip's reference
 * count hits zero it returns to FL_READY and is released. */
517 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
519 struct map_info *map = mtd->priv;
520 struct cfi_private *cfi = map->fldrv_priv;
524 /* Now unlock the chip(s) POINT state */
526 /* ofs: offset within the first chip that the first read should start */
527 chipnum = (from >> cfi->chipshift);
528 ofs = from - (chipnum << cfi->chipshift);
531 unsigned long thislen;
534 chip = &cfi->chips[chipnum];
535 if (chipnum >= cfi->numchips)
/* Clamp this iteration's length to the end of the current chip. */
538 if ((len + ofs -1) >> cfi->chipshift)
539 thislen = (1<<cfi->chipshift) - ofs;
543 spin_lock(chip->mutex);
544 if (chip->state == FL_POINT) {
545 chip->ref_point_counter--;
546 if(chip->ref_point_counter == 0)
547 chip->state = FL_READY;
549 printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
551 put_chip(map, chip, chip->start);
552 spin_unlock(chip->mutex);
/* Read 'len' bytes at 'adr' from a single chip into 'buf'.  Acquires
 * the chip, switches it to read-array mode if necessary, copies via
 * the map layer, then releases the chip. */
560 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
562 unsigned long cmd_addr;
563 struct cfi_private *cfi = map->fldrv_priv;
568 /* Ensure cmd read/writes are aligned. */
569 cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1);
571 spin_lock(chip->mutex);
572 ret = get_chip(map, chip, cmd_addr, FL_READY);
574 spin_unlock(chip->mutex);
/* 0xFF = Read Array command. */
578 if (chip->state != FL_POINT && chip->state != FL_READY) {
579 cfi_write(map, CMD(0xff), cmd_addr);
581 chip->state = FL_READY;
584 map_copy_from(map, buf, adr, len);
586 put_chip(map, chip, cmd_addr);
588 spin_unlock(chip->mutex);
/* MTD read() method: split [from, from+len) on chip boundaries and
 * read each piece via do_read_onechip(), accumulating *retlen. */
592 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
594 struct map_info *map = mtd->priv;
595 struct cfi_private *cfi = map->fldrv_priv;
600 /* ofs: offset within the first chip that the first read should start */
601 chipnum = (from >> cfi->chipshift);
602 ofs = from - (chipnum << cfi->chipshift);
607 unsigned long thislen;
609 if (chipnum >= cfi->numchips)
/* Clamp this iteration's length to the end of the current chip. */
612 if ((len + ofs -1) >> cfi->chipshift)
613 thislen = (1<<cfi->chipshift) - ofs;
617 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/* Common worker for reading protection registers (factory or user).
 * The registers live at extp->ProtRegAddr in JEDEC-query address
 * space; base_offst selects the factory/user partition and reg_sz is
 * the per-chip register size, so 'from' is translated into a
 * (chip, offset) pair and read byte-by-byte in 0x90 (Read ID/query)
 * mode. */
631 static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
633 struct map_info *map = mtd->priv;
634 struct cfi_private *cfi = map->fldrv_priv;
635 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
637 int ofs_factor = cfi->interleave * cfi->device_type;
642 chip_num = ((unsigned int)from/reg_sz);
643 offst = from - (reg_sz*chip_num)+base_offst;
646 /* Calculate which chip & protection register offset we need */
648 if (chip_num >= cfi->numchips)
651 chip = &cfi->chips[chip_num];
653 spin_lock(chip->mutex);
654 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
656 spin_unlock(chip->mutex);
/* Return bytes already copied if any, else the error code. */
657 return (len-count)?:ret;
/* 0x90 = Read Identifier/Query command. */
660 if (chip->state != FL_JEDEC_QUERY) {
661 cfi_write(map, CMD(0x90), chip->start);
662 chip->state = FL_JEDEC_QUERY;
/* Copy until the request is satisfied or this chip's register ends. */
665 while (count && ((offst-base_offst) < reg_sz)) {
666 *buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
672 put_chip(map, chip, chip->start);
673 spin_unlock(chip->mutex);
675 /* Move on to the next chip */
/* MTD read_user_prot_reg() method: read from the user-programmable
 * protection register.  The user partition starts after the factory
 * one, hence base_offst = factory register size. */
684 static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
686 struct map_info *map = mtd->priv;
687 struct cfi_private *cfi = map->fldrv_priv;
688 struct cfi_pri_intelext *extp=cfi->cmdset_priv;
689 int base_offst,reg_sz;
691 /* Check that we actually have some protection registers */
692 if(!(extp->FeatureSupport&64)){
693 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
/* Register sizes are stored as log2 in the extended query table. */
697 base_offst=(1<<extp->FactProtRegSize);
698 reg_sz=(1<<extp->UserProtRegSize);
700 return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
/* MTD read_fact_prot_reg() method: read from the factory-programmed
 * protection register (base_offst 0 — its assignment line is not
 * visible in this listing; presumably set to 0 upstream). */
703 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
705 struct map_info *map = mtd->priv;
706 struct cfi_private *cfi = map->fldrv_priv;
707 struct cfi_pri_intelext *extp=cfi->cmdset_priv;
708 int base_offst,reg_sz;
710 /* Check that we actually have some protection registers */
711 if(!(extp->FeatureSupport&64)){
712 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
717 reg_sz=(1<<extp->FactProtRegSize);
719 return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
/* Program a single bus-width word 'datum' at 'adr' on one chip:
 * issue Word Program (0x40), poll SR.7 for completion with an adaptive
 * per-chip delay, then check SR.1 for a locked-block failure.  Sleeps
 * on chip->wq if the write gets suspended in the meantime. */
723 static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, cfi_word datum)
725 struct cfi_private *cfi = map->fldrv_priv;
726 cfi_word status, status_OK;
732 /* Let's determine this according to the interleave only once */
733 status_OK = CMD(0x80);
735 spin_lock(chip->mutex);
736 ret = get_chip(map, chip, adr, FL_WRITING);
738 spin_unlock(chip->mutex);
/* 0x40 = Word Program command, followed by the data word. */
743 cfi_write(map, CMD(0x40), adr);
744 cfi_write(map, datum, adr);
745 chip->state = FL_WRITING;
/* Drop the lock for the typical programming time before polling. */
747 spin_unlock(chip->mutex);
748 cfi_udelay(chip->word_write_time);
749 spin_lock(chip->mutex);
751 timeo = jiffies + (HZ/2);
754 if (chip->state != FL_WRITING) {
755 /* Someone's suspended the write. Sleep */
756 DECLARE_WAITQUEUE(wait, current);
758 set_current_state(TASK_UNINTERRUPTIBLE);
759 add_wait_queue(&chip->wq, &wait);
760 spin_unlock(chip->mutex);
762 remove_wait_queue(&chip->wq, &wait);
763 timeo = jiffies + (HZ / 2); /* FIXME */
764 spin_lock(chip->mutex);
768 status = cfi_read(map, adr);
769 if ((status & status_OK) == status_OK)
772 /* OK Still waiting */
773 if (time_after(jiffies, timeo)) {
774 chip->state = FL_STATUS;
775 printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
780 /* Latency issues. Drop the lock, wait a while and retry */
781 spin_unlock(chip->mutex);
784 spin_lock(chip->mutex);
/* Adaptive timing: shrink the delay when the chip finished early
 * (never below 1), grow it when we had to keep polling. */
787 chip->word_write_time--;
788 if (!chip->word_write_time)
789 chip->word_write_time++;
792 chip->word_write_time++;
794 /* Done and happy. */
795 chip->state = FL_STATUS;
796 /* check for lock bit */
/* SR.1 set = attempted program to a locked block; 0x50 clears SR. */
797 if (status & CMD(0x02)) {
799 cfi_write(map, CMD(0x50), adr);
800 /* put back into read status register mode */
801 cfi_write(map, CMD(0x70), adr);
805 put_chip(map, chip, adr);
806 spin_unlock(chip->mutex);
/* MTD write() method (word-at-a-time variant): handle an unaligned
 * head by padding with 0xFF into a temporary word, write the aligned
 * middle one bus-width word at a time via do_write_oneword(), then
 * handle an unaligned tail the same way — crossing chip boundaries as
 * needed.  Padding with 0xFF is safe because programming can only
 * clear bits. */
812 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
814 struct map_info *map = mtd->priv;
815 struct cfi_private *cfi = map->fldrv_priv;
824 chipnum = to >> cfi->chipshift;
825 ofs = to - (chipnum << cfi->chipshift);
827 /* If it's not bus-aligned, do the first byte write */
828 if (ofs & (CFIDEV_BUSWIDTH-1)) {
829 unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
830 int gap = ofs - bus_ofs;
/* Fill the head word: copy caller bytes, pad the rest with 0xFF
 * (padding lines not all visible in this listing). */
837 while (len && i < CFIDEV_BUSWIDTH)
838 tmp_buf[i++] = buf[n++], len--;
839 while (i < CFIDEV_BUSWIDTH)
842 if (cfi_buswidth_is_2()) {
843 datum = *(__u16*)tmp_buf;
844 } else if (cfi_buswidth_is_4()) {
845 datum = *(__u32*)tmp_buf;
846 } else if (cfi_buswidth_is_8()) {
847 datum = *(__u64*)tmp_buf;
849 return -EINVAL; /* should never happen, but be safe */
852 ret = do_write_oneword(map, &cfi->chips[chipnum],
/* Advance to the next chip when the offset walks off this one. */
861 if (ofs >> cfi->chipshift) {
864 if (chipnum == cfi->numchips)
/* Aligned middle section: one full bus-width word per iteration. */
869 while(len >= CFIDEV_BUSWIDTH) {
872 if (cfi_buswidth_is_1()) {
874 } else if (cfi_buswidth_is_2()) {
875 datum = *(__u16*)buf;
876 } else if (cfi_buswidth_is_4()) {
877 datum = *(__u32*)buf;
878 } else if (cfi_buswidth_is_8()) {
879 datum = *(__u64*)buf;
884 ret = do_write_oneword(map, &cfi->chips[chipnum],
889 ofs += CFIDEV_BUSWIDTH;
890 buf += CFIDEV_BUSWIDTH;
891 (*retlen) += CFIDEV_BUSWIDTH;
892 len -= CFIDEV_BUSWIDTH;
894 if (ofs >> cfi->chipshift) {
897 if (chipnum == cfi->numchips)
/* Unaligned tail: pad the final partial word with 0xFF and write it. */
902 if (len & (CFIDEV_BUSWIDTH-1)) {
908 tmp_buf[i++] = buf[n++];
909 while (i < CFIDEV_BUSWIDTH)
912 if (cfi_buswidth_is_2()) {
913 datum = *(__u16*)tmp_buf;
914 } else if (cfi_buswidth_is_4()) {
915 datum = *(__u32*)tmp_buf;
916 } else if (cfi_buswidth_is_8()) {
917 datum = *(__u64*)tmp_buf;
919 return -EINVAL; /* should never happen, but be safe */
922 ret = do_write_oneword(map, &cfi->chips[chipnum],
/* Program up to one write-buffer's worth of data at 'adr' on one chip
 * using the Write-to-Buffer command sequence: clear stale SR.4/SR.5
 * error bits, issue 0xE8 and wait for buffer availability (XSR.7),
 * write the word count, stream the data words (padding a partial tail
 * word with 0xFF), confirm with 0xD0, then poll SR.7 for completion
 * with an adaptive delay and finally check SR.1 for a lock failure.
 * 'len' must not cross a write-buffer boundary (caller's job). */
934 static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
935 unsigned long adr, const u_char *buf, int len)
937 struct cfi_private *cfi = map->fldrv_priv;
938 cfi_word status, status_OK;
939 unsigned long cmd_adr, timeo;
940 int wbufsize, z, ret=0, bytes, words;
942 wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
/* Commands go to the write-buffer-aligned address. */
944 cmd_adr = adr & ~(wbufsize-1);
946 /* Let's determine this according to the interleave only once */
947 status_OK = CMD(0x80);
949 spin_lock(chip->mutex);
950 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
952 spin_unlock(chip->mutex);
956 if (chip->state != FL_STATUS)
957 cfi_write(map, CMD(0x70), cmd_adr);
959 status = cfi_read(map, cmd_adr);
961 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
962 [...], the device will not accept any more Write to Buffer commands".
963 So we must check here and reset those bits if they're set. Otherwise
964 we're just pissing in the wind */
965 if (status & CMD(0x30)) {
966 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %x). Clearing.\n", status);
967 cfi_write(map, CMD(0x50), cmd_adr);
968 cfi_write(map, CMD(0x70), cmd_adr);
971 chip->state = FL_WRITING_TO_BUFFER;
/* 0xE8 = Write to Buffer; poll XSR.7 until the buffer is available. */
975 cfi_write(map, CMD(0xe8), cmd_adr);
977 status = cfi_read(map, cmd_adr);
978 if ((status & status_OK) == status_OK)
981 spin_unlock(chip->mutex);
983 spin_lock(chip->mutex);
986 /* Argh. Not ready for write to buffer */
987 cfi_write(map, CMD(0x70), cmd_adr);
988 chip->state = FL_STATUS;
989 printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %llx, status = %llx\n", (__u64)status, (__u64)cfi_read(map, cmd_adr));
990 /* Odd. Clear status bits */
991 cfi_write(map, CMD(0x50), cmd_adr);
992 cfi_write(map, CMD(0x70), cmd_adr);
998 /* Write length of data to come */
/* Count field is (number of words - 1); "words - !bytes" accounts for
 * a partial tail word being written as an extra word. */
999 bytes = len & (CFIDEV_BUSWIDTH-1);
1000 words = len / CFIDEV_BUSWIDTH;
1001 cfi_write(map, CMD(words - !bytes), cmd_adr );
/* Stream the whole bus-width words into the buffer. */
1005 while(z < words * CFIDEV_BUSWIDTH) {
1006 if (cfi_buswidth_is_1()) {
1009 map_write8 (map, *b++, adr+z);
1010 buf = (const u_char *)b;
1011 } else if (cfi_buswidth_is_2()) {
1012 u16 *b = (u16 *)buf;
1014 map_write16 (map, *b++, adr+z);
1015 buf = (const u_char *)b;
1016 } else if (cfi_buswidth_is_4()) {
1017 u32 *b = (u32 *)buf;
1019 map_write32 (map, *b++, adr+z);
1020 buf = (const u_char *)b;
1021 } else if (cfi_buswidth_is_8()) {
1022 u64 *b = (u64 *)buf;
1024 map_write64 (map, *b++, adr+z);
1025 buf = (const u_char *)b;
1030 z += CFIDEV_BUSWIDTH;
/* Partial tail word: pad with 0xFF (programming only clears bits). */
1034 u_char tmp_buf[8], *tmp_p = tmp_buf;
1037 tmp_buf[i++] = buf[n++];
1038 while (i < CFIDEV_BUSWIDTH)
1039 tmp_buf[i++] = 0xff;
1040 if (cfi_buswidth_is_2()) {
1041 u16 *b = (u16 *)tmp_p;
1043 map_write16 (map, *b++, adr+z);
1044 tmp_p = (u_char *)b;
1045 } else if (cfi_buswidth_is_4()) {
1046 u32 *b = (u32 *)tmp_p;
1048 map_write32 (map, *b++, adr+z);
1049 tmp_p = (u_char *)b;
1050 } else if (cfi_buswidth_is_8()) {
1051 u64 *b = (u64 *)tmp_p;
1053 map_write64 (map, *b++, adr+z);
1054 tmp_p = (u_char *)b;
/* 0xD0 = Confirm: start programming the buffer contents. */
1061 cfi_write(map, CMD(0xd0), cmd_adr);
1062 chip->state = FL_WRITING;
1064 spin_unlock(chip->mutex);
1065 cfi_udelay(chip->buffer_write_time);
1066 spin_lock(chip->mutex);
1068 timeo = jiffies + (HZ/2);
1071 if (chip->state != FL_WRITING) {
1072 /* Someone's suspended the write. Sleep */
1073 DECLARE_WAITQUEUE(wait, current);
1074 set_current_state(TASK_UNINTERRUPTIBLE);
1075 add_wait_queue(&chip->wq, &wait);
1076 spin_unlock(chip->mutex);
1078 remove_wait_queue(&chip->wq, &wait);
1079 timeo = jiffies + (HZ / 2); /* FIXME */
1080 spin_lock(chip->mutex);
1084 status = cfi_read(map, cmd_adr);
1085 if ((status & status_OK) == status_OK)
1088 /* OK Still waiting */
1089 if (time_after(jiffies, timeo)) {
1090 chip->state = FL_STATUS;
1091 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1096 /* Latency issues. Drop the lock, wait a while and retry */
1097 spin_unlock(chip->mutex);
1100 spin_lock(chip->mutex);
/* Adaptive timing, as in do_write_oneword(). */
1103 chip->buffer_write_time--;
1104 if (!chip->buffer_write_time)
1105 chip->buffer_write_time++;
1108 chip->buffer_write_time++;
1110 /* Done and happy. */
1111 chip->state = FL_STATUS;
1113 /* check for lock bit */
1114 if (status & CMD(0x02)) {
1116 cfi_write(map, CMD(0x50), cmd_adr);
1117 /* put back into read status register mode */
1118 cfi_write(map, CMD(0x70), adr);
1123 put_chip(map, chip, cmd_adr);
1124 spin_unlock(chip->mutex);
/* MTD write() method (buffered variant): align the head by delegating
 * the first few bytes to cfi_intelext_write_words(), then feed the
 * rest to do_write_buffer() in chunks that never cross a write-buffer
 * boundary, advancing across chips as needed. */
1128 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1129 size_t len, size_t *retlen, const u_char *buf)
1131 struct map_info *map = mtd->priv;
1132 struct cfi_private *cfi = map->fldrv_priv;
1133 int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
1142 chipnum = to >> cfi->chipshift;
1143 ofs = to - (chipnum << cfi->chipshift);
1145 /* If it's not bus-aligned, do the first word write */
1146 if (ofs & (CFIDEV_BUSWIDTH-1)) {
/* (-ofs) & (bw-1) = bytes needed to reach the next aligned address. */
1147 size_t local_len = (-ofs)&(CFIDEV_BUSWIDTH-1);
1148 if (local_len > len)
1150 ret = cfi_intelext_write_words(mtd, to, local_len,
1158 if (ofs >> cfi->chipshift) {
1161 if (chipnum == cfi->numchips)
1166 /* Write buffer is worth it only if more than one word to write... */
1168 /* We must not cross write block boundaries */
1169 int size = wbufsize - (ofs & (wbufsize-1));
1173 ret = do_write_buffer(map, &cfi->chips[chipnum],
/* Advance to the next chip when the offset walks off this one. */
1183 if (ofs >> cfi->chipshift) {
1186 if (chipnum == cfi->numchips)
/* Callback type applied to each erase block by the varsize iterator;
 * 'thunk' is opaque caller context passed through unchanged. */
1193 typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
1194 unsigned long adr, void *thunk);
/* Apply 'frob' to every erase block in [ofs, ofs+len) on a device with
 * variable-size erase regions.  Validates that both ends of the range
 * are aligned to the erasesize of the region they fall in, then walks
 * block by block, switching region (and chip) as boundaries are
 * crossed.  Returns 0 or the first callback/validation error. */
1196 static int cfi_intelext_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
1197 loff_t ofs, size_t len, void *thunk)
1199 struct map_info *map = mtd->priv;
1200 struct cfi_private *cfi = map->fldrv_priv;
1202 int chipnum, ret = 0;
1204 struct mtd_erase_region_info *regions = mtd->eraseregions;
1206 if (ofs > mtd->size)
1209 if ((len + ofs) > mtd->size)
1212 /* Check that both start and end of the requested erase are
1213 * aligned with the erasesize at the appropriate addresses.
1218 /* Skip all erase regions which are ended before the start of
1219 the requested erase. Actually, to save on the calculations,
1220 we skip to the first erase region which starts after the
1221 start of the requested erase, and then go back one.
1224 while (i < mtd->numeraseregions && ofs >= regions[i].offset)
1228 /* OK, now i is pointing at the erase region in which this
1229 erase request starts. Check the start of the requested
1230 erase range is aligned with the erase size which is in
1234 if (ofs & (regions[i].erasesize-1))
1237 /* Remember the erase region we start on */
1240 /* Next, check that the end of the requested erase is aligned
1241 * with the erase region at that address.
1244 while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
1247 /* As before, drop back one to point at the region in which
1248 the address actually falls
1252 if ((ofs + len) & (regions[i].erasesize-1))
1255 chipnum = ofs >> cfi->chipshift;
1256 adr = ofs - (chipnum << cfi->chipshift);
/* Main loop: one callback invocation per erase block. */
1261 ret = (*frob)(map, &cfi->chips[chipnum], adr, thunk);
1266 adr += regions[i].erasesize;
1267 len -= regions[i].erasesize;
/* Advance the region index when we step past the current region's
 * end (compared within per-chip address space). */
1269 if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
1272 if (adr >> cfi->chipshift) {
1276 if (chipnum >= cfi->numchips)
/* Erase one block at 'adr' on one chip (varsize_frob_t callback):
 * clear the status register, issue Block Erase (0x20) + Confirm (0xD0),
 * sleep roughly half the typical erase time, then poll SR.7 — sleeping
 * on chip->wq whenever the erase is suspended, and extending the
 * timeout after each suspend/resume.  On completion the full status is
 * decoded: lock-bit, command-sequence, Vpp-low and erase-failure
 * conditions each map to a distinct error path, and a plain erase
 * failure (SR.5) is retried with a fresh timeout. */
1285 static int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1287 struct cfi_private *cfi = map->fldrv_priv;
1288 cfi_word status, status_OK;
1289 unsigned long timeo;
1291 DECLARE_WAITQUEUE(wait, current);
1296 /* Let's determine this according to the interleave only once */
1297 status_OK = CMD(0x80);
1300 spin_lock(chip->mutex);
1301 ret = get_chip(map, chip, adr, FL_ERASING);
1303 spin_unlock(chip->mutex);
1308 /* Clear the status register first */
1309 cfi_write(map, CMD(0x50), adr);
/* 0x20 = Block Erase, 0xD0 = Confirm. */
1312 cfi_write(map, CMD(0x20), adr);
1313 cfi_write(map, CMD(0xD0), adr);
1314 chip->state = FL_ERASING;
1315 chip->erase_suspended = 0;
/* Sleep for about half the typical erase time before first poll. */
1317 spin_unlock(chip->mutex);
1318 set_current_state(TASK_UNINTERRUPTIBLE);
1319 schedule_timeout((chip->erase_time*HZ)/(2*1000));
1320 spin_lock(chip->mutex);
1322 /* FIXME. Use a timer to check this, and return immediately. */
1323 /* Once the state machine's known to be working I'll do that */
1325 timeo = jiffies + (HZ*20);
1327 if (chip->state != FL_ERASING) {
1328 /* Someone's suspended the erase. Sleep */
1329 set_current_state(TASK_UNINTERRUPTIBLE);
1330 add_wait_queue(&chip->wq, &wait);
1331 spin_unlock(chip->mutex);
1333 remove_wait_queue(&chip->wq, &wait);
1334 spin_lock(chip->mutex);
1337 if (chip->erase_suspended) {
1338 /* This erase was suspended and resumed.
1339 Adjust the timeout */
1340 timeo = jiffies + (HZ*20); /* FIXME */
1341 chip->erase_suspended = 0;
1344 status = cfi_read(map, adr);
1345 if ((status & status_OK) == status_OK)
1348 /* OK Still waiting */
1349 if (time_after(jiffies, timeo)) {
1350 cfi_write(map, CMD(0x70), adr);
1351 chip->state = FL_STATUS;
1352 printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %llx, status = %llx.\n",
1353 adr, (__u64)status, (__u64)cfi_read(map, adr));
1354 /* Clear status bits */
1355 cfi_write(map, CMD(0x50), adr);
1356 cfi_write(map, CMD(0x70), adr);
1358 spin_unlock(chip->mutex);
1362 /* Latency issues. Drop the lock, wait a while and retry */
1363 spin_unlock(chip->mutex);
1364 set_current_state(TASK_UNINTERRUPTIBLE);
1365 schedule_timeout(1);
1366 spin_lock(chip->mutex);
1372 /* We've broken this before. It doesn't hurt to be safe */
1373 cfi_write(map, CMD(0x70), adr);
1374 chip->state = FL_STATUS;
1375 status = cfi_read(map, adr);
1377 /* check for lock bit */
/* 0x3a masks the error bits: SR.1 (lock), SR.3 (Vpp), SR.4/SR.5. */
1378 if (status & CMD(0x3a)) {
1379 unsigned char chipstatus = status;
/* For interleaved chips, OR the per-chip status bytes together so
 * any chip's error is visible in the merged value. */
1380 if (status != CMD(status & 0xff)) {
1382 for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
1383 chipstatus |= status >> (cfi->device_type * 8);
1385 printk(KERN_WARNING "Status is not identical for all chips: 0x%llx. Merging to give 0x%02x\n", (__u64)status, chipstatus);
1387 /* Reset the error bits */
1388 cfi_write(map, CMD(0x50), adr);
1389 cfi_write(map, CMD(0x70), adr);
/* SR.4+SR.5 together = improper command sequence. */
1391 if ((chipstatus & 0x30) == 0x30) {
1392 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%llx\n", (__u64)status);
1394 } else if (chipstatus & 0x02) {
1395 /* Protection bit set */
1397 } else if (chipstatus & 0x8) {
1399 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%llx\n", (__u64)status);
/* SR.5 alone = erase failed: retry the block with a fresh timeout. */
1401 } else if (chipstatus & 0x20) {
1403 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx. Retrying...\n", adr, (__u64)status);
1404 timeo = jiffies + HZ;
1405 chip->state = FL_STATUS;
1406 spin_unlock(chip->mutex);
1409 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx\n", adr, (__u64)status);
1415 spin_unlock(chip->mutex);
/*
 * MTD erase entry point for variable-size erase regions.  Walks the
 * requested range block-by-block via cfi_intelext_varsize_frob()
 * using do_erase_oneblock, then marks the request done and fires the
 * caller's completion callback.  (Setup of ofs/len from instr and the
 * error-return path live in elided lines of this listing.)
 */
1419 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1421 unsigned long ofs, len;
1427 ret = cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, 0);
1431 instr->state = MTD_ERASE_DONE;
1432 if (instr->callback)
1433 instr->callback(instr);
/*
 * Quiesce all chips: put every chip into FL_SYNCING (waiting via
 * get_chip() for in-progress operations to finish), then release them
 * all again.  Net effect: returns once no operation is pending.
 */
1438 static void cfi_intelext_sync (struct mtd_info *mtd)
1440 struct map_info *map = mtd->priv;
1441 struct cfi_private *cfi = map->fldrv_priv;
1443 struct flchip *chip;
/* First pass: claim each chip in turn; stop early if one fails. */
1446 for (i=0; !ret && i<cfi->numchips; i++) {
1447 chip = &cfi->chips[i];
1449 spin_lock(chip->mutex);
1450 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1453 chip->oldstate = chip->state;
1454 chip->state = FL_SYNCING;
1455 /* No need to wake_up() on this state change -
1456 * as the whole point is that nobody can do anything
1457 * with the chip now anyway.
1460 spin_unlock(chip->mutex);
1463 /* Unlock the chips again */
/* Second pass walks backwards over the chips already claimed. */
1465 for (i--; i >=0; i--) {
1466 chip = &cfi->chips[i];
1468 spin_lock(chip->mutex);
1470 if (chip->state == FL_SYNCING) {
1471 chip->state = chip->oldstate;
1474 spin_unlock(chip->mutex);
1478 #ifdef DEBUG_LOCK_BITS
/*
 * Debug helper (compiled only with DEBUG_LOCK_BITS): enter Read
 * Identifier mode (0x90), dump the per-block lock status word at
 * offset 2 within the block's query space, then reset to read-array
 * mode (0xFF).  Used as a frob callback around lock/unlock.
 */
1479 static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1481 struct cfi_private *cfi = map->fldrv_priv;
/* Query offsets scale by interleave * device width. */
1482 int ofs_factor = cfi->interleave * cfi->device_type;
1484 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1485 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1486 adr, cfi_read_query(map, adr+(2*ofs_factor)));
1487 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
/* Thunk tokens selecting lock vs. unlock in do_xxlock_oneblock(). */
1493 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1494 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
/*
 * Lock or unlock one block at 'adr', selected by 'thunk'
 * (DO_XXLOCK_ONEBLOCK_LOCK / _UNLOCK).  Called per-block through
 * cfi_intelext_varsize_frob().  Returns 0 on success (error paths are
 * in elided lines of this listing).
 */
1496 static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1498 struct cfi_private *cfi = map->fldrv_priv;
1499 cfi_word status, status_OK;
1500 unsigned long timeo = jiffies + HZ;
/* SR.7 (WSM ready) replicated across the interleave. */
1505 /* Let's determine this according to the interleave only once */
1506 status_OK = CMD(0x80);
1508 spin_lock(chip->mutex);
1509 ret = get_chip(map, chip, adr, FL_LOCKING);
1511 spin_unlock(chip->mutex);
/* Lock setup (0x60), then confirm: 0x01 = set lock bit,
 * 0xD0 = clear lock bits. */
1516 cfi_write(map, CMD(0x60), adr);
1518 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1519 cfi_write(map, CMD(0x01), adr);
1520 chip->state = FL_LOCKING;
1521 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1522 cfi_write(map, CMD(0xD0), adr);
1523 chip->state = FL_UNLOCKING;
/* Give the operation a moment before polling. */
1527 spin_unlock(chip->mutex);
1528 schedule_timeout(HZ);
1529 spin_lock(chip->mutex);
1531 /* FIXME. Use a timer to check this, and return immediately. */
1532 /* Once the state machine's known to be working I'll do that */
1534 timeo = jiffies + (HZ*20);
/* Poll until every interleaved chip reports SR.7 set. */
1537 status = cfi_read(map, adr);
1538 if ((status & status_OK) == status_OK)
1541 /* OK Still waiting */
1542 if (time_after(jiffies, timeo)) {
1543 cfi_write(map, CMD(0x70), adr);
1544 chip->state = FL_STATUS;
/* NOTE(review): message always says "unlock" even on the lock path —
 * cosmetic, but misleading in logs. */
1545 printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %llx, status = %llx.\n", (__u64)status, (__u64)cfi_read(map, adr));
1547 spin_unlock(chip->mutex);
1551 /* Latency issues. Drop the lock, wait a while and retry */
1552 spin_unlock(chip->mutex);
1554 spin_lock(chip->mutex);
1557 /* Done and happy. */
1558 chip->state = FL_STATUS;
1559 put_chip(map, chip, adr);
1560 spin_unlock(chip->mutex);
/*
 * MTD lock entry point: set the lock bit on every block in
 * [ofs, ofs+len) via do_xxlock_oneblock.  With DEBUG_LOCK_BITS the
 * per-block lock status is dumped before and after.
 */
1564 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1568 #ifdef DEBUG_LOCK_BITS
1569 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1570 __FUNCTION__, ofs, len);
1571 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1575 ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1576 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1578 #ifdef DEBUG_LOCK_BITS
1580 "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
1581 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
/*
 * MTD unlock entry point: clear the lock bits on every block in
 * [ofs, ofs+len) via do_xxlock_oneblock.  Mirrors cfi_intelext_lock()
 * with the UNLOCK thunk; DEBUG_LOCK_BITS adds before/after dumps.
 */
1588 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1592 #ifdef DEBUG_LOCK_BITS
1593 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1594 __FUNCTION__, ofs, len);
1595 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1599 ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1600 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1602 #ifdef DEBUG_LOCK_BITS
1603 printk(KERN_DEBUG "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
1604 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
/*
 * Power-management suspend: move every idle chip into
 * FL_PM_SUSPENDED.  If any chip cannot be suspended (busy in a state
 * not handled by the switch), the loop stops and the chips already
 * suspended are rolled back to their previous state.  Returns 0 on
 * success, nonzero on failure (set in elided lines).
 */
1611 static int cfi_intelext_suspend(struct mtd_info *mtd)
1613 struct map_info *map = mtd->priv;
1614 struct cfi_private *cfi = map->fldrv_priv;
1616 struct flchip *chip;
1619 for (i=0; !ret && i<cfi->numchips; i++) {
1620 chip = &cfi->chips[i];
1622 spin_lock(chip->mutex);
/* Only quiescent states may be suspended; other cases (and the
 * default busy path) are in elided lines. */
1624 switch (chip->state) {
1628 case FL_JEDEC_QUERY:
/* Guard: only suspend if nothing was stacked under oldstate. */
1629 if (chip->oldstate == FL_READY) {
1630 chip->oldstate = chip->state;
1631 chip->state = FL_PM_SUSPENDED;
1632 /* No need to wake_up() on this state change -
1633 * as the whole point is that nobody can do anything
1634 * with the chip now anyway.
/* Already suspended: nothing to do. */
1640 case FL_PM_SUSPENDED:
1643 spin_unlock(chip->mutex);
1646 /* Unlock the chips again */
/* On failure, roll back the chips suspended so far (reverse order). */
1649 for (i--; i >=0; i--) {
1650 chip = &cfi->chips[i];
1652 spin_lock(chip->mutex);
1654 if (chip->state == FL_PM_SUSPENDED) {
1655 /* No need to force it into a known state here,
1656 because we're returning failure, and it didn't
1658 chip->state = chip->oldstate;
1661 spin_unlock(chip->mutex);
/*
 * Power-management resume: for every chip left in FL_PM_SUSPENDED,
 * issue Read Array (0xFF) to reach a known state (the chip may have
 * been power-cycled while suspended) and mark it FL_READY.
 */
1668 static void cfi_intelext_resume(struct mtd_info *mtd)
1670 struct map_info *map = mtd->priv;
1671 struct cfi_private *cfi = map->fldrv_priv;
1673 struct flchip *chip;
1675 for (i=0; i<cfi->numchips; i++) {
1677 chip = &cfi->chips[i];
1679 spin_lock(chip->mutex);
1681 /* Go to known state. Chip may have been power cycled */
1682 if (chip->state == FL_PM_SUSPENDED) {
1683 cfi_write(map, CMD(0xFF), 0);
1684 chip->state = FL_READY;
1688 spin_unlock(chip->mutex);
/*
 * Teardown: free the command-set private data and the erase-region
 * table when the MTD is destroyed.
 * NOTE(review): the elided lines between these two kfree()s most
 * likely free 'cfi' itself — confirm against the full file before
 * assuming this leaks the cfi_private struct.
 */
1692 static void cfi_intelext_destroy(struct mtd_info *mtd)
1694 struct map_info *map = mtd->priv;
1695 struct cfi_private *cfi = map->fldrv_priv;
1696 kfree(cfi->cmdset_priv);
1699 kfree(mtd->eraseregions);
/* Inter-module names under which this command set is published. */
1702 static char im_name_1[]="cfi_cmdset_0001";
1703 static char im_name_3[]="cfi_cmdset_0003";
/*
 * Module init: register the probe entry point under both the 0x0001
 * and 0x0003 vendor IDs.  Registering im_name_3 against
 * cfi_cmdset_0001 is deliberate — ID 0x0003 chips use the same
 * command set implementation.
 */
1705 int __init cfi_intelext_init(void)
1707 inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
1708 inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
/* Module exit: drop both inter-module registrations. */
1712 static void __exit cfi_intelext_exit(void)
1714 inter_module_unregister(im_name_1);
1715 inter_module_unregister(im_name_3);
1718 module_init(cfi_intelext_init);
1719 module_exit(cfi_intelext_exit);
1721 MODULE_LICENSE("GPL");
1722 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
1723 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");