2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
7 * $Id: cfi_cmdset_0001.c,v 1.126 2003/06/23 07:45:48 dwmw2 Exp $
10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.)
13 * - scalability vs code size is completely set at compile-time
14 * (see include/linux/mtd/cfi.h for selection)
15 * - optimized write buffer method
16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17 * - reworked lock/unlock/erase support for var size flash
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
26 #include <asm/byteorder.h>
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/mtd/map.h>
33 #include <linux/mtd/mtd.h>
34 #include <linux/mtd/compatmac.h>
35 #include <linux/mtd/cfi.h>
37 // debugging, turns off buffer write mode if set to 1
38 #define FORCE_WORD_WRITE 0
/* Forward declarations for the mtd_info operations implemented below.
 * These are installed into the mtd_info structure by cfi_intelext_setup(). */
40 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
41 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
42 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
43 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
44 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
45 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
46 static void cfi_intelext_sync (struct mtd_info *);
47 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
48 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
49 static int cfi_intelext_suspend (struct mtd_info *);
50 static void cfi_intelext_resume (struct mtd_info *);
52 static void cfi_intelext_destroy(struct mtd_info *);
/* Entry point: non-static so the CFI probe code can find this command set. */
54 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
56 static struct mtd_info *cfi_intelext_setup (struct map_info *);
58 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
59 size_t *retlen, u_char **mtdbuf);
60 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
65 * *********** SETUP AND PROBE BITS ***********
/* Chip driver descriptor registered with the map layer; probe is NULL
 * because this command set is only reached via the generic CFI probe. */
68 static struct mtd_chip_driver cfi_intelext_chipdrv = {
69 	.probe		= NULL, /* Not usable directly */
70 	.destroy	= cfi_intelext_destroy,
71 	.name		= "cfi_cmdset_0001",
75 /* #define DEBUG_LOCK_BITS */
76 /* #define DEBUG_CFI_FEATURES */
78 #ifdef DEBUG_CFI_FEATURES
/* Debug-only helper: decode and print the feature bits of the Intel
 * extended query table (extp) in human-readable form.  Compiled in only
 * when DEBUG_CFI_FEATURES is defined above. */
79 static void cfi_tell_features(struct cfi_pri_intelext *extp)
82 	printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
83 	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
84 	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
85 	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
86 	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
87 	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
88 	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
89 	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
90 	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
91 	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
/* Bits 9..31 have no known meaning; report any that are set. */
92 	for (i=9; i<32; i++) {
93 		if (extp->FeatureSupport & (1<<i))
94 			printk("     - Unknown Bit %X:      supported\n", i);
97 	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
98 	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
100 		if (extp->SuspendCmdSupport & (1<<i))
101 			printk("     - Unknown Bit %X:               supported\n", i);
104 	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
105 	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
106 	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
107 	for (i=2; i<16; i++) {
108 		if (extp->BlkStatusRegMask & (1<<i))
109 			printk("     - Unknown Bit %X Active: yes\n",i);
/* Voltages are encoded BCD-style: high byte = integer volts, low nibble = tenths. */
112 	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
113 	       extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
114 	if (extp->VppOptimal)
115 		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
116 		       extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
120 /* This routine is made available to other mtd code via
121  * inter_module_register.  It must only be accessed through
122  * inter_module_get which will bump the use count of this module.  The
123  * addresses passed back in cfi are valid as long as the use count of
124  * this module is non-zero, i.e. between inter_module_get and
125  * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
/*
 * Entry point for the Intel/Sharp extended (0x0001) command set.
 * For real CFI chips, reads and validates the vendor extended query
 * table, byteswaps its multi-byte fields, installs it as cmdset_priv,
 * seeds per-chip timing defaults from the CFI query data, and finally
 * returns the mtd_info built by cfi_intelext_setup().
 */
127 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
129 	struct cfi_private *cfi = map->fldrv_priv;
131 	__u32 base = cfi->chips[0].start;
133 	if (cfi->cfi_mode == CFI_MODE_CFI) {
135 		 * It's a real CFI chip, not one for which the probe
136 		 * routine faked a CFI structure. So we read the feature
		/* primary selects which extended-table pointer (P_ADR vs A_ADR) to follow. */
139 		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
140 		struct cfi_pri_intelext *extp;
141 		int ofs_factor = cfi->interleave * cfi->device_type;
143 		//printk(" Intel/Sharp Extended Query Table at 0x%4.4X\n", adr);
147 		/* Switch it into Query Mode */
148 		cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
150 		extp = kmalloc(sizeof(*extp), GFP_KERNEL);
152 			printk(KERN_ERR "Failed to allocate memory\n");
156 		/* Read in the Extended Query Table */
157 		for (i=0; i<sizeof(*extp); i++) {
158 			((unsigned char *)extp)[i] =
159 				cfi_read_query(map, (base+((adr+i)*ofs_factor)));
		/* Only extended-table versions 1.0 .. 1.3 are understood. */
162 		if (extp->MajorVersion != '1' ||
163 		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
164 			printk(KERN_WARNING "  Unknown IntelExt Extended Query "
165 			       "version %c.%c.\n",  extp->MajorVersion,
171 		/* Do some byteswapping if necessary */
172 		extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
173 		extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
174 		extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
176 #ifdef DEBUG_CFI_FEATURES
177 		/* Tell the user about it in lots of lovely detail */
178 		cfi_tell_features(extp);
		/* SuspendCmdSupport bit 0 == "program allowed during erase suspend". */
181 		if(extp->SuspendCmdSupport & 1) {
182 //#define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
183 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
184 			/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
185 			printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
186 			       "erase on write disabled.\n");
187 			extp->SuspendCmdSupport &= ~1;
189 			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
192 		/* Install our own private info structure */
193 		cfi->cmdset_priv = extp;
	/* Typical (not maximum) timeouts from the CFI query, in microseconds
	 * (the CFI fields are log2 encoded, hence the shifts). */
196 	for (i=0; i< cfi->numchips; i++) {
197 		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
198 		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
199 		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
200 		cfi->chips[i].ref_point_counter = 0;
203 	map->fldrv = &cfi_intelext_chipdrv;
205 	/* Make sure it's in read mode */
206 	cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
207 	return cfi_intelext_setup(map);
/*
 * Build and populate the mtd_info for this map: allocate the device
 * structure, fill in the erase-region geometry (one entry per CFI erase
 * region per chip, scaled by the interleave), and install the method
 * pointers.  Returns NULL on allocation failure (cleanup path partially
 * elided in this excerpt).
 */
210 static struct mtd_info *cfi_intelext_setup(struct map_info *map)
212 	struct cfi_private *cfi = map->fldrv_priv;
213 	struct mtd_info *mtd;
214 	unsigned long offset = 0;
	/* DevSize is log2(bytes per chip); scale by interleave for the bus width. */
216 	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
218 	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
219 	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
222 		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
226 	memset(mtd, 0, sizeof(*mtd));
228 	mtd->type = MTD_NORFLASH;
229 	mtd->size = devsize * cfi->numchips;
231 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
232 	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
233 			* mtd->numeraseregions, GFP_KERNEL);
234 	if (!mtd->eraseregions) {
235 		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
239 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
240 		unsigned long ernum, ersize;
		/* EraseRegionInfo: high 16 bits = block size/256, low 16 bits = block count - 1. */
241 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
242 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
		/* mtd->erasesize reports the largest region's block size. */
244 		if (mtd->erasesize < ersize) {
245 			mtd->erasesize = ersize;
		/* Replicate this region's entry for each interleaved chip. */
247 		for (j=0; j<cfi->numchips; j++) {
248 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
249 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
250 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
252 		offset += (ersize * ernum);
	/* Sanity check: the regions must exactly tile the chip. */
255 	if (offset != devsize) {
257 		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
261 	for (i=0; i<mtd->numeraseregions;i++){
262 		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
263 		       i,mtd->eraseregions[i].offset,
264 		       mtd->eraseregions[i].erasesize,
265 		       mtd->eraseregions[i].numblocks);
268 	/* Also select the correct geometry setup too */
269 	mtd->erase = cfi_intelext_erase_varsize;
270 	mtd->read = cfi_intelext_read;
	/* point/unpoint (direct mapping) only make sense on linear maps. */
272 	if (map_is_linear(map)) {
273 		mtd->point = cfi_intelext_point;
274 		mtd->unpoint = cfi_intelext_unpoint;
	/* Prefer buffered writes when the chip advertises a buffer-write timeout,
	 * unless FORCE_WORD_WRITE debugging is enabled. */
277 	if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) {
278 		printk(KERN_INFO "Using buffer write method\n" );
279 		mtd->write = cfi_intelext_write_buffers;
281 		printk(KERN_INFO "Using word write method\n" );
282 		mtd->write = cfi_intelext_write_words;
284 	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
285 	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
286 	mtd->sync = cfi_intelext_sync;
287 	mtd->lock = cfi_intelext_lock;
288 	mtd->unlock = cfi_intelext_unlock;
289 	mtd->suspend = cfi_intelext_suspend;
290 	mtd->resume = cfi_intelext_resume;
291 	mtd->flags = MTD_CAP_NORFLASH;
292 	map->fldrv = &cfi_intelext_chipdrv;
293 	mtd->name = map->name;
294 	__module_get(THIS_MODULE);
	/* Error unwind: free whatever was allocated before failing. */
299 	if(mtd->eraseregions)
300 		kfree(mtd->eraseregions);
303 	kfree(cfi->cmdset_priv);
309 * *********** CHIP ACCESS FUNCTIONS ***********
/*
 * Acquire a chip for an operation of type 'mode' (FL_READY, FL_POINT,
 * FL_WRITING, ...).  Called with chip->mutex held; may drop and retake
 * it while waiting.  If the chip is busy erasing and the extended table
 * says erase-suspend is allowed for this mode, the erase is suspended
 * (chip->oldstate remembers it for put_chip() to resume).  Otherwise the
 * caller sleeps on chip->wq until the current owner wakes it.
 */
312 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
314 	DECLARE_WAITQUEUE(wait, current);
315 	struct cfi_private *cfi = map->fldrv_priv;
	/* SR.7 (per interleaved chip) == ready */
316 	cfi_word status, status_OK = CMD(0x80);
318 	struct cfi_pri_intelext *cfip = (struct cfi_pri_intelext *)cfi->cmdset_priv;
321 	timeo = jiffies + HZ;
323 	switch (chip->state) {
		/* Chip is mid-operation: poll the status register until ready. */
327 		status = cfi_read(map, adr);
328 		if ((status & status_OK) == status_OK)
331 		if (time_after(jiffies, timeo)) {
332 			printk(KERN_ERR "Waiting for chip to be ready timed out. Status %llx\n",
334 			spin_unlock(chip->mutex);
		/* Latency issues. Drop the lock, wait a while and retry. */
337 		spin_unlock(chip->mutex);
339 		spin_lock(chip->mutex);
340 		/* Someone else might have been playing with it. */
		/* Erase-suspend is usable only if FeatureSupport bit 1 is set,
		 * and only for reads/points, or writes when SuspendCmdSupport
		 * bit 0 (program-during-erase-suspend) is set. */
350 		if (!(cfip->FeatureSupport & 2) ||
351 		    !(mode == FL_READY || mode == FL_POINT ||
352 		      (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
		/* 0xB0: Erase Suspend */
357 		cfi_write(map, CMD(0xB0), adr);
359 		/* If the flash has finished erasing, then 'erase suspend'
360 		 * appears to make some (28F320) flash devices switch to
361 		 * 'read' mode. Make sure that we switch to 'read status'
362 		 * mode so we get the right data. --rmk
364 		cfi_write(map, CMD(0x70), adr);
365 		chip->oldstate = FL_ERASING;
366 		chip->state = FL_ERASE_SUSPENDING;
367 		chip->erase_suspended = 1;
		/* Poll until the suspend takes effect (SR.7 set). */
369 			status = cfi_read(map, adr);
370 			if ((status & status_OK) == status_OK)
373 			if (time_after(jiffies, timeo)) {
374 				/* Urgh. Resume and pretend we weren't here. */
375 				cfi_write(map, CMD(0xd0), adr);
376 				/* Make sure we're in 'read status' mode if it had finished */
377 				cfi_write(map, CMD(0x70), adr);
378 				chip->state = FL_ERASING;
379 				chip->oldstate = FL_READY;
380 				printk(KERN_ERR "Chip not ready after erase "
381 				       "suspended: status = 0x%x\n", status);
385 			spin_unlock(chip->mutex);
387 			spin_lock(chip->mutex);
388 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
389 			   So we can just loop here. */
391 		chip->state = FL_STATUS;
395 		/* Only if there's no operation suspended... */
396 		if (mode == FL_READY && chip->oldstate == FL_READY)
		/* Default: chip is owned by someone else — sleep until woken. */
401 		set_current_state(TASK_UNINTERRUPTIBLE);
402 		add_wait_queue(&chip->wq, &wait);
403 		spin_unlock(chip->mutex);
405 		remove_wait_queue(&chip->wq, &wait);
406 		spin_lock(chip->mutex);
/*
 * Release a chip acquired with get_chip(): if get_chip() suspended an
 * erase (oldstate == FL_ERASING) the erase is resumed here with 0xD0,
 * followed by 0x70 so the status register stays readable.  Called with
 * chip->mutex held.
 */
411 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
413 	struct cfi_private *cfi = map->fldrv_priv;
415 	switch(chip->oldstate) {
417 		chip->state = chip->oldstate;
418 		/* What if one interleaved chip has finished and the
419 		   other hasn't? The old code would leave the finished
420 		   one in READY mode.  That's bad, and caused -EROFS
421 		   errors to be returned from do_erase_oneblock because
422 		   that's the only bit it checked for at the time.
423 		   As the state machine appears to explicitly allow
424 		   sending the 0x70 (Read Status) command to an erasing
425 		   chip and expecting it to be ignored, that's what we
		/* 0xD0: Erase Resume; 0x70: back to Read Status mode. */
427 		cfi_write(map, CMD(0xd0), adr);
428 		cfi_write(map, CMD(0x70), adr);
429 		chip->oldstate = FL_READY;
430 		chip->state = FL_ERASING;
434 		/* We should really make set_vpp() count, rather than doing this */
438 		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
/*
 * Put one chip into FL_POINT state so callers may read the mapped flash
 * directly.  Sends a Read Array (0xFF) command if the chip is not
 * already readable, and bumps ref_point_counter so nested points nest.
 */
443 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
445 	unsigned long cmd_addr;
446 	struct cfi_private *cfi = map->fldrv_priv;
451 	/* Ensure cmd read/writes are aligned. */
452 	cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1);
454 	spin_lock(chip->mutex);
456 	ret = get_chip(map, chip, cmd_addr, FL_POINT);
	/* 0xFF: Read Array — required before direct reads from the map. */
459 	if (chip->state != FL_POINT && chip->state != FL_READY)
460 		cfi_write(map, CMD(0xff), cmd_addr);
462 	chip->state = FL_POINT;
463 	chip->ref_point_counter++;
465 	spin_unlock(chip->mutex);
/*
 * mtd->point implementation: give the caller a direct pointer into the
 * (linear) mapping at 'from', locking each chip covered by the range
 * into FL_POINT state via do_point_onechip().
 */
470 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
472 	struct map_info *map = mtd->priv;
473 	struct cfi_private *cfi = map->fldrv_priv;
478 	if (from + len > mtd->size)
	/* Direct pointer into the kernel mapping of the flash. */
481 	*mtdbuf = (void *)map->virt + from;
483 		return -EINVAL; /* can not point this region */
486 	/* Now lock the chip(s) to POINT state */
488 	/* ofs: offset within the first chip that the first read should start */
489 	chipnum = (from >> cfi->chipshift);
490 	ofs = from - (chipnum << cfi->chipshift);
493 		unsigned long thislen;
495 		if (chipnum >= cfi->numchips)
		/* Clamp this iteration's length to the end of the current chip. */
498 		if ((len + ofs -1) >> cfi->chipshift)
499 			thislen = (1<<cfi->chipshift) - ofs;
503 		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
/*
 * mtd->unpoint implementation: undo cfi_intelext_point() for the same
 * range — decrement each chip's ref_point_counter and drop it back to
 * FL_READY when the count reaches zero.
 */
516 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
518 	struct map_info *map = mtd->priv;
519 	struct cfi_private *cfi = map->fldrv_priv;
523 	/* Now unlock the chip(s) POINT state */
525 	/* ofs: offset within the first chip that the first read should start */
526 	chipnum = (from >> cfi->chipshift);
527 	ofs = from - (chipnum <<  cfi->chipshift);
530 		unsigned long thislen;
533 		chip = &cfi->chips[chipnum];
534 		if (chipnum >= cfi->numchips)
		/* Clamp this iteration's length to the end of the current chip. */
537 		if ((len + ofs -1) >> cfi->chipshift)
538 			thislen = (1<<cfi->chipshift) - ofs;
542 		spin_lock(chip->mutex);
543 		if (chip->state == FL_POINT) {
544 			chip->ref_point_counter--;
			/* Last outstanding point on this chip: release it. */
545 			if(chip->ref_point_counter == 0)
546 				chip->state = FL_READY;
548 			printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
550 		put_chip(map, chip, chip->start);
551 		spin_unlock(chip->mutex);
/*
 * Read 'len' bytes at 'adr' from a single chip into 'buf'.  Acquires
 * the chip for FL_READY, switches it to Read Array mode (0xFF) if
 * needed, and copies straight out of the mapping.
 */
559 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
561 	unsigned long cmd_addr;
562 	struct cfi_private *cfi = map->fldrv_priv;
567 	/* Ensure cmd read/writes are aligned. */
568 	cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1);
570 	spin_lock(chip->mutex);
571 	ret = get_chip(map, chip, cmd_addr, FL_READY);
573 		spin_unlock(chip->mutex);
	/* 0xFF: Read Array — required before reading data from the map. */
577 	if (chip->state != FL_POINT && chip->state != FL_READY) {
578 		cfi_write(map, CMD(0xff), cmd_addr);
580 		chip->state = FL_READY;
583 	map_copy_from(map, buf, adr, len);
585 	put_chip(map, chip, cmd_addr);
587 	spin_unlock(chip->mutex);
/*
 * mtd->read implementation: split the request at chip boundaries and
 * hand each piece to do_read_onechip().
 */
591 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
593 	struct map_info *map = mtd->priv;
594 	struct cfi_private *cfi = map->fldrv_priv;
599 	/* ofs: offset within the first chip that the first read should start */
600 	chipnum = (from >> cfi->chipshift);
601 	ofs = from - (chipnum <<  cfi->chipshift);
606 		unsigned long thislen;
608 		if (chipnum >= cfi->numchips)
		/* Clamp this iteration's length to the end of the current chip. */
611 		if ((len + ofs -1) >> cfi->chipshift)
612 			thislen = (1<<cfi->chipshift) - ofs;
616 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/*
 * Common worker for reading protection registers.  'base_offst' is the
 * byte offset of the register bank within the protection area and
 * 'reg_sz' its size; the address space presented to the caller
 * concatenates the banks of all chips, so 'from' is first decomposed
 * into (chip, offset).  Reads byte-at-a-time in JEDEC query (0x90) mode.
 */
630 static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
632 	struct map_info *map = mtd->priv;
633 	struct cfi_private *cfi = map->fldrv_priv;
634 	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
636 	int ofs_factor = cfi->interleave * cfi->device_type;
641 	chip_num = ((unsigned int)from/reg_sz);
642 	offst = from - (reg_sz*chip_num)+base_offst;
645 		/* Calculate which chip & protection register offset we need */
647 		if (chip_num >= cfi->numchips)
650 		chip = &cfi->chips[chip_num];
652 		spin_lock(chip->mutex);
653 		ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
655 			spin_unlock(chip->mutex);
			/* Report partial progress if some bytes were already copied. */
656 			return (len-count)?:ret;
		/* 0x90: Read Identifier/JEDEC query mode, where the
		 * protection registers are visible. */
659 		if (chip->state != FL_JEDEC_QUERY) {
660 			cfi_write(map, CMD(0x90), chip->start);
661 			chip->state = FL_JEDEC_QUERY;
664 		while (count && ((offst-base_offst) < reg_sz)) {
665 			*buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
671 		put_chip(map, chip, chip->start);
672 		spin_unlock(chip->mutex);
674 		/* Move on to the next chip */
/*
 * mtd->read_user_prot_reg: read the user-programmable protection
 * register.  It lives immediately after the factory register, hence
 * base_offst = factory register size.  Fails if the chip advertises no
 * protection registers (FeatureSupport bit 6).
 */
683 static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
685 	struct map_info *map = mtd->priv;
686 	struct cfi_private *cfi = map->fldrv_priv;
687 	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
688 	int base_offst,reg_sz;
690 	/* Check that we actually have some protection registers */
691 	if(!(extp->FeatureSupport&64)){
692 		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
	/* Sizes are log2 encoded in the extended query table. */
696 	base_offst=(1<<extp->FactProtRegSize);
697 	reg_sz=(1<<extp->UserProtRegSize);
699 	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
/*
 * mtd->read_fact_prot_reg: read the factory-programmed protection
 * register (base offset 0 — set in an elided line of this excerpt).
 * Fails if the chip advertises no protection registers.
 */
702 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
704 	struct map_info *map = mtd->priv;
705 	struct cfi_private *cfi = map->fldrv_priv;
706 	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
707 	int base_offst,reg_sz;
709 	/* Check that we actually have some protection registers */
710 	if(!(extp->FeatureSupport&64)){
711 		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
716 	reg_sz=(1<<extp->FactProtRegSize);
718 	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
/*
 * Program one bus-width word ('datum') at 'adr'.  Sequence: acquire the
 * chip for FL_WRITING, issue Word Program (0x40) + data, wait the
 * typical programming time, then poll SR.7 for completion.  The
 * word_write_time estimate is adapted up/down based on whether the
 * first poll succeeded.  SR.1 set afterwards means the block was
 * locked and the write failed.
 */
722 static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, cfi_word datum)
724 	struct cfi_private *cfi = map->fldrv_priv;
725 	cfi_word status, status_OK;
731 	/* Let's determine this according to the interleave only once */
732 	status_OK = CMD(0x80);
734 	spin_lock(chip->mutex);
735 	ret = get_chip(map, chip, adr, FL_WRITING);
737 		spin_unlock(chip->mutex);
	/* 0x40: Word Program command, followed by the data itself. */
742 	cfi_write(map, CMD(0x40), adr);
743 	cfi_write(map, datum, adr);
744 	chip->state = FL_WRITING;
	/* Sleep for the typical programming time before polling. */
746 	spin_unlock(chip->mutex);
747 	cfi_udelay(chip->word_write_time);
748 	spin_lock(chip->mutex);
750 	timeo = jiffies + (HZ/2);
753 		if (chip->state != FL_WRITING) {
754 			/* Someone's suspended the write. Sleep */
755 			DECLARE_WAITQUEUE(wait, current);
757 			set_current_state(TASK_UNINTERRUPTIBLE);
758 			add_wait_queue(&chip->wq, &wait);
759 			spin_unlock(chip->mutex);
761 			remove_wait_queue(&chip->wq, &wait);
762 			timeo = jiffies + (HZ / 2); /* FIXME */
763 			spin_lock(chip->mutex);
767 		status = cfi_read(map, adr);
768 		if ((status & status_OK) == status_OK)
771 		/* OK Still waiting */
772 		if (time_after(jiffies, timeo)) {
773 			chip->state = FL_STATUS;
774 			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
779 		/* Latency issues. Drop the lock, wait a while and retry */
780 		spin_unlock(chip->mutex);
783 		spin_lock(chip->mutex);
	/* Adaptive timing: shrink the delay if the first poll succeeded
	 * (floor of 1), grow it if we had to wait. */
786 		chip->word_write_time--;
787 		if (!chip->word_write_time)
788 			chip->word_write_time++;
791 		chip->word_write_time++;
793 	/* Done and happy. */
794 	chip->state = FL_STATUS;
795 	/* check for lock bit */
796 	if (status & CMD(0x02)) {
		/* 0x50: Clear Status Register */
798 		cfi_write(map, CMD(0x50), adr);
799 		/* put back into read status register mode */
800 		cfi_write(map, CMD(0x70), adr);
804 	put_chip(map, chip, adr);
805 	spin_unlock(chip->mutex);
/*
 * mtd->write implementation (word-at-a-time): handle an unaligned head
 * by read-modify-writing a padded word (pad bytes 0xFF = no change on
 * NOR flash), then whole bus-width words, then a padded tail; crossing
 * chip boundaries as needed.
 */
811 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
813 	struct map_info *map = mtd->priv;
814 	struct cfi_private *cfi = map->fldrv_priv;
823 	chipnum = to >> cfi->chipshift;
824 	ofs = to  - (chipnum << cfi->chipshift);
826 	/* If it's not bus-aligned, do the first byte write */
827 	if (ofs & (CFIDEV_BUSWIDTH-1)) {
828 		unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
829 		int gap = ofs - bus_ofs;
		/* Fill the aligned word: user data where it applies, 0xFF
		 * elsewhere (programming 0xFF leaves flash cells untouched). */
836 		while (len && i < CFIDEV_BUSWIDTH)
837 			tmp_buf[i++] = buf[n++], len--;
838 		while (i < CFIDEV_BUSWIDTH)
841 		if (cfi_buswidth_is_2()) {
842 			datum = *(__u16*)tmp_buf;
843 		} else if (cfi_buswidth_is_4()) {
844 			datum = *(__u32*)tmp_buf;
845 		} else if (cfi_buswidth_is_8()) {
846 			datum = *(__u64*)tmp_buf;
848 			return -EINVAL;  /* should never happen, but be safe */
851 		ret = do_write_oneword(map, &cfi->chips[chipnum],
		/* Advance to the next chip if we ran off the end of this one. */
860 		if (ofs >> cfi->chipshift) {
863 			if (chipnum == cfi->numchips)
	/* Main loop: full bus-width words. */
868 	while(len >= CFIDEV_BUSWIDTH) {
871 		if (cfi_buswidth_is_1()) {
873 		} else if (cfi_buswidth_is_2()) {
874 			datum = *(__u16*)buf;
875 		} else if (cfi_buswidth_is_4()) {
876 			datum = *(__u32*)buf;
877 		} else if (cfi_buswidth_is_8()) {
878 			datum = *(__u64*)buf;
883 		ret = do_write_oneword(map, &cfi->chips[chipnum],
888 		ofs += CFIDEV_BUSWIDTH;
889 		buf += CFIDEV_BUSWIDTH;
890 		(*retlen) += CFIDEV_BUSWIDTH;
891 		len -= CFIDEV_BUSWIDTH;
893 		if (ofs >> cfi->chipshift) {
896 			if (chipnum == cfi->numchips)
	/* Unaligned tail: pad the final partial word with 0xFF. */
901 	if (len & (CFIDEV_BUSWIDTH-1)) {
907 			tmp_buf[i++] = buf[n++];
908 		while (i < CFIDEV_BUSWIDTH)
911 		if (cfi_buswidth_is_2()) {
912 			datum = *(__u16*)tmp_buf;
913 		} else if (cfi_buswidth_is_4()) {
914 			datum = *(__u32*)tmp_buf;
915 		} else if (cfi_buswidth_is_8()) {
916 			datum = *(__u64*)tmp_buf;
918 			return -EINVAL;  /* should never happen, but be safe */
921 		ret = do_write_oneword(map, &cfi->chips[chipnum],
/*
 * Program up to one write-buffer's worth of data ('len' bytes at 'adr',
 * never crossing a wbufsize boundary).  Sequence per the Intel command
 * set: clear stale SR.4/SR.5 errors, issue Write-to-Buffer (0xE8) and
 * poll for buffer availability, write the word count and data, confirm
 * with 0xD0, then wait/poll for SR.7 completion.  buffer_write_time is
 * adapted like word_write_time in do_write_oneword().
 */
933 static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
934 				  unsigned long adr, const u_char *buf, int len)
936 	struct cfi_private *cfi = map->fldrv_priv;
937 	cfi_word status, status_OK;
938 	unsigned long cmd_adr, timeo;
939 	int wbufsize, z, ret=0, bytes, words;
941 	wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
	/* All commands go to the buffer-aligned address. */
943 	cmd_adr = adr & ~(wbufsize-1);
945 	/* Let's determine this according to the interleave only once */
946 	status_OK = CMD(0x80);
948 	spin_lock(chip->mutex);
949 	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
951 		spin_unlock(chip->mutex);
955 	if (chip->state != FL_STATUS)
956 		cfi_write(map, CMD(0x70), cmd_adr);
958 	status = cfi_read(map, cmd_adr);
960 	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
961 	   [...], the device will not accept any more Write to Buffer commands".
962 	   So we must check here and reset those bits if they're set. Otherwise
963 	   we're just pissing in the wind */
964 	if (status & CMD(0x30)) {
		/* NOTE(review): %x may not match cfi_word's width here —
		 * other messages in this file use %llx with a (__u64) cast. */
965 		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %x). Clearing.\n", status);
966 		cfi_write(map, CMD(0x50), cmd_adr);
967 		cfi_write(map, CMD(0x70), cmd_adr);
970 	chip->state = FL_WRITING_TO_BUFFER;
	/* 0xE8: Write to Buffer; poll XSR.7 until the buffer is available. */
974 		cfi_write(map, CMD(0xe8), cmd_adr);
976 		status = cfi_read(map, cmd_adr);
977 		if ((status & status_OK) == status_OK)
980 		spin_unlock(chip->mutex);
982 		spin_lock(chip->mutex);
985 		/* Argh. Not ready for write to buffer */
986 		cfi_write(map, CMD(0x70), cmd_adr);
987 		chip->state = FL_STATUS;
988 		printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %llx, status = %llx\n", (__u64)status, (__u64)cfi_read(map, cmd_adr));
989 		/* Odd. Clear status bits */
990 		cfi_write(map, CMD(0x50), cmd_adr);
991 		cfi_write(map, CMD(0x70), cmd_adr);
997 	/* Write length of data to come */
998 	bytes = len & (CFIDEV_BUSWIDTH-1);
999 	words = len / CFIDEV_BUSWIDTH;
	/* The count written is (number of bus cycles - 1). */
1000 	cfi_write(map, CMD(words - !bytes), cmd_adr );
	/* Stream the full bus-width words into the write buffer. */
1004 	while(z < words * CFIDEV_BUSWIDTH) {
1005 		if (cfi_buswidth_is_1()) {
1008 			map_write8 (map, *b++, adr+z);
1009 			buf = (const u_char *)b;
1010 		} else if (cfi_buswidth_is_2()) {
1011 			u16 *b = (u16 *)buf;
1013 			map_write16 (map, *b++, adr+z);
1014 			buf = (const u_char *)b;
1015 		} else if (cfi_buswidth_is_4()) {
1016 			u32 *b = (u32 *)buf;
1018 			map_write32 (map, *b++, adr+z);
1019 			buf = (const u_char *)b;
1020 		} else if (cfi_buswidth_is_8()) {
1021 			u64 *b = (u64 *)buf;
1023 			map_write64 (map, *b++, adr+z);
1024 			buf = (const u_char *)b;
1029 		z += CFIDEV_BUSWIDTH;
	/* Trailing partial word, padded with 0xFF (no-op for NOR cells). */
1033 		u_char tmp_buf[8], *tmp_p = tmp_buf;
1036 			tmp_buf[i++] = buf[n++];
1037 		while (i < CFIDEV_BUSWIDTH)
1038 			tmp_buf[i++] = 0xff;
1039 		if (cfi_buswidth_is_2()) {
1040 			u16 *b = (u16 *)tmp_p;
1042 			map_write16 (map, *b++, adr+z);
1043 			tmp_p = (u_char *)b;
1044 		} else if (cfi_buswidth_is_4()) {
1045 			u32 *b = (u32 *)tmp_p;
1047 			map_write32 (map, *b++, adr+z);
1048 			tmp_p = (u_char *)b;
1049 		} else if (cfi_buswidth_is_8()) {
1050 			u64 *b = (u64 *)tmp_p;
1052 			map_write64 (map, *b++, adr+z);
1053 			tmp_p = (u_char *)b;
	/* 0xD0: Confirm — start programming the buffer contents. */
1060 	cfi_write(map, CMD(0xd0), cmd_adr);
1061 	chip->state = FL_WRITING;
	/* Sleep for the typical buffer-program time before polling. */
1063 	spin_unlock(chip->mutex);
1064 	cfi_udelay(chip->buffer_write_time);
1065 	spin_lock(chip->mutex);
1067 	timeo = jiffies + (HZ/2);
1070 		if (chip->state != FL_WRITING) {
1071 			/* Someone's suspended the write. Sleep */
1072 			DECLARE_WAITQUEUE(wait, current);
1073 			set_current_state(TASK_UNINTERRUPTIBLE);
1074 			add_wait_queue(&chip->wq, &wait);
1075 			spin_unlock(chip->mutex);
1077 			remove_wait_queue(&chip->wq, &wait);
1078 			timeo = jiffies + (HZ / 2); /* FIXME */
1079 			spin_lock(chip->mutex);
1083 		status = cfi_read(map, cmd_adr);
1084 		if ((status & status_OK) == status_OK)
1087 		/* OK Still waiting */
1088 		if (time_after(jiffies, timeo)) {
1089 			chip->state = FL_STATUS;
1090 			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1095 		/* Latency issues. Drop the lock, wait a while and retry */
1096 		spin_unlock(chip->mutex);
1099 		spin_lock(chip->mutex);
	/* Adaptive timing, as in do_write_oneword(). */
1102 		chip->buffer_write_time--;
1103 		if (!chip->buffer_write_time)
1104 			chip->buffer_write_time++;
1107 		chip->buffer_write_time++;
1109 	/* Done and happy. */
1110 	chip->state = FL_STATUS;
1112 	/* check for lock bit */
1113 	if (status & CMD(0x02)) {
		/* 0x50: Clear Status Register */
1115 		cfi_write(map, CMD(0x50), cmd_adr);
1116 		/* put back into read status register mode */
1117 		cfi_write(map, CMD(0x70), adr);
1122 	put_chip(map, chip, cmd_adr);
1123 	spin_unlock(chip->mutex);
/*
 * mtd->write implementation (buffered): align the head with a word
 * write, then feed maximal buffer-aligned spans to do_write_buffer(),
 * advancing across chip boundaries as needed.
 */
1127 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1128 				       size_t len, size_t *retlen, const u_char *buf)
1130 	struct map_info *map = mtd->priv;
1131 	struct cfi_private *cfi = map->fldrv_priv;
1132 	int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
1141 	chipnum = to >> cfi->chipshift;
1142 	ofs = to  - (chipnum << cfi->chipshift);
1144 	/* If it's not bus-aligned, do the first word write */
1145 	if (ofs & (CFIDEV_BUSWIDTH-1)) {
1146 		size_t local_len = (-ofs)&(CFIDEV_BUSWIDTH-1);
1147 		if (local_len > len)
		/* Delegate the unaligned head to the word-write path. */
1149 		ret = cfi_intelext_write_words(mtd, to, local_len,
1157 		if (ofs >> cfi->chipshift) {
1160 			if (chipnum == cfi->numchips)
1165 	/* Write buffer is worth it only if more than one word to write... */
1167 		/* We must not cross write block boundaries */
1168 		int size = wbufsize - (ofs & (wbufsize-1));
1172 		ret = do_write_buffer(map, &cfi->chips[chipnum],
		/* Advance to the next chip if we ran off the end of this one. */
1182 		if (ofs >> cfi->chipshift) {
1185 			if (chipnum == cfi->numchips)
/* Callback type applied to each erase block by cfi_intelext_varsize_frob();
 * 'thunk' is an opaque per-operation argument. */
1192 typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
1193 			      unsigned long adr, void *thunk);
/*
 * Apply 'frob' to every erase block in [ofs, ofs+len) on a device with
 * variable-size erase regions.  Validates that both ends of the range
 * are aligned to the erasesize of the region they fall in, then walks
 * block by block, tracking the current region and chip.
 */
1195 static int cfi_intelext_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
1196 				     loff_t ofs, size_t len, void *thunk)
1198 	struct map_info *map = mtd->priv;
1199 	struct cfi_private *cfi = map->fldrv_priv;
1201 	int chipnum, ret = 0;
1203 	struct mtd_erase_region_info *regions = mtd->eraseregions;
1205 	if (ofs > mtd->size)
1208 	if ((len + ofs) > mtd->size)
1211 	/* Check that both start and end of the requested erase are
1212 	 * aligned with the erasesize at the appropriate addresses.
1217 	/* Skip all erase regions which are ended before the start of
1218 	   the requested erase. Actually, to save on the calculations,
1219 	   we skip to the first erase region which starts after the
1220 	   start of the requested erase, and then go back one.
1223 	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
1227 	/* OK, now i is pointing at the erase region in which this
1228 	   erase request starts. Check the start of the requested
1229 	   erase range is aligned with the erase size which is in
1233 	if (ofs & (regions[i].erasesize-1))
1236 	/* Remember the erase region we start on */
1239 	/* Next, check that the end of the requested erase is aligned
1240 	 * with the erase region at that address.
1243 	while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
1246 	/* As before, drop back one to point at the region in which
1247 	   the address actually falls
1251 	if ((ofs + len) & (regions[i].erasesize-1))
1254 	chipnum = ofs >> cfi->chipshift;
1255 	adr = ofs - (chipnum << cfi->chipshift);
1260 		ret = (*frob)(map, &cfi->chips[chipnum], adr, thunk);
1265 		adr += regions[i].erasesize;
1266 		len -= regions[i].erasesize;
		/* Move to the next region when we pass the end of this one
		 * (comparison done modulo the chip size). */
1268 		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
1271 		if (adr >> cfi->chipshift) {
1275 			if (chipnum >= cfi->numchips)
/*
 * Erase the single block at 'adr'.  Sequence: acquire the chip for
 * FL_ERASING, clear the status register, issue Block Erase (0x20) +
 * Confirm (0xD0), sleep half the typical erase time, then poll SR.7
 * with a generous 20s timeout (extended if the erase was suspended and
 * resumed).  Afterwards, decode any error bits in the merged per-chip
 * status; an SR.5-only failure (0x20) triggers a retry.
 */
1284 static int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1286 	struct cfi_private *cfi = map->fldrv_priv;
1287 	cfi_word status, status_OK;
1288 	unsigned long timeo;
1290 	DECLARE_WAITQUEUE(wait, current);
1295 	/* Let's determine this according to the interleave only once */
1296 	status_OK = CMD(0x80);
1299 	spin_lock(chip->mutex);
1300 	ret = get_chip(map, chip, adr, FL_ERASING);
1302 		spin_unlock(chip->mutex);
1307 	/* Clear the status register first */
1308 	cfi_write(map, CMD(0x50), adr);
	/* 0x20: Block Erase; 0xD0: Confirm. */
1311 	cfi_write(map, CMD(0x20), adr);
1312 	cfi_write(map, CMD(0xD0), adr);
1313 	chip->state = FL_ERASING;
1314 	chip->erase_suspended = 0;
	/* Sleep for half the typical erase time before polling. */
1316 	spin_unlock(chip->mutex);
1317 	set_current_state(TASK_UNINTERRUPTIBLE);
1318 	schedule_timeout((chip->erase_time*HZ)/(2*1000));
1319 	spin_lock(chip->mutex);
1321 	/* FIXME. Use a timer to check this, and return immediately. */
1322 	/* Once the state machine's known to be working I'll do that */
1324 	timeo = jiffies + (HZ*20);
1326 		if (chip->state != FL_ERASING) {
1327 			/* Someone's suspended the erase. Sleep */
1328 			set_current_state(TASK_UNINTERRUPTIBLE);
1329 			add_wait_queue(&chip->wq, &wait);
1330 			spin_unlock(chip->mutex);
1332 			remove_wait_queue(&chip->wq, &wait);
1333 			spin_lock(chip->mutex);
1336 		if (chip->erase_suspended) {
1337 			/* This erase was suspended and resumed.
1338 			   Adjust the timeout */
1339 			timeo = jiffies + (HZ*20); /* FIXME */
1340 			chip->erase_suspended = 0;
1343 		status = cfi_read(map, adr);
1344 		if ((status & status_OK) == status_OK)
1347 		/* OK Still waiting */
1348 		if (time_after(jiffies, timeo)) {
1349 			cfi_write(map, CMD(0x70), adr);
1350 			chip->state = FL_STATUS;
1351 			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %llx, status = %llx.\n",
1352 			       adr, (__u64)status, (__u64)cfi_read(map, adr));
1353 			/* Clear status bits */
1354 			cfi_write(map, CMD(0x50), adr);
1355 			cfi_write(map, CMD(0x70), adr);
1357 			spin_unlock(chip->mutex);
1361 		/* Latency issues. Drop the lock, wait a while and retry */
1362 		spin_unlock(chip->mutex);
1363 		set_current_state(TASK_UNINTERRUPTIBLE);
1364 		schedule_timeout(1);
1365 		spin_lock(chip->mutex);
1371 	/* We've broken this before. It doesn't hurt to be safe */
1372 	cfi_write(map, CMD(0x70), adr);
1373 	chip->state = FL_STATUS;
1374 	status = cfi_read(map, adr);
1376 	/* check for lock bit */
1377 	if (status & CMD(0x3a)) {
1378 		unsigned char chipstatus = status;
		/* Interleaved chips may disagree: OR their status bytes together. */
1379 		if (status != CMD(status & 0xff)) {
1381 			for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
1382 				chipstatus |= status >> (cfi->device_type * 8);
1384 			printk(KERN_WARNING "Status is not identical for all chips: 0x%llx. Merging to give 0x%02x\n", (__u64)status, chipstatus);
1386 		/* Reset the error bits */
1387 		cfi_write(map, CMD(0x50), adr);
1388 		cfi_write(map, CMD(0x70), adr);
		/* SR.4+SR.5 together: bad command sequence. */
1390 		if ((chipstatus & 0x30) == 0x30) {
1391 			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%llx\n", (__u64)status);
1393 		} else if (chipstatus & 0x02) {
1394 			/* Protection bit set */
1396 		} else if (chipstatus & 0x8) {
			/* SR.3: Vpp low during erase. */
1398 			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%llx\n", (__u64)status);
1400 		} else if (chipstatus & 0x20) {
			/* SR.5 alone: erase failed — retry once more. */
1402 				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx. Retrying...\n", adr, (__u64)status);
1403 				timeo = jiffies + HZ;
1404 				chip->state = FL_STATUS;
1405 				spin_unlock(chip->mutex);
1408 			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx\n", adr, (__u64)status);
1414 	spin_unlock(chip->mutex);
/*
 * MTD erase entry point for variable-block-size Intel flash.
 * Applies do_erase_oneblock() to every block in the requested range
 * via cfi_intelext_varsize_frob(), then marks the request complete
 * and invokes the caller's completion callback, per MTD convention.
 *
 * NOTE(review): elided listing -- the lines assigning ofs/len from
 * 'instr' and the early error-return path are not visible here.
 */
1418 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1420 unsigned long ofs, len;
1426 ret = cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, 0);
1430 instr->state = MTD_ERASE_DONE;
1431 if (instr->callback)
1432 instr->callback(instr);
/*
 * Quiesce the device: wait for every chip to become idle, then park
 * it in FL_SYNCING so no new operation can start.  A second pass
 * restores each chip's previous state and releases the per-chip
 * locks, effectively guaranteeing all in-flight erases/writes have
 * finished by the time this returns.
 */
1437 static void cfi_intelext_sync (struct mtd_info *mtd)
1439 struct map_info *map = mtd->priv;
1440 struct cfi_private *cfi = map->fldrv_priv;
1442 struct flchip *chip;
/* First pass: grab each chip in turn and move it to FL_SYNCING. */
1445 for (i=0; !ret && i<cfi->numchips; i++) {
1446 chip = &cfi->chips[i];
1448 spin_lock(chip->mutex);
1449 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1452 chip->oldstate = chip->state;
1453 chip->state = FL_SYNCING;
1454 /* No need to wake_up() on this state change -
1455 * as the whole point is that nobody can do anything
1456 * with the chip now anyway.
1459 spin_unlock(chip->mutex);
1462 /* Unlock the chips again */
/* Second pass walks backwards over the chips already claimed,
 * restoring their saved state (works even if the first loop
 * stopped early on a get_chip() failure). */
1464 for (i--; i >=0; i--) {
1465 chip = &cfi->chips[i];
1467 spin_lock(chip->mutex);
1469 if (chip->state == FL_SYNCING) {
1470 chip->state = chip->oldstate;
1473 spin_unlock(chip->mutex);
1477 #ifdef DEBUG_LOCK_BITS
/*
 * Debug helper (compiled only with DEBUG_LOCK_BITS): dump the
 * lock-bit status of one block.  Enters Read Identifier/Query mode
 * (0x90), reads the block status register at offset 2 within the
 * block's query area, prints it, then returns the chip to read-array
 * mode (0xFF).
 */
1478 static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1480 struct cfi_private *cfi = map->fldrv_priv;
/* Query offsets scale with interleave * device width. */
1481 int ofs_factor = cfi->interleave * cfi->device_type;
1483 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1484 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1485 adr, cfi_read_query(map, adr+(2*ofs_factor)));
1486 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
/* Thunk values selecting the operation performed by do_xxlock_oneblock(). */
1492 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1493 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
/*
 * Lock or unlock a single block, selected by 'thunk'.  Issues the
 * Set/Clear Block Lock-Bit sequence (0x60 setup, then 0x01 to lock or
 * 0xD0 to unlock), sleeps, and polls the status register until every
 * interleaved chip reports ready or a 20-second timeout expires.
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): elided listing -- 'ret' declaration, braces and the
 * return statements are not visible here.
 */
1495 static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1497 struct cfi_private *cfi = map->fldrv_priv;
1498 cfi_word status, status_OK;
1499 unsigned long timeo = jiffies + HZ;
1504 /* Let's determine this according to the interleave only once */
1505 status_OK = CMD(0x80);
/* Claim the chip for the lock/unlock operation. */
1507 spin_lock(chip->mutex);
1508 ret = get_chip(map, chip, adr, FL_LOCKING);
1510 spin_unlock(chip->mutex);
/* 0x60 = Set/Clear Block Lock-Bits setup command. */
1515 cfi_write(map, CMD(0x60), adr);
1517 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1518 cfi_write(map, CMD(0x01), adr);
1519 chip->state = FL_LOCKING;
1520 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1521 cfi_write(map, CMD(0xD0), adr);
1522 chip->state = FL_UNLOCKING;
/* Drop the lock and give the chip time before polling. */
1526 spin_unlock(chip->mutex);
1527 schedule_timeout(HZ);
1528 spin_lock(chip->mutex);
1530 /* FIXME. Use a timer to check this, and return immediately. */
1531 /* Once the state machine's known to be working I'll do that */
1533 timeo = jiffies + (HZ*20);
/* Poll until all interleaved chips report ready (SR.7). */
1536 status = cfi_read(map, adr);
1537 if ((status & status_OK) == status_OK)
1540 /* OK Still waiting */
1541 if (time_after(jiffies, timeo)) {
1542 cfi_write(map, CMD(0x70), adr);
1543 chip->state = FL_STATUS;
/* NOTE(review): message always says "unlock" even when this was a
 * lock operation (thunk == DO_XXLOCK_ONEBLOCK_LOCK). */
1544 printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %llx, status = %llx.\n", (__u64)status, (__u64)cfi_read(map, adr));
1546 spin_unlock(chip->mutex);
1550 /* Latency issues. Drop the lock, wait a while and retry */
1551 spin_unlock(chip->mutex);
1553 spin_lock(chip->mutex);
1556 /* Done and happy. */
1557 chip->state = FL_STATUS;
1558 put_chip(map, chip, adr);
1559 spin_unlock(chip->mutex);
/*
 * MTD lock entry point: set the lock bit on every block in
 * [ofs, ofs+len) by frobbing do_xxlock_oneblock() with the LOCK thunk.
 * With DEBUG_LOCK_BITS defined, the lock status of the range is
 * dumped before and after the operation.
 * Returns the frob result (0 on success, negative errno otherwise).
 */
1563 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1567 #ifdef DEBUG_LOCK_BITS
1568 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1569 __FUNCTION__, ofs, len);
1570 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1574 ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1575 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1577 #ifdef DEBUG_LOCK_BITS
1579 "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
1580 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
/*
 * MTD unlock entry point: clear the lock bits for every block in
 * [ofs, ofs+len) via do_xxlock_oneblock() with the UNLOCK thunk.
 * Mirrors cfi_intelext_lock(), including the optional
 * DEBUG_LOCK_BITS before/after status dumps.
 * Returns the frob result (0 on success, negative errno otherwise).
 */
1587 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1591 #ifdef DEBUG_LOCK_BITS
1592 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1593 __FUNCTION__, ofs, len);
1594 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1598 ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1599 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1601 #ifdef DEBUG_LOCK_BITS
1602 printk(KERN_DEBUG "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
1603 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
/*
 * Power-management suspend hook: try to move every chip into
 * FL_PM_SUSPENDED.  Only chips that are idle may be suspended; if any
 * chip refuses, the second loop rolls back the ones already suspended
 * and a non-zero result is returned so the suspend is aborted.
 *
 * NOTE(review): elided listing -- the other idle-state case labels,
 * the 'ret = -EAGAIN'-style failure assignment, and the 'ret'/'i'
 * declarations are not visible here.
 */
1610 static int cfi_intelext_suspend(struct mtd_info *mtd)
1612 struct map_info *map = mtd->priv;
1613 struct cfi_private *cfi = map->fldrv_priv;
1615 struct flchip *chip;
1618 for (i=0; !ret && i<cfi->numchips; i++) {
1619 chip = &cfi->chips[i];
1621 spin_lock(chip->mutex);
1623 switch (chip->state) {
1627 case FL_JEDEC_QUERY:
/* Only suspend if nothing was stacked below this state. */
1628 if (chip->oldstate == FL_READY) {
1629 chip->oldstate = chip->state;
1630 chip->state = FL_PM_SUSPENDED;
1631 /* No need to wake_up() on this state change -
1632 * as the whole point is that nobody can do anything
1633 * with the chip now anyway.
1639 case FL_PM_SUSPENDED:
1642 spin_unlock(chip->mutex);
1645 /* Unlock the chips again */
/* Failure rollback: walk back over chips already suspended and
 * restore their saved state. */
1648 for (i--; i >=0; i--) {
1649 chip = &cfi->chips[i];
1651 spin_lock(chip->mutex);
1653 if (chip->state == FL_PM_SUSPENDED) {
1654 /* No need to force it into a known state here,
1655 because we're returning failure, and it didn't
1657 chip->state = chip->oldstate;
1660 spin_unlock(chip->mutex);
/*
 * Power-management resume hook: for each chip that was suspended,
 * issue Read Array (0xFF) to force it back to a known state -- the
 * chip may have been power-cycled while suspended -- and mark it
 * FL_READY again.
 *
 * NOTE(review): the 0xFF command is written at map offset 0 rather
 * than chip->start; with multiple chips per map this looks like it
 * only resets the first chip -- verify against the full source.
 */
1667 static void cfi_intelext_resume(struct mtd_info *mtd)
1669 struct map_info *map = mtd->priv;
1670 struct cfi_private *cfi = map->fldrv_priv;
1672 struct flchip *chip;
1674 for (i=0; i<cfi->numchips; i++) {
1676 chip = &cfi->chips[i];
1678 spin_lock(chip->mutex);
1680 /* Go to known state. Chip may have been power cycled */
1681 if (chip->state == FL_PM_SUSPENDED) {
1682 cfi_write(map, CMD(0xFF), 0);
1683 chip->state = FL_READY;
1687 spin_unlock(chip->mutex);
/*
 * Release driver-private allocations when the MTD device is torn
 * down: the cmdset-private extension table and the erase-region
 * array attached to the mtd_info.
 *
 * NOTE(review): elided listing -- a kfree() of 'cfi' itself (original
 * lines 1696-1697) may exist between these lines; confirm against the
 * full source before assuming a leak.
 */
1691 static void cfi_intelext_destroy(struct mtd_info *mtd)
1693 struct map_info *map = mtd->priv;
1694 struct cfi_private *cfi = map->fldrv_priv;
1695 kfree(cfi->cmdset_priv);
1698 kfree(mtd->eraseregions);
/* Names under which this cmdset is published via inter_module_register(). */
1701 static char im_name_1[]="cfi_cmdset_0001";
1702 static char im_name_3[]="cfi_cmdset_0003";
/*
 * Module init: register the probe entry point under both the 0x0001
 * and 0x0003 vendor-command-set names.  Registering im_name_3 against
 * &cfi_cmdset_0001 is deliberate: command set 0003 is serviced by the
 * same code as 0001.
 */
1704 int __init cfi_intelext_init(void)
1706 inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
1707 inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
/* Module exit: drop both inter_module registrations made at init. */
1711 static void __exit cfi_intelext_exit(void)
1713 inter_module_unregister(im_name_1);
1714 inter_module_unregister(im_name_3);
1717 module_init(cfi_intelext_init);
1718 module_exit(cfi_intelext_exit);
1720 MODULE_LICENSE("GPL");
1721 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
1722 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");