Merge master.kernel.org:/pub/scm/linux/kernel/git/tglx/mtd-2.6

Author: Linus Torvalds
Date:   2005-07-16 10:24:32 -07:00
Commit: 1fa4aad496
3 changed files with 116 additions and 108 deletions

--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c

@@ -59,7 +59,7 @@
  * The AG-AND chips have nice features for speed improvement,
  * which are not supported yet. Read / program 4 pages in one go.
  *
- * $Id: nand_base.c,v 1.146 2005/06/17 15:02:06 gleixner Exp $
+ * $Id: nand_base.c,v 1.147 2005/07/15 07:18:06 gleixner Exp $
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1410,16 +1410,6 @@ static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf)
 		this->read_buf(mtd, &buf[i], thislen);
 		i += thislen;
 
-		/* Apply delay or wait for ready/busy pin
-		 * Do this before the AUTOINCR check, so no problems
-		 * arise if a chip which does auto increment
-		 * is marked as NOAUTOINCR by the board driver.
-		 */
-		if (!this->dev_ready)
-			udelay (this->chip_delay);
-		else
-			nand_wait_ready(mtd);
-
 		/* Read more ? */
 		if (i < len) {
 			page++;
@@ -1432,6 +1422,16 @@ static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf)
 				this->select_chip(mtd, chipnr);
 			}
 
+			/* Apply delay or wait for ready/busy pin
+			 * Do this before the AUTOINCR check, so no problems
+			 * arise if a chip which does auto increment
+			 * is marked as NOAUTOINCR by the board driver.
+			 */
+			if (!this->dev_ready)
+				udelay (this->chip_delay);
+			else
+				nand_wait_ready(mtd);
+
 			/* Check, if the chip supports auto page increment
 			 * or if we have hit a block boundary.
 			 */

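Note: the two nand_base.c hunks above relocate the ready/busy wait in nand_read_oob so that it also runs after a chip-boundary reselect, instead of immediately after read_buf. Below is a minimal standalone sketch of that wait idiom, for readers unfamiliar with it; the struct, the helper names, and the explicit poll loop are illustrative assumptions of mine, not the driver's actual code (the real driver uses nand_chip, udelay() and nand_wait_ready()).

	#include <stdint.h>

	/* Hypothetical stand-ins for the relevant nand_chip fields. */
	struct demo_chip {
		int (*dev_ready)(struct demo_chip *chip); /* ready/busy pin callback, may be NULL */
		unsigned int chip_delay;                  /* worst-case command delay in microseconds */
	};

	/* Sketch: wait until the chip can accept the next command. Boards
	 * without a ready/busy line must burn the worst-case delay; boards
	 * that wired one up can poll the pin instead. */
	static void demo_wait_ready(struct demo_chip *chip,
				    void (*delay_us)(unsigned int))
	{
		if (!chip->dev_ready)
			delay_us(chip->chip_delay);	/* no R/B pin: timed wait */
		else
			while (!chip->dev_ready(chip))
				;			/* poll the R/B pin */
	}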
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c

@@ -6,7 +6,7 @@
  *
  * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
  *
- * $Id: nand_bbt.c,v 1.33 2005/06/14 15:47:56 gleixner Exp $
+ * $Id: nand_bbt.c,v 1.35 2005/07/15 13:53:47 gleixner Exp $
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -109,24 +109,21 @@ static int check_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
 /**
  * check_short_pattern - [GENERIC] check if a pattern is in the buffer
  * @buf:	the buffer to search
- * @len:	the length of buffer to search
- * @paglen:	the pagelength
  * @td:		search pattern descriptor
  *
  * Check for a pattern at the given place. Used to search bad block
  * tables and good / bad block identifiers. Same as check_pattern, but
- * no optional empty check and the pattern is expected to start
- * at offset 0.
+ * no optional empty check
  *
  */
-static int check_short_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+static int check_short_pattern (uint8_t *buf, struct nand_bbt_descr *td)
 {
 	int i;
 	uint8_t	*p = buf;
 
 	/* Compare the pattern */
 	for (i = 0; i < td->len; i++) {
-		if (p[i] != td->pattern[i])
+		if (p[td->offs + i] != td->pattern[i])
 			return -1;
 	}
 	return 0;
@@ -337,13 +334,14 @@ static int create_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip)
 			if (!(bd->options & NAND_BBT_SCANEMPTY)) {
 				size_t retlen;
 
-				/* No need to read pages fully, just read required OOB bytes */
-				ret = mtd->read_oob(mtd, from + j * mtd->oobblock + bd->offs,
-							readlen, &retlen, &buf[0]);
+				/* Read the full oob until read_oob is fixed to
+				 * handle single byte reads for 16 bit buswidth */
+				ret = mtd->read_oob(mtd, from + j * mtd->oobblock,
+							mtd->oobsize, &retlen, buf);
 				if (ret)
 					return ret;
 
-				if (check_short_pattern (&buf[j * scanlen], scanlen, mtd->oobblock, bd)) {
+				if (check_short_pattern (buf, bd)) {
 					this->bbt[i >> 3] |= 0x03 << (i & 0x6);
 					printk (KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
 						i >> 1, (unsigned int) from);

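Note: the create_bbt hunk above marks a bad block with this->bbt[i >> 3] |= 0x03 << (i & 0x6), i.e. the in-memory bad block table stores two bits per eraseblock, packed four blocks to a byte, with i advancing by two bits per block (hence the i >> 1 block number in the printk). A small self-contained sketch of that encoding; the helper names are mine, not nand_bbt.c's.

	#include <stdint.h>
	#include <stdio.h>

	/* Two table bits per eraseblock, four blocks per byte: block n uses
	 * bits (2n & 6) of byte n/4. 0x00 = good, 0x03 = bad, as in the hunk. */
	static void bbt_mark_bad(uint8_t *bbt, unsigned int block)
	{
		unsigned int i = block << 1;		/* bit index = 2 * block */
		bbt[i >> 3] |= 0x03 << (i & 0x6);
	}

	static int bbt_is_bad(const uint8_t *bbt, unsigned int block)
	{
		unsigned int i = block << 1;
		return (bbt[i >> 3] >> (i & 0x6)) & 0x03;
	}

	int main(void)
	{
		uint8_t bbt[2] = { 0, 0 };		/* covers 8 blocks */
		bbt_mark_bad(bbt, 5);
		printf("block 5: %d, block 4: %d\n",
		       bbt_is_bad(bbt, 5), bbt_is_bad(bbt, 4));	/* prints 3, 0 */
		return 0;
	}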
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c

@@ -7,7 +7,7 @@
  *
  * For licensing information, see the file 'LICENCE' in this directory.
  *
- * $Id: erase.c,v 1.76 2005/05/03 15:11:40 dedekind Exp $
+ * $Id: erase.c,v 1.80 2005/07/14 19:46:24 joern Exp $
  *
  */
@@ -300,100 +300,86 @@ static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
 	jeb->last_node = NULL;
 }
 
+static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
+{
+	void *ebuf;
+	uint32_t ofs;
+	size_t retlen;
+	int ret = -EIO;
+
+	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!ebuf) {
+		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset);
+		return -EAGAIN;
+	}
+
+	D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
+
+	for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
+		uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
+		int i;
+
+		*bad_offset = ofs;
+
+		ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf);
+		if (ret) {
+			printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
+			goto fail;
+		}
+		if (retlen != readlen) {
+			printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
+			goto fail;
+		}
+		for (i=0; i<readlen; i += sizeof(unsigned long)) {
+			/* It's OK. We know it's properly aligned */
+			unsigned long *datum = ebuf + i;
+			if (*datum + 1) {
+				*bad_offset += i;
+				printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset);
+				goto fail;
+			}
+		}
+		ofs += readlen;
+		cond_resched();
+	}
+	ret = 0;
+fail:
+	kfree(ebuf);
+	return ret;
+}
+
 static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
 {
 	struct jffs2_raw_node_ref *marker_ref = NULL;
-	unsigned char *ebuf;
 	size_t retlen;
 	int ret;
 	uint32_t bad_offset;
 
-	if ((!jffs2_cleanmarker_oob(c)) && (c->cleanmarker_size > 0)) {
-		marker_ref = jffs2_alloc_raw_node_ref();
-		if (!marker_ref) {
-			printk(KERN_WARNING "Failed to allocate raw node ref for clean marker\n");
-			/* Stick it back on the list from whence it came and come back later */
-			jffs2_erase_pending_trigger(c);
-			spin_lock(&c->erase_completion_lock);
-			list_add(&jeb->list, &c->erase_complete_list);
-			spin_unlock(&c->erase_completion_lock);
-			return;
-		}
-	}
+	switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
+	case -EAGAIN:	goto refile;
+	case -EIO:	goto filebad;
+	}
 
-	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!ebuf) {
-		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Assuming it worked\n", jeb->offset);
-	} else {
-		uint32_t ofs = jeb->offset;
-		D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
-		while(ofs < jeb->offset + c->sector_size) {
-			uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
-			int i;
-
-			bad_offset = ofs;
-
-			ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
-			if (ret) {
-				printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
-				goto bad;
-			}
-			if (retlen != readlen) {
-				printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
-				goto bad;
-			}
-			for (i=0; i<readlen; i += sizeof(unsigned long)) {
-				/* It's OK. We know it's properly aligned */
-				unsigned long datum = *(unsigned long *)(&ebuf[i]);
-				if (datum + 1) {
-					bad_offset += i;
-					printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, bad_offset);
-				bad:
-					if ((!jffs2_cleanmarker_oob(c)) && (c->cleanmarker_size > 0))
-						jffs2_free_raw_node_ref(marker_ref);
-					kfree(ebuf);
-				bad2:
-					spin_lock(&c->erase_completion_lock);
-					/* Stick it on a list (any list) so
-					   erase_failed can take it right off
-					   again. Silly, but shouldn't happen
-					   often. */
-					list_add(&jeb->list, &c->erasing_list);
-					spin_unlock(&c->erase_completion_lock);
-
-					jffs2_erase_failed(c, jeb, bad_offset);
-					return;
-				}
-			}
-			ofs += readlen;
-			cond_resched();
-		}
-		kfree(ebuf);
-	}
-
-	bad_offset = jeb->offset;
-
 	/* Write the erase complete marker */
 	D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
-	if (jffs2_cleanmarker_oob(c)) {
+	bad_offset = jeb->offset;
 
-		if (jffs2_write_nand_cleanmarker(c, jeb))
-			goto bad2;
+	/* Cleanmarker in oob area or no cleanmarker at all ? */
+	if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) {
+		if (jffs2_cleanmarker_oob(c)) {
+			if (jffs2_write_nand_cleanmarker(c, jeb))
+				goto filebad;
+		}
 
 		jeb->first_node = jeb->last_node = NULL;
 		jeb->free_size = c->sector_size;
 		jeb->used_size = 0;
 		jeb->dirty_size = 0;
 		jeb->wasted_size = 0;
-	} else if (c->cleanmarker_size == 0) {
-		jeb->first_node = jeb->last_node = NULL;
-		jeb->free_size = c->sector_size;
-		jeb->used_size = 0;
-		jeb->dirty_size = 0;
-		jeb->wasted_size = 0;
 	} else {
 		struct kvec vecs[1];
 		struct jffs2_unknown_node marker = {
 			.magic =	cpu_to_je16(JFFS2_MAGIC_BITMASK),
@@ -401,21 +387,28 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
 			.totlen =	cpu_to_je32(c->cleanmarker_size)
 		};
 
+		marker_ref = jffs2_alloc_raw_node_ref();
+		if (!marker_ref) {
+			printk(KERN_WARNING "Failed to allocate raw node ref for clean marker. Refiling\n");
+			goto refile;
+		}
+
 		marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
 
 		vecs[0].iov_base = (unsigned char *) &marker;
 		vecs[0].iov_len = sizeof(marker);
 		ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);
-		if (ret) {
-			printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
-			       jeb->offset, ret);
-			goto bad2;
-		}
-		if (retlen != sizeof(marker)) {
-			printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
-			       jeb->offset, sizeof(marker), retlen);
-			goto bad2;
-		}
+
+		if (ret || retlen != sizeof(marker)) {
+			if (ret)
+				printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
+				       jeb->offset, ret);
+			else
+				printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
+				       jeb->offset, sizeof(marker), retlen);
+
+			jffs2_free_raw_node_ref(marker_ref);
+			goto filebad;
+		}
 
 		marker_ref->next_in_ino = NULL;
@@ -444,5 +437,22 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
 	c->nr_free_blocks++;
 	spin_unlock(&c->erase_completion_lock);
 	wake_up(&c->erase_wait);
-}
+	return;
+
+filebad:
+	spin_lock(&c->erase_completion_lock);
+	/* Stick it on a list (any list) so erase_failed can take it
+	   right off again. Silly, but shouldn't happen often. */
+	list_add(&jeb->list, &c->erasing_list);
+	spin_unlock(&c->erase_completion_lock);
+	jffs2_erase_failed(c, jeb, bad_offset);
+	return;
+
+refile:
+	/* Stick it back on the list from whence it came and come back later */
+	jffs2_erase_pending_trigger(c);
+	spin_lock(&c->erase_completion_lock);
+	list_add(&jeb->list, &c->erase_complete_list);
+	spin_unlock(&c->erase_completion_lock);
+	return;
+}
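
Note: the new jffs2_block_check_erase above returns -EAGAIN for a transient allocation failure (refile the block and retry later) and -EIO for a verify failure (send the block down the bad path). Its inner test, if (*datum + 1), relies on erased flash reading back as all 0xFF: a word is clean exactly when it equals ~0UL, which is precisely when adding 1 wraps to zero. A tiny standalone illustration of that check; the function name is mine, not erase.c's.

	#include <stdio.h>

	/* Erased NOR/NAND reads back as all-ones, so a word is clean iff it
	 * is ~0UL; unsigned wraparound makes (datum + 1) == 0 in exactly
	 * that case, so any nonzero result means leftover data. */
	static int word_is_erased(unsigned long datum)
	{
		return (datum + 1) == 0;
	}

	int main(void)
	{
		printf("%d\n", word_is_erased(~0UL));		/* 1: fully erased */
		printf("%d\n", word_is_erased(0x12345678UL));	/* 0: contains data */
		return 0;
	}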