# allocator tests
# note: for these to work there are a number of constraints on the device geometry
if = 'LFS_BLOCK_CYCLES == -1'
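# (LFS_BLOCK_CYCLES == -1 disables metadata wear-leveling relocation, which
# would otherwise move blocks around and make the exact space accounting in
# the cases below harder to pin down)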

[[case]] # parallel allocation test
define.FILES = 3
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
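# SIZE approximates how much data fits per file: roughly LFS_BLOCK_SIZE-8
# usable bytes per block (a margin for CTZ skip-list pointers) across all but
# ~6 blocks reserved for the superblock and directory pairs, split over FILES
# (these margins are test assumptions, not exact littlefs accounting)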
code = '''
    const char *names[FILES] = {"bacon", "eggs", "pancakes"};
    lfs_file_t files[FILES];

    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
    lfs_mkdir(&lfs, "breakfast") => 0;
    lfs_unmount(&lfs) => 0;

    lfs_mount(&lfs, &cfg) => 0;
    for (int n = 0; n < FILES; n++) {
        sprintf(path, "breakfast/%s", names[n]);
        lfs_file_open(&lfs, &files[n], path,
                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
    }
    for (int n = 0; n < FILES; n++) {
        size = strlen(names[n]);
        for (lfs_size_t i = 0; i < SIZE; i += size) {
            lfs_file_write(&lfs, &files[n], names[n], size) => size;
        }
    }
    for (int n = 0; n < FILES; n++) {
        lfs_file_close(&lfs, &files[n]) => 0;
    }
    lfs_unmount(&lfs) => 0;

    lfs_mount(&lfs, &cfg) => 0;
    for (int n = 0; n < FILES; n++) {
        sprintf(path, "breakfast/%s", names[n]);
        lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
        size = strlen(names[n]);
        for (lfs_size_t i = 0; i < SIZE; i += size) {
            lfs_file_read(&lfs, &file, buffer, size) => size;
            assert(memcmp(buffer, names[n], size) == 0);
        }
        lfs_file_close(&lfs, &file) => 0;
    }
    lfs_unmount(&lfs) => 0;
'''

[[case]] # serial allocation test
define.FILES = 3
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
code = '''
    const char *names[FILES] = {"bacon", "eggs", "pancakes"};

    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
    lfs_mkdir(&lfs, "breakfast") => 0;
    lfs_unmount(&lfs) => 0;

    for (int n = 0; n < FILES; n++) {
        lfs_mount(&lfs, &cfg) => 0;
        sprintf(path, "breakfast/%s", names[n]);
        lfs_file_open(&lfs, &file, path,
                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
        size = strlen(names[n]);
        memcpy(buffer, names[n], size);
        for (int i = 0; i < SIZE; i += size) {
            lfs_file_write(&lfs, &file, buffer, size) => size;
        }
        lfs_file_close(&lfs, &file) => 0;
        lfs_unmount(&lfs) => 0;
    }

    lfs_mount(&lfs, &cfg) => 0;
    for (int n = 0; n < FILES; n++) {
        sprintf(path, "breakfast/%s", names[n]);
        lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
        size = strlen(names[n]);
        for (int i = 0; i < SIZE; i += size) {
            lfs_file_read(&lfs, &file, buffer, size) => size;
            assert(memcmp(buffer, names[n], size) == 0);
        }
        lfs_file_close(&lfs, &file) => 0;
    }
    lfs_unmount(&lfs) => 0;
'''

[[case]] # parallel allocation reuse test
define.FILES = 3
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
define.CYCLES = [1, 10]
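# a list-valued define is expected to expand into one permutation per value,
# so this case presumably runs once with CYCLES=1 and once with CYCLES=10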
code = '''
    const char *names[FILES] = {"bacon", "eggs", "pancakes"};
    lfs_file_t files[FILES];

    lfs_format(&lfs, &cfg) => 0;

    for (int c = 0; c < CYCLES; c++) {
        lfs_mount(&lfs, &cfg) => 0;
        lfs_mkdir(&lfs, "breakfast") => 0;
        lfs_unmount(&lfs) => 0;

        lfs_mount(&lfs, &cfg) => 0;
        for (int n = 0; n < FILES; n++) {
            sprintf(path, "breakfast/%s", names[n]);
            lfs_file_open(&lfs, &files[n], path,
                    LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
        }
        for (int n = 0; n < FILES; n++) {
            size = strlen(names[n]);
            for (int i = 0; i < SIZE; i += size) {
                lfs_file_write(&lfs, &files[n], names[n], size) => size;
            }
        }
        for (int n = 0; n < FILES; n++) {
            lfs_file_close(&lfs, &files[n]) => 0;
        }
        lfs_unmount(&lfs) => 0;

        lfs_mount(&lfs, &cfg) => 0;
        for (int n = 0; n < FILES; n++) {
            sprintf(path, "breakfast/%s", names[n]);
            lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
            size = strlen(names[n]);
            for (int i = 0; i < SIZE; i += size) {
                lfs_file_read(&lfs, &file, buffer, size) => size;
                assert(memcmp(buffer, names[n], size) == 0);
            }
            lfs_file_close(&lfs, &file) => 0;
        }
        lfs_unmount(&lfs) => 0;

        lfs_mount(&lfs, &cfg) => 0;
        for (int n = 0; n < FILES; n++) {
            sprintf(path, "breakfast/%s", names[n]);
            lfs_remove(&lfs, path) => 0;
        }
        lfs_remove(&lfs, "breakfast") => 0;
        lfs_unmount(&lfs) => 0;
    }
'''

[[case]] # serial allocation reuse test
define.FILES = 3
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
define.CYCLES = [1, 10]
code = '''
    const char *names[FILES] = {"bacon", "eggs", "pancakes"};

    lfs_format(&lfs, &cfg) => 0;

    for (int c = 0; c < CYCLES; c++) {
        lfs_mount(&lfs, &cfg) => 0;
        lfs_mkdir(&lfs, "breakfast") => 0;
        lfs_unmount(&lfs) => 0;

        for (int n = 0; n < FILES; n++) {
            lfs_mount(&lfs, &cfg) => 0;
            sprintf(path, "breakfast/%s", names[n]);
            lfs_file_open(&lfs, &file, path,
                    LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
            size = strlen(names[n]);
            memcpy(buffer, names[n], size);
            for (int i = 0; i < SIZE; i += size) {
                lfs_file_write(&lfs, &file, buffer, size) => size;
            }
            lfs_file_close(&lfs, &file) => 0;
            lfs_unmount(&lfs) => 0;
        }

        lfs_mount(&lfs, &cfg) => 0;
        for (int n = 0; n < FILES; n++) {
            sprintf(path, "breakfast/%s", names[n]);
            lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
            size = strlen(names[n]);
            for (int i = 0; i < SIZE; i += size) {
                lfs_file_read(&lfs, &file, buffer, size) => size;
                assert(memcmp(buffer, names[n], size) == 0);
            }
            lfs_file_close(&lfs, &file) => 0;
        }
        lfs_unmount(&lfs) => 0;

        lfs_mount(&lfs, &cfg) => 0;
        for (int n = 0; n < FILES; n++) {
            sprintf(path, "breakfast/%s", names[n]);
            lfs_remove(&lfs, path) => 0;
        }
        lfs_remove(&lfs, "breakfast") => 0;
        lfs_unmount(&lfs) => 0;
    }
'''

[[case]] # exhaustion test
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
    lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
    size = strlen("exhaustion");
    memcpy(buffer, "exhaustion", size);
    lfs_file_write(&lfs, &file, buffer, size) => size;
    lfs_file_sync(&lfs, &file) => 0;

    size = strlen("blahblahblahblah");
    memcpy(buffer, "blahblahblahblah", size);
    lfs_ssize_t res;
    while (true) {
        res = lfs_file_write(&lfs, &file, buffer, size);
        if (res < 0) {
            break;
        }

        res => size;
    }
    res => LFS_ERR_NOSPC;

    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs) => 0;

    lfs_mount(&lfs, &cfg) => 0;
    lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
    size = strlen("exhaustion");
    lfs_file_size(&lfs, &file) => size;
    lfs_file_read(&lfs, &file, buffer, size) => size;
    memcmp(buffer, "exhaustion", size) => 0;
    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs) => 0;
'''

[[case]] # exhaustion wraparound test
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / 3)'
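# SIZE is roughly a third of the usable space: the "padding" file below is
# written and then removed so that, by the time "exhaustion" fills the disk,
# the allocator has to wrap back around to reclaim the freed blocks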
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;

    lfs_file_open(&lfs, &file, "padding", LFS_O_WRONLY | LFS_O_CREAT);
    size = strlen("buffering");
    memcpy(buffer, "buffering", size);
    for (int i = 0; i < SIZE; i += size) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_close(&lfs, &file) => 0;
    lfs_remove(&lfs, "padding") => 0;

    lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
    size = strlen("exhaustion");
    memcpy(buffer, "exhaustion", size);
    lfs_file_write(&lfs, &file, buffer, size) => size;
    lfs_file_sync(&lfs, &file) => 0;

    size = strlen("blahblahblahblah");
    memcpy(buffer, "blahblahblahblah", size);
    lfs_ssize_t res;
    while (true) {
        res = lfs_file_write(&lfs, &file, buffer, size);
        if (res < 0) {
            break;
        }

        res => size;
    }
    res => LFS_ERR_NOSPC;

    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs) => 0;

    lfs_mount(&lfs, &cfg) => 0;
    lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
    size = strlen("exhaustion");
    lfs_file_size(&lfs, &file) => size;
    lfs_file_read(&lfs, &file, buffer, size) => size;
    memcmp(buffer, "exhaustion", size) => 0;
    lfs_file_close(&lfs, &file) => 0;
    lfs_remove(&lfs, "exhaustion") => 0;
    lfs_unmount(&lfs) => 0;
'''

[[case]] # dir exhaustion test
code = '''
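    // note: a directory in littlefs occupies a two-block metadata pair, so
    // mkdir needs two free blocks; this test measures the max file size and
    // then checks whether a directory still fits next to it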
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;

    // find out max file size
    lfs_mkdir(&lfs, "exhaustiondir") => 0;
    size = strlen("blahblahblahblah");
    memcpy(buffer, "blahblahblahblah", size);
    lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
    int count = 0;
    while (true) {
        err = lfs_file_write(&lfs, &file, buffer, size);
        if (err < 0) {
            break;
        }

        count += 1;
    }
    err => LFS_ERR_NOSPC;
    lfs_file_close(&lfs, &file) => 0;

    lfs_remove(&lfs, "exhaustion") => 0;
    lfs_remove(&lfs, "exhaustiondir") => 0;

    // see if dir fits with max file size
    lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
    for (int i = 0; i < count; i++) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_close(&lfs, &file) => 0;

    lfs_mkdir(&lfs, "exhaustiondir") => 0;
    lfs_remove(&lfs, "exhaustiondir") => 0;
    lfs_remove(&lfs, "exhaustion") => 0;

    // see if dir fits with > max file size
    lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
    for (int i = 0; i < count+1; i++) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_close(&lfs, &file) => 0;

    lfs_mkdir(&lfs, "exhaustiondir") => LFS_ERR_NOSPC;

    lfs_remove(&lfs, "exhaustion") => 0;
    lfs_unmount(&lfs) => 0;
'''

[[case]] # what if we have a bad block during an allocation scan?
in = "lfs.c"
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_BADBLOCK_BEHAVIOR = 'LFS_TESTBD_BADBLOCK_READERROR'
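# LFS_ERASE_CYCLES caps the wear a block tolerates and READERROR selects how a
# worn-out block misbehaves; lfs_testbd_setwear below pushes one block's wear
# to that cap so reads of it fail, simulating a bad block mid-scan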
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
    // first fill to exhaustion to find available space
    lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
    strcpy((char*)buffer, "waka");
    size = strlen("waka");
    lfs_size_t filesize = 0;
    while (true) {
        lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
        assert(res == (lfs_ssize_t)size || res == LFS_ERR_NOSPC);
        if (res == LFS_ERR_NOSPC) {
            break;
        }
        filesize += size;
    }
    lfs_file_close(&lfs, &file) => 0;
    // now fill all but a couple of blocks of the filesystem with data
    filesize -= 3*LFS_BLOCK_SIZE;
    lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
    strcpy((char*)buffer, "waka");
    size = strlen("waka");
    for (lfs_size_t i = 0; i < filesize/size; i++) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_close(&lfs, &file) => 0;
    // also save head of file so we can error during lookahead scan
    lfs_block_t fileblock = file.ctz.head;
    lfs_unmount(&lfs) => 0;

    // remount to force an alloc scan
    lfs_mount(&lfs, &cfg) => 0;

    // but mark the head of our file as a "bad block", this should force our
    // scan to bail early
    lfs_testbd_setwear(&cfg, fileblock, 0xffffffff) => 0;
    lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
    strcpy((char*)buffer, "chomp");
    size = strlen("chomp");
    while (true) {
        lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
        assert(res == (lfs_ssize_t)size || res == LFS_ERR_CORRUPT);
        if (res == LFS_ERR_CORRUPT) {
            break;
        }
    }
    lfs_file_close(&lfs, &file) => 0;

    // now reverse the "bad block" and try to write the file again until we
    // run out of space
    lfs_testbd_setwear(&cfg, fileblock, 0) => 0;
    lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
    strcpy((char*)buffer, "chomp");
    size = strlen("chomp");
    while (true) {
        lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
        assert(res == (lfs_ssize_t)size || res == LFS_ERR_NOSPC);
        if (res == LFS_ERR_NOSPC) {
            break;
        }
    }
    lfs_file_close(&lfs, &file) => 0;

    lfs_unmount(&lfs) => 0;

    // check that the disk isn't hurt
    lfs_mount(&lfs, &cfg) => 0;
    lfs_file_open(&lfs, &file, "pacman", LFS_O_RDONLY) => 0;
    strcpy((char*)buffer, "waka");
    size = strlen("waka");
    for (lfs_size_t i = 0; i < filesize/size; i++) {
        uint8_t rbuffer[4];
        lfs_file_read(&lfs, &file, rbuffer, size) => size;
        assert(memcmp(rbuffer, buffer, size) == 0);
    }
    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs) => 0;
'''

# Below, I don't like these tests. They're fragile and depend _heavily_
# on the geometry of the block device. But they are valuable. Eventually they
# should be removed and replaced with generalized tests.

[[case]] # chained dir exhaustion test
define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
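# the defines above pin the geometry and this guard presumably skips the case
# when the test runner permutes the geometry to something else, since the
# block counting below only works out for a 512x1024 device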
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;

    // find out max file size
    lfs_mkdir(&lfs, "exhaustiondir") => 0;
    for (int i = 0; i < 10; i++) {
        sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
        lfs_mkdir(&lfs, path) => 0;
    }
    size = strlen("blahblahblahblah");
    memcpy(buffer, "blahblahblahblah", size);
    lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
    int count = 0;
    while (true) {
        err = lfs_file_write(&lfs, &file, buffer, size);
        if (err < 0) {
            break;
        }

        count += 1;
    }
    err => LFS_ERR_NOSPC;
    lfs_file_close(&lfs, &file) => 0;

    lfs_remove(&lfs, "exhaustion") => 0;
    lfs_remove(&lfs, "exhaustiondir") => 0;
    for (int i = 0; i < 10; i++) {
        sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
        lfs_remove(&lfs, path) => 0;
    }

    // see that chained dir fails
    lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
    for (int i = 0; i < count+1; i++) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_sync(&lfs, &file) => 0;

    for (int i = 0; i < 10; i++) {
        sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
        lfs_mkdir(&lfs, path) => 0;
    }

    lfs_mkdir(&lfs, "exhaustiondir") => LFS_ERR_NOSPC;

    // shorten file to try a second chained dir
    while (true) {
        err = lfs_mkdir(&lfs, "exhaustiondir");
        if (err != LFS_ERR_NOSPC) {
            break;
        }

        lfs_ssize_t filesize = lfs_file_size(&lfs, &file);
        filesize > 0 => true;

        lfs_file_truncate(&lfs, &file, filesize - size) => 0;
        lfs_file_sync(&lfs, &file) => 0;
    }
    err => 0;

    lfs_mkdir(&lfs, "exhaustiondir2") => LFS_ERR_NOSPC;

    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs) => 0;
'''

[[case]] # split dir test
define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;

    // create a one-block hole for half a directory
    lfs_file_open(&lfs, &file, "bump", LFS_O_WRONLY | LFS_O_CREAT) => 0;
    for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
        memcpy(&buffer[i], "hi", 2);
    }
    lfs_file_write(&lfs, &file, buffer, cfg.block_size) => cfg.block_size;
    lfs_file_close(&lfs, &file) => 0;

    lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
    size = strlen("blahblahblahblah");
    memcpy(buffer, "blahblahblahblah", size);
    for (lfs_size_t i = 0;
            i < (cfg.block_count-4)*(cfg.block_size-8);
            i += size) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_close(&lfs, &file) => 0;

    // remount to force reset of lookahead
    lfs_unmount(&lfs) => 0;
    lfs_mount(&lfs, &cfg) => 0;

    // open hole
    lfs_remove(&lfs, "bump") => 0;

    lfs_mkdir(&lfs, "splitdir") => 0;
    lfs_file_open(&lfs, &file, "splitdir/bump",
            LFS_O_WRONLY | LFS_O_CREAT) => 0;
    for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
        memcpy(&buffer[i], "hi", 2);
    }
    lfs_file_write(&lfs, &file, buffer, 2*cfg.block_size) => LFS_ERR_NOSPC;
    lfs_file_close(&lfs, &file) => 0;

    lfs_unmount(&lfs) => 0;
'''

[[case]] # outdated lookahead test
define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
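# littlefs's allocator works off a lookahead bitmap of free blocks built by
# scanning the filesystem; this case checks that a bitmap populated before the
# files were rewritten is not trusted after the on-disk state has changed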
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;

    // fill completely with two files
    lfs_file_open(&lfs, &file, "exhaustion1",
            LFS_O_WRONLY | LFS_O_CREAT) => 0;
    size = strlen("blahblahblahblah");
    memcpy(buffer, "blahblahblahblah", size);
    for (lfs_size_t i = 0;
            i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
            i += size) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_close(&lfs, &file) => 0;

    lfs_file_open(&lfs, &file, "exhaustion2",
            LFS_O_WRONLY | LFS_O_CREAT) => 0;
    size = strlen("blahblahblahblah");
    memcpy(buffer, "blahblahblahblah", size);
    for (lfs_size_t i = 0;
            i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
            i += size) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_close(&lfs, &file) => 0;

    // remount to force reset of lookahead
    lfs_unmount(&lfs) => 0;
    lfs_mount(&lfs, &cfg) => 0;

    // rewrite one file
    lfs_file_open(&lfs, &file, "exhaustion1",
            LFS_O_WRONLY | LFS_O_TRUNC) => 0;
    lfs_file_sync(&lfs, &file) => 0;
    size = strlen("blahblahblahblah");
    memcpy(buffer, "blahblahblahblah", size);
    for (lfs_size_t i = 0;
            i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
            i += size) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_close(&lfs, &file) => 0;

    // rewrite second file, this requires that the lookahead does not
    // reuse its old population
    lfs_file_open(&lfs, &file, "exhaustion2",
            LFS_O_WRONLY | LFS_O_TRUNC) => 0;
    lfs_file_sync(&lfs, &file) => 0;
    size = strlen("blahblahblahblah");
    memcpy(buffer, "blahblahblahblah", size);
    for (lfs_size_t i = 0;
            i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
            i += size) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_close(&lfs, &file) => 0;

    lfs_unmount(&lfs) => 0;
'''

[[case]] # outdated lookahead and split dir test
define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;

    // fill completely with two files
    lfs_file_open(&lfs, &file, "exhaustion1",
            LFS_O_WRONLY | LFS_O_CREAT) => 0;
    size = strlen("blahblahblahblah");
    memcpy(buffer, "blahblahblahblah", size);
    for (lfs_size_t i = 0;
            i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
            i += size) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_close(&lfs, &file) => 0;

    lfs_file_open(&lfs, &file, "exhaustion2",
            LFS_O_WRONLY | LFS_O_CREAT) => 0;
    size = strlen("blahblahblahblah");
    memcpy(buffer, "blahblahblahblah", size);
    for (lfs_size_t i = 0;
            i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
            i += size) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_close(&lfs, &file) => 0;

    // remount to force reset of lookahead
    lfs_unmount(&lfs) => 0;
    lfs_mount(&lfs, &cfg) => 0;

    // rewrite one file with a hole of one block
    lfs_file_open(&lfs, &file, "exhaustion1",
            LFS_O_WRONLY | LFS_O_TRUNC) => 0;
    lfs_file_sync(&lfs, &file) => 0;
    size = strlen("blahblahblahblah");
    memcpy(buffer, "blahblahblahblah", size);
    for (lfs_size_t i = 0;
            i < ((cfg.block_count-2)/2 - 1)*(cfg.block_size-8);
            i += size) {
        lfs_file_write(&lfs, &file, buffer, size) => size;
    }
    lfs_file_close(&lfs, &file) => 0;

    // try to allocate a directory, should fail!
    lfs_mkdir(&lfs, "split") => LFS_ERR_NOSPC;

    // file should not fail
    lfs_file_open(&lfs, &file, "notasplit",
            LFS_O_WRONLY | LFS_O_CREAT) => 0;
    lfs_file_write(&lfs, &file, "hi", 2) => 2;
    lfs_file_close(&lfs, &file) => 0;

    lfs_unmount(&lfs) => 0;
'''