2017-02-27 00:05:27 +00:00
|
|
|
/*
|
|
|
|
* The little filesystem
|
|
|
|
*
|
2017-10-13 01:27:33 +00:00
|
|
|
* Copyright (c) 2017 ARM Limited
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
2017-02-27 00:05:27 +00:00
|
|
|
*/
|
|
|
|
#include "lfs.h"
|
2017-03-25 21:20:31 +00:00
|
|
|
#include "lfs_util.h"
|
2017-02-27 00:05:27 +00:00
|
|
|
|
|
|
|
|
2017-04-30 16:19:37 +00:00
|
|
|
/// Caching block device operations ///
|
|
|
|
// Read size bytes from (block, off) into buffer, consulting the caches.
//
// Data is served with the following priority:
//   1. the program cache (pcache), if given and the range overlaps it
//   2. the read cache (rcache)
//   3. the device directly, when the request is aligned to read_size
//   4. otherwise the read cache is (re)filled from the device and the
//      loop retries, so case 2 is guaranteed to hit next iteration
//
// pcache may be NULL to skip checking pending programmed data.
// Returns 0 on success, or a negative error code from cfg->read.
static int lfs_cache_read(lfs_t *lfs, lfs_cache_t *rcache,
        const lfs_cache_t *pcache, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    // 0xffffffff is the "no block" sentinel, never a readable block
    LFS_ASSERT(block != 0xffffffff);

    while (size > 0) {
        if (pcache && block == pcache->block && off >= pcache->off &&
                off < pcache->off + lfs->cfg->prog_size) {
            // is already in pcache?
            lfs_size_t diff = lfs_min(size,
                    lfs->cfg->prog_size - (off-pcache->off));
            memcpy(data, &pcache->buffer[off-pcache->off], diff);

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        if (block == rcache->block && off >= rcache->off &&
                off < rcache->off + lfs->cfg->read_size) {
            // is already in rcache?
            lfs_size_t diff = lfs_min(size,
                    lfs->cfg->read_size - (off-rcache->off));
            memcpy(data, &rcache->buffer[off-rcache->off], diff);

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        if (off % lfs->cfg->read_size == 0 && size >= lfs->cfg->read_size) {
            // bypass cache? read whole aligned chunks straight into data
            lfs_size_t diff = size - (size % lfs->cfg->read_size);
            int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
            if (err) {
                return err;
            }

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        // load to cache, first condition can no longer fail
        LFS_ASSERT(block < lfs->cfg->block_count);
        rcache->block = block;
        rcache->off = off - (off % lfs->cfg->read_size);
        int err = lfs->cfg->read(lfs->cfg, rcache->block,
                rcache->off, rcache->buffer, lfs->cfg->read_size);
        if (err) {
            return err;
        }
    }

    return 0;
}
|
2017-02-27 00:05:27 +00:00
|
|
|
|
2017-06-24 05:43:05 +00:00
|
|
|
static int lfs_cache_cmp(lfs_t *lfs, lfs_cache_t *rcache,
|
|
|
|
const lfs_cache_t *pcache, lfs_block_t block,
|
|
|
|
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
|
|
|
const uint8_t *data = buffer;
|
|
|
|
|
|
|
|
for (lfs_off_t i = 0; i < size; i++) {
|
|
|
|
uint8_t c;
|
|
|
|
int err = lfs_cache_read(lfs, rcache, pcache,
|
|
|
|
block, off+i, &c, 1);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (c != data[i]) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int lfs_cache_crc(lfs_t *lfs, lfs_cache_t *rcache,
|
|
|
|
const lfs_cache_t *pcache, lfs_block_t block,
|
|
|
|
lfs_off_t off, lfs_size_t size, uint32_t *crc) {
|
|
|
|
for (lfs_off_t i = 0; i < size; i++) {
|
|
|
|
uint8_t c;
|
|
|
|
int err = lfs_cache_read(lfs, rcache, pcache,
|
|
|
|
block, off+i, &c, 1);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
lfs_crc(crc, &c, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Flush the program cache out to disk if it holds pending data.
// If rcache is given, the programmed data is read back and verified,
// returning LFS_ERR_CORRUPT on a mismatch. On success the pcache is
// marked empty (block = 0xffffffff).
static int lfs_cache_flush(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache) {
    // nothing pending? nothing to do
    if (pcache->block == 0xffffffff) {
        return 0;
    }

    LFS_ASSERT(pcache->block < lfs->cfg->block_count);
    int err = lfs->cfg->prog(lfs->cfg, pcache->block,
            pcache->off, pcache->buffer, lfs->cfg->prog_size);
    if (err) {
        return err;
    }

    // optionally verify what was just written
    if (rcache) {
        int res = lfs_cache_cmp(lfs, rcache, NULL, pcache->block,
                pcache->off, pcache->buffer, lfs->cfg->prog_size);
        if (res < 0) {
            return res;
        }
        if (!res) {
            return LFS_ERR_CORRUPT;
        }
    }

    // invalidate the pcache now that its contents are on disk
    pcache->block = 0xffffffff;
    return 0;
}
|
|
|
|
|
2017-06-24 05:43:05 +00:00
|
|
|
// Program size bytes from buffer to (block, off) through the pcache.
//
// Writes are accumulated in pcache and flushed when a full prog_size
// unit fills up. Aligned writes of at least prog_size bypass the cache
// entirely. rcache, if given, is used to read back and verify bypassed
// writes (returning LFS_ERR_CORRUPT on mismatch) and is also handed to
// lfs_cache_flush for the same purpose.
// Returns 0 on success or a negative error code.
static int lfs_cache_prog(lfs_t *lfs, lfs_cache_t *pcache,
        lfs_cache_t *rcache, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    // 0xffffffff is the "no block" sentinel, never programmable
    LFS_ASSERT(block != 0xffffffff);
    // writes must not run off the end of the block
    LFS_ASSERT(off + size <= lfs->cfg->block_size);

    while (size > 0) {
        if (block == pcache->block && off >= pcache->off &&
                off < pcache->off + lfs->cfg->prog_size) {
            // is already in pcache?
            lfs_size_t diff = lfs_min(size,
                    lfs->cfg->prog_size - (off-pcache->off));
            memcpy(&pcache->buffer[off-pcache->off], data, diff);

            data += diff;
            off += diff;
            size -= diff;

            if (off % lfs->cfg->prog_size == 0) {
                // eagerly flush out pcache if we fill up
                int err = lfs_cache_flush(lfs, pcache, rcache);
                if (err) {
                    return err;
                }
            }

            continue;
        }

        // pcache must have been flushed, either by programming and
        // entire block or manually flushing the pcache
        LFS_ASSERT(pcache->block == 0xffffffff);

        if (off % lfs->cfg->prog_size == 0 &&
                size >= lfs->cfg->prog_size) {
            // bypass pcache? program whole aligned chunks directly
            LFS_ASSERT(block < lfs->cfg->block_count);
            lfs_size_t diff = size - (size % lfs->cfg->prog_size);
            int err = lfs->cfg->prog(lfs->cfg, block, off, data, diff);
            if (err) {
                return err;
            }

            if (rcache) {
                // read back and verify the bypassed write
                int res = lfs_cache_cmp(lfs, rcache, NULL,
                        block, off, data, diff);
                if (res < 0) {
                    return res;
                }

                if (!res) {
                    return LFS_ERR_CORRUPT;
                }
            }

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        // prepare pcache, first condition can no longer fail
        pcache->block = block;
        pcache->off = off - (off % lfs->cfg->prog_size);
    }

    return 0;
}
|
|
|
|
|
2017-04-30 16:19:37 +00:00
|
|
|
|
|
|
|
/// General lfs block device operations ///
|
2017-06-24 05:43:05 +00:00
|
|
|
static int lfs_bd_read(lfs_t *lfs, lfs_block_t block,
|
2017-04-30 16:19:37 +00:00
|
|
|
lfs_off_t off, void *buffer, lfs_size_t size) {
|
2018-05-19 23:25:47 +00:00
|
|
|
return lfs_cache_read(lfs, &lfs->rcache, &lfs->pcache,
|
2017-04-30 16:19:37 +00:00
|
|
|
block, off, buffer, size);
|
|
|
|
}
|
|
|
|
|
2017-06-24 05:43:05 +00:00
|
|
|
static int lfs_bd_prog(lfs_t *lfs, lfs_block_t block,
|
2017-04-30 16:19:37 +00:00
|
|
|
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
2017-06-24 05:43:05 +00:00
|
|
|
return lfs_cache_prog(lfs, &lfs->pcache, NULL,
|
2017-04-30 16:19:37 +00:00
|
|
|
block, off, buffer, size);
|
|
|
|
}
|
|
|
|
|
2017-06-24 05:43:05 +00:00
|
|
|
static int lfs_bd_cmp(lfs_t *lfs, lfs_block_t block,
|
|
|
|
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
|
|
|
return lfs_cache_cmp(lfs, &lfs->rcache, NULL, block, off, buffer, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int lfs_bd_crc(lfs_t *lfs, lfs_block_t block,
|
|
|
|
lfs_off_t off, lfs_size_t size, uint32_t *crc) {
|
|
|
|
return lfs_cache_crc(lfs, &lfs->rcache, NULL, block, off, size, crc);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Erase a block directly on the underlying device.
static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
    // erasing past the end of the device is a logic error
    LFS_ASSERT(block < lfs->cfg->block_count);
    int err = lfs->cfg->erase(lfs->cfg, block);
    return err;
}
|
2017-02-27 00:05:27 +00:00
|
|
|
|
2017-06-24 05:43:05 +00:00
|
|
|
// Flush pending writes and sync the underlying device.
// The read cache is dropped first so stale data can't be served later.
static int lfs_bd_sync(lfs_t *lfs) {
    lfs->rcache.block = 0xffffffff;

    int err = lfs_cache_flush(lfs, &lfs->pcache, NULL);
    if (err) {
        return err;
    }

    return lfs->cfg->sync(lfs->cfg);
}
|
|
|
|
|
2017-03-25 21:20:31 +00:00
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
/// Internal operations predeclared here ///
|
2018-05-26 18:50:06 +00:00
|
|
|
int lfs_fs_traverse(lfs_t *lfs,
|
|
|
|
int (*cb)(lfs_t*, void*, lfs_block_t), void *data);
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_pred(lfs_t *lfs, const lfs_block_t dir[2], lfs_mdir_t *pdir);
|
2017-05-14 17:01:45 +00:00
|
|
|
static int lfs_parent(lfs_t *lfs, const lfs_block_t dir[2],
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t *parent, lfs_mattr_t *attr);
|
2017-05-14 17:01:45 +00:00
|
|
|
static int lfs_relocate(lfs_t *lfs,
|
|
|
|
const lfs_block_t oldpair[2], const lfs_block_t newpair[2]);
|
2018-07-02 03:29:42 +00:00
|
|
|
int lfs_scan(lfs_t *lfs);
|
|
|
|
int lfs_fixmove(lfs_t *lfs);
|
2017-05-14 17:01:45 +00:00
|
|
|
int lfs_deorphan(lfs_t *lfs);
|
|
|
|
|
|
|
|
|
2017-03-25 21:20:31 +00:00
|
|
|
/// Block allocator ///
|
2018-05-21 05:56:20 +00:00
|
|
|
// Traversal callback: mark block as in-use in the lookahead bitmap
// if it falls inside the current lookahead window.
static int lfs_alloc_lookahead(lfs_t *lfs, void *p, lfs_block_t block) {
    (void)p; // unused callback context

    // offset from the window start, wrapped around the device
    lfs_block_t rel = ((block - lfs->free.off)
            + lfs->cfg->block_count) % lfs->cfg->block_count;

    if (rel < lfs->free.size) {
        lfs->free.buffer[rel / 32] |= 1U << (rel % 32);
    }

    return 0;
}
|
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
// Allocate a free block, writing its number to *block.
//
// Scans the current lookahead bitmap for a zero bit (free block). When
// the window is exhausted, the window advances and the bitmap is rebuilt
// by traversing the filesystem (blocks in use get their bit set). The
// free.ack counter bounds the scan: when it hits zero every block has
// been examined since the last lfs_alloc_ack and the disk is full.
// Returns 0 on success, LFS_ERR_NOSPC when no block is free, or a
// negative error code from the traversal.
static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
    while (true) {
        // scan remaining bits of the current lookahead window
        while (lfs->free.i != lfs->free.size) {
            lfs_block_t off = lfs->free.i;
            lfs->free.i += 1;
            lfs->free.ack -= 1;

            if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
                // found a free block
                *block = (lfs->free.off + off) % lfs->cfg->block_count;

                // eagerly find next off so an alloc ack can
                // discredit old lookahead blocks
                while (lfs->free.i != lfs->free.size &&
                        (lfs->free.buffer[lfs->free.i / 32]
                            & (1U << (lfs->free.i % 32)))) {
                    lfs->free.i += 1;
                    lfs->free.ack -= 1;
                }

                return 0;
            }
        }

        // check if we have looked at all blocks since last ack
        if (lfs->free.ack == 0) {
            LFS_WARN("No more free space %d", lfs->free.i + lfs->free.off);
            return LFS_ERR_NOSPC;
        }

        // advance the window, capped by how many blocks remain unchecked
        lfs->free.off = (lfs->free.off + lfs->free.size)
                % lfs->cfg->block_count;
        lfs->free.size = lfs_min(lfs->cfg->lookahead, lfs->free.ack);
        lfs->free.i = 0;

        // find mask of free blocks from tree
        memset(lfs->free.buffer, 0, lfs->cfg->lookahead/8);
        int err = lfs_fs_traverse(lfs, lfs_alloc_lookahead, NULL);
        if (err) {
            return err;
        }
    }
}
|
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
// Acknowledge that all previously allocated blocks have been committed,
// allowing the allocator to consider the whole device again.
static void lfs_alloc_ack(lfs_t *lfs) {
    lfs->free.ack = lfs->cfg->block_count;
}
|
2017-03-13 00:41:08 +00:00
|
|
|
|
2017-02-27 00:05:27 +00:00
|
|
|
|
2018-02-02 11:58:43 +00:00
|
|
|
/// Endian swapping functions ///
|
2018-05-26 18:50:06 +00:00
|
|
|
//static void lfs_dir_fromle32(struct lfs_disk_dir *d) {
|
|
|
|
// d->rev = lfs_fromle32(d->rev);
|
|
|
|
// d->size = lfs_fromle32(d->size);
|
|
|
|
// d->tail[0] = lfs_fromle32(d->tail[0]);
|
|
|
|
// d->tail[1] = lfs_fromle32(d->tail[1]);
|
|
|
|
//}
|
|
|
|
//
|
2018-05-29 06:11:26 +00:00
|
|
|
//static void lfs_mdir_tole32(struct lfs_disk_dir *d) {
|
2018-05-26 18:50:06 +00:00
|
|
|
// d->rev = lfs_tole32(d->rev);
|
|
|
|
// d->size = lfs_tole32(d->size);
|
|
|
|
// d->tail[0] = lfs_tole32(d->tail[0]);
|
|
|
|
// d->tail[1] = lfs_tole32(d->tail[1]);
|
|
|
|
//}
|
|
|
|
//
|
|
|
|
//static void lfs_entry_fromle32(struct lfs_disk_entry *d) {
|
|
|
|
// d->u.dir[0] = lfs_fromle32(d->u.dir[0]);
|
|
|
|
// d->u.dir[1] = lfs_fromle32(d->u.dir[1]);
|
|
|
|
//}
|
|
|
|
//
|
|
|
|
//static void lfs_entry_tole32(struct lfs_disk_entry *d) {
|
|
|
|
// d->u.dir[0] = lfs_tole32(d->u.dir[0]);
|
|
|
|
// d->u.dir[1] = lfs_tole32(d->u.dir[1]);
|
|
|
|
//}
|
2018-02-02 11:58:43 +00:00
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
///*static*/ void lfs_superblock_fromle32(struct lfs_disk_superblock *d) {
|
|
|
|
// d->root[0] = lfs_fromle32(d->root[0]);
|
|
|
|
// d->root[1] = lfs_fromle32(d->root[1]);
|
|
|
|
// d->block_size = lfs_fromle32(d->block_size);
|
|
|
|
// d->block_count = lfs_fromle32(d->block_count);
|
|
|
|
// d->version = lfs_fromle32(d->version);
|
|
|
|
// d->inline_size = lfs_fromle32(d->inline_size);
|
|
|
|
// d->attrs_size = lfs_fromle32(d->attrs_size);
|
|
|
|
// d->name_size = lfs_fromle32(d->name_size);
|
|
|
|
//}
|
|
|
|
//
|
|
|
|
///*static*/ void lfs_superblock_tole32(struct lfs_disk_superblock *d) {
|
|
|
|
// d->root[0] = lfs_tole32(d->root[0]);
|
|
|
|
// d->root[1] = lfs_tole32(d->root[1]);
|
|
|
|
// d->block_size = lfs_tole32(d->block_size);
|
|
|
|
// d->block_count = lfs_tole32(d->block_count);
|
|
|
|
// d->version = lfs_tole32(d->version);
|
|
|
|
// d->inline_size = lfs_tole32(d->inline_size);
|
|
|
|
// d->attrs_size = lfs_tole32(d->attrs_size);
|
|
|
|
// d->name_size = lfs_tole32(d->name_size);
|
|
|
|
//}
|
2018-02-02 11:58:43 +00:00
|
|
|
|
2018-04-03 13:28:09 +00:00
|
|
|
/// Other struct functions ///
|
2018-05-29 05:50:47 +00:00
|
|
|
//static inline lfs_size_t lfs_entry_elen(const lfs_mattr_t *attr) {
|
|
|
|
// return (lfs_size_t)(attr->d.elen) |
|
|
|
|
// ((lfs_size_t)(attr->d.alen & 0xc0) << 2);
|
2018-05-26 18:50:06 +00:00
|
|
|
//}
|
|
|
|
//
|
2018-05-29 05:50:47 +00:00
|
|
|
//static inline lfs_size_t lfs_entry_alen(const lfs_mattr_t *attr) {
|
|
|
|
// return attr->d.alen & 0x3f;
|
2018-05-26 18:50:06 +00:00
|
|
|
//}
|
|
|
|
//
|
2018-05-29 05:50:47 +00:00
|
|
|
//static inline lfs_size_t lfs_entry_nlen(const lfs_mattr_t *attr) {
|
|
|
|
// return attr->d.nlen;
|
2018-05-26 18:50:06 +00:00
|
|
|
//}
|
|
|
|
//
|
2018-05-29 05:50:47 +00:00
|
|
|
//static inline lfs_size_t lfs_entry_size(const lfs_mattr_t *attr) {
|
|
|
|
// return 4 + lfs_entry_elen(attr) +
|
|
|
|
// lfs_entry_alen(attr) +
|
|
|
|
// lfs_entry_nlen(attr);
|
2018-05-26 18:50:06 +00:00
|
|
|
//}
|
2018-04-03 13:28:09 +00:00
|
|
|
|
2018-02-02 11:58:43 +00:00
|
|
|
|
2017-04-18 03:27:06 +00:00
|
|
|
/// Metadata pair and directory operations ///
|
2017-04-01 15:44:17 +00:00
|
|
|
// Swap the two blocks of a metadata pair in place.
static inline void lfs_pairswap(lfs_block_t pair[2]) {
    lfs_block_t tmp = pair[1];
    pair[1] = pair[0];
    pair[0] = tmp;
}
|
|
|
|
|
2017-04-18 03:27:06 +00:00
|
|
|
// A pair is null if either block is the reserved 0xffffffff sentinel.
static inline bool lfs_pairisnull(const lfs_block_t pair[2]) {
    for (int i = 0; i < 2; i++) {
        if (pair[i] == 0xffffffff) {
            return true;
        }
    }
    return false;
}
|
|
|
|
|
2017-04-01 15:44:17 +00:00
|
|
|
static inline int lfs_paircmp(
|
|
|
|
const lfs_block_t paira[2],
|
|
|
|
const lfs_block_t pairb[2]) {
|
2017-04-29 17:41:53 +00:00
|
|
|
return !(paira[0] == pairb[0] || paira[1] == pairb[1] ||
|
|
|
|
paira[0] == pairb[1] || paira[1] == pairb[0]);
|
2017-04-01 15:44:17 +00:00
|
|
|
}
|
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
static inline bool lfs_pairsync(
|
|
|
|
const lfs_block_t paira[2],
|
|
|
|
const lfs_block_t pairb[2]) {
|
|
|
|
return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
|
|
|
|
(paira[0] == pairb[1] && paira[1] == pairb[0]);
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
/// Entry tag operations ///
|
|
|
|
// Pack a tag from its type (bits 22-30), id (bits 12-21), and size
// (bits 0-11) fields.
//
// The operands are cast to lfs_tag_t before shifting: without the cast,
// uint16_t promotes to (signed) int, and a type value with its high bit
// set would shift into the sign bit — undefined behavior in C.
static inline lfs_tag_t lfs_mktag(
        uint16_t type, uint16_t id, lfs_size_t size) {
    return ((lfs_tag_t)type << 22) | ((lfs_tag_t)id << 12) | size;
}
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
// A tag is valid when its high "invalid" bit (bit 31) is clear.
static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
    return (tag & 0x80000000) == 0;
}
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
// User attributes are marked by bit 30 of the tag.
static inline bool lfs_tag_isuser(lfs_tag_t tag) {
    return (tag & 0x40000000) != 0;
}
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
// Extract the full 9-bit type field (bits 22-30).
static inline uint16_t lfs_tag_type(lfs_tag_t tag) {
    return (uint16_t)((tag >> 22) & 0x1ff);
}
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
// Extract the 5-bit subtype (bits 26-30 of the tag), keeping it in the
// same position it occupies within the 9-bit type field (so the low
// 4 bits of the result are always zero).
static inline uint16_t lfs_tag_subtype(lfs_tag_t tag) {
    return (uint16_t)(((tag >> 26) & 0x1f) << 4);
}
|
|
|
|
|
|
|
|
// Extract the 10-bit id field (bits 12-21).
static inline uint16_t lfs_tag_id(lfs_tag_t tag) {
    return (uint16_t)((tag >> 12) & 0x3ff);
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
// Extract the low 12-bit size field.
static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) {
    return (lfs_size_t)(tag & 0x00000fff);
}
|
|
|
|
|
2018-07-02 03:29:42 +00:00
|
|
|
// operations on globals
|
|
|
|
static lfs_globals_t lfs_globals_xor(
|
|
|
|
const lfs_globals_t *a, const lfs_globals_t *b) {
|
|
|
|
lfs_globals_t res;
|
|
|
|
res.move.pair[0] = a->move.pair[0] ^ b->move.pair[0];
|
|
|
|
res.move.pair[1] = a->move.pair[1] ^ b->move.pair[1];
|
|
|
|
res.move.id = a->move.id ^ b->move.id;
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool lfs_globals_iszero(const lfs_globals_t *a) {
|
|
|
|
return (a->move.pair[0] == 0 && a->move.pair[1] == 0 && a->move.id == 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// commit logic
|
2018-05-19 23:25:47 +00:00
|
|
|
// State of an in-progress commit to a metadata block. Tracks where the
// next tag will be written and the running crc of the committed data.
struct lfs_commit {
    lfs_block_t block;  // metadata block being committed to
    lfs_off_t off;      // current write offset within the block
    lfs_off_t begin;    // offset where this commit started
    lfs_off_t end;      // offset the commit must not grow past

    lfs_tag_t ptag;     // previous tag, used to xor-chain tag encodings
    uint32_t crc;       // running crc over everything committed so far

    // only attrs whose id falls in [begin, end) are committed;
    // ids are rebased relative to filter.begin on the way out
    struct {
        uint16_t begin;
        uint16_t end;
    } filter;
};
|
|
|
|
|
2018-05-28 07:08:16 +00:00
|
|
|
// TODO predelcare
|
2018-05-29 00:49:20 +00:00
|
|
|
static int lfs_commit_move(lfs_t *lfs, struct lfs_commit *commit,
|
2018-05-28 14:17:44 +00:00
|
|
|
uint16_t fromid, uint16_t toid,
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t *dir, lfs_mattrlist_t *list);
|
2018-05-28 07:08:16 +00:00
|
|
|
|
2018-05-19 23:25:47 +00:00
|
|
|
// Append a single attribute to an in-progress commit.
//
// Attrs outside the commit's id filter are silently skipped. An
// LFS_FROM_DIR attr recursively moves entries from another dir instead
// of writing data directly. Otherwise the attr's id is rebased against
// the filter, the tag is xor-chained with the previous tag and written,
// then the payload is copied either from memory (attr.u.buffer) or from
// disk (attr.u.d), feeding the commit crc throughout.
// Returns 0, LFS_ERR_NOSPC if the commit region is full, or a negative
// error code from the storage layer.
static int lfs_commit_commit(lfs_t *lfs,
        struct lfs_commit *commit, lfs_mattr_t attr) {
    // filter out ids (0x3ff is the "no id" wildcard and always passes)
    if (lfs_tag_id(attr.tag) < 0x3ff && (
            lfs_tag_id(attr.tag) < commit->filter.begin ||
            lfs_tag_id(attr.tag) >= commit->filter.end)) {
        return 0;
    }

    // special cases
    if (lfs_tag_type(attr.tag) == LFS_FROM_DIR) {
        // size field holds the source id for dir-to-dir moves
        return lfs_commit_move(lfs, commit,
                lfs_tag_size(attr.tag), lfs_tag_id(attr.tag),
                attr.u.dir, NULL);
    }

    // rebase id relative to the filter window, keep type+size bits
    uint16_t id = lfs_tag_id(attr.tag) - commit->filter.begin;
    attr.tag = lfs_mktag(0, id, 0) | (attr.tag & 0xffc00fff);

    // check if we fit
    lfs_size_t size = lfs_tag_size(attr.tag);
    if (commit->off + sizeof(lfs_tag_t)+size > commit->end) {
        return LFS_ERR_NOSPC;
    }

    // write out tag, xor-chained against the previous tag
    // TODO rm me
    //printf("tag w %#010x (%x:%x %03x %03x %03x)\n", attr.tag, commit->block, commit->off+sizeof(lfs_tag_t), lfs_tag_type(attr.tag), lfs_tag_id(attr.tag), lfs_tag_size(attr.tag));
    lfs_tag_t tag = lfs_tole32((attr.tag & 0x7fffffff) ^ commit->ptag);
    lfs_crc(&commit->crc, &tag, sizeof(tag));
    int err = lfs_bd_prog(lfs, commit->block, commit->off, &tag, sizeof(tag));
    if (err) {
        return err;
    }
    commit->off += sizeof(tag);

    if (!(attr.tag & 0x80000000)) {
        // from memory
        lfs_crc(&commit->crc, attr.u.buffer, size);
        err = lfs_bd_prog(lfs, commit->block, commit->off,
                attr.u.buffer, size);
        if (err) {
            return err;
        }
    } else {
        // from disk, copied one byte at a time through the caches
        for (lfs_off_t i = 0; i < size; i++) {
            uint8_t dat;
            int err = lfs_bd_read(lfs,
                    attr.u.d.block, attr.u.d.off+i, &dat, 1);
            if (err) {
                return err;
            }

            lfs_crc(&commit->crc, &dat, 1);
            err = lfs_bd_prog(lfs, commit->block, commit->off+i, &dat, 1);
            if (err) {
                return err;
            }
        }
    }

    commit->off += size;
    commit->ptag = attr.tag & 0x7fffffff; // TODO do this once

    return 0;
}
|
|
|
|
|
|
|
|
// Finalize a commit by appending a crc tag + checksum, padding out to
// the next program unit, syncing, and verifying the written data.
//
// The crc tag's valid bit is taken as the inverse of the next program
// unit's erased state, so a later mount can tell whether anything was
// written past this commit. Returns 0, LFS_ERR_CORRUPT if the read-back
// crc mismatches, or a negative storage error.
static int lfs_commit_crc(lfs_t *lfs, struct lfs_commit *commit) {
    // align to program units
    lfs_off_t noff = lfs_alignup(
            commit->off + 2*sizeof(uint32_t), lfs->cfg->prog_size);

    // read erased state from next program unit
    lfs_tag_t tag;
    int err = lfs_bd_read(lfs, commit->block, noff, &tag, sizeof(tag));
    if (err) {
        return err;
    }

    // build crc tag; size covers the padding up to the alignment point
    tag = (0x80000000 & ~lfs_fromle32(tag)) |
            lfs_mktag(LFS_TYPE_CRC, 0x3ff,
                noff - (commit->off+sizeof(uint32_t)));

    // write out crc
    //printf("tag w %#010x (%x:%x %03x %03x %03x)\n", tag, commit->block, commit->off+sizeof(tag), lfs_tag_type(tag), lfs_tag_id(tag), lfs_tag_size(tag));
    uint32_t footer[2];
    footer[0] = lfs_tole32(tag ^ commit->ptag);
    lfs_crc(&commit->crc, &footer[0], sizeof(footer[0]));
    footer[1] = lfs_tole32(commit->crc);
    err = lfs_bd_prog(lfs, commit->block, commit->off,
            footer, sizeof(footer));
    if (err) {
        return err;
    }
    commit->off += sizeof(tag)+lfs_tag_size(tag);
    commit->ptag = tag;

    // flush buffers
    err = lfs_bd_sync(lfs);
    if (err) {
        return err;
    }

    // successful commit, check checksum to make sure
    uint32_t crc = 0xffffffff;
    err = lfs_bd_crc(lfs, commit->block, commit->begin,
            commit->off-lfs_tag_size(tag) - commit->begin, &crc);
    if (err) {
        return err;
    }

    if (crc != commit->crc) {
        return LFS_ERR_CORRUPT;
    }

    return 0;
}
|
|
|
|
|
2018-05-28 07:08:16 +00:00
|
|
|
static int lfs_commit_list(lfs_t *lfs, struct lfs_commit *commit,
|
2018-05-29 05:50:47 +00:00
|
|
|
lfs_mattrlist_t *list) {
|
2018-05-28 07:08:16 +00:00
|
|
|
for (; list; list = list->next) {
|
|
|
|
int err = lfs_commit_commit(lfs, commit, list->e);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-26 00:04:01 +00:00
|
|
|
|
|
|
|
// committer for moves
|
2018-05-28 07:08:16 +00:00
|
|
|
// TODO rename?
|
2018-05-26 00:04:01 +00:00
|
|
|
// Context for moving entries between ids during a commit; passed as the
// callback data to lfs_commit_movescan.
struct lfs_commit_move {
    lfs_mdir_t *dir; // TODO need dir?

    // id mapping: entries matching id.from are re-committed as id.to
    struct {
        uint16_t from;
        uint16_t to;
    } id;

    // the commit being appended to
    struct lfs_commit *commit;
};
|
|
|
|
|
2018-05-28 07:08:16 +00:00
|
|
|
|
2018-05-27 15:15:28 +00:00
|
|
|
// TODO redeclare
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
static int lfs_dir_traverse(lfs_t *lfs, lfs_mdir_t *dir,
|
2018-05-29 05:50:47 +00:00
|
|
|
int (*cb)(lfs_t *lfs, void *data, lfs_mattr_t attr),
|
2018-05-27 15:15:28 +00:00
|
|
|
void *data);
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_dir_get(lfs_t *lfs, lfs_mdir_t *dir,
|
2018-05-29 05:50:47 +00:00
|
|
|
uint32_t mask, lfs_mattr_t *attr);
|
2018-05-26 00:04:01 +00:00
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
// Traversal callback for moving entries: re-commits attrs whose id
// matches move->id.from under move->id.to, skipping types that have
// already been committed (so only the newest version of each attr
// survives). Delete tags at or below the source id shift id.from up
// by one to account for the removed entry.
// p is a struct lfs_commit_move*. Returns 0 or a negative error code.
static int lfs_commit_movescan(lfs_t *lfs, void *p, lfs_mattr_t attr) {
    struct lfs_commit_move *move = p;

    if (lfs_tag_type(attr.tag) == LFS_TYPE_DELETE &&
            lfs_tag_id(attr.tag) <= move->id.from) {
        // something was deleted, we need to move around it
        move->id.from += 1;
        return 0;
    }

    if (lfs_tag_id(attr.tag) != move->id.from) {
        // ignore non-matching ids
        return 0;
    }

    // check if type has already been committed; probe the partially
    // written commit itself via a temporary mdir that stops at the
    // current commit position
    int err = lfs_dir_get(lfs,
            &(lfs_mdir_t){
                .pair[0]=move->commit->block,
                .off=move->commit->off,
                .etag=move->commit->ptag,
                .stop_at_commit=true},
            // user attrs match on full type, others on subtype
            lfs_tag_isuser(attr.tag) ? 0x7ffff000 : 0x7c3ff000,
            &(lfs_mattr_t){
                lfs_mktag(lfs_tag_type(attr.tag),
                    move->id.to - move->commit->filter.begin, 0)}); // TODO can all these filter adjustments be consolidated?
    if (err && err != LFS_ERR_NOENT) {
        return err;
    }

    if (err != LFS_ERR_NOENT) {
        // already committed
        return 0;
    }

    // update id and commit, as we are currently unique
    attr.tag = lfs_mktag(0, move->id.to, 0) | (attr.tag & 0xffc00fff);
    return lfs_commit_commit(lfs, move->commit, attr);
}
|
|
|
|
|
2018-05-29 00:49:20 +00:00
|
|
|
static int lfs_commit_move(lfs_t *lfs, struct lfs_commit *commit,
|
2018-05-28 14:17:44 +00:00
|
|
|
uint16_t fromid, uint16_t toid,
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t *dir, lfs_mattrlist_t *list) {
|
2018-05-28 07:08:16 +00:00
|
|
|
struct lfs_commit_move move = {
|
2018-05-28 14:17:44 +00:00
|
|
|
.id.from = fromid,
|
|
|
|
.id.to = toid,
|
2018-05-28 07:08:16 +00:00
|
|
|
.commit = commit,
|
|
|
|
};
|
|
|
|
|
2018-05-28 14:17:44 +00:00
|
|
|
for (; list; list = list->next) {
|
|
|
|
int err = lfs_commit_movescan(lfs, &move, list->e);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
int err = lfs_dir_traverse(lfs, dir, lfs_commit_movescan, &move);
|
2018-05-28 07:08:16 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-07-02 03:29:42 +00:00
|
|
|
// Commits the xored-globals delta to the current commit. The delta is
// folded into source via xor and written as a single GLOBALS attr; a
// zero delta is a no-op. Returns 0 on success or a negative error code.
static int lfs_commit_globals(lfs_t *lfs, struct lfs_commit *commit,
        const lfs_globals_t *source, const lfs_globals_t *diff) {
    // an all-zero delta changes nothing, skip the commit entirely
    if (lfs_globals_iszero(diff)) {
        return 0;
    }

    // TODO check performance/complexity of different strategies here
    lfs_globals_t res = lfs_globals_xor(source, diff);
    return lfs_commit_commit(lfs, commit, (lfs_mattr_t){
            lfs_mktag(LFS_TYPE_GLOBALS, 0x3ff, sizeof(res)),
            .u.buffer=&res});
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
// Allocates a fresh metadata-pair and initializes dir's in-RAM state with
// the given split flag and tail pointers. The pair is NOT written to disk
// here; the caller is responsible for the first commit.
// Returns 0 on success or a negative error code from allocation/read.
static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir,
        bool split, const lfs_block_t tail[2]) {
    // allocate pair of dir blocks (backwards, so we write to block 1 first)
    for (int i = 0; i < 2; i++) {
        int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]);
        if (err) {
            return err;
        }
    }

    // rather than clobbering one of the blocks we just pretend
    // the revision may be valid
    int err = lfs_bd_read(lfs, dir->pair[0], 0, &dir->rev, 4);
    if (err) {
        return err;
    }
    // convert endianness only after the read is known to have succeeded;
    // previously this ran before the error check, touching an
    // uninitialized dir->rev on read failure
    dir->rev = lfs_fromle32(dir->rev);

    // set defaults
    dir->off = sizeof(dir->rev);
    dir->etag = 0;
    dir->count = 0;
    dir->tail[0] = tail[0];
    dir->tail[1] = tail[1];
    dir->erased = false;
    dir->split = split;
    dir->globals = (lfs_globals_t){0};

    // don't write out yet, let caller take care of that
    return 0;
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
static int lfs_dir_fetchwith(lfs_t *lfs,
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t *dir, const lfs_block_t pair[2],
|
2018-05-29 05:50:47 +00:00
|
|
|
int (*cb)(lfs_t *lfs, void *data, lfs_mattr_t attr), void *data) {
|
2018-05-19 23:25:47 +00:00
|
|
|
dir->pair[0] = pair[0];
|
|
|
|
dir->pair[1] = pair[1];
|
2018-05-27 15:15:28 +00:00
|
|
|
dir->stop_at_commit = false;
|
2018-05-19 23:25:47 +00:00
|
|
|
|
|
|
|
// find the block with the most recent revision
|
|
|
|
uint32_t rev[2];
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
int err = lfs_bd_read(lfs, dir->pair[i], 0, &rev[i], sizeof(rev[i]));
|
|
|
|
rev[i] = lfs_fromle32(rev[i]);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (lfs_scmp(rev[1], rev[0]) > 0) {
|
|
|
|
lfs_pairswap(dir->pair);
|
|
|
|
lfs_pairswap(rev);
|
|
|
|
}
|
|
|
|
|
|
|
|
// load blocks and check crc
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
lfs_off_t off = sizeof(dir->rev);
|
2018-05-21 05:56:20 +00:00
|
|
|
lfs_tag_t ptag = 0;
|
2018-05-19 23:25:47 +00:00
|
|
|
uint32_t crc = 0xffffffff;
|
2018-05-21 05:56:20 +00:00
|
|
|
dir->tail[0] = 0xffffffff;
|
|
|
|
dir->tail[1] = 0xffffffff;
|
|
|
|
dir->count = 0;
|
|
|
|
dir->split = false;
|
2018-07-02 03:29:42 +00:00
|
|
|
dir->globals = (lfs_globals_t){0};
|
2018-05-19 23:25:47 +00:00
|
|
|
|
|
|
|
dir->rev = lfs_tole32(rev[0]);
|
|
|
|
lfs_crc(&crc, &dir->rev, sizeof(dir->rev));
|
|
|
|
dir->rev = lfs_fromle32(dir->rev);
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t temp = *dir;
|
2018-05-28 22:46:32 +00:00
|
|
|
|
2018-05-19 23:25:47 +00:00
|
|
|
while (true) {
|
|
|
|
// extract next tag
|
2018-05-21 05:56:20 +00:00
|
|
|
lfs_tag_t tag;
|
2018-05-28 22:46:32 +00:00
|
|
|
int err = lfs_bd_read(lfs, temp.pair[0], off, &tag, sizeof(tag));
|
2018-05-19 23:25:47 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
lfs_crc(&crc, &tag, sizeof(tag));
|
|
|
|
tag = lfs_fromle32(tag) ^ ptag;
|
|
|
|
|
|
|
|
// next commit not yet programmed
|
2018-07-09 19:13:31 +00:00
|
|
|
if (lfs_tag_type(ptag) == LFS_TYPE_CRC && !lfs_tag_isvalid(tag)) {
|
2018-07-02 03:29:42 +00:00
|
|
|
// synthetic move
|
|
|
|
if (lfs_paircmp(dir->pair, lfs->globals.move.pair) == 0
|
|
|
|
&& cb) {
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduce deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set of atomic possibilities.
There are a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
int err = cb(lfs, data, (lfs_mattr_t){
|
2018-07-09 19:13:31 +00:00
|
|
|
lfs_mktag(LFS_TYPE_DELETE,
|
2018-07-02 03:29:42 +00:00
|
|
|
lfs->globals.move.id, 0)});
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-19 23:25:47 +00:00
|
|
|
dir->erased = true;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// check we're in valid range
|
2018-05-26 18:50:06 +00:00
|
|
|
if (off + sizeof(tag)+lfs_tag_size(tag) > lfs->cfg->block_size) {
|
2018-05-19 23:25:47 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2018-05-28 22:46:32 +00:00
|
|
|
//printf("tag r %#010x (%x:%x %03x %03x %03x)\n", tag, temp.pair[0], off+sizeof(tag), lfs_tag_type(tag), lfs_tag_id(tag), lfs_tag_size(tag));
|
2018-05-26 18:50:06 +00:00
|
|
|
if (lfs_tag_type(tag) == LFS_TYPE_CRC) {
|
2018-05-29 05:50:47 +00:00
|
|
|
// check the crc attr
|
2018-05-19 23:25:47 +00:00
|
|
|
uint32_t dcrc;
|
2018-05-28 22:46:32 +00:00
|
|
|
int err = lfs_bd_read(lfs, temp.pair[0],
|
2018-05-21 05:56:20 +00:00
|
|
|
off+sizeof(tag), &dcrc, sizeof(dcrc));
|
2018-05-19 23:25:47 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (crc != lfs_fromle32(dcrc)) {
|
2018-05-28 22:46:32 +00:00
|
|
|
if (off == sizeof(temp.rev)) {
|
2018-05-19 23:25:47 +00:00
|
|
|
// try other block
|
|
|
|
break;
|
|
|
|
} else {
|
2018-07-02 03:29:42 +00:00
|
|
|
// snythetic move
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
// TODO combine with above?
|
2018-07-02 03:29:42 +00:00
|
|
|
if (lfs_paircmp(dir->pair, lfs->globals.move.pair) == 0
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
&& cb) {
|
|
|
|
int err = cb(lfs, data, (lfs_mattr_t){
|
2018-07-09 19:13:31 +00:00
|
|
|
lfs_mktag(LFS_TYPE_DELETE,
|
2018-07-02 03:29:42 +00:00
|
|
|
lfs->globals.move.id, 0)});
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-19 23:25:47 +00:00
|
|
|
// consider what we have good enough
|
|
|
|
dir->erased = false;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-28 22:46:32 +00:00
|
|
|
temp.off = off + sizeof(tag)+lfs_tag_size(tag);
|
|
|
|
temp.etag = tag;
|
2018-05-19 23:25:47 +00:00
|
|
|
crc = 0xffffffff;
|
2018-05-28 22:46:32 +00:00
|
|
|
*dir = temp;
|
2018-07-04 06:35:04 +00:00
|
|
|
|
|
|
|
// TODO simplify this?
|
|
|
|
if (cb) {
|
|
|
|
err = cb(lfs, data, (lfs_mattr_t){
|
|
|
|
(tag | 0x80000000),
|
|
|
|
.u.d.block=temp.pair[0],
|
|
|
|
.u.d.off=off+sizeof(tag)});
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
2018-05-19 23:25:47 +00:00
|
|
|
} else {
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
// TODO crc before callback???
|
2018-05-28 22:46:32 +00:00
|
|
|
err = lfs_bd_crc(lfs, temp.pair[0],
|
2018-05-21 05:56:20 +00:00
|
|
|
off+sizeof(tag), lfs_tag_size(tag), &crc);
|
2018-05-19 23:25:47 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
if (lfs_tag_subtype(tag) == LFS_TYPE_TAIL) {
|
|
|
|
temp.split = (lfs_tag_type(tag) & 1);
|
2018-05-28 22:46:32 +00:00
|
|
|
err = lfs_bd_read(lfs, temp.pair[0], off+sizeof(tag),
|
|
|
|
temp.tail, sizeof(temp.tail));
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2018-07-09 19:13:31 +00:00
|
|
|
} else if (lfs_tag_type(tag) == LFS_TYPE_GLOBALS) {
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
err = lfs_bd_read(lfs, temp.pair[0], off+sizeof(tag),
|
2018-07-09 19:51:57 +00:00
|
|
|
&temp.globals, sizeof(temp.globals));
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2018-05-26 00:04:01 +00:00
|
|
|
} else {
|
2018-07-09 19:51:57 +00:00
|
|
|
if (lfs_tag_id(tag) < 0x3ff &&
|
|
|
|
lfs_tag_id(tag) >= temp.count) {
|
2018-05-28 22:46:32 +00:00
|
|
|
temp.count = lfs_tag_id(tag)+1;
|
2018-05-26 00:04:01 +00:00
|
|
|
}
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
if (lfs_tag_type(tag) == LFS_TYPE_DELETE) {
|
2018-05-28 22:46:32 +00:00
|
|
|
temp.count -= 1;
|
2018-05-26 00:04:01 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (cb) {
|
2018-05-29 05:50:47 +00:00
|
|
|
err = cb(lfs, data, (lfs_mattr_t){
|
2018-05-26 00:04:01 +00:00
|
|
|
(tag | 0x80000000),
|
2018-05-28 22:46:32 +00:00
|
|
|
.u.d.block=temp.pair[0],
|
2018-05-26 00:04:01 +00:00
|
|
|
.u.d.off=off+sizeof(tag)});
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2018-05-19 23:25:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ptag = tag;
|
2018-05-21 05:56:20 +00:00
|
|
|
off += sizeof(tag)+lfs_tag_size(tag);
|
2018-05-19 23:25:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// failed, try the other crc?
|
|
|
|
lfs_pairswap(dir->pair);
|
|
|
|
lfs_pairswap(rev);
|
|
|
|
}
|
|
|
|
|
|
|
|
LFS_ERROR("Corrupted dir pair at %d %d", dir->pair[0], dir->pair[1]);
|
|
|
|
return LFS_ERR_CORRUPT;
|
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
static int lfs_dir_fetch(lfs_t *lfs,
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t *dir, const lfs_block_t pair[2]) {
|
2018-05-26 18:50:06 +00:00
|
|
|
return lfs_dir_fetchwith(lfs, dir, pair, NULL, NULL);
|
2018-05-22 22:43:39 +00:00
|
|
|
}
|
|
|
|
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
static int lfs_dir_traverse(lfs_t *lfs, lfs_mdir_t *dir,
|
2018-05-29 05:50:47 +00:00
|
|
|
int (*cb)(lfs_t *lfs, void *data, lfs_mattr_t attr), void *data) {
|
2018-05-27 15:15:28 +00:00
|
|
|
// iterate over dir block backwards (for faster lookups)
|
|
|
|
lfs_block_t block = dir->pair[0];
|
|
|
|
lfs_off_t off = dir->off;
|
|
|
|
lfs_tag_t tag = dir->etag;
|
|
|
|
|
2018-07-02 03:29:42 +00:00
|
|
|
// synthetic move
|
|
|
|
if (lfs_paircmp(dir->pair, lfs->globals.move.pair) == 0) {
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
int err = cb(lfs, data, (lfs_mattr_t){
|
2018-07-09 19:13:31 +00:00
|
|
|
lfs_mktag(LFS_TYPE_DELETE, lfs->globals.move.id, 0)});
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-27 15:15:28 +00:00
|
|
|
while (off != sizeof(uint32_t)) {
|
|
|
|
// TODO rm me
|
|
|
|
//printf("tag r %#010x (%x:%x %03x %03x %03x)\n", tag, block, off-lfs_tag_size(tag), lfs_tag_type(tag), lfs_tag_id(tag), lfs_tag_size(tag));
|
|
|
|
|
|
|
|
// TODO hmm
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
if (lfs_tag_type(tag) == LFS_TYPE_CRC) {
|
|
|
|
if (dir->stop_at_commit) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
int err = cb(lfs, data, (lfs_mattr_t){
|
|
|
|
(0x80000000 | tag),
|
|
|
|
.u.d.block=block,
|
|
|
|
.u.d.off=off-lfs_tag_size(tag)});
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2018-05-27 15:15:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
LFS_ASSERT(off > sizeof(tag)+lfs_tag_size(tag));
|
|
|
|
off -= sizeof(tag)+lfs_tag_size(tag);
|
|
|
|
|
|
|
|
lfs_tag_t ntag;
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
int err = lfs_bd_read(lfs, block, off, &ntag, sizeof(ntag));
|
2018-05-27 15:15:28 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
tag ^= lfs_fromle32(ntag);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2018-05-19 23:25:47 +00:00
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_dir_compact(lfs_t *lfs, lfs_mdir_t *dir, lfs_mattrlist_t *list,
|
|
|
|
lfs_mdir_t *source, uint16_t begin, uint16_t end) {
|
2018-05-28 07:08:16 +00:00
|
|
|
// save some state in case block is bad
|
|
|
|
const lfs_block_t oldpair[2] = {dir->pair[1], dir->pair[0]};
|
|
|
|
bool relocated = false;
|
|
|
|
|
2018-07-04 06:35:04 +00:00
|
|
|
// There's nothing special about our global delta, so feed it back
|
|
|
|
// into the global global delta
|
2018-07-08 19:21:29 +00:00
|
|
|
// TODO IMMENSE HMM globals get bleed into from above, need to be fixed after commits due to potential moves
|
|
|
|
lfs_globals_t gtemp = dir->globals; // TODO hmm, why did we have different variables then?
|
|
|
|
|
2018-07-04 06:35:04 +00:00
|
|
|
lfs->diff = lfs_globals_xor(&lfs->diff, &dir->globals);
|
|
|
|
dir->globals = (lfs_globals_t){0};
|
|
|
|
|
2018-05-28 07:08:16 +00:00
|
|
|
// increment revision count
|
|
|
|
dir->rev += 1;
|
|
|
|
|
|
|
|
while (true) {
|
|
|
|
// last complete id
|
|
|
|
int16_t ack = -1;
|
|
|
|
dir->count = end - begin;
|
|
|
|
|
|
|
|
if (true) {
|
|
|
|
// erase block to write to
|
|
|
|
int err = lfs_bd_erase(lfs, dir->pair[1]);
|
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// write out header
|
|
|
|
uint32_t crc = 0xffffffff;
|
|
|
|
uint32_t rev = lfs_tole32(dir->rev);
|
|
|
|
lfs_crc(&crc, &rev, sizeof(rev));
|
|
|
|
err = lfs_bd_prog(lfs, dir->pair[1], 0, &rev, sizeof(rev));
|
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// setup compaction
|
|
|
|
struct lfs_commit commit = {
|
|
|
|
.block = dir->pair[1],
|
|
|
|
.off = sizeof(dir->rev),
|
|
|
|
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
// space is complicated, we need room for tail, crc, idelete,
|
2018-05-28 07:08:16 +00:00
|
|
|
// and we keep cap at around half a block
|
|
|
|
.begin = 0,
|
|
|
|
.end = lfs_min(
|
|
|
|
lfs_alignup(lfs->cfg->block_size / 2,
|
|
|
|
lfs->cfg->prog_size),
|
|
|
|
lfs->cfg->block_size - 5*sizeof(uint32_t)),
|
|
|
|
.crc = crc,
|
|
|
|
.ptag = 0,
|
|
|
|
|
|
|
|
// filter out ids
|
|
|
|
.filter.begin = begin,
|
|
|
|
.filter.end = end,
|
|
|
|
};
|
|
|
|
|
2018-07-09 16:47:04 +00:00
|
|
|
if (!relocated) {
|
|
|
|
err = lfs_commit_globals(lfs, &commit,
|
|
|
|
&dir->globals, &lfs->diff);
|
2018-05-28 07:08:16 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_NOSPC) {
|
|
|
|
goto split;
|
|
|
|
} else if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-09 16:47:04 +00:00
|
|
|
// commit with a move
|
|
|
|
for (uint16_t id = begin; id < end; id++) {
|
|
|
|
err = lfs_commit_move(lfs, &commit, id, id, source, list);
|
2018-05-28 07:08:16 +00:00
|
|
|
if (err) {
|
2018-07-02 03:29:42 +00:00
|
|
|
if (err == LFS_ERR_NOSPC) {
|
|
|
|
goto split;
|
|
|
|
} else if (err == LFS_ERR_CORRUPT) {
|
2018-05-28 07:08:16 +00:00
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
2018-07-09 16:47:04 +00:00
|
|
|
|
|
|
|
ack = id;
|
2018-05-28 07:08:16 +00:00
|
|
|
}
|
|
|
|
|
2018-07-09 16:47:04 +00:00
|
|
|
// reopen reserved space at the end
|
|
|
|
commit.end = lfs->cfg->block_size - 2*sizeof(uint32_t);
|
|
|
|
|
2018-07-02 03:29:42 +00:00
|
|
|
if (!lfs_pairisnull(dir->tail)) {
|
2018-07-09 16:47:04 +00:00
|
|
|
// commit tail, which may be new after last size check
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
// TODO le32
|
|
|
|
err = lfs_commit_commit(lfs, &commit, (lfs_mattr_t){
|
2018-07-09 19:13:31 +00:00
|
|
|
lfs_mktag(LFS_TYPE_TAIL + dir->split,
|
2018-07-02 03:29:42 +00:00
|
|
|
0x3ff, sizeof(dir->tail)),
|
|
|
|
.u.buffer=dir->tail});
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-28 07:08:16 +00:00
|
|
|
err = lfs_commit_crc(lfs, &commit);
|
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// successful compaction, swap dir pair to indicate most recent
|
|
|
|
lfs_pairswap(dir->pair);
|
|
|
|
dir->off = commit.off;
|
|
|
|
dir->etag = commit.ptag;
|
|
|
|
dir->erased = true;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
split:
|
|
|
|
// commit no longer fits, need to split dir,
|
|
|
|
// drop caches and create tail
|
|
|
|
lfs->pcache.block = 0xffffffff;
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t tail;
|
2018-05-28 07:08:16 +00:00
|
|
|
int err = lfs_dir_alloc(lfs, &tail, dir->split, dir->tail);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-29 00:49:20 +00:00
|
|
|
err = lfs_dir_compact(lfs, &tail, list, dir, ack+1, end);
|
2018-05-28 07:08:16 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
end = ack+1;
|
|
|
|
dir->tail[0] = tail.pair[0];
|
|
|
|
dir->tail[1] = tail.pair[1];
|
|
|
|
dir->split = true;
|
|
|
|
continue;
|
|
|
|
|
|
|
|
relocate:
|
|
|
|
//commit was corrupted
|
|
|
|
LFS_DEBUG("Bad block at %d", dir->pair[1]);
|
|
|
|
|
|
|
|
// drop caches and prepare to relocate block
|
|
|
|
relocated = true;
|
|
|
|
lfs->pcache.block = 0xffffffff;
|
|
|
|
|
|
|
|
// can't relocate superblock, filesystem is now frozen
|
|
|
|
if (lfs_paircmp(oldpair, (const lfs_block_t[2]){0, 1}) == 0) {
|
|
|
|
LFS_WARN("Superblock %d has become unwritable", oldpair[1]);
|
|
|
|
return LFS_ERR_CORRUPT;
|
|
|
|
}
|
|
|
|
|
|
|
|
// relocate half of pair
|
|
|
|
err = lfs_alloc(lfs, &dir->pair[1]);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (relocated) {
|
|
|
|
// update references if we relocated
|
|
|
|
LFS_DEBUG("Relocating %d %d to %d %d",
|
|
|
|
oldpair[0], oldpair[1], dir->pair[0], dir->pair[1]);
|
|
|
|
int err = lfs_relocate(lfs, oldpair, dir->pair);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2018-07-02 03:29:42 +00:00
|
|
|
} else {
|
|
|
|
lfs->globals = lfs_globals_xor(&lfs->globals, &lfs->diff);
|
|
|
|
lfs->diff = (lfs_globals_t){0};
|
2018-05-28 07:08:16 +00:00
|
|
|
}
|
|
|
|
|
2018-07-08 19:21:29 +00:00
|
|
|
lfs->globals = lfs_globals_xor(&lfs->globals, >emp); // TODO hmm, why did we have different variables then?
|
|
|
|
|
2018-05-28 07:08:16 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir, lfs_mattrlist_t *list) {
|
2018-05-29 05:50:47 +00:00
|
|
|
while (true) {
|
|
|
|
if (!dir->erased) {
|
|
|
|
// not erased, must compact
|
|
|
|
goto compact;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct lfs_commit commit = {
|
|
|
|
.block = dir->pair[0],
|
|
|
|
.begin = dir->off,
|
|
|
|
.off = dir->off,
|
|
|
|
.end = lfs->cfg->block_size - 2*sizeof(uint32_t),
|
|
|
|
.crc = 0xffffffff,
|
|
|
|
.ptag = dir->etag,
|
|
|
|
.filter.begin = 0,
|
2018-05-30 01:08:42 +00:00
|
|
|
.filter.end = 0x3ff,
|
2018-05-29 05:50:47 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
int err = lfs_commit_list(lfs, &commit, list);
|
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
|
|
|
|
goto compact;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
2018-05-28 07:08:16 +00:00
|
|
|
|
2018-07-04 06:35:04 +00:00
|
|
|
err = lfs_commit_globals(lfs, &commit, &dir->globals, &lfs->diff);
|
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
|
|
|
|
goto compact;
|
2018-07-02 03:29:42 +00:00
|
|
|
}
|
2018-07-04 06:35:04 +00:00
|
|
|
return err;
|
2018-07-02 03:29:42 +00:00
|
|
|
}
|
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
err = lfs_commit_crc(lfs, &commit);
|
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
|
|
|
|
goto compact;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
2018-05-28 07:08:16 +00:00
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
// successful commit, lets update dir
|
|
|
|
dir->off = commit.off;
|
|
|
|
dir->etag = commit.ptag;
|
2018-07-08 19:21:29 +00:00
|
|
|
// // TODO hm
|
|
|
|
// dir->globals = lfs_globals_xor(&dir->globals, &lfs->diff);
|
2018-07-02 03:29:42 +00:00
|
|
|
lfs->globals = lfs_globals_xor(&lfs->globals, &lfs->diff);
|
|
|
|
lfs->diff = (lfs_globals_t){0};
|
2018-05-29 05:50:47 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
compact:
|
|
|
|
lfs->pcache.block = 0xffffffff;
|
|
|
|
err = lfs_dir_compact(lfs, dir, list, dir, 0, dir->count);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
2018-05-19 23:25:47 +00:00
|
|
|
}
|
2018-05-29 05:50:47 +00:00
|
|
|
break;
|
2018-05-19 23:25:47 +00:00
|
|
|
}
|
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
// update any directories that are affected
|
2018-07-04 06:35:04 +00:00
|
|
|
// TODO what about pairs? what if we're splitting??
|
2018-05-29 05:50:47 +00:00
|
|
|
for (lfs_dir_t *d = lfs->dirs; d; d = d->next) {
|
2018-05-29 06:11:26 +00:00
|
|
|
if (lfs_paircmp(d->m.pair, dir->pair) == 0) {
|
|
|
|
d->m = *dir;
|
2018-05-19 23:25:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-09 16:47:04 +00:00
|
|
|
// TODO what if we relocated the block containing the move?
|
2018-05-19 23:25:47 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_dir_append(lfs_t *lfs, lfs_mdir_t *dir, uint16_t *id) {
|
2018-05-21 05:56:20 +00:00
|
|
|
*id = dir->count;
|
|
|
|
dir->count += 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_dir_delete(lfs_t *lfs, lfs_mdir_t *dir, uint16_t id) {
|
2018-05-21 05:56:20 +00:00
|
|
|
dir->count -= 1;
|
2018-05-28 14:17:44 +00:00
|
|
|
|
|
|
|
// check if we should drop the directory block
|
|
|
|
if (dir->count == 0) {
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t pdir;
|
2018-05-28 14:17:44 +00:00
|
|
|
int res = lfs_pred(lfs, dir->pair, &pdir);
|
|
|
|
if (res < 0) {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (res && pdir.split) {
|
2018-07-02 03:29:42 +00:00
|
|
|
// steal tail, and global state
|
2018-05-28 14:17:44 +00:00
|
|
|
pdir.split = dir->split;
|
|
|
|
pdir.tail[0] = dir->tail[0];
|
|
|
|
pdir.tail[1] = dir->tail[1];
|
2018-07-02 03:29:42 +00:00
|
|
|
lfs->diff = dir->globals;
|
|
|
|
lfs->globals = lfs_globals_xor(&lfs->globals, &dir->globals);
|
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
int err = lfs_dir_commit(lfs, &pdir, &(lfs_mattrlist_t){
|
2018-07-09 19:13:31 +00:00
|
|
|
{lfs_mktag(LFS_TYPE_TAIL + pdir.split,
|
2018-05-30 01:08:42 +00:00
|
|
|
0x3ff, sizeof(pdir.tail)),
|
2018-05-28 14:17:44 +00:00
|
|
|
.u.buffer=pdir.tail}});
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
int err = lfs_dir_commit(lfs, dir, &(lfs_mattrlist_t){
|
2018-07-09 19:13:31 +00:00
|
|
|
{lfs_mktag(LFS_TYPE_DELETE, id, 0)}});
|
2018-05-28 14:17:44 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
// shift over any dirs/files that are affected
|
2018-05-29 05:50:47 +00:00
|
|
|
for (lfs_dir_t *d = lfs->dirs; d; d = d->next) {
|
2018-05-29 06:11:26 +00:00
|
|
|
if (lfs_paircmp(d->m.pair, dir->pair) == 0) {
|
2018-05-29 05:50:47 +00:00
|
|
|
if (d->id > id) {
|
|
|
|
d->id -= 1;
|
|
|
|
d->pos -= 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-28 14:17:44 +00:00
|
|
|
for (lfs_file_t *f = lfs->files; f; f = f->next) {
|
|
|
|
if (lfs_paircmp(f->pair, dir->pair) == 0) {
|
|
|
|
if (f->id == id) {
|
|
|
|
f->pair[0] = 0xffffffff;
|
|
|
|
f->pair[1] = 0xffffffff;
|
|
|
|
} else if (f->id > id) {
|
|
|
|
f->id -= 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-29 00:49:20 +00:00
|
|
|
return 0;
|
2018-05-21 05:56:20 +00:00
|
|
|
}
|
|
|
|
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
struct lfs_dir_get {
|
2018-05-22 22:43:39 +00:00
|
|
|
uint32_t mask;
|
|
|
|
lfs_tag_t tag;
|
2018-05-29 05:50:47 +00:00
|
|
|
lfs_mattr_t *attr;
|
2018-05-22 22:43:39 +00:00
|
|
|
};
|
|
|
|
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
static int lfs_dir_getscan(lfs_t *lfs, void *p, lfs_mattr_t attr) {
|
|
|
|
struct lfs_dir_get *get = p;
|
2018-05-27 15:15:28 +00:00
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
if ((attr.tag & get->mask) == (get->tag & get->mask)) {
|
|
|
|
*get->attr = attr;
|
2018-05-22 22:43:39 +00:00
|
|
|
return true;
|
2018-07-09 19:13:31 +00:00
|
|
|
} else if (lfs_tag_type(attr.tag) == LFS_TYPE_DELETE) {
|
2018-05-29 05:50:47 +00:00
|
|
|
if (lfs_tag_id(attr.tag) <= lfs_tag_id(get->tag)) {
|
2018-05-27 15:15:28 +00:00
|
|
|
get->tag += lfs_mktag(0, 1, 0);
|
|
|
|
}
|
2018-05-22 22:43:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_dir_get(lfs_t *lfs, lfs_mdir_t *dir,
|
2018-05-29 05:50:47 +00:00
|
|
|
uint32_t mask, lfs_mattr_t *attr) {
|
|
|
|
uint16_t id = lfs_tag_id(attr->tag);
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
int res = lfs_dir_traverse(lfs, dir, lfs_dir_getscan,
|
|
|
|
&(struct lfs_dir_get){mask, attr->tag, attr});
|
2018-05-22 22:43:39 +00:00
|
|
|
if (res < 0) {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!res) {
|
|
|
|
return LFS_ERR_NOENT;
|
|
|
|
}
|
|
|
|
|
2018-05-30 01:08:42 +00:00
|
|
|
attr->tag = lfs_mktag(0, id, 0) | (attr->tag & 0xffc00fff);
|
2018-05-22 22:43:39 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_dir_getbuffer(lfs_t *lfs, lfs_mdir_t *dir,
|
2018-05-29 05:50:47 +00:00
|
|
|
uint32_t mask, lfs_mattr_t *attr) {
|
|
|
|
void *buffer = attr->u.buffer;
|
|
|
|
lfs_size_t size = lfs_tag_size(attr->tag);
|
|
|
|
int err = lfs_dir_get(lfs, dir, mask, attr);
|
2018-05-22 22:43:39 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
lfs_size_t diff = lfs_min(size, lfs_tag_size(attr->tag));
|
2018-05-22 22:43:39 +00:00
|
|
|
memset((uint8_t*)buffer + diff, 0, size - diff);
|
2018-05-29 05:50:47 +00:00
|
|
|
err = lfs_bd_read(lfs, attr->u.d.block, attr->u.d.off, buffer, diff);
|
2018-05-22 22:43:39 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
if (lfs_tag_size(attr->tag) > size) {
|
2018-05-22 22:43:39 +00:00
|
|
|
return LFS_ERR_RANGE;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_dir_getentry(lfs_t *lfs, lfs_mdir_t *dir,
|
2018-05-29 05:50:47 +00:00
|
|
|
uint32_t mask, lfs_tag_t tag, lfs_mattr_t *attr) {
|
|
|
|
attr->tag = tag | sizeof(attr->u);
|
|
|
|
attr->u.buffer = &attr->u;
|
|
|
|
int err = lfs_dir_getbuffer(lfs, dir, mask, attr);
|
2018-05-26 18:50:06 +00:00
|
|
|
if (err && err != LFS_ERR_RANGE) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir,
|
2018-05-26 18:50:06 +00:00
|
|
|
int16_t id, struct lfs_info *info) {
|
2018-07-09 17:51:31 +00:00
|
|
|
lfs_mattr_t attr = {
|
2018-07-09 19:13:31 +00:00
|
|
|
lfs_mktag(LFS_TYPE_NAME, id, lfs->name_size+1),
|
2018-07-09 17:51:31 +00:00
|
|
|
.u.buffer=info->name,
|
|
|
|
};
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
int err = lfs_dir_getbuffer(lfs, dir, 0x7c3ff000, &attr);
|
2018-07-09 17:51:31 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
info->type = lfs_tag_type(attr.tag);
|
2018-07-09 17:51:31 +00:00
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
err = lfs_dir_getentry(lfs, dir, 0x7c3ff000,
|
|
|
|
lfs_mktag(LFS_TYPE_STRUCT, id, 0), &attr);
|
2018-05-26 18:50:06 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
if (lfs_tag_type(attr.tag) == LFS_STRUCT_CTZ) {
|
2018-05-29 05:50:47 +00:00
|
|
|
info->size = attr.u.ctz.size;
|
2018-07-09 19:13:31 +00:00
|
|
|
} else if (lfs_tag_type(attr.tag) == LFS_STRUCT_INLINE) {
|
2018-05-29 05:50:47 +00:00
|
|
|
info->size = lfs_tag_size(attr.tag);
|
2018-05-26 18:50:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2018-05-22 22:43:39 +00:00
|
|
|
}
|
|
|
|
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
struct lfs_dir_find {
|
2018-05-22 22:43:39 +00:00
|
|
|
const char *name;
|
2018-05-26 00:04:01 +00:00
|
|
|
uint16_t len;
|
2018-05-22 22:43:39 +00:00
|
|
|
int16_t id;
|
2018-07-04 06:35:04 +00:00
|
|
|
int16_t tempid;
|
2018-07-09 17:51:31 +00:00
|
|
|
uint8_t findtype;
|
|
|
|
uint8_t tempfindtype;
|
2018-05-22 22:43:39 +00:00
|
|
|
};
|
|
|
|
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
static int lfs_dir_findscan(lfs_t *lfs, void *p, lfs_mattr_t attr) {
|
|
|
|
struct lfs_dir_find *find = p;
|
2018-05-19 23:25:47 +00:00
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
if (lfs_tag_subtype(attr.tag) == LFS_TYPE_NAME &&
|
2018-05-29 05:50:47 +00:00
|
|
|
lfs_tag_size(attr.tag) == find->len) {
|
|
|
|
int res = lfs_bd_cmp(lfs, attr.u.d.block, attr.u.d.off,
|
2018-05-19 23:25:47 +00:00
|
|
|
find->name, find->len);
|
|
|
|
if (res < 0) {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (res) {
|
|
|
|
// found a match
|
2018-07-04 06:35:04 +00:00
|
|
|
find->tempid = lfs_tag_id(attr.tag);
|
2018-07-09 19:13:31 +00:00
|
|
|
find->tempfindtype = lfs_tag_type(attr.tag);
|
2018-05-19 23:25:47 +00:00
|
|
|
}
|
2018-07-09 19:13:31 +00:00
|
|
|
} else if (lfs_tag_type(attr.tag) == LFS_TYPE_DELETE) {
|
2018-07-04 06:35:04 +00:00
|
|
|
if (lfs_tag_id(attr.tag) == find->tempid) {
|
|
|
|
find->tempid = -1;
|
|
|
|
} else if (lfs_tag_id(attr.tag) < find->tempid) {
|
|
|
|
find->tempid -= 1;
|
2018-05-26 00:04:01 +00:00
|
|
|
}
|
2018-07-04 06:35:04 +00:00
|
|
|
} else if (lfs_tag_type(attr.tag) == LFS_TYPE_CRC) {
|
|
|
|
find->id = find->tempid;
|
2018-07-09 17:51:31 +00:00
|
|
|
find->findtype = find->tempfindtype;
|
2018-05-19 23:25:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
// TODO drop others, make this only return id, also make get take in only entry to populate (with embedded tag)
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir,
|
2018-07-09 17:51:31 +00:00
|
|
|
const char **path, uint16_t *id, uint8_t *type) {
|
2018-05-29 05:50:47 +00:00
|
|
|
lfs_mattr_t attr = {
|
2018-05-26 18:50:06 +00:00
|
|
|
.u.pair[0] = lfs->root[0],
|
|
|
|
.u.pair[1] = lfs->root[1],
|
|
|
|
};
|
|
|
|
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
struct lfs_dir_find find = {
|
2018-05-19 23:25:47 +00:00
|
|
|
.name = *path,
|
|
|
|
};
|
|
|
|
|
|
|
|
while (true) {
|
|
|
|
nextname:
|
|
|
|
// skip slashes
|
|
|
|
find.name += strspn(find.name, "/");
|
|
|
|
find.len = strcspn(find.name, "/");
|
|
|
|
|
|
|
|
// special case for root dir
|
|
|
|
if (find.name[0] == '\0') {
|
2018-05-29 06:21:55 +00:00
|
|
|
// Return ISDIR when we hit root
|
2018-07-09 17:51:31 +00:00
|
|
|
// TODO change this to -1 or 0x3ff?
|
|
|
|
*type = LFS_TYPE_DIR;
|
2018-05-29 06:21:55 +00:00
|
|
|
return LFS_ERR_ISDIR;
|
2018-05-19 23:25:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// skip '.' and root '..'
|
|
|
|
if ((find.len == 1 && memcmp(find.name, ".", 1) == 0) ||
|
|
|
|
(find.len == 2 && memcmp(find.name, "..", 2) == 0)) {
|
|
|
|
find.name += find.len;
|
|
|
|
goto nextname;
|
|
|
|
}
|
|
|
|
|
|
|
|
// skip if matched by '..' in name
|
|
|
|
const char *suffix = find.name + find.len;
|
|
|
|
lfs_size_t sufflen;
|
|
|
|
int depth = 1;
|
|
|
|
while (true) {
|
|
|
|
suffix += strspn(suffix, "/");
|
|
|
|
sufflen = strcspn(suffix, "/");
|
|
|
|
if (sufflen == 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
|
|
|
|
depth -= 1;
|
|
|
|
if (depth == 0) {
|
|
|
|
find.name = suffix + sufflen;
|
|
|
|
goto nextname;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
depth += 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
suffix += sufflen;
|
|
|
|
}
|
|
|
|
|
|
|
|
// update what we've found
|
|
|
|
*path = find.name;
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
// find path
|
2018-05-19 23:25:47 +00:00
|
|
|
while (true) {
|
2018-05-29 05:50:47 +00:00
|
|
|
//printf("checking %d %d for %s\n", attr.u.pair[0], attr.u.pair[1], *path);
|
2018-05-19 23:25:47 +00:00
|
|
|
find.id = -1;
|
2018-07-04 06:35:04 +00:00
|
|
|
find.tempid = -1;
|
2018-05-29 05:50:47 +00:00
|
|
|
int err = lfs_dir_fetchwith(lfs, dir, attr.u.pair,
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
lfs_dir_findscan, &find);
|
2018-05-19 23:25:47 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (find.id >= 0) {
|
|
|
|
// found it
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2018-05-27 15:15:28 +00:00
|
|
|
if (!dir->split) {
|
2018-05-19 23:25:47 +00:00
|
|
|
return LFS_ERR_NOENT;
|
|
|
|
}
|
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
attr.u.pair[0] = dir->tail[0];
|
|
|
|
attr.u.pair[1] = dir->tail[1];
|
2018-05-19 23:25:47 +00:00
|
|
|
}
|
2018-05-27 15:15:28 +00:00
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
*id = find.id;
|
2018-07-09 17:51:31 +00:00
|
|
|
*type = find.findtype;
|
2018-05-19 23:25:47 +00:00
|
|
|
find.name += find.len;
|
|
|
|
find.name += strspn(find.name, "/");
|
|
|
|
if (find.name[0] == '\0') {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-07-09 17:51:31 +00:00
|
|
|
// don't continue on if we didn't hit a directory
|
|
|
|
// TODO update with what's on master?
|
|
|
|
if (find.findtype != LFS_TYPE_DIR) {
|
|
|
|
return LFS_ERR_NOTDIR;
|
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
// TODO optimize grab for inline files and like?
|
|
|
|
// TODO would this mean more code?
|
|
|
|
// grab the entry data
|
2018-07-09 19:13:31 +00:00
|
|
|
int err = lfs_dir_getentry(lfs, dir, 0x7c3ff000,
|
|
|
|
lfs_mktag(LFS_TYPE_STRUCT, find.id, 0), &attr);
|
2017-06-24 05:43:05 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
2017-03-05 20:11:52 +00:00
|
|
|
}
|
2018-03-11 16:28:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-18 03:27:06 +00:00
|
|
|
/// Top level directory operations ///
|
2018-05-21 05:56:20 +00:00
|
|
|
int lfs_mkdir(lfs_t *lfs, const char *path) {
|
|
|
|
// deorphan if we haven't yet, needed at most once after poweron
|
|
|
|
if (!lfs->deorphaned) {
|
2018-05-26 18:50:06 +00:00
|
|
|
int err = lfs_deorphan(lfs);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t cwd;
|
2018-07-09 17:51:31 +00:00
|
|
|
int err = lfs_dir_find(lfs, &cwd, &path, &(uint16_t){0}, &(uint8_t){0});
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err != LFS_ERR_NOENT || strchr(path, '/') != NULL) {
|
2018-05-29 06:21:55 +00:00
|
|
|
if (!err || err == LFS_ERR_ISDIR) {
|
2018-05-21 05:56:20 +00:00
|
|
|
return LFS_ERR_EXIST;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// check that name fits
|
|
|
|
lfs_size_t nlen = strlen(path);
|
|
|
|
if (nlen > lfs->name_size) {
|
|
|
|
return LFS_ERR_NAMETOOLONG;
|
|
|
|
}
|
|
|
|
|
|
|
|
// build up new directory
|
|
|
|
lfs_alloc_ack(lfs);
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t dir;
|
2018-05-26 18:50:06 +00:00
|
|
|
err = lfs_dir_alloc(lfs, &dir, false, cwd.tail);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-29 00:49:20 +00:00
|
|
|
err = lfs_dir_commit(lfs, &dir, NULL);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// get next slot and commit
|
|
|
|
uint16_t id;
|
2018-05-26 18:50:06 +00:00
|
|
|
err = lfs_dir_append(lfs, &cwd, &id);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-27 15:15:28 +00:00
|
|
|
cwd.tail[0] = dir.pair[0];
|
|
|
|
cwd.tail[1] = dir.pair[1];
|
2018-05-29 05:50:47 +00:00
|
|
|
err = lfs_dir_commit(lfs, &cwd, &(lfs_mattrlist_t){
|
2018-07-09 19:13:31 +00:00
|
|
|
{lfs_mktag(LFS_TYPE_DIR, id, nlen),
|
2018-05-29 05:50:47 +00:00
|
|
|
.u.buffer=(void*)path}, &(lfs_mattrlist_t){
|
2018-07-09 17:51:31 +00:00
|
|
|
{lfs_mktag(LFS_STRUCT_DIR, id, sizeof(dir.pair)),
|
2018-05-29 05:50:47 +00:00
|
|
|
.u.buffer=dir.pair}, &(lfs_mattrlist_t){
|
2018-05-30 01:08:42 +00:00
|
|
|
{lfs_mktag(LFS_TYPE_SOFTTAIL, 0x3ff, sizeof(cwd.tail)),
|
2018-05-27 15:15:28 +00:00
|
|
|
.u.buffer=cwd.tail}}}});
|
2018-05-21 05:56:20 +00:00
|
|
|
|
|
|
|
// TODO need ack here?
|
|
|
|
lfs_alloc_ack(lfs);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
|
2018-05-29 06:21:55 +00:00
|
|
|
uint16_t id;
|
2018-07-09 17:51:31 +00:00
|
|
|
uint8_t type;
|
|
|
|
int err = lfs_dir_find(lfs, &dir->m, &path, &id, &type);
|
2018-05-29 06:21:55 +00:00
|
|
|
if (err && err != LFS_ERR_ISDIR) {
|
2018-05-21 05:56:20 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-07-09 17:51:31 +00:00
|
|
|
if (type != LFS_TYPE_DIR) {
|
|
|
|
return LFS_ERR_NOTDIR;
|
|
|
|
}
|
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
lfs_mattr_t attr;
|
2018-05-29 06:21:55 +00:00
|
|
|
if (err == LFS_ERR_ISDIR) {
|
2018-05-26 18:50:06 +00:00
|
|
|
// handle root dir separately
|
2018-05-29 05:50:47 +00:00
|
|
|
attr.u.pair[0] = lfs->root[0];
|
|
|
|
attr.u.pair[1] = lfs->root[1];
|
2018-05-26 18:50:06 +00:00
|
|
|
} else {
|
|
|
|
// get dir pair from parent
|
2018-07-09 19:13:31 +00:00
|
|
|
err = lfs_dir_getentry(lfs, &dir->m, 0x7c3ff000,
|
|
|
|
lfs_mktag(LFS_TYPE_STRUCT, id, 0), &attr);
|
2018-05-26 18:50:06 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2018-05-21 05:56:20 +00:00
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
// fetch first pair
|
2018-05-29 06:11:26 +00:00
|
|
|
err = lfs_dir_fetch(lfs, &dir->m, attr.u.pair);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
// setup entry
|
2018-05-29 06:11:26 +00:00
|
|
|
dir->head[0] = dir->m.pair[0];
|
|
|
|
dir->head[1] = dir->m.pair[1];
|
2018-05-21 05:56:20 +00:00
|
|
|
dir->id = 0;
|
2018-05-29 05:50:47 +00:00
|
|
|
dir->pos = 0;
|
2018-05-21 05:56:20 +00:00
|
|
|
|
|
|
|
// add to list of directories
|
|
|
|
dir->next = lfs->dirs;
|
|
|
|
lfs->dirs = dir;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
|
2018-05-21 05:56:20 +00:00
|
|
|
// remove from list of directories
|
2018-05-26 18:50:06 +00:00
|
|
|
for (lfs_dir_t **p = &lfs->dirs; *p; p = &(*p)->next) {
|
2018-05-21 05:56:20 +00:00
|
|
|
if (*p == dir) {
|
|
|
|
*p = dir->next;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
|
2018-05-21 05:56:20 +00:00
|
|
|
memset(info, 0, sizeof(*info));
|
|
|
|
|
|
|
|
// special offset for '.' and '..'
|
|
|
|
if (dir->pos == 0) {
|
|
|
|
info->type = LFS_TYPE_DIR;
|
|
|
|
strcpy(info->name, ".");
|
|
|
|
dir->pos += 1;
|
|
|
|
return 1;
|
|
|
|
} else if (dir->pos == 1) {
|
|
|
|
info->type = LFS_TYPE_DIR;
|
|
|
|
strcpy(info->name, "..");
|
|
|
|
dir->pos += 1;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (true) {
|
2018-05-29 06:11:26 +00:00
|
|
|
if (dir->id == dir->m.count) {
|
|
|
|
if (!dir->m.split) {
|
2018-05-21 05:56:20 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
dir->id = 0;
|
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info);
|
2018-05-26 18:50:06 +00:00
|
|
|
if (err && err != LFS_ERR_NOENT) {
|
2018-05-21 05:56:20 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
dir->id += 1;
|
2018-05-26 18:50:06 +00:00
|
|
|
if (err != LFS_ERR_NOENT) {
|
|
|
|
break;
|
|
|
|
}
|
2018-05-21 05:56:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
dir->pos += 1;
|
|
|
|
return true;
|
|
|
|
}
|
2017-03-13 00:41:08 +00:00
|
|
|
|
2018-05-26 00:04:01 +00:00
|
|
|
// TODO does this work?
|
2018-05-26 18:50:06 +00:00
|
|
|
int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
|
2018-05-26 00:04:01 +00:00
|
|
|
// simply walk from head dir
|
2018-05-26 18:50:06 +00:00
|
|
|
int err = lfs_dir_rewind(lfs, dir);
|
2017-03-25 21:20:31 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
2017-04-23 04:11:13 +00:00
|
|
|
}
|
|
|
|
|
2018-05-26 00:04:01 +00:00
|
|
|
// first two for ./..
|
2018-05-28 14:17:44 +00:00
|
|
|
dir->pos = lfs_min(2, off);
|
|
|
|
off -= dir->pos;
|
2017-03-25 21:20:31 +00:00
|
|
|
|
2018-05-26 00:04:01 +00:00
|
|
|
while (off != 0) {
|
2018-05-29 06:11:26 +00:00
|
|
|
dir->id = lfs_min(dir->m.count, off);
|
2018-05-28 14:17:44 +00:00
|
|
|
dir->pos += dir->id;
|
|
|
|
off -= dir->id;
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
if (dir->id == dir->m.count) {
|
|
|
|
if (!dir->m.split) {
|
2018-05-26 00:04:01 +00:00
|
|
|
return LFS_ERR_INVAL;
|
|
|
|
}
|
2017-04-15 16:26:37 +00:00
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
|
2018-05-26 00:04:01 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
2017-04-23 04:11:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
|
2018-02-04 19:10:07 +00:00
|
|
|
(void)lfs;
|
2017-04-23 04:11:13 +00:00
|
|
|
return dir->pos;
|
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
|
2017-04-23 04:11:13 +00:00
|
|
|
// reload the head dir
|
2018-05-29 06:11:26 +00:00
|
|
|
int err = lfs_dir_fetch(lfs, &dir->m, dir->head);
|
2017-04-23 04:11:13 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
dir->m.pair[0] = dir->head[0];
|
|
|
|
dir->m.pair[1] = dir->head[1];
|
2018-05-26 00:04:01 +00:00
|
|
|
dir->id = 0;
|
2018-05-29 05:50:47 +00:00
|
|
|
dir->pos = 0;
|
2017-04-23 04:11:13 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-03-25 21:20:31 +00:00
|
|
|
|
2017-04-30 16:19:37 +00:00
|
|
|
/// File index list operations ///
|
2017-10-18 05:41:43 +00:00
|
|
|
static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
|
2017-10-17 00:08:47 +00:00
|
|
|
lfs_off_t size = *off;
|
2017-10-18 05:33:59 +00:00
|
|
|
lfs_off_t b = lfs->cfg->block_size - 2*4;
|
|
|
|
lfs_off_t i = size / b;
|
2017-10-17 00:08:47 +00:00
|
|
|
if (i == 0) {
|
|
|
|
return 0;
|
2017-04-23 00:48:31 +00:00
|
|
|
}
|
|
|
|
|
2017-10-18 05:33:59 +00:00
|
|
|
i = (size - 4*(lfs_popc(i-1)+2)) / b;
|
|
|
|
*off = size - b*i - 4*lfs_popc(i);
|
|
|
|
return i;
|
2017-04-23 00:48:31 +00:00
|
|
|
}
|
|
|
|
|
2017-10-18 05:41:43 +00:00
|
|
|
static int lfs_ctz_find(lfs_t *lfs,
|
2017-04-30 16:19:37 +00:00
|
|
|
lfs_cache_t *rcache, const lfs_cache_t *pcache,
|
|
|
|
lfs_block_t head, lfs_size_t size,
|
2017-04-23 00:48:31 +00:00
|
|
|
lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
|
|
|
|
if (size == 0) {
|
2017-10-17 00:31:56 +00:00
|
|
|
*block = 0xffffffff;
|
2017-04-23 00:48:31 +00:00
|
|
|
*off = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-10-18 05:41:43 +00:00
|
|
|
lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
|
|
|
|
lfs_off_t target = lfs_ctz_index(lfs, &pos);
|
2017-04-23 00:48:31 +00:00
|
|
|
|
2017-04-23 02:42:22 +00:00
|
|
|
while (current > target) {
|
2017-04-23 00:48:31 +00:00
|
|
|
lfs_size_t skip = lfs_min(
|
|
|
|
lfs_npw2(current-target+1) - 1,
|
2017-10-10 23:48:24 +00:00
|
|
|
lfs_ctz(current));
|
2017-04-23 00:48:31 +00:00
|
|
|
|
2017-04-30 16:19:37 +00:00
|
|
|
int err = lfs_cache_read(lfs, rcache, pcache, head, 4*skip, &head, 4);
|
2018-02-02 11:58:43 +00:00
|
|
|
head = lfs_fromle32(head);
|
2017-04-23 00:48:31 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-01-29 21:20:12 +00:00
|
|
|
LFS_ASSERT(head >= 2 && head <= lfs->cfg->block_count);
|
2017-04-23 00:48:31 +00:00
|
|
|
current -= 1 << skip;
|
|
|
|
}
|
|
|
|
|
|
|
|
*block = head;
|
|
|
|
*off = pos;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-10-18 05:41:43 +00:00
|
|
|
static int lfs_ctz_extend(lfs_t *lfs,
|
2017-04-30 16:19:37 +00:00
|
|
|
lfs_cache_t *rcache, lfs_cache_t *pcache,
|
2017-04-23 00:48:31 +00:00
|
|
|
lfs_block_t head, lfs_size_t size,
|
2017-11-16 21:10:17 +00:00
|
|
|
lfs_block_t *block, lfs_off_t *off) {
|
2017-05-14 17:01:45 +00:00
|
|
|
while (true) {
|
2017-11-16 21:10:17 +00:00
|
|
|
// go ahead and grab a block
|
|
|
|
lfs_block_t nblock;
|
|
|
|
int err = lfs_alloc(lfs, &nblock);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2018-01-29 21:20:12 +00:00
|
|
|
LFS_ASSERT(nblock >= 2 && nblock <= lfs->cfg->block_count);
|
2017-04-23 00:48:31 +00:00
|
|
|
|
2017-11-16 21:10:17 +00:00
|
|
|
if (true) {
|
|
|
|
err = lfs_bd_erase(lfs, nblock);
|
2017-10-17 00:31:56 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
2017-05-14 17:01:45 +00:00
|
|
|
}
|
2017-04-23 00:48:31 +00:00
|
|
|
|
2017-10-17 00:31:56 +00:00
|
|
|
if (size == 0) {
|
2017-11-16 21:10:17 +00:00
|
|
|
*block = nblock;
|
2017-10-17 00:31:56 +00:00
|
|
|
*off = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
2017-04-23 00:48:31 +00:00
|
|
|
|
2017-10-17 00:31:56 +00:00
|
|
|
size -= 1;
|
|
|
|
lfs_off_t index = lfs_ctz_index(lfs, &size);
|
|
|
|
size += 1;
|
|
|
|
|
|
|
|
// just copy out the last block if it is incomplete
|
|
|
|
if (size != lfs->cfg->block_size) {
|
|
|
|
for (lfs_off_t i = 0; i < size; i++) {
|
|
|
|
uint8_t data;
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_cache_read(lfs, rcache, NULL,
|
2017-10-17 00:31:56 +00:00
|
|
|
head, i, &data, 1);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2017-04-23 02:42:22 +00:00
|
|
|
|
2017-10-17 00:31:56 +00:00
|
|
|
err = lfs_cache_prog(lfs, pcache, rcache,
|
2017-11-16 21:10:17 +00:00
|
|
|
nblock, i, &data, 1);
|
2017-10-17 00:31:56 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
2017-05-14 17:01:45 +00:00
|
|
|
}
|
|
|
|
|
2017-11-16 21:10:17 +00:00
|
|
|
*block = nblock;
|
2017-10-17 00:31:56 +00:00
|
|
|
*off = size;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// append block
|
|
|
|
index += 1;
|
|
|
|
lfs_size_t skips = lfs_ctz(index) + 1;
|
|
|
|
|
|
|
|
for (lfs_off_t i = 0; i < skips; i++) {
|
2018-02-02 11:58:43 +00:00
|
|
|
head = lfs_tole32(head);
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_cache_prog(lfs, pcache, rcache,
|
2017-11-16 21:10:17 +00:00
|
|
|
nblock, 4*i, &head, 4);
|
2018-02-02 11:58:43 +00:00
|
|
|
head = lfs_fromle32(head);
|
2017-05-14 17:01:45 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-10-17 00:31:56 +00:00
|
|
|
if (i != skips-1) {
|
|
|
|
err = lfs_cache_read(lfs, rcache, NULL,
|
|
|
|
head, 4*i, &head, 4);
|
2018-02-02 11:58:43 +00:00
|
|
|
head = lfs_fromle32(head);
|
2017-10-17 00:31:56 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2017-05-14 17:01:45 +00:00
|
|
|
}
|
|
|
|
|
2018-01-29 21:20:12 +00:00
|
|
|
LFS_ASSERT(head >= 2 && head <= lfs->cfg->block_count);
|
2017-05-14 17:01:45 +00:00
|
|
|
}
|
2017-09-17 17:53:18 +00:00
|
|
|
|
2017-11-16 21:10:17 +00:00
|
|
|
*block = nblock;
|
2017-10-17 00:31:56 +00:00
|
|
|
*off = 4*skips;
|
|
|
|
return 0;
|
2017-04-23 02:42:22 +00:00
|
|
|
}
|
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
relocate:
|
2017-11-16 21:10:17 +00:00
|
|
|
LFS_DEBUG("Bad block at %d", nblock);
|
2017-04-23 00:48:31 +00:00
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
// just clear cache and try a new block
|
|
|
|
pcache->block = 0xffffffff;
|
2017-04-23 00:48:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-18 05:41:43 +00:00
|
|
|
static int lfs_ctz_traverse(lfs_t *lfs,
|
2017-04-30 16:19:37 +00:00
|
|
|
lfs_cache_t *rcache, const lfs_cache_t *pcache,
|
2017-04-23 00:48:31 +00:00
|
|
|
lfs_block_t head, lfs_size_t size,
|
2018-05-26 18:50:06 +00:00
|
|
|
int (*cb)(lfs_t*, void*, lfs_block_t), void *data) {
|
2017-04-23 00:48:31 +00:00
|
|
|
if (size == 0) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-10-18 05:41:43 +00:00
|
|
|
lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
|
2017-04-23 00:48:31 +00:00
|
|
|
|
|
|
|
while (true) {
|
2018-05-26 18:50:06 +00:00
|
|
|
int err = cb(lfs, data, head);
|
2017-04-23 00:48:31 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (index == 0) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-12-27 18:30:01 +00:00
|
|
|
lfs_block_t heads[2];
|
|
|
|
int count = 2 - (index & 1);
|
|
|
|
err = lfs_cache_read(lfs, rcache, pcache, head, 0, &heads, count*4);
|
2018-02-02 11:58:43 +00:00
|
|
|
heads[0] = lfs_fromle32(heads[0]);
|
|
|
|
heads[1] = lfs_fromle32(heads[1]);
|
2017-04-23 00:48:31 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-12-27 18:30:01 +00:00
|
|
|
for (int i = 0; i < count-1; i++) {
|
2018-05-26 18:50:06 +00:00
|
|
|
err = cb(lfs, data, heads[i]);
|
2017-12-27 18:30:01 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
head = heads[count-1];
|
|
|
|
index -= count;
|
2017-04-23 00:48:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-04-18 03:27:06 +00:00
|
|
|
/// Top level file operations ///
|
2018-05-23 04:57:19 +00:00
|
|
|
int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
|
2018-05-22 22:43:39 +00:00
|
|
|
const char *path, int flags) {
|
|
|
|
// deorphan if we haven't yet, needed at most once after poweron
|
|
|
|
if ((flags & 3) != LFS_O_RDONLY && !lfs->deorphaned) {
|
2018-05-26 18:50:06 +00:00
|
|
|
int err = lfs_deorphan(lfs);
|
2018-05-22 22:43:39 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// allocate entry for file if it doesn't exist
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t cwd;
|
2018-05-29 06:21:55 +00:00
|
|
|
uint16_t id;
|
2018-07-09 17:51:31 +00:00
|
|
|
uint8_t type;
|
|
|
|
int err = lfs_dir_find(lfs, &cwd, &path, &id, &type);
|
|
|
|
if (err && (err != LFS_ERR_NOENT || strchr(path, '/') != NULL) &&
|
|
|
|
err != LFS_ERR_ISDIR) {
|
2018-05-22 22:43:39 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
lfs_mattr_t attr;
|
2018-05-22 22:43:39 +00:00
|
|
|
if (err == LFS_ERR_NOENT) {
|
|
|
|
if (!(flags & LFS_O_CREAT)) {
|
|
|
|
return LFS_ERR_NOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
// check that name fits
|
|
|
|
lfs_size_t nlen = strlen(path);
|
|
|
|
if (nlen > lfs->name_size) {
|
|
|
|
return LFS_ERR_NAMETOOLONG;
|
|
|
|
}
|
|
|
|
|
|
|
|
// get next slot and create entry to remember name
|
2018-05-26 18:50:06 +00:00
|
|
|
err = lfs_dir_append(lfs, &cwd, &id);
|
2018-05-22 22:43:39 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-28 14:17:44 +00:00
|
|
|
// TODO do we need to make file registered to list to catch updates from this commit? ie if id/cwd change
|
2018-05-29 05:50:47 +00:00
|
|
|
err = lfs_dir_commit(lfs, &cwd, &(lfs_mattrlist_t){
|
2018-07-09 19:13:31 +00:00
|
|
|
{lfs_mktag(LFS_TYPE_REG, id, nlen),
|
2018-05-30 01:08:42 +00:00
|
|
|
.u.buffer=(void*)path}, &(lfs_mattrlist_t){
|
2018-07-09 17:51:31 +00:00
|
|
|
{lfs_mktag(LFS_STRUCT_INLINE, id, 0)}}});
|
2018-05-26 18:50:06 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-28 14:17:44 +00:00
|
|
|
// TODO eh
|
|
|
|
if (id >= cwd.count) {
|
|
|
|
// catch updates from a compact in the above commit
|
|
|
|
id -= cwd.count;
|
|
|
|
cwd.pair[0] = cwd.tail[0];
|
|
|
|
cwd.pair[1] = cwd.tail[1];
|
|
|
|
}
|
|
|
|
|
2018-07-09 17:51:31 +00:00
|
|
|
attr.tag = lfs_mktag(LFS_STRUCT_INLINE, id, 0);
|
2018-05-26 18:50:06 +00:00
|
|
|
} else {
|
2018-07-09 17:51:31 +00:00
|
|
|
if (type != LFS_TYPE_REG) {
|
2018-05-28 14:17:44 +00:00
|
|
|
return LFS_ERR_ISDIR;
|
|
|
|
} else if (flags & LFS_O_EXCL) {
|
2018-05-26 18:50:06 +00:00
|
|
|
return LFS_ERR_EXIST;
|
|
|
|
}
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
attr.tag = lfs_mktag(LFS_TYPE_STRUCT, id, 0);
|
|
|
|
err = lfs_dir_get(lfs, &cwd, 0x7c3ff000, &attr);
|
2018-05-22 22:43:39 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// setup file struct
|
|
|
|
file->pair[0] = cwd.pair[0];
|
|
|
|
file->pair[1] = cwd.pair[1];
|
2018-05-26 18:50:06 +00:00
|
|
|
file->id = id;
|
2018-05-22 22:43:39 +00:00
|
|
|
file->flags = flags;
|
|
|
|
file->pos = 0;
|
2018-05-23 04:57:19 +00:00
|
|
|
file->attrs = NULL;
|
2017-03-20 03:00:56 +00:00
|
|
|
|
2017-04-30 16:19:37 +00:00
|
|
|
// allocate buffer if needed
|
2017-04-30 16:54:27 +00:00
|
|
|
file->cache.block = 0xffffffff;
|
2017-04-30 16:19:37 +00:00
|
|
|
if (lfs->cfg->file_buffer) {
|
|
|
|
file->cache.buffer = lfs->cfg->file_buffer;
|
|
|
|
} else if ((file->flags & 3) == LFS_O_RDONLY) {
|
2018-01-29 21:20:12 +00:00
|
|
|
file->cache.buffer = lfs_malloc(lfs->cfg->read_size);
|
2017-04-30 16:19:37 +00:00
|
|
|
if (!file->cache.buffer) {
|
|
|
|
return LFS_ERR_NOMEM;
|
|
|
|
}
|
|
|
|
} else {
|
2018-01-29 21:20:12 +00:00
|
|
|
file->cache.buffer = lfs_malloc(lfs->cfg->prog_size);
|
2017-04-30 16:19:37 +00:00
|
|
|
if (!file->cache.buffer) {
|
|
|
|
return LFS_ERR_NOMEM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
if (lfs_tag_type(attr.tag) == LFS_STRUCT_INLINE) {
|
2018-04-03 13:28:09 +00:00
|
|
|
// load inline files
|
2018-03-17 15:28:14 +00:00
|
|
|
file->head = 0xfffffffe;
|
2018-05-29 05:50:47 +00:00
|
|
|
file->size = lfs_tag_size(attr.tag);
|
2018-03-17 15:28:14 +00:00
|
|
|
file->flags |= LFS_F_INLINE;
|
|
|
|
file->cache.block = file->head;
|
|
|
|
file->cache.off = 0;
|
2018-05-26 18:50:06 +00:00
|
|
|
// don't always read (may be new file)
|
|
|
|
if (file->size > 0) {
|
2018-05-29 05:50:47 +00:00
|
|
|
err = lfs_bd_read(lfs, attr.u.d.block, attr.u.d.off,
|
2018-05-26 18:50:06 +00:00
|
|
|
file->cache.buffer, file->size);
|
|
|
|
if (err) {
|
|
|
|
lfs_free(file->cache.buffer);
|
|
|
|
return err;
|
|
|
|
}
|
2018-03-17 15:28:14 +00:00
|
|
|
}
|
2018-04-03 13:28:09 +00:00
|
|
|
} else {
|
|
|
|
// use ctz list from entry
|
2018-05-29 05:50:47 +00:00
|
|
|
err = lfs_bd_read(lfs, attr.u.d.block, attr.u.d.off,
|
2018-05-23 04:57:19 +00:00
|
|
|
&file->head, 2*sizeof(uint32_t));
|
2018-04-03 13:28:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// truncate if requested
|
|
|
|
if (flags & LFS_O_TRUNC) {
|
|
|
|
if (file->size != 0) {
|
|
|
|
file->flags |= LFS_F_DIRTY;
|
|
|
|
}
|
|
|
|
|
|
|
|
file->head = 0xfffffffe;
|
|
|
|
file->size = 0;
|
|
|
|
file->flags |= LFS_F_INLINE;
|
|
|
|
file->cache.block = file->head;
|
|
|
|
file->cache.off = 0;
|
2018-03-17 15:28:14 +00:00
|
|
|
}
|
|
|
|
|
2017-04-29 15:22:01 +00:00
|
|
|
// add to list of files
|
|
|
|
file->next = lfs->files;
|
|
|
|
lfs->files = file;
|
|
|
|
|
2017-04-18 03:27:06 +00:00
|
|
|
return 0;
|
2017-03-20 03:00:56 +00:00
|
|
|
}
|
|
|
|
|
2018-05-23 04:57:19 +00:00
|
|
|
// Close an open file.
//
// Syncs any pending writes to disk, unlinks the file from the
// filesystem's list of open files, and frees the cache buffer if it
// was allocated by littlefs (not user-provided via cfg->file_buffer).
//
// Returns the result of the sync; cleanup happens even if sync failed
// so the file handle is always released.
int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
    int err = lfs_file_sync(lfs, file);

    // remove from list of files
    for (lfs_file_t **p = &lfs->files; *p; p = &(*p)->next) {
        if (*p == file) {
            *p = file->next;
            break;
        }
    }

    // clean up memory
    if (!lfs->cfg->file_buffer) {
        lfs_free(file->cache.buffer);
    }

    return err;
}
|
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
// Move a file's currently-open block to a freshly allocated block.
//
// Used when the current block turns out to be bad (prog reported
// LFS_ERR_CORRUPT) or when an inline file outgrows its inline slot.
// Copies the first file->off bytes of the old block into the new one,
// reading through the file's dirty cache so unflushed data is included.
// On success file->block points at the new block and file->cache holds
// the pending (unflushed) program cache state.
//
// If erasing or programming the new block also fails with
// LFS_ERR_CORRUPT, another block is allocated and the copy restarts.
static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
relocate:;
    // just relocate what exists into new block
    lfs_block_t nblock;
    int err = lfs_alloc(lfs, &nblock);
    if (err) {
        return err;
    }

    err = lfs_bd_erase(lfs, nblock);
    if (err) {
        if (err == LFS_ERR_CORRUPT) {
            goto relocate;
        }
        return err;
    }

    // either read from dirty cache or disk
    for (lfs_off_t i = 0; i < file->off; i++) {
        uint8_t data;
        err = lfs_cache_read(lfs, &lfs->rcache, &file->cache,
                file->block, i, &data, 1);
        if (err) {
            return err;
        }

        err = lfs_cache_prog(lfs, &lfs->pcache, &lfs->rcache,
                nblock, i, &data, 1);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                goto relocate;
            }
            return err;
        }
    }

    // copy over new state of file; 0xffffffff marks the pcache as empty
    memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->prog_size);
    file->cache.block = lfs->pcache.block;
    file->cache.off = lfs->pcache.off;
    lfs->pcache.block = 0xffffffff;

    file->block = nblock;
    return 0;
}
|
2018-04-08 21:58:12 +00:00
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
// Flush the file's in-flight read or write state.
//
// Reads are simply dropped (LFS_F_READING cleared). Pending writes are
// completed: for non-inline (CTZ-list) files, any data after the
// current write position is copied forward from the old file contents
// so the new block chain contains the complete file, then the program
// cache is flushed to disk; bad blocks are relocated and retried.
// Inline files only need their size updated.
//
// On success the file is left clean (not READING/WRITING) with
// head/size reflecting the on-flash state and LFS_F_DIRTY set so the
// metadata is updated on the next sync.
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_READING) {
        file->flags &= ~LFS_F_READING;
    }

    if (file->flags & LFS_F_WRITING) {
        lfs_off_t pos = file->pos;

        if (!(file->flags & LFS_F_INLINE)) {
            // copy over anything after current branch
            lfs_file_t orig = {
                .head = file->head,
                .size = file->size,
                .flags = LFS_O_RDONLY,
                .pos = file->pos,
                .cache = lfs->rcache,
            };
            lfs->rcache.block = 0xffffffff;

            while (file->pos < file->size) {
                // copy over a byte at a time, leave it up to caching
                // to make this efficient
                uint8_t data;
                lfs_ssize_t res = lfs_file_read(lfs, &orig, &data, 1);
                if (res < 0) {
                    return res;
                }

                res = lfs_file_write(lfs, file, &data, 1);
                if (res < 0) {
                    return res;
                }

                // keep our reference to the rcache in sync
                if (lfs->rcache.block != 0xffffffff) {
                    orig.cache.block = 0xffffffff;
                    lfs->rcache.block = 0xffffffff;
                }
            }

            // write out what we have
            while (true) {
                int err = lfs_cache_flush(lfs, &file->cache, &lfs->rcache);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                break;
relocate:
                LFS_DEBUG("Bad block at %d", file->block);
                err = lfs_file_relocate(lfs, file);
                if (err) {
                    return err;
                }
            }
        } else {
            file->size = lfs_max(file->pos, file->size);
        }

        // actual file updates
        file->head = file->block;
        file->size = file->pos;
        file->flags &= ~LFS_F_WRITING;
        file->flags |= LFS_F_DIRTY;

        file->pos = pos;
    }

    return 0;
}
|
|
|
|
|
2018-05-23 04:57:19 +00:00
|
|
|
// Sync a file's data and metadata to disk.
//
// First flushes any pending reads/writes, then, if the file is dirty
// (and not in an errored state, and still attached to a metadata
// pair), commits its directory entry: a CTZ struct (head + size) for
// regular files, or the cached contents directly for inline files.
// Any custom attributes queued on file->attrs are committed in the
// same operation.
int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
    int err = lfs_file_flush(lfs, file);
    if (err) {
        return err;
    }

    if ((file->flags & LFS_F_DIRTY) &&
            !(file->flags & LFS_F_ERRED) &&
            !lfs_pairisnull(file->pair)) {
        // update dir entry
        // TODO keep list of dirs including these guys for no
        // need of another reload?
        lfs_mdir_t cwd;
        err = lfs_dir_fetch(lfs, &cwd, file->pair);
        if (err) {
            return err;
        }

        // either update the references or inline the whole file
        if (!(file->flags & LFS_F_INLINE)) {
            err = lfs_dir_commit(lfs, &cwd, &(lfs_mattrlist_t){
                    {lfs_mktag(LFS_STRUCT_CTZ,
                        file->id, 2*sizeof(uint32_t)), .u.buffer=&file->head},
                    file->attrs});
            if (err) {
                return err;
            }
        } else {
            err = lfs_dir_commit(lfs, &cwd, &(lfs_mattrlist_t){
                    {lfs_mktag(LFS_STRUCT_INLINE,
                        file->id, file->size), .u.buffer=file->cache.buffer},
                    file->attrs});
            if (err) {
                return err;
            }
        }

        file->flags &= ~LFS_F_DIRTY;
    }

    return 0;
}
|
|
|
|
|
2017-04-24 02:40:03 +00:00
|
|
|
// Read up to size bytes from the file at its current position.
//
// Returns the number of bytes read (0 at end-of-file, i.e. short
// reads are clamped to the file size), or a negative error code.
// LFS_ERR_BADF if the file was opened write-only. Any pending writes
// are flushed first so reads see the latest data.
//
// Iterates block-by-block: for CTZ files the block containing the
// current position is located via the skip-list; inline files use the
// sentinel block 0xfffffffe and read straight from the cache.
lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if ((file->flags & 3) == LFS_O_WRONLY) {
        return LFS_ERR_BADF;
    }

    if (file->flags & LFS_F_WRITING) {
        // flush out any writes
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }
    }

    if (file->pos >= file->size) {
        // eof if past end
        return 0;
    }

    size = lfs_min(size, file->size - file->pos);
    nsize = size;

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_READING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                int err = lfs_ctz_find(lfs, &file->cache, NULL,
                        file->head, file->size,
                        file->pos, &file->block, &file->off);
                if (err) {
                    return err;
                }
            } else {
                file->block = 0xfffffffe;
                file->off = file->pos;
            }

            file->flags |= LFS_F_READING;
        }

        // read as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        int err = lfs_cache_read(lfs, &file->cache, NULL,
                file->block, file->off, data, diff);
        if (err) {
            return err;
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;
    }

    return size;
}
|
|
|
|
|
2017-03-20 03:00:56 +00:00
|
|
|
// Write size bytes to the file at its current position.
//
// Returns the number of bytes written or a negative error code.
// LFS_ERR_BADF if the file was opened read-only.
//
// Handles several cases before the main loop:
// - pending reads are dropped via flush
// - LFS_O_APPEND moves the position to the end of the file
// - seeking past the end before a write zero-fills the gap
// - an inline file that would exceed cfg->inline_size is migrated to
//   a real block via lfs_file_relocate
//
// The main loop extends the CTZ block chain as needed and programs
// data through the file's cache, relocating on LFS_ERR_CORRUPT.
// LFS_F_ERRED is set on unrecoverable errors (so a later sync won't
// commit inconsistent state) and cleared on a fully successful write.
lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if ((file->flags & 3) == LFS_O_RDONLY) {
        return LFS_ERR_BADF;
    }

    if (file->flags & LFS_F_READING) {
        // drop any reads
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }
    }

    if ((file->flags & LFS_O_APPEND) && file->pos < file->size) {
        file->pos = file->size;
    }

    if (!(file->flags & LFS_F_WRITING) && file->pos > file->size) {
        // fill with zeros
        lfs_off_t pos = file->pos;
        file->pos = file->size;

        while (file->pos < pos) {
            lfs_ssize_t res = lfs_file_write(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return res;
            }
        }
    }

    if ((file->flags & LFS_F_INLINE) &&
            file->pos + nsize >= lfs->cfg->inline_size) {
        // inline file doesn't fit anymore
        file->block = 0xfffffffe;
        file->off = file->pos;

        lfs_alloc_ack(lfs);
        int err = lfs_file_relocate(lfs, file);
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }

        file->flags &= ~LFS_F_INLINE;
        file->flags |= LFS_F_WRITING;
    }

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_WRITING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
                    // find out which block we're extending from
                    int err = lfs_ctz_find(lfs, &file->cache, NULL,
                            file->head, file->size,
                            file->pos-1, &file->block, &file->off);
                    if (err) {
                        file->flags |= LFS_F_ERRED;
                        return err;
                    }

                    // mark cache as dirty since we may have read data into it
                    file->cache.block = 0xffffffff;
                }

                // extend file with new blocks
                lfs_alloc_ack(lfs);
                int err = lfs_ctz_extend(lfs, &lfs->rcache, &file->cache,
                        file->block, file->pos,
                        &file->block, &file->off);
                if (err) {
                    file->flags |= LFS_F_ERRED;
                    return err;
                }
            } else {
                file->block = 0xfffffffe;
                file->off = file->pos;
            }

            file->flags |= LFS_F_WRITING;
        }

        // program as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        while (true) {
            int err = lfs_cache_prog(lfs, &file->cache, &lfs->rcache,
                    file->block, file->off, data, diff);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                file->flags |= LFS_F_ERRED;
                return err;
            }

            break;
relocate:
            err = lfs_file_relocate(lfs, file);
            if (err) {
                file->flags |= LFS_F_ERRED;
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;

        lfs_alloc_ack(lfs);
    }

    file->flags &= ~LFS_F_ERRED;
    return size;
}
|
|
|
|
|
2017-04-23 04:11:13 +00:00
|
|
|
// Change the file position.
//
// whence is LFS_SEEK_SET, LFS_SEEK_CUR, or LFS_SEEK_END. Pending
// writes are flushed first (a no-op for read-only files). Seeking
// to a negative absolute position returns LFS_ERR_INVAL.
//
// Returns the new position, or a negative error code.
lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
        lfs_soff_t off, int whence) {
    // write out everything beforehand, may be noop if rdonly
    int err = lfs_file_flush(lfs, file);
    if (err) {
        return err;
    }

    // update pos
    if (whence == LFS_SEEK_SET) {
        file->pos = off;
    } else if (whence == LFS_SEEK_CUR) {
        if (off < 0 && (lfs_off_t)-off > file->pos) {
            return LFS_ERR_INVAL;
        }

        file->pos = file->pos + off;
    } else if (whence == LFS_SEEK_END) {
        if (off < 0 && (lfs_off_t)-off > file->size) {
            return LFS_ERR_INVAL;
        }

        file->pos = file->size + off;
    }

    return file->pos;
}
|
|
|
|
|
2018-01-20 23:30:40 +00:00
|
|
|
// Truncate or extend the file to exactly size bytes.
//
// Shrinking flushes pending writes and re-roots the CTZ skip-list at
// the block containing the new end, then marks the file dirty so
// metadata is committed on sync. Growing seeks to the current end and
// appends zeros one byte at a time, restoring the original position
// afterwards. A no-op when size equals the current size.
//
// Returns 0, or LFS_ERR_BADF for read-only files, or a negative
// error code from the underlying operations.
int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
    if ((file->flags & 3) == LFS_O_RDONLY) {
        return LFS_ERR_BADF;
    }

    lfs_off_t oldsize = lfs_file_size(lfs, file);
    if (size < oldsize) {
        // need to flush since directly changing metadata
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }

        // lookup new head in ctz skip list
        err = lfs_ctz_find(lfs, &file->cache, NULL,
                file->head, file->size,
                size, &file->head, &(lfs_off_t){0});
        if (err) {
            return err;
        }

        file->size = size;
        file->flags |= LFS_F_DIRTY;
    } else if (size > oldsize) {
        lfs_off_t pos = file->pos;

        // flush+seek if not already at end
        if (file->pos != oldsize) {
            int err = lfs_file_seek(lfs, file, 0, LFS_SEEK_END);
            if (err < 0) {
                return err;
            }
        }

        // fill with zeros
        while (file->pos < size) {
            lfs_ssize_t res = lfs_file_write(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return res;
            }
        }

        // restore pos
        int err = lfs_file_seek(lfs, file, pos, LFS_SEEK_SET);
        if (err < 0) {
            return err;
        }
    }

    return 0;
}
|
|
|
|
|
2017-04-23 04:11:13 +00:00
|
|
|
// Return the current position within the file.
lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
    (void)lfs;
    return file->pos;
}
|
|
|
|
|
|
|
|
// Reset the file position to the beginning of the file.
// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_SET).
int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
    lfs_soff_t res = lfs_file_seek(lfs, file, 0, LFS_SEEK_SET);
    if (res < 0) {
        return res;
    }

    return 0;
}
|
|
|
|
|
2017-04-24 02:40:03 +00:00
|
|
|
// Return the logical size of the file.
//
// While a write is in flight, the position may be past the recorded
// size, so the larger of the two is the effective size.
lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
    (void)lfs;
    if (file->flags & LFS_F_WRITING) {
        return lfs_max(file->pos, file->size);
    } else {
        return file->size;
    }
}
|
|
|
|
|
2018-05-23 04:57:19 +00:00
|
|
|
//int lfs_file_getattrs(lfs_t *lfs, lfs_file_t *file,
|
|
|
|
// const struct lfs_attr *attrs, int count) {
|
|
|
|
// // set to null in case we can't find the attrs (missing file?)
|
|
|
|
// for (int j = 0; j < count; j++) {
|
|
|
|
// memset(attrs[j].buffer, 0, attrs[j].size);
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// // load from disk if we haven't already been deleted
|
|
|
|
// if (!lfs_pairisnull(file->pair)) {
|
2018-05-29 06:11:26 +00:00
|
|
|
// lfs_mdir_t cwd;
|
2018-05-23 04:57:19 +00:00
|
|
|
// int err = lfs_dir_fetch(lfs, &cwd, file->pair);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
2018-05-29 05:50:47 +00:00
|
|
|
// lfs_mattr_t entry = {.off = file->pairoff};
|
2018-05-23 04:57:19 +00:00
|
|
|
// err = lfs_dir_get(lfs, &cwd, entry.off, &entry.d, 4);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
// entry.size = lfs_entry_size(&entry);
|
|
|
|
//
|
|
|
|
// err = lfs_dir_getattrs(lfs, &cwd, &entry, attrs, count);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// // override an attrs we have stored locally
|
|
|
|
// for (int i = 0; i < file->attrcount; i++) {
|
|
|
|
// for (int j = 0; j < count; j++) {
|
|
|
|
// if (attrs[j].type == file->attrs[i].type) {
|
|
|
|
// if (attrs[j].size < file->attrs[i].size) {
|
|
|
|
// return LFS_ERR_RANGE;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// memset(attrs[j].buffer, 0, attrs[j].size);
|
|
|
|
// memcpy(attrs[j].buffer,
|
|
|
|
// file->attrs[i].buffer, file->attrs[i].size);
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// return 0;
|
|
|
|
//}
|
|
|
|
|
|
|
|
//int lfs_file_setattrs(lfs_t *lfs, lfs_file_t *file,
|
|
|
|
// const struct lfs_attr *attrs, int count) {
|
|
|
|
// if ((file->flags & 3) == LFS_O_RDONLY) {
|
|
|
|
// return LFS_ERR_BADF;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// // at least make sure attributes fit
|
|
|
|
// if (!lfs_pairisnull(file->pair)) {
|
2018-05-29 06:11:26 +00:00
|
|
|
// lfs_mdir_t cwd;
|
2018-05-23 04:57:19 +00:00
|
|
|
// int err = lfs_dir_fetch(lfs, &cwd, file->pair);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
2018-05-29 05:50:47 +00:00
|
|
|
// lfs_mattr_t entry = {.off = file->pairoff};
|
2018-05-23 04:57:19 +00:00
|
|
|
// err = lfs_dir_get(lfs, &cwd, entry.off, &entry.d, 4);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
// entry.size = lfs_entry_size(&entry);
|
|
|
|
//
|
|
|
|
// lfs_ssize_t res = lfs_dir_checkattrs(lfs, &cwd, &entry, attrs, count);
|
|
|
|
// if (res < 0) {
|
|
|
|
// return res;
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// // just tack to the file, will be written at sync time
|
|
|
|
// file->attrs = attrs;
|
|
|
|
// file->attrcount = count;
|
|
|
|
// file->flags |= LFS_F_DIRTY;
|
|
|
|
//
|
|
|
|
// return 0;
|
|
|
|
//}
|
2018-04-08 21:58:12 +00:00
|
|
|
|
2017-04-24 02:40:03 +00:00
|
|
|
|
2018-01-30 19:07:37 +00:00
|
|
|
/// General fs operations ///
|
2017-04-24 02:40:03 +00:00
|
|
|
/// General fs operations ///

// Fill info with metadata for the file or directory at path.
//
// The root directory is special-cased: lfs_dir_find reports it as
// LFS_ERR_ISDIR, which is translated into a synthetic "/" entry of
// type LFS_TYPE_DIR. Returns 0 on success or a negative error code.
int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
    lfs_mdir_t cwd;
    uint16_t id;
    // TODO pass to getinfo?
    int err = lfs_dir_find(lfs, &cwd, &path, &id, &(uint8_t){0});
    if (err && err != LFS_ERR_ISDIR) {
        return err;
    }

    if (err == LFS_ERR_ISDIR) {
        // special case for root
        strcpy(info->name, "/");
        info->type = LFS_TYPE_DIR;
        return 0;
    }

    return lfs_dir_getinfo(lfs, &cwd, id, info);
}
|
|
|
|
|
2018-05-27 15:15:28 +00:00
|
|
|
// Remove the file or directory at path.
//
// Runs a one-time deorphan pass if needed after power-on, then looks
// up the entry from the root. Directories must be empty (no entries
// and not split across blocks) or LFS_ERR_NOTEMPTY is returned. After
// deleting a directory's entry, its metadata pair is spliced out of
// the linked list of metadata pairs by pointing its predecessor's
// tail at the removed directory's tail.
//
// Fix over the previous revision: the final lfs_dir_commit result was
// assigned but never checked, silently dropping commit failures.
int lfs_remove(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    if (!lfs->deorphaned) {
        int err = lfs_deorphan(lfs);
        if (err) {
            return err;
        }
    }

    lfs_mdir_t cwd;
    int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
    if (err) {
        return err;
    }

    uint16_t id;
    uint8_t type;
    err = lfs_dir_find(lfs, &cwd, &path, &id, &type);
    if (err) {
        return err;
    }

    lfs_mdir_t dir;
    if (type == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_mattr_t attr;
        err = lfs_dir_getentry(lfs, &cwd, 0x7c3ff000,
                lfs_mktag(LFS_TYPE_STRUCT, id, 0), &attr);
        if (err) {
            return err;
        }

        err = lfs_dir_fetch(lfs, &dir, attr.u.pair);
        if (err) {
            return err;
        }

        // TODO lfs_dir_empty?
        if (dir.count > 0 || dir.split) {
            return LFS_ERR_NOTEMPTY;
        }
    }

    // delete the entry
    err = lfs_dir_delete(lfs, &cwd, id);
    if (err) {
        return err;
    }

    if (type == LFS_TYPE_DIR) {
        // splice the removed directory out of the metadata-pair list
        int res = lfs_pred(lfs, dir.pair, &cwd);
        if (res < 0) {
            return res;
        }

        LFS_ASSERT(res); // must have pred
        cwd.tail[0] = dir.tail[0];
        cwd.tail[1] = dir.tail[1];
        err = lfs_dir_commit(lfs, &cwd, &(lfs_mattrlist_t){
                {lfs_mktag(LFS_TYPE_SOFTTAIL, 0x3ff, sizeof(cwd.tail)),
                    .u.buffer=cwd.tail}});
        if (err) {
            return err;
        }
    }

    return 0;
}
|
|
|
|
|
|
|
|
int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
|
|
|
|
// deorphan if we haven't yet, needed at most once after poweron
|
|
|
|
if (!lfs->deorphaned) {
|
|
|
|
int err = lfs_deorphan(lfs);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// find old entry
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t oldcwd;
|
2018-05-29 06:21:55 +00:00
|
|
|
uint16_t oldid;
|
2018-07-09 17:51:31 +00:00
|
|
|
uint8_t oldtype;
|
|
|
|
int err = lfs_dir_find(lfs, &oldcwd, &oldpath, &oldid, &oldtype);
|
2018-05-27 15:15:28 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// find new entry
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t newcwd;
|
2018-05-29 06:21:55 +00:00
|
|
|
uint16_t newid;
|
2018-07-09 17:51:31 +00:00
|
|
|
uint8_t prevtype;
|
|
|
|
err = lfs_dir_find(lfs, &newcwd, &newpath, &newid, &prevtype);
|
2018-05-27 15:15:28 +00:00
|
|
|
if (err && err != LFS_ERR_NOENT) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool prevexists = (err != LFS_ERR_NOENT);
|
2018-07-02 03:29:42 +00:00
|
|
|
//bool samepair = (lfs_paircmp(oldcwd.pair, newcwd.pair) == 0);
|
|
|
|
|
|
|
|
lfs_mdir_t prevdir;
|
2018-05-27 15:15:28 +00:00
|
|
|
if (prevexists) {
|
2018-07-09 17:51:31 +00:00
|
|
|
// check that we have same type
|
|
|
|
if (prevtype != oldtype) {
|
2018-05-27 15:15:28 +00:00
|
|
|
return LFS_ERR_ISDIR;
|
|
|
|
}
|
|
|
|
|
2018-07-09 17:51:31 +00:00
|
|
|
if (prevtype == LFS_TYPE_DIR) {
|
|
|
|
// must be empty before removal
|
|
|
|
lfs_mattr_t prevattr;
|
2018-07-09 19:13:31 +00:00
|
|
|
err = lfs_dir_getentry(lfs, &newcwd, 0x7c3ff000,
|
|
|
|
lfs_mktag(LFS_TYPE_STRUCT, newid, 0), &prevattr);
|
2018-07-09 17:51:31 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-27 15:15:28 +00:00
|
|
|
// must be empty before removal
|
2018-05-29 05:50:47 +00:00
|
|
|
err = lfs_dir_fetch(lfs, &prevdir, prevattr.u.pair);
|
2018-05-27 15:15:28 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (prevdir.count > 0 || prevdir.split) {
|
|
|
|
return LFS_ERR_NOTEMPTY;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// check that name fits
|
|
|
|
lfs_size_t nlen = strlen(newpath);
|
|
|
|
if (nlen > lfs->name_size) {
|
|
|
|
return LFS_ERR_NAMETOOLONG;
|
|
|
|
}
|
|
|
|
|
|
|
|
// get next id
|
|
|
|
err = lfs_dir_append(lfs, &newcwd, &newid);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-02 03:29:42 +00:00
|
|
|
// create move to fix later
|
|
|
|
lfs->diff.move.pair[0] = oldcwd.pair[0] ^ lfs->globals.move.pair[0];
|
|
|
|
lfs->diff.move.pair[1] = oldcwd.pair[1] ^ lfs->globals.move.pair[1];
|
|
|
|
lfs->diff.move.id = oldid ^ lfs->globals.move.id;
|
2018-05-27 15:15:28 +00:00
|
|
|
|
2018-07-02 03:29:42 +00:00
|
|
|
// move over all attributes
|
2018-05-29 05:50:47 +00:00
|
|
|
err = lfs_dir_commit(lfs, &newcwd, &(lfs_mattrlist_t){
|
2018-07-09 19:13:31 +00:00
|
|
|
{lfs_mktag(oldtype, newid, strlen(newpath)),
|
2018-05-30 01:08:42 +00:00
|
|
|
.u.buffer=(void*)newpath}, &(lfs_mattrlist_t){
|
2018-07-09 19:51:57 +00:00
|
|
|
{lfs_mktag(LFS_FROM_DIR, newid, oldid),
|
2018-05-30 01:08:42 +00:00
|
|
|
.u.dir=&oldcwd}}});
|
2018-05-27 15:15:28 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-07-02 03:29:42 +00:00
|
|
|
// clean up after ourselves
|
|
|
|
err = lfs_fixmove(lfs);
|
2018-05-27 15:15:28 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-07-09 17:51:31 +00:00
|
|
|
if (prevexists && prevtype == LFS_TYPE_DIR) {
|
2018-07-02 03:29:42 +00:00
|
|
|
int res = lfs_pred(lfs, prevdir.pair, &newcwd);
|
|
|
|
if (res < 0) {
|
|
|
|
return res;
|
2018-05-27 15:15:28 +00:00
|
|
|
}
|
2018-07-02 03:29:42 +00:00
|
|
|
|
2018-07-08 19:21:29 +00:00
|
|
|
// TODO test for global state stealing?
|
|
|
|
// steal global state
|
|
|
|
lfs->globals = lfs_globals_xor(&lfs->globals, &prevdir.globals);
|
|
|
|
|
2018-07-02 03:29:42 +00:00
|
|
|
LFS_ASSERT(res); // must have pred
|
|
|
|
newcwd.tail[0] = prevdir.tail[0];
|
|
|
|
newcwd.tail[1] = prevdir.tail[1];
|
|
|
|
err = lfs_dir_commit(lfs, &newcwd, &(lfs_mattrlist_t){
|
|
|
|
{lfs_mktag(LFS_TYPE_SOFTTAIL, 0x3ff, sizeof(newcwd.tail)),
|
|
|
|
.u.buffer=newcwd.tail}});
|
2018-05-27 15:15:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2018-07-02 03:29:42 +00:00
|
|
|
|
|
|
|
|
|
|
|
// if (samepair) {
|
|
|
|
// // update pair if newcwd == oldcwd
|
|
|
|
// oldcwd = newcwd;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// err = fix
|
|
|
|
//
|
|
|
|
// // remove old entry
|
|
|
|
// //printf("RENAME DELETE %d %d %d\n", oldcwd.pair[0], oldcwd.pair[1], oldid);
|
|
|
|
// err = lfs_dir_delete(lfs, &oldcwd, oldid);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// // if we were a directory, find pred, replace tail
|
|
|
|
// // TODO can this just deorphan?
|
|
|
|
// if (prevexists && lfs_tag_subtype(prevattr.tag) == LFS_TYPE_DIR) {
|
|
|
|
// err = lfs_deorphan(lfs);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
return 0;
|
2018-05-27 15:15:28 +00:00
|
|
|
}
|
2018-04-06 00:03:58 +00:00
|
|
|
|
2018-05-26 00:04:01 +00:00
|
|
|
//int lfs_getattrs(lfs_t *lfs, const char *path,
|
|
|
|
// const struct lfs_attr *attrs, int count) {
|
2018-05-29 06:11:26 +00:00
|
|
|
// lfs_mdir_t cwd;
|
2018-05-26 00:04:01 +00:00
|
|
|
// int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
2018-05-29 05:50:47 +00:00
|
|
|
// lfs_mattr_t entry;
|
2018-05-26 00:04:01 +00:00
|
|
|
// err = lfs_dir_find(lfs, &cwd, &entry, &path);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// return lfs_dir_getattrs(lfs, &cwd, &entry, attrs, count);
|
|
|
|
//}
|
|
|
|
//
|
|
|
|
//int lfs_setattrs(lfs_t *lfs, const char *path,
|
|
|
|
// const struct lfs_attr *attrs, int count) {
|
2018-05-29 06:11:26 +00:00
|
|
|
// lfs_mdir_t cwd;
|
2018-05-26 00:04:01 +00:00
|
|
|
// int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
2018-05-29 05:50:47 +00:00
|
|
|
// lfs_mattr_t entry;
|
2018-05-26 00:04:01 +00:00
|
|
|
// err = lfs_dir_find(lfs, &cwd, &entry, &path);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// return lfs_dir_setattrs(lfs, &cwd, &entry, attrs, count);
|
|
|
|
//}
|
2018-04-06 00:03:58 +00:00
|
|
|
|
|
|
|
|
2017-04-24 02:40:03 +00:00
|
|
|
/// Filesystem operations ///
|
2017-04-22 18:30:40 +00:00
|
|
|
// Initialize the in-memory filesystem state from the user configuration.
//
// Adopts or allocates the read, program, and lookahead buffers, validates
// the configuration via asserts, resolves the disk size limits (a limit of
// 0 selects the compile-time default), sets up default root/global state,
// and scans for any pending global updates.
//
// Returns 0 on success, LFS_ERR_NOMEM if a buffer allocation fails, or an
// error from lfs_scan. On failure, any buffers allocated here are freed
// again so no memory is leaked (user-provided buffers are never freed).
static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
    lfs->cfg = cfg;
    int err;

    // setup read cache
    lfs->rcache.block = 0xffffffff;
    if (lfs->cfg->read_buffer) {
        lfs->rcache.buffer = lfs->cfg->read_buffer;
    } else {
        lfs->rcache.buffer = lfs_malloc(lfs->cfg->read_size);
        if (!lfs->rcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup_none;
        }
    }

    // setup program cache
    lfs->pcache.block = 0xffffffff;
    if (lfs->cfg->prog_buffer) {
        lfs->pcache.buffer = lfs->cfg->prog_buffer;
    } else {
        lfs->pcache.buffer = lfs_malloc(lfs->cfg->prog_size);
        if (!lfs->pcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup_rcache;
        }
    }

    // setup lookahead, round down to nearest 32-bits
    LFS_ASSERT(lfs->cfg->lookahead % 32 == 0);
    LFS_ASSERT(lfs->cfg->lookahead > 0);
    if (lfs->cfg->lookahead_buffer) {
        lfs->free.buffer = lfs->cfg->lookahead_buffer;
    } else {
        lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead/8);
        if (!lfs->free.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup_pcache;
        }
    }

    // check that program and read sizes are multiples of the block size
    LFS_ASSERT(lfs->cfg->prog_size % lfs->cfg->read_size == 0);
    LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->prog_size == 0);

    // check that the block size is large enough to fit ctz pointers
    LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
            <= lfs->cfg->block_size);

    // check that the size limits are sane; 0 means "use the default"
    LFS_ASSERT(lfs->cfg->inline_size <= LFS_INLINE_MAX);
    LFS_ASSERT(lfs->cfg->inline_size <= lfs->cfg->read_size);
    lfs->inline_size = lfs->cfg->inline_size;
    if (!lfs->inline_size) {
        lfs->inline_size = lfs_min(LFS_INLINE_MAX, lfs->cfg->read_size);
    }

    LFS_ASSERT(lfs->cfg->attrs_size <= LFS_ATTRS_MAX);
    lfs->attrs_size = lfs->cfg->attrs_size;
    if (!lfs->attrs_size) {
        lfs->attrs_size = LFS_ATTRS_MAX;
    }

    LFS_ASSERT(lfs->cfg->name_size <= LFS_NAME_MAX);
    lfs->name_size = lfs->cfg->name_size;
    if (!lfs->name_size) {
        lfs->name_size = LFS_NAME_MAX;
    }

    // setup default state
    lfs->root[0] = 0xffffffff;
    lfs->root[1] = 0xffffffff;
    lfs->files = NULL;
    lfs->dirs = NULL;
    lfs->deorphaned = false;
    lfs->globals.move.pair[0] = 0xffffffff;
    lfs->globals.move.pair[1] = 0xffffffff;
    lfs->globals.move.id = 0x3ff;
    lfs->diff = (lfs_globals_t){0};

    // scan for any global updates
    // TODO rm me? need to grab any inits
    err = lfs_scan(lfs);
    if (err) {
        goto cleanup_free;
    }

    return 0;

    // error paths: release only the buffers we allocated ourselves
    // (previously these paths leaked the earlier allocations)
cleanup_free:
    if (!lfs->cfg->lookahead_buffer) {
        lfs_free(lfs->free.buffer);
    }
cleanup_pcache:
    if (!lfs->cfg->prog_buffer) {
        lfs_free(lfs->pcache.buffer);
    }
cleanup_rcache:
    if (!lfs->cfg->read_buffer) {
        lfs_free(lfs->rcache.buffer);
    }
cleanup_none:
    return err;
}
|
|
|
|
|
|
|
|
// Release any cache/lookahead buffers that lfs_init allocated on the heap.
// Buffers supplied by the user through the config are left untouched.
// Always returns 0.
static int lfs_deinit(lfs_t *lfs) {
    const struct lfs_config *cfg = lfs->cfg;

    if (cfg->read_buffer == NULL) {
        lfs_free(lfs->rcache.buffer);
    }

    if (cfg->prog_buffer == NULL) {
        lfs_free(lfs->pcache.buffer);
    }

    if (cfg->lookahead_buffer == NULL) {
        lfs_free(lfs->free.buffer);
    }

    return 0;
}
|
|
|
|
|
|
|
|
// Format a block device with a fresh littlefs image.
//
// Initializes temporary filesystem state, builds the superblock metadata
// pair on blocks {0, 1} and an empty root directory, commits both, and
// sanity-checks that the superblock can be fetched back.
//
// Returns 0 on success or a negative error code. All state allocated by
// lfs_init is released via lfs_deinit on every exit path (previously the
// early error returns leaked the init-allocated buffers).
int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_init(lfs, cfg);
    if (err) {
        return err;
    }

    // create free lookahead
    memset(lfs->free.buffer, 0, lfs->cfg->lookahead/8);
    lfs->free.off = 0;
    lfs->free.size = lfs_min(lfs->cfg->lookahead, lfs->cfg->block_count);
    lfs->free.i = 0;
    lfs_alloc_ack(lfs);

    // create superblock dir
    lfs_mdir_t dir;
    err = lfs_dir_alloc(lfs, &dir, false,
            (const lfs_block_t[2]){0xffffffff, 0xffffffff});
    if (err) {
        goto cleanup;
    }

    // write root directory
    lfs_mdir_t root;
    err = lfs_dir_alloc(lfs, &root, false,
            (const lfs_block_t[2]){0xffffffff, 0xffffffff});
    if (err) {
        goto cleanup;
    }

    err = lfs_dir_commit(lfs, &root, NULL);
    if (err) {
        goto cleanup;
    }

    // the superblock dir's tail points at the root directory
    lfs->root[0] = root.pair[0];
    lfs->root[1] = root.pair[1];
    dir.tail[0] = lfs->root[0];
    dir.tail[1] = lfs->root[1];

    // write one superblock, recording the size limits on disk so smaller
    // devices can reject images they cannot read
    lfs_superblock_t superblock = {
        .magic = {"littlefs"},
        .version = LFS_DISK_VERSION,

        .block_size  = lfs->cfg->block_size,
        .block_count = lfs->cfg->block_count,
        .inline_size = lfs->inline_size,
        .attrs_size  = lfs->attrs_size,
        .name_size   = lfs->name_size,
    };

    dir.count += 1;
    err = lfs_dir_commit(lfs, &dir, &(lfs_mattrlist_t){
            {lfs_mktag(LFS_TYPE_SUPERBLOCK, 0, sizeof(superblock)),
             .u.buffer=&superblock}, &(lfs_mattrlist_t){
            {lfs_mktag(LFS_STRUCT_DIR, 0, sizeof(lfs->root)),
             .u.buffer=lfs->root}}});
    if (err) {
        goto cleanup;
    }

    // sanity check that fetch works
    err = lfs_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
    if (err) {
        goto cleanup;
    }

cleanup:
    // release lfs_init's buffers on success and failure alike; lfs_deinit
    // always returns 0, so err carries the real result
    lfs_deinit(lfs);
    return err;
}
|
|
|
|
|
2017-04-22 18:30:40 +00:00
|
|
|
int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
|
|
|
|
int err = lfs_init(lfs, cfg);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2017-03-13 00:41:08 +00:00
|
|
|
|
2017-04-22 19:56:12 +00:00
|
|
|
// setup free lookahead
|
2018-02-08 07:30:21 +00:00
|
|
|
lfs->free.off = 0;
|
2018-04-10 20:14:27 +00:00
|
|
|
lfs->free.size = 0;
|
|
|
|
lfs->free.i = 0;
|
2018-02-08 07:30:21 +00:00
|
|
|
lfs_alloc_ack(lfs);
|
2017-04-22 19:56:12 +00:00
|
|
|
|
|
|
|
// load superblock
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t dir;
|
2018-05-26 18:50:06 +00:00
|
|
|
err = lfs_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existed in littlefs for a
while) is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This change makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
2018-04-01 20:36:29 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
LFS_ERROR("Invalid superblock at %d %d", 0, 1);
|
|
|
|
}
|
2017-10-07 21:56:00 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
lfs_superblock_t superblock;
|
2018-07-09 17:51:31 +00:00
|
|
|
err = lfs_dir_getbuffer(lfs, &dir, 0x7ffff000, &(lfs_mattr_t){
|
2018-05-30 01:08:42 +00:00
|
|
|
lfs_mktag(LFS_TYPE_SUPERBLOCK, 0, sizeof(superblock)),
|
2018-05-26 18:50:06 +00:00
|
|
|
.u.buffer=&superblock});
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err && err != LFS_ERR_RANGE) {
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existing in littlefs for a
while), is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This changes makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
2018-04-01 20:36:29 +00:00
|
|
|
return err;
|
|
|
|
}
|
2018-03-23 21:11:36 +00:00
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
if (memcmp(superblock.magic, "littlefs", 8) != 0) {
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existing in littlefs for a
while), is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This changes makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
2018-04-01 20:36:29 +00:00
|
|
|
LFS_ERROR("Invalid superblock at %d %d", 0, 1);
|
2017-04-24 03:10:16 +00:00
|
|
|
return LFS_ERR_CORRUPT;
|
2017-03-13 00:41:08 +00:00
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
uint16_t major_version = (0xffff & (superblock.version >> 16));
|
|
|
|
uint16_t minor_version = (0xffff & (superblock.version >> 0));
|
2018-01-26 20:26:25 +00:00
|
|
|
if ((major_version != LFS_DISK_VERSION_MAJOR ||
|
|
|
|
minor_version > LFS_DISK_VERSION_MINOR)) {
|
|
|
|
LFS_ERROR("Invalid version %d.%d", major_version, minor_version);
|
2017-04-24 03:10:16 +00:00
|
|
|
return LFS_ERR_INVAL;
|
2017-04-18 03:27:06 +00:00
|
|
|
}
|
|
|
|
|
2018-07-09 17:51:31 +00:00
|
|
|
err = lfs_dir_getbuffer(lfs, &dir, 0x7ffff000, &(lfs_mattr_t){
|
|
|
|
lfs_mktag(LFS_STRUCT_DIR, 0, sizeof(lfs->root)),
|
|
|
|
.u.buffer=lfs->root});
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
if (superblock.inline_size) {
|
|
|
|
if (superblock.inline_size > lfs->inline_size) {
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existing in littlefs for a
while), is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This changes makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
2018-04-01 20:36:29 +00:00
|
|
|
LFS_ERROR("Unsupported inline size (%d > %d)",
|
2018-05-21 05:56:20 +00:00
|
|
|
superblock.inline_size, lfs->inline_size);
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existing in littlefs for a
while), is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This changes makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
2018-04-01 20:36:29 +00:00
|
|
|
return LFS_ERR_INVAL;
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
lfs->inline_size = superblock.inline_size;
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existing in littlefs for a
while), is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This changes makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
2018-04-01 20:36:29 +00:00
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
if (superblock.attrs_size) {
|
|
|
|
if (superblock.attrs_size > lfs->attrs_size) {
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existing in littlefs for a
while), is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This changes makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
2018-04-01 20:36:29 +00:00
|
|
|
LFS_ERROR("Unsupported attrs size (%d > %d)",
|
2018-05-21 05:56:20 +00:00
|
|
|
superblock.attrs_size, lfs->attrs_size);
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existing in littlefs for a
while), is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This changes makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
2018-04-01 20:36:29 +00:00
|
|
|
return LFS_ERR_INVAL;
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
lfs->attrs_size = superblock.attrs_size;
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existing in littlefs for a
while), is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This changes makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
2018-04-01 20:36:29 +00:00
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
if (superblock.name_size) {
|
|
|
|
if (superblock.name_size > lfs->name_size) {
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existing in littlefs for a
while), is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This changes makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
2018-04-01 20:36:29 +00:00
|
|
|
LFS_ERROR("Unsupported name size (%d > %d)",
|
2018-05-21 05:56:20 +00:00
|
|
|
superblock.name_size, lfs->name_size);
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existing in littlefs for a
while), is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This changes makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
2018-04-01 20:36:29 +00:00
|
|
|
return LFS_ERR_INVAL;
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
lfs->name_size = superblock.name_size;
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existing in littlefs for a
while), is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This changes makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
2018-04-01 20:36:29 +00:00
|
|
|
}
|
|
|
|
|
2018-07-02 03:29:42 +00:00
|
|
|
err = lfs_scan(lfs);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-10-07 21:56:00 +00:00
|
|
|
return 0;
|
2017-04-01 15:44:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int lfs_unmount(lfs_t *lfs) {
|
2017-04-22 18:30:40 +00:00
|
|
|
return lfs_deinit(lfs);
|
2017-04-01 15:44:17 +00:00
|
|
|
}
|
|
|
|
|
2017-04-24 02:40:03 +00:00
|
|
|
|
2018-04-08 21:58:12 +00:00
|
|
|
/// Internal filesystem filesystem operations ///
|
2018-05-26 18:50:06 +00:00
|
|
|
int lfs_fs_traverse(lfs_t *lfs,
|
2018-05-21 05:56:20 +00:00
|
|
|
int (*cb)(lfs_t *lfs, void *data, lfs_block_t block), void *data) {
|
|
|
|
if (lfs_pairisnull(lfs->root)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// iterate over metadata pairs
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t dir = {.tail = {0, 1}};
|
2018-05-21 05:56:20 +00:00
|
|
|
while (!lfs_pairisnull(dir.tail)) {
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
int err = cb(lfs, data, dir.tail[i]);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// iterate through ids in directory
|
2018-05-26 18:50:06 +00:00
|
|
|
int err = lfs_dir_fetch(lfs, &dir, dir.tail);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
for (uint16_t id = 0; id < dir.count; id++) {
|
2018-05-29 05:50:47 +00:00
|
|
|
lfs_mattr_t attr;
|
2018-07-09 19:13:31 +00:00
|
|
|
int err = lfs_dir_getentry(lfs, &dir, 0x7c3ff000,
|
|
|
|
lfs_mktag(LFS_TYPE_STRUCT, id, 0), &attr);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_NOENT) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
if (lfs_tag_type(attr.tag) == LFS_STRUCT_CTZ) {
|
2018-05-26 18:50:06 +00:00
|
|
|
err = lfs_ctz_traverse(lfs, &lfs->rcache, NULL,
|
2018-05-29 05:50:47 +00:00
|
|
|
attr.u.ctz.head, attr.u.ctz.size, cb, data);
|
2018-05-26 18:50:06 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2018-05-21 05:56:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// iterate over any open files
|
|
|
|
for (lfs_file_t *f = lfs->files; f; f = f->next) {
|
|
|
|
if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
|
2018-05-26 18:50:06 +00:00
|
|
|
int err = lfs_ctz_traverse(lfs, &lfs->rcache, &f->cache,
|
|
|
|
f->head, f->size, cb, data);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2018-05-21 05:56:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
|
2018-05-26 18:50:06 +00:00
|
|
|
int err = lfs_ctz_traverse(lfs, &lfs->rcache, &f->cache,
|
|
|
|
f->block, f->pos, cb, data);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2018-05-21 05:56:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2018-05-26 18:50:06 +00:00
|
|
|
/*
|
|
|
|
int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) {
|
2017-05-14 17:01:45 +00:00
|
|
|
if (lfs_pairisnull(lfs->root)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-01 15:44:17 +00:00
|
|
|
// iterate over metadata pairs
|
|
|
|
lfs_block_t cwd[2] = {0, 1};
|
|
|
|
|
|
|
|
while (true) {
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
int err = cb(data, cwd[i]);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t dir;
|
2017-04-01 15:44:17 +00:00
|
|
|
int err = lfs_dir_fetch(lfs, &dir, cwd);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-04-01 17:23:15 +00:00
|
|
|
// iterate over contents
|
2018-05-29 05:50:47 +00:00
|
|
|
lfs_mattr_t entry;
|
2017-06-24 01:03:44 +00:00
|
|
|
while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) {
|
2018-03-23 23:35:55 +00:00
|
|
|
err = lfs_dir_get(lfs, &dir,
|
|
|
|
dir.off, &entry.d, sizeof(entry.d));
|
2018-02-02 11:58:43 +00:00
|
|
|
lfs_entry_fromle32(&entry.d);
|
2017-04-01 15:44:17 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-04-03 13:28:09 +00:00
|
|
|
dir.off += lfs_entry_size(&entry);
|
2018-03-03 16:26:06 +00:00
|
|
|
if ((0x70 & entry.d.type) == LFS_STRUCT_CTZ) {
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_ctz_traverse(lfs, &lfs->rcache, NULL,
|
2017-04-29 15:22:01 +00:00
|
|
|
entry.d.u.file.head, entry.d.u.file.size, cb, data);
|
2017-04-24 04:39:50 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
2017-04-01 15:44:17 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cwd[0] = dir.d.tail[0];
|
|
|
|
cwd[1] = dir.d.tail[1];
|
|
|
|
|
2017-04-29 15:22:01 +00:00
|
|
|
if (lfs_pairisnull(cwd)) {
|
|
|
|
break;
|
2017-04-01 15:44:17 +00:00
|
|
|
}
|
|
|
|
}
|
2017-04-29 15:22:01 +00:00
|
|
|
|
|
|
|
// iterate over any open files
|
|
|
|
for (lfs_file_t *f = lfs->files; f; f = f->next) {
|
2018-03-19 01:36:48 +00:00
|
|
|
if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
|
2017-10-18 05:41:43 +00:00
|
|
|
int err = lfs_ctz_traverse(lfs, &lfs->rcache, &f->cache,
|
2017-04-30 16:19:37 +00:00
|
|
|
f->head, f->size, cb, data);
|
2017-04-29 15:22:01 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-19 01:36:48 +00:00
|
|
|
if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
|
2017-10-18 05:41:43 +00:00
|
|
|
int err = lfs_ctz_traverse(lfs, &lfs->rcache, &f->cache,
|
2017-04-30 16:19:37 +00:00
|
|
|
f->block, f->pos, cb, data);
|
2017-04-29 15:22:01 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-02-02 11:58:43 +00:00
|
|
|
|
2017-04-29 15:22:01 +00:00
|
|
|
return 0;
|
2017-04-01 15:44:17 +00:00
|
|
|
}
|
2018-05-26 18:50:06 +00:00
|
|
|
*/
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_pred(lfs_t *lfs, const lfs_block_t pair[2], lfs_mdir_t *pdir) {
|
2018-05-26 18:50:06 +00:00
|
|
|
// iterate over all directory directory entries
|
2018-05-21 05:56:20 +00:00
|
|
|
pdir->tail[0] = 0;
|
|
|
|
pdir->tail[1] = 1;
|
|
|
|
while (!lfs_pairisnull(pdir->tail)) {
|
|
|
|
if (lfs_paircmp(pdir->tail, pair) == 0) {
|
2018-05-28 14:17:44 +00:00
|
|
|
return true; // TODO should we return true only if pred is part of dir?
|
2018-05-21 05:56:20 +00:00
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2018-05-26 18:50:06 +00:00
|
|
|
/*
|
2018-05-29 06:11:26 +00:00
|
|
|
static int lfs_pred(lfs_t *lfs, const lfs_block_t dir[2], lfs_mdir_t *pdir) {
|
2017-05-14 17:01:45 +00:00
|
|
|
if (lfs_pairisnull(lfs->root)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
// iterate directories
|
2017-05-14 17:01:45 +00:00
|
|
|
int err = lfs_dir_fetch(lfs, pdir, (const lfs_block_t[2]){0, 1});
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2017-04-01 17:23:15 +00:00
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
while (!lfs_pairisnull(pdir->d.tail)) {
|
|
|
|
if (lfs_paircmp(pdir->d.tail, dir) == 0) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_dir_fetch(lfs, pdir, pdir->d.tail);
|
2017-05-14 17:01:45 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2018-05-26 18:50:06 +00:00
|
|
|
*/
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
|
|
|
|
|
2018-07-04 06:35:04 +00:00
|
|
|
// TODO combine parentscan and findscan?
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
struct lfs_dir_parentscan {
|
|
|
|
lfs_block_t pair[2];
|
|
|
|
int16_t id;
|
2018-07-04 06:35:04 +00:00
|
|
|
int16_t tempid;
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static int lfs_parentscan(lfs_t *lfs, void *p, lfs_mattr_t attr) {
|
|
|
|
struct lfs_dir_parentscan *parentscan = p;
|
|
|
|
|
2018-07-09 19:13:31 +00:00
|
|
|
if (lfs_tag_type(attr.tag) == LFS_STRUCT_DIR) {
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
int err = lfs_bd_read(lfs, attr.u.d.block, attr.u.d.off,
|
|
|
|
&attr.u, sizeof(attr.u));
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (lfs_paircmp(attr.u.pair, parentscan->pair) == 0) {
|
|
|
|
// found a match
|
2018-07-04 06:35:04 +00:00
|
|
|
parentscan->tempid = lfs_tag_id(attr.tag);
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
}
|
2018-07-09 19:13:31 +00:00
|
|
|
} else if (lfs_tag_type(attr.tag) == LFS_TYPE_DELETE) {
|
2018-07-04 06:35:04 +00:00
|
|
|
if (lfs_tag_id(attr.tag) == parentscan->tempid) {
|
|
|
|
parentscan->tempid = -1;
|
|
|
|
} else if (lfs_tag_id(attr.tag) < parentscan->tempid) {
|
|
|
|
parentscan->tempid -= 1;
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
}
|
2018-07-04 06:35:04 +00:00
|
|
|
} else if (lfs_tag_type(attr.tag) == LFS_TYPE_CRC) {
|
|
|
|
parentscan->id = parentscan->tempid;
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
static int lfs_parent(lfs_t *lfs, const lfs_block_t pair[2],
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t *parent, lfs_mattr_t *attr) {
|
2018-05-26 18:50:06 +00:00
|
|
|
// iterate over all directory directory entries
|
2018-05-21 05:56:20 +00:00
|
|
|
parent->tail[0] = 0;
|
|
|
|
parent->tail[1] = 1;
|
|
|
|
while (!lfs_pairisnull(parent->tail)) {
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
struct lfs_dir_parentscan parentscan = {
|
|
|
|
.pair[0] = pair[0],
|
|
|
|
.pair[1] = pair[1],
|
2018-07-04 06:35:04 +00:00
|
|
|
.id = -1,
|
|
|
|
.tempid = -1,
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
int err = lfs_dir_fetchwith(lfs, parent, parent->tail,
|
|
|
|
lfs_parentscan, &parentscan);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
if (parentscan.id != -1) {
|
2018-07-09 19:13:31 +00:00
|
|
|
int err = lfs_dir_getentry(lfs, parent, 0x7ffff000,
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
lfs_mktag(LFS_STRUCT_DIR, parentscan.id, 0), attr);
|
2018-05-26 18:50:06 +00:00
|
|
|
if (err) {
|
2018-05-21 05:56:20 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
return true;
|
2018-05-21 05:56:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
Introduced xored-globals logic to fix fundamental problem with moves
This was a big roadblock for a while: with the new feature of inlined
files, the existing move logic was fundamentally flawed.
To pull off atomic moves between two different metadata-pairs, littlefs
uses a simple, if a bit clumsy trick.
1. Marks entry as "moving"
2. Copies entry to new metadata-pair
3. Deletes old entry
If power is lost before the move operation is completed, we will find the
"moving" tag. This means there may or may not be an incomplete move on
the filesystem. In this case, we simply search for the moved entry, if
we find it, we remove the old entry, otherwise we just remove the
"moving" tag.
This worked perfectly, until we introduced inlined files. See, unlike
the existing directory and ctz entries, inlined files have no guarantee
they are unique. There is nothing we can search for that will allow us
to find a moved file unless we assign entries globally-unique ids. (note
that moves are fundamentally rename operations, so searching for names
does not make sense).
---
Solving this problem required completely restructuring how littlefs
handled moves and pulled out a really old idea that had been left in the
cutting room floor back when littlefs was going through many
designs: xored-globals.
The problem xored-globals solves is the need to maintain some global state
via commits to these distributed, independent metadata-pairs. The idea
is that we can use some sort of symmetric operation, such as xor, to
introduces deltas of the global state that can be committed atomically
along with any other info to these metadata-pairs.
This means that to figure out our global state, we xor together the global
delta stored in every metadata-pair.
Which means any commit can update the global state atomically, opening
up a whole new set atomic possibilities.
There is a couple of downsides. These globals may end up with deltas on
every single metadata-pair, effectively duplicating the data for each
block. Additionally, these globals need to have multiple copies in RAM.
This means and globals need to be a bounded size and very small, since even
small globals will have a large footprint.
---
On top of xored-globals, it's trivial to fix our move logic. Here we've
added an indirect delete tag which allows us to atomically specify a
delete of any entry on the filesystem.
Our move operation is now:
1. Copy entry to new metadata-pair and atomically xor globals to
indirectly delete our original entry.
2. Delete the original entry and xor globals to remove the indirect
delete.
Extra exciting is that this now takes our relatively clumsy move
operation into a sexy guaranteed O(1) move operation with no searching
necessary (though we do need to xor globals during mount).
Also reintroduced entry struct, now with a specific purpose to describe
the metadata-pair + id combo needed by indirect deletes to locate an
entry.
2018-05-29 17:35:23 +00:00
|
|
|
//
|
|
|
|
//static int lfs_parent(lfs_t *lfs, const lfs_block_t pair[2],
|
|
|
|
// lfs_mdir_t *parent, lfs_mattr_t *attr) {
|
|
|
|
// // iterate over all directory directory entries
|
|
|
|
// parent->tail[0] = 0;
|
|
|
|
// parent->tail[1] = 1;
|
|
|
|
// while (!lfs_pairisnull(parent->tail)) {
|
|
|
|
// int err = lfs_dir_fetch(lfs, parent, parent->tail);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// // TODO make this O(n) by using fetchwith to match the pointers
|
|
|
|
// for (uint16_t id = 0; id < parent->count; id++) {
|
|
|
|
// int err = lfs_dir_getentry(lfs, parent, 0x43dff000,
|
|
|
|
// lfs_mktag(LFS_STRUCT_DIR, id, 0), attr);
|
|
|
|
// if (err) {
|
|
|
|
// if (err == LFS_ERR_NOENT) {
|
|
|
|
// continue;
|
|
|
|
// }
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// if (lfs_paircmp(attr->u.pair, pair) == 0) {
|
|
|
|
// return true;
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// return false;
|
|
|
|
//}
|
2018-05-26 18:50:06 +00:00
|
|
|
/*
|
2017-05-14 17:01:45 +00:00
|
|
|
static int lfs_parent(lfs_t *lfs, const lfs_block_t dir[2],
|
2018-05-29 06:11:26 +00:00
|
|
|
lfs_mdir_t *parent, lfs_mattr_t *attr) {
|
2017-05-14 17:01:45 +00:00
|
|
|
if (lfs_pairisnull(lfs->root)) {
|
|
|
|
return 0;
|
|
|
|
}
|
2018-02-02 11:58:43 +00:00
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
parent->d.tail[0] = 0;
|
|
|
|
parent->d.tail[1] = 1;
|
|
|
|
|
|
|
|
// iterate over all directory directory entries
|
|
|
|
while (!lfs_pairisnull(parent->d.tail)) {
|
|
|
|
int err = lfs_dir_fetch(lfs, parent, parent->d.tail);
|
2017-04-01 17:23:15 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-04-14 22:33:36 +00:00
|
|
|
while (true) {
|
2018-05-29 05:50:47 +00:00
|
|
|
err = lfs_dir_next(lfs, parent, attr);
|
2017-04-24 03:10:16 +00:00
|
|
|
if (err && err != LFS_ERR_NOENT) {
|
2017-04-14 22:33:36 +00:00
|
|
|
return err;
|
|
|
|
}
|
2017-04-01 17:23:15 +00:00
|
|
|
|
2017-04-24 03:10:16 +00:00
|
|
|
if (err == LFS_ERR_NOENT) {
|
2017-04-14 22:33:36 +00:00
|
|
|
break;
|
|
|
|
}
|
2017-04-01 17:23:15 +00:00
|
|
|
|
2018-05-29 05:50:47 +00:00
|
|
|
if (((0x70 & attr->d.type) == LFS_STRUCT_DIR) &&
|
|
|
|
lfs_paircmp(attr->d.u.dir, dir) == 0) {
|
2017-04-14 22:33:36 +00:00
|
|
|
return true;
|
2017-04-01 17:23:15 +00:00
|
|
|
}
|
|
|
|
}
|
2017-05-14 17:01:45 +00:00
|
|
|
}
|
2017-04-01 17:23:15 +00:00
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
return false;
|
|
|
|
}
|
2018-05-26 18:50:06 +00:00
|
|
|
*/
|
2018-05-29 05:50:47 +00:00
|
|
|
|
|
|
|
// TODO rename to lfs_dir_relocate?
// Repairs references to a metadata pair that has been rewritten at a new
// location (oldpair -> newpair). First looks for a parent directory entry
// pointing at oldpair; failing that, for a predecessor whose tail points at
// oldpair. Also patches the in-RAM root and any open dir/file handles.
// Returns 0 on success or a negative error code.
static int lfs_relocate(lfs_t *lfs,
        const lfs_block_t oldpair[2], const lfs_block_t newpair[2]) {
    // find parent
    lfs_mdir_t parent;
    lfs_mattr_t attr;
    int res = lfs_parent(lfs, oldpair, &parent, &attr);
    if (res < 0) {
        return res;
    }

    if (res) {
        // update disk, this creates a desync
        // (the predecessor's tail still references oldpair; deliberate —
        // the desync is cleaned up by lfs_deorphan below)
        attr.u.pair[0] = newpair[0];
        attr.u.pair[1] = newpair[1];
        int err = lfs_dir_commit(lfs, &parent, &(lfs_mattrlist_t){attr});
        if (err) {
            return err;
        }

        // update internal root
        if (lfs_paircmp(oldpair, lfs->root) == 0) {
            LFS_DEBUG("Relocating root %d %d", newpair[0], newpair[1]);
            lfs->root[0] = newpair[0];
            lfs->root[1] = newpair[1];
        }

        // TODO update dir list!!?

        // clean up bad block, which should now be a desync
        return lfs_deorphan(lfs);
    }

    // find pred
    res = lfs_pred(lfs, oldpair, &parent);
    if (res < 0) {
        return res;
    }

    if (res) {
        // just replace bad pair, no desync can occur
        // (no directory entry references oldpair, only the list tail)
        parent.tail[0] = newpair[0];
        parent.tail[1] = newpair[1];
        int err = lfs_dir_commit(lfs, &parent, &(lfs_mattrlist_t){
                {lfs_mktag(LFS_TYPE_TAIL + parent.split, // TODO hm
                    0x3ff, sizeof(lfs_block_t[2])),
                    .u.pair[0]=newpair[0], .u.pair[1]=newpair[1]}});
        if (err) {
            return err;
        }
    }

    // shift over any dirs/files that are affected
    // (open handles still caching the old pair must follow the move)
    for (int i = 0; i < 2; i++) {
        for (lfs_dir_t *d = ((void*[2]){lfs->dirs, lfs->files})[i];
                d; d = d->next) {
            if (lfs_paircmp(d->m.pair, oldpair) == 0) {
                d->m.pair[0] = newpair[0];
                d->m.pair[1] = newpair[1];
            }
        }
    }

    // couldn't find dir, must be new
    return 0;
}
|
2017-04-01 17:23:15 +00:00
|
|
|
|
2018-07-02 03:29:42 +00:00
|
|
|
// Rebuild the global state by xoring together the per-pair deltas stored
// in every metadata pair on the filesystem. Leaves the result in
// lfs->globals, clears the pending diff, and reports any in-flight move.
// Returns 0 on success or a negative error code.
int lfs_scan(lfs_t *lfs) {
    if (lfs_pairisnull(lfs->root)) { // TODO rm me
        return 0;
    }

    lfs_mdir_t mdir = {.tail = {0, 1}};
    lfs_globals_t sum = {{{0xffffffff, 0xffffffff}, 0x3ff}};

    // walk the metadata chain, folding each pair's delta into the sum
    while (!lfs_pairisnull(mdir.tail)) {
        int err = lfs_dir_fetch(lfs, &mdir, mdir.tail);
        if (err) {
            return err;
        }

        sum = lfs_globals_xor(&sum, &mdir.globals);
    }

    // publish the reconstructed globals; nothing is pending yet
    lfs->globals = sum;
    lfs->diff = (lfs_globals_t){0};

    // a non-null move pair means a move was interrupted mid-flight
    if (!lfs_pairisnull(lfs->globals.move.pair)) {
        LFS_DEBUG("Found move %d %d %d",
                lfs->globals.move.pair[0],
                lfs->globals.move.pair[1],
                lfs->globals.move.id);
    }

    return 0;
}
|
|
|
|
|
|
|
|
// Complete an interrupted move recorded in the global state: stage a diff
// that xors the move entry back to null, then delete the leftover source
// entry from its metadata pair. Returns 0 on success or a negative error.
int lfs_fixmove(lfs_t *lfs) {
    LFS_DEBUG("Fixing move %d %d %d", // TODO move to just deorphan
            lfs->globals.move.pair[0],
            lfs->globals.move.pair[1],
            lfs->globals.move.id);

    // stage a diff that cancels the recorded move (xor back to all-ones
    // pair and 0x3ff id, i.e. the null move state)
    lfs->diff.move.pair[0] = 0xffffffff ^ lfs->globals.move.pair[0];
    lfs->diff.move.pair[1] = 0xffffffff ^ lfs->globals.move.pair[1];
    lfs->diff.move.id = 0x3ff ^ lfs->globals.move.id;

    // locate the metadata pair that still holds the moved-from entry
    lfs_mdir_t mdir;
    int err = lfs_dir_fetch(lfs, &mdir, lfs->globals.move.pair);
    if (err) {
        return err;
    }

    // remove the stale source entry
    // NOTE(review): presumably the delete's commit also carries the staged
    // diff so both land atomically — confirm against lfs_dir_delete
    return lfs_dir_delete(lfs, &mdir, lfs->globals.move.id);
}
|
|
|
|
|
2018-05-26 18:50:06 +00:00
|
|
|
// Scans the filesystem and repairs inconsistencies left by interrupted
// operations: completes in-flight moves, reattaches orphaned metadata
// pairs, and resyncs half-orphans (parent entry and tail pointer that
// disagree). Returns 0 on success or a negative error code.
int lfs_deorphan(lfs_t *lfs) {
    lfs->deorphaned = true;
    if (lfs_pairisnull(lfs->root)) { // TODO rm me?
        return 0;
    }

    // Fix bad moves
    if (!lfs_pairisnull(lfs->globals.move.pair)) {
        int err = lfs_fixmove(lfs);
        if (err) {
            return err;
        }
    }

    // pdir trails dir by one step; .split = true suppresses the orphan
    // check until we have a real predecessor
    lfs_mdir_t pdir = {.split = true};
    lfs_mdir_t dir = {.tail = {0, 1}};

    // iterate over all directory entries
    while (!lfs_pairisnull(dir.tail)) {
        int err = lfs_dir_fetch(lfs, &dir, dir.tail);
        if (err) {
            return err;
        }

        // check head blocks for orphans
        if (!pdir.split) {
            // check if we have a parent
            lfs_mdir_t parent;
            lfs_mattr_t attr;
            int res = lfs_parent(lfs, pdir.tail, &parent, &attr);
            if (res < 0) {
                return res;
            }

            if (!res) {
                // we are an orphan
                // (pair is linked in the metadata list but no directory
                // entry references it — unlink it by skipping ahead)
                LFS_DEBUG("Found orphan %d %d",
                        pdir.tail[0], pdir.tail[1]);

                pdir.tail[0] = dir.tail[0];
                pdir.tail[1] = dir.tail[1];
                err = lfs_dir_commit(lfs, &pdir, &(lfs_mattrlist_t){
                        {lfs_mktag(LFS_TYPE_SOFTTAIL,
                            0x3ff, sizeof(pdir.tail)),
                            .u.buffer=pdir.tail}});
                if (err) {
                    return err;
                }

                // only one repair per pass; restart handled by caller
                break;
            }

            // NOTE(review): lfs_pairsync presumably tests whether the
            // parent's recorded pair and pdir.tail refer to the same
            // metadata pair (possibly with blocks swapped) — confirm
            if (!lfs_pairsync(attr.u.pair, pdir.tail)) {
                // we have desynced
                // (parent entry points at the relocated pair; retarget
                // the predecessor's tail to match)
                LFS_DEBUG("Found half-orphan %d %d",
                        attr.u.pair[0], attr.u.pair[1]);

                pdir.tail[0] = attr.u.pair[0];
                pdir.tail[1] = attr.u.pair[1];
                err = lfs_dir_commit(lfs, &pdir, &(lfs_mattrlist_t){
                        {lfs_mktag(LFS_TYPE_SOFTTAIL,
                            0x3ff, sizeof(pdir.tail)),
                            .u.buffer=pdir.tail}});
                if (err) {
                    return err;
                }

                break;
            }
        }

        // advance the trailing pointer
        memcpy(&pdir, &dir, sizeof(pdir));
    }

    return 0;
}
|
2018-04-08 21:58:12 +00:00
|
|
|
|
|
|
|
/// External filesystem filesystem operations ///
|
2018-05-26 00:04:01 +00:00
|
|
|
//int lfs_fs_getattrs(lfs_t *lfs, const struct lfs_attr *attrs, int count) {
|
2018-05-29 06:11:26 +00:00
|
|
|
// lfs_mdir_t dir;
|
2018-05-26 00:04:01 +00:00
|
|
|
// int err = lfs_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
2018-05-29 05:50:47 +00:00
|
|
|
// lfs_mattr_t entry = {.off = sizeof(dir.d)};
|
2018-05-26 00:04:01 +00:00
|
|
|
// err = lfs_dir_get(lfs, &dir, entry.off, &entry.d, 4);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
// entry.size = lfs_entry_size(&entry);
|
|
|
|
//
|
|
|
|
// if (err != LFS_ERR_NOENT) {
|
|
|
|
// if (!err) {
|
|
|
|
// break;
|
|
|
|
// }
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
2018-05-29 06:11:26 +00:00
|
|
|
// lfs_mdir_t cwd;
|
2018-05-26 00:04:01 +00:00
|
|
|
// int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
2018-05-29 05:50:47 +00:00
|
|
|
// lfs_mattr_t entry;
|
2018-05-26 00:04:01 +00:00
|
|
|
// err = lfs_dir_find(lfs, &cwd, &entry, &path);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// return lfs_dir_getinfo(lfs, &cwd, &entry, info);
|
|
|
|
// return lfs_dir_getattrs(lfs, &dir, &entry, attrs, count);
|
|
|
|
//}
|
|
|
|
//
|
|
|
|
//int lfs_fs_setattrs(lfs_t *lfs, const struct lfs_attr *attrs, int count) {
|
2018-05-29 06:11:26 +00:00
|
|
|
// lfs_mdir_t dir;
|
2018-05-26 00:04:01 +00:00
|
|
|
// int err = lfs_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
2018-05-29 05:50:47 +00:00
|
|
|
// lfs_mattr_t entry = {.off = sizeof(dir.d)};
|
2018-05-26 00:04:01 +00:00
|
|
|
// err = lfs_dir_get(lfs, &dir, entry.off, &entry.d, 4);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
// entry.size = lfs_entry_size(&entry);
|
|
|
|
//
|
|
|
|
// return lfs_dir_setattrs(lfs, &dir, &entry, attrs, count);
|
|
|
|
//}
|
2018-04-09 03:25:58 +00:00
|
|
|
|
2018-05-29 06:21:55 +00:00
|
|
|
//static int lfs_fs_size_count(void *p, lfs_block_t block) {
|
|
|
|
// lfs_size_t *size = p;
|
|
|
|
// *size += 1;
|
|
|
|
// return 0;
|
|
|
|
//}
|
|
|
|
//
|
|
|
|
//lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
|
|
|
|
// lfs_size_t size = 0;
|
|
|
|
// int err = lfs_fs_traverse(lfs, lfs_fs_size_count, &size);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// return size;
|
|
|
|
//}
|