/*
 * The little filesystem
 *
 * Copyright (c) 2017 ARM Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "lfs.h"
#include "lfs_util.h"

/// Caching block device operations ///
static int lfs_cache_read(lfs_t *lfs, lfs_cache_t *rcache,
        const lfs_cache_t *pcache, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    LFS_ASSERT(block != 0xffffffff);

    while (size > 0) {
        if (pcache && block == pcache->block && off >= pcache->off &&
                off < pcache->off + lfs->cfg->prog_size) {
            // is already in pcache?
            lfs_size_t diff = lfs_min(size,
                    lfs->cfg->prog_size - (off-pcache->off));
            memcpy(data, &pcache->buffer[off-pcache->off], diff);

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        if (block == rcache->block && off >= rcache->off &&
                off < rcache->off + lfs->cfg->read_size) {
            // is already in rcache?
            lfs_size_t diff = lfs_min(size,
                    lfs->cfg->read_size - (off-rcache->off));
            memcpy(data, &rcache->buffer[off-rcache->off], diff);

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        if (off % lfs->cfg->read_size == 0 && size >= lfs->cfg->read_size) {
            // bypass cache?
            lfs_size_t diff = size - (size % lfs->cfg->read_size);
            int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
            if (err) {
                return err;
            }

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        // load to cache, first condition can no longer fail
        LFS_ASSERT(block < lfs->cfg->block_count);
        rcache->block = block;
        rcache->off = off - (off % lfs->cfg->read_size);
        int err = lfs->cfg->read(lfs->cfg, rcache->block,
                rcache->off, rcache->buffer, lfs->cfg->read_size);
        if (err) {
            return err;
        }
    }

    return 0;
}
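
// Worked example of the lookup order above (an illustration, not taken from
// the original source): with read_size=16, a read at off=7, size=40 that
// misses both caches first falls through to the cache fill (off is not
// aligned), loads bytes 0..15 of the block into rcache, then copies 9 bytes
// on the next pass; the following iteration (off=16, size=31) takes the
// bypass path for 16 bytes, and a final cache fill serves the remaining 15.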

static int lfs_cache_cmp(lfs_t *lfs, lfs_cache_t *rcache,
        const lfs_cache_t *pcache, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;

    for (lfs_off_t i = 0; i < size; i++) {
        uint8_t c;
        int err = lfs_cache_read(lfs, rcache, pcache,
                block, off+i, &c, 1);
        if (err) {
            return err;
        }

        if (c != data[i]) {
            return false;
        }
    }

    return true;
}

static int lfs_cache_crc(lfs_t *lfs, lfs_cache_t *rcache,
        const lfs_cache_t *pcache, lfs_block_t block,
        lfs_off_t off, lfs_size_t size, uint32_t *crc) {
    for (lfs_off_t i = 0; i < size; i++) {
        uint8_t c;
        int err = lfs_cache_read(lfs, rcache, pcache,
                block, off+i, &c, 1);
        if (err) {
            return err;
        }

        lfs_crc(crc, &c, 1);
    }

    return 0;
}

static int lfs_cache_flush(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache) {
    if (pcache->block != 0xffffffff) {
        LFS_ASSERT(pcache->block < lfs->cfg->block_count);
        int err = lfs->cfg->prog(lfs->cfg, pcache->block,
                pcache->off, pcache->buffer, lfs->cfg->prog_size);
        if (err) {
            return err;
        }

        if (rcache) {
            int res = lfs_cache_cmp(lfs, rcache, NULL, pcache->block,
                    pcache->off, pcache->buffer, lfs->cfg->prog_size);
            if (res < 0) {
                return res;
            }

            if (!res) {
                return LFS_ERR_CORRUPT;
            }
        }

        pcache->block = 0xffffffff;
    }

    return 0;
}

static int lfs_cache_prog(lfs_t *lfs, lfs_cache_t *pcache,
        lfs_cache_t *rcache, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    LFS_ASSERT(block != 0xffffffff);
    LFS_ASSERT(off + size <= lfs->cfg->block_size);

    while (size > 0) {
        if (block == pcache->block && off >= pcache->off &&
                off < pcache->off + lfs->cfg->prog_size) {
            // is already in pcache?
            lfs_size_t diff = lfs_min(size,
                    lfs->cfg->prog_size - (off-pcache->off));
            memcpy(&pcache->buffer[off-pcache->off], data, diff);

            data += diff;
            off += diff;
            size -= diff;

            if (off % lfs->cfg->prog_size == 0) {
                // eagerly flush out pcache if we fill up
                int err = lfs_cache_flush(lfs, pcache, rcache);
                if (err) {
                    return err;
                }
            }

            continue;
        }

        // pcache must have been flushed, either by programming an
        // entire block or manually flushing the pcache
        LFS_ASSERT(pcache->block == 0xffffffff);

        if (off % lfs->cfg->prog_size == 0 &&
                size >= lfs->cfg->prog_size) {
            // bypass pcache?
            LFS_ASSERT(block < lfs->cfg->block_count);
            lfs_size_t diff = size - (size % lfs->cfg->prog_size);
            int err = lfs->cfg->prog(lfs->cfg, block, off, data, diff);
            if (err) {
                return err;
            }

            if (rcache) {
                int res = lfs_cache_cmp(lfs, rcache, NULL,
                        block, off, data, diff);
                if (res < 0) {
                    return res;
                }

                if (!res) {
                    return LFS_ERR_CORRUPT;
                }
            }

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        // prepare pcache, first condition can no longer fail
        pcache->block = block;
        pcache->off = off - (off % lfs->cfg->prog_size);
    }

    return 0;
}
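
// Note on cache wiring (editorial summary, not from the original source):
// lfs->rcache and lfs->pcache are the filesystem-level read and program
// caches used by the lfs_bd_* wrappers below, while open files carry their
// own lfs_cache_t. Programs are verified by reading the data back through
// lfs_cache_cmp, so a failed verify surfaces as LFS_ERR_CORRUPT and lets
// callers relocate the block.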

/// General lfs block device operations ///
static int lfs_bd_read(lfs_t *lfs, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    return lfs_cache_read(lfs, &lfs->rcache, &lfs->pcache,
            block, off, buffer, size);
}

static int lfs_bd_prog(lfs_t *lfs, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size) {
    return lfs_cache_prog(lfs, &lfs->pcache, NULL,
            block, off, buffer, size);
}

static int lfs_bd_cmp(lfs_t *lfs, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size) {
    return lfs_cache_cmp(lfs, &lfs->rcache, NULL, block, off, buffer, size);
}

static int lfs_bd_crc(lfs_t *lfs, lfs_block_t block,
        lfs_off_t off, lfs_size_t size, uint32_t *crc) {
    return lfs_cache_crc(lfs, &lfs->rcache, NULL, block, off, size, crc);
}

static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
    LFS_ASSERT(block < lfs->cfg->block_count);
    return lfs->cfg->erase(lfs->cfg, block);
}

static int lfs_bd_sync(lfs_t *lfs) {
    lfs->rcache.block = 0xffffffff;

    int err = lfs_cache_flush(lfs, &lfs->pcache, NULL);
    if (err) {
        return err;
    }

    return lfs->cfg->sync(lfs->cfg);
}

/// Internal operations predeclared here ///
int lfs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
static int lfs_pred(lfs_t *lfs, const lfs_block_t dir[2], lfs_dir_t *pdir);
static int lfs_parent(lfs_t *lfs, const lfs_block_t dir[2],
        lfs_dir_t *parent, lfs_entry_t *entry);
static int lfs_moved(lfs_t *lfs, const void *e);
static int lfs_relocate(lfs_t *lfs,
        const lfs_block_t oldpair[2], const lfs_block_t newpair[2]);
int lfs_deorphan(lfs_t *lfs);

/// Block allocator ///
static int lfs_alloc_lookahead(lfs_t *lfs, void *p, lfs_block_t block) {
    lfs_block_t off = ((block - lfs->free.off)
            + lfs->cfg->block_count) % lfs->cfg->block_count;

    if (off < lfs->free.size) {
        lfs->free.buffer[off / 32] |= 1U << (off % 32);
    }

    return 0;
}

static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
    while (true) {
        while (lfs->free.i != lfs->free.size) {
            lfs_block_t off = lfs->free.i;
            lfs->free.i += 1;
            lfs->free.ack -= 1;

            if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
                // found a free block
                *block = (lfs->free.off + off) % lfs->cfg->block_count;

                // eagerly find next off so an alloc ack can
                // discredit old lookahead blocks
                while (lfs->free.i != lfs->free.size &&
                        (lfs->free.buffer[lfs->free.i / 32]
                            & (1U << (lfs->free.i % 32)))) {
                    lfs->free.i += 1;
                    lfs->free.ack -= 1;
                }

                return 0;
            }
        }

        // check if we have looked at all blocks since last ack
        if (lfs->free.ack == 0) {
            LFS_WARN("No more free space %d", lfs->free.i + lfs->free.off);
            return LFS_ERR_NOSPC;
        }

        lfs->free.off = (lfs->free.off + lfs->free.size)
                % lfs->cfg->block_count;
        lfs->free.size = lfs_min(lfs->cfg->lookahead, lfs->free.ack);
        lfs->free.i = 0;

        // find mask of free blocks from tree
        memset(lfs->free.buffer, 0, lfs->cfg->lookahead/8);
        int err = lfs_traverse_(lfs, lfs_alloc_lookahead, NULL);
        if (err) {
            return err;
        }
    }
}

static void lfs_alloc_ack(lfs_t *lfs) {
    lfs->free.ack = lfs->cfg->block_count;
}
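
// Worked example of the lookahead window (an illustration, not from the
// original source): with block_count=128, lookahead=64, free.off=32 and
// free.size=64, the window covers blocks 32..95. A traversal callback for
// block 40 computes off = (40-32+128)%128 = 8 and sets bit 8 of the buffer,
// marking that block in use; blocks outside the window are ignored until
// the window advances. free.ack counts down so that a full pass over all
// blocks without an lfs_alloc_ack() is reported as LFS_ERR_NOSPC.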

/// Endian swapping functions ///
static void lfs_dir_fromle32(struct lfs_disk_dir *d) {
    d->rev = lfs_fromle32(d->rev);
    d->size = lfs_fromle32(d->size);
    d->tail[0] = lfs_fromle32(d->tail[0]);
    d->tail[1] = lfs_fromle32(d->tail[1]);
}

static void lfs_dir_tole32(struct lfs_disk_dir *d) {
    d->rev = lfs_tole32(d->rev);
    d->size = lfs_tole32(d->size);
    d->tail[0] = lfs_tole32(d->tail[0]);
    d->tail[1] = lfs_tole32(d->tail[1]);
}

static void lfs_entry_fromle32(struct lfs_disk_entry *d) {
    d->u.dir[0] = lfs_fromle32(d->u.dir[0]);
    d->u.dir[1] = lfs_fromle32(d->u.dir[1]);
}

static void lfs_entry_tole32(struct lfs_disk_entry *d) {
    d->u.dir[0] = lfs_tole32(d->u.dir[0]);
    d->u.dir[1] = lfs_tole32(d->u.dir[1]);
}

/*static*/ void lfs_superblock_fromle32(struct lfs_disk_superblock *d) {
    d->root[0] = lfs_fromle32(d->root[0]);
    d->root[1] = lfs_fromle32(d->root[1]);
    d->block_size = lfs_fromle32(d->block_size);
    d->block_count = lfs_fromle32(d->block_count);
    d->version = lfs_fromle32(d->version);
    d->inline_size = lfs_fromle32(d->inline_size);
    d->attrs_size = lfs_fromle32(d->attrs_size);
    d->name_size = lfs_fromle32(d->name_size);
}

/*static*/ void lfs_superblock_tole32(struct lfs_disk_superblock *d) {
    d->root[0] = lfs_tole32(d->root[0]);
    d->root[1] = lfs_tole32(d->root[1]);
    d->block_size = lfs_tole32(d->block_size);
    d->block_count = lfs_tole32(d->block_count);
    d->version = lfs_tole32(d->version);
    d->inline_size = lfs_tole32(d->inline_size);
    d->attrs_size = lfs_tole32(d->attrs_size);
    d->name_size = lfs_tole32(d->name_size);
}

/// Other struct functions ///
static inline lfs_size_t lfs_entry_elen(const lfs_entry_t *entry) {
    return (lfs_size_t)(entry->d.elen) |
        ((lfs_size_t)(entry->d.alen & 0xc0) << 2);
}

static inline lfs_size_t lfs_entry_alen(const lfs_entry_t *entry) {
    return entry->d.alen & 0x3f;
}

static inline lfs_size_t lfs_entry_nlen(const lfs_entry_t *entry) {
    return entry->d.nlen;
}

static inline lfs_size_t lfs_entry_size(const lfs_entry_t *entry) {
    return 4 + lfs_entry_elen(entry) +
        lfs_entry_alen(entry) +
        lfs_entry_nlen(entry);
}
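
// Worked example of the elen/alen packing above (an illustration, not from
// the original source): the top two bits of the on-disk alen byte extend
// elen to 10 bits, so with d.elen=0x34 and d.alen=0xc5 the entry length is
// 0x34 | ((0xc5 & 0xc0) << 2) = 0x334 bytes, the attribute length is
// 0xc5 & 0x3f = 5 bytes, and lfs_entry_size adds the 4-byte entry header
// plus nlen for the name.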

/// Metadata pair and directory operations ///
static inline void lfs_pairswap(lfs_block_t pair[2]) {
    lfs_block_t t = pair[0];
    pair[0] = pair[1];
    pair[1] = t;
}

static inline bool lfs_pairisnull(const lfs_block_t pair[2]) {
    return pair[0] == 0xffffffff || pair[1] == 0xffffffff;
}

static inline int lfs_paircmp(
        const lfs_block_t paira[2],
        const lfs_block_t pairb[2]) {
    return !(paira[0] == pairb[0] || paira[1] == pairb[1] ||
             paira[0] == pairb[1] || paira[1] == pairb[0]);
}

static inline bool lfs_pairsync(
        const lfs_block_t paira[2],
        const lfs_block_t pairb[2]) {
    return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
           (paira[0] == pairb[1] && paira[1] == pairb[0]);
}

/// Entry tag operations ///
static inline lfs_tag_t lfs_mktag(
        uint16_t type, uint16_t id, lfs_size_t size) {
    return (type << 22) | (id << 12) | size;
}

static inline bool lfs_tag_valid(lfs_tag_t tag) {
    return !(tag & 0x80000000);
}

static inline uint16_t lfs_tag_type(lfs_tag_t tag) {
    return (tag & 0x7fc00000) >> 22;
}

static inline uint8_t lfs_tag_supertype(lfs_tag_t tag) {
    return (tag & 0x70000000) >> 22;
}

static inline uint8_t lfs_tag_subtype(lfs_tag_t tag) {
    return (tag & 0x7c000000) >> 22;
}

static inline uint8_t lfs_tag_struct(lfs_tag_t tag) {
    return (tag & 0x03c00000) >> 22;
}

static inline uint16_t lfs_tag_id(lfs_tag_t tag) {
    return (tag & 0x001ff000) >> 12;
}

static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) {
    return tag & 0x00000fff;
}
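
// Tag layout implied by the masks above (editorial summary, not from the
// original source): bit 31 is the valid bit (stored inverted), bits 30..22
// hold the 9-bit type, bits 20..12 the 9-bit id, and bits 11..0 the 12-bit
// size. For example, lfs_mktag(t, 0x42, 8) packs t<<22 | 0x42<<12 | 8, so
// lfs_tag_id() recovers 0x42 and lfs_tag_size() recovers 8.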

struct lfs_commit {
    lfs_block_t block;
    lfs_off_t off;
    lfs_off_t begin;
    lfs_off_t end;

    lfs_tag_t ptag;
    uint32_t crc;

    struct {
        int16_t id;
        uint16_t type;
    } compact;
};

static int lfs_commit_traverse(lfs_t *lfs, struct lfs_commit *commit,
        int (*cb)(lfs_t *lfs, void *data, lfs_entry_t_ entry),
        void *data) {
    // iterate over dir block backwards (for faster lookups)
    lfs_block_t block = commit->block;
    lfs_off_t off = commit->off;
    lfs_tag_t tag = commit->ptag;

    while (off != sizeof(uint32_t)) {
        printf("tag r %#010x (%x:%x)\n", tag, block, off-lfs_tag_size(tag));
        int err = cb(lfs, data, (lfs_entry_t_){
                (0x80000000 | tag),
                .u.d.block=block,
                .u.d.off=off-lfs_tag_size(tag)});
        if (err) {
            return err;
        }

        LFS_ASSERT(off > sizeof(tag)+lfs_tag_size(tag));
        off -= sizeof(tag)+lfs_tag_size(tag);

        lfs_tag_t ntag;
        err = lfs_bd_read(lfs, block, off, &ntag, sizeof(ntag));
        if (err) {
            return err;
        }

        tag ^= lfs_fromle32(ntag);
    }

    return 0;
}

static int lfs_commit_compactcheck(lfs_t *lfs, void *p, lfs_entry_t_ entry) {
    struct lfs_commit *commit = p;
    if (lfs_tag_id(entry.tag) != commit->compact.id) {
        return 1;
    } else if (lfs_tag_type(entry.tag) == commit->compact.type) {
        return 2;
    }

    return 0;
}

static int lfs_commit_commit(lfs_t *lfs,
        struct lfs_commit *commit, lfs_entry_t_ entry) {
    // request for compaction?
    if (commit->compact.id >= 0) {
        if (lfs_tag_id(entry.tag) != commit->compact.id) {
            // ignore non-matching ids
            return 0;
        }

        commit->compact.type = lfs_tag_type(entry.tag);
        int res = lfs_commit_traverse(lfs, commit,
                lfs_commit_compactcheck, commit);
        if (res < 0) {
            return res;
        }

        if (res == 2) {
            // already committed
            return 0;
        }
    }

    // check if we fit
    lfs_size_t size = lfs_tag_size(entry.tag);
    if (commit->off + sizeof(lfs_tag_t)+size > commit->end) {
        return LFS_ERR_NOSPC;
    }

    // write out tag
    printf("tag w %#010x (%x:%x)\n", entry.tag, commit->block, commit->off+sizeof(lfs_tag_t));
    lfs_tag_t tag = lfs_tole32((entry.tag & 0x7fffffff) ^ commit->ptag);
    lfs_crc(&commit->crc, &tag, sizeof(tag));
    int err = lfs_bd_prog(lfs, commit->block, commit->off, &tag, sizeof(tag));
    if (err) {
        return err;
    }
    commit->off += sizeof(tag);

    if (!(entry.tag & 0x80000000)) {
        // from memory
        lfs_crc(&commit->crc, entry.u.buffer, size);
        err = lfs_bd_prog(lfs, commit->block, commit->off,
                entry.u.buffer, size);
        if (err) {
            return err;
        }
    } else {
        // from disk
        for (lfs_off_t i = 0; i < size; i++) {
            uint8_t dat;
            int err = lfs_bd_read(lfs,
                    entry.u.d.block, entry.u.d.off+i, &dat, 1);
            if (err) {
                return err;
            }

            lfs_crc(&commit->crc, &dat, 1);
            err = lfs_bd_prog(lfs, commit->block, commit->off+i, &dat, 1);
            if (err) {
                return err;
            }
        }
    }

    commit->off += size;
    commit->ptag = entry.tag;

    return 0;
}

static int lfs_commit_crc(lfs_t *lfs, struct lfs_commit *commit) {
    // align to program units
    lfs_off_t noff = lfs_alignup(
            commit->off + 2*sizeof(uint32_t), lfs->cfg->prog_size);

    // read erased state from next program unit
    lfs_tag_t tag;
    int err = lfs_bd_read(lfs, commit->block, noff, &tag, sizeof(tag));
    if (err) {
        return err;
    }

    // build crc tag
    tag = (0x80000000 & ~lfs_fromle32(tag)) |
            lfs_mktag(LFS_TYPE_CRC_, 0x1ff,
                noff - (commit->off+sizeof(uint32_t)));

    // write out crc
    printf("tag w %#010x (%x:%x)\n", tag, commit->block, commit->off+sizeof(tag));
    uint32_t footer[2];
    footer[0] = lfs_tole32(tag ^ commit->ptag);
    lfs_crc(&commit->crc, &footer[0], sizeof(footer[0]));
    footer[1] = lfs_tole32(commit->crc);
    err = lfs_bd_prog(lfs, commit->block, commit->off,
            footer, sizeof(footer));
    if (err) {
        return err;
    }
    commit->off += sizeof(tag)+lfs_tag_size(tag);
    commit->ptag = tag;

    // flush buffers
    err = lfs_bd_sync(lfs);
    if (err) {
        return err;
    }

    // successful commit, check checksum to make sure
    uint32_t crc = 0xffffffff;
    err = lfs_bd_crc(lfs, commit->block, commit->begin,
            commit->off-lfs_tag_size(tag) - commit->begin, &crc);
    if (err) {
        return err;
    }

    if (crc != commit->crc) {
        return LFS_ERR_CORRUPT;
    }

    return 0;
}
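
// On-disk commit format implied by the code above (editorial summary, not
// from the original source): a commit is a sequence of
// [tag ^ previous-tag][data] pairs terminated by a CRC tag whose size field
// pads the commit out to a prog_size boundary and whose footer carries the
// running CRC of everything since commit->begin. XOR-chaining the tags is
// what lets lfs_commit_traverse walk the log backwards from the last tag.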

/*static*/ int lfs_dir_alloc_(lfs_t *lfs, lfs_dir_t_ *dir,
        const lfs_block_t tail[2]) {
    // allocate pair of dir blocks (backwards, so we write to block 1 first)
    for (int i = 0; i < 2; i++) {
        int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]);
        if (err) {
            return err;
        }
    }

    // rather than clobbering one of the blocks we just pretend
    // the revision may be valid
    int err = lfs_bd_read(lfs, dir->pair[0], 0, &dir->rev, 4);
    dir->rev = lfs_fromle32(dir->rev);
    if (err) {
        return err;
    }

    // set defaults
    dir->off = sizeof(dir->rev);
    dir->etag = 0;
    dir->count = 0;
    dir->tail[0] = tail[0];
    dir->tail[1] = tail[1];
    dir->erased = false;
    dir->split = false;

    // don't write out yet, let caller take care of that
    return 0;
}

/*static*/ int lfs_dir_fetchwith_(lfs_t *lfs,
        lfs_dir_t_ *dir, const lfs_block_t pair[2],
        int (*cb)(lfs_t *lfs, void *data, lfs_entry_t_ entry),
        void *data) {
    dir->pair[0] = pair[0];
    dir->pair[1] = pair[1];

    // find the block with the most recent revision
    uint32_t rev[2];
    for (int i = 0; i < 2; i++) {
        int err = lfs_bd_read(lfs, dir->pair[i], 0, &rev[i], sizeof(rev[i]));
        rev[i] = lfs_fromle32(rev[i]);
        if (err) {
            return err;
        }
    }

    if (lfs_scmp(rev[1], rev[0]) > 0) {
        lfs_pairswap(dir->pair);
        lfs_pairswap(rev);
    }

    // load blocks and check crc
    for (int i = 0; i < 2; i++) {
        lfs_off_t off = sizeof(dir->rev);
        lfs_tag_t ptag = 0;
        uint32_t crc = 0xffffffff;
        dir->tail[0] = 0xffffffff;
        dir->tail[1] = 0xffffffff;
        dir->count = 0;
        dir->split = false;

        dir->rev = lfs_tole32(rev[0]);
        lfs_crc(&crc, &dir->rev, sizeof(dir->rev));
        dir->rev = lfs_fromle32(dir->rev);

        while (true) {
            // extract next tag
            lfs_tag_t tag;
            int err = lfs_bd_read(lfs, dir->pair[0], off, &tag, sizeof(tag));
            if (err) {
                return err;
            }

            lfs_crc(&crc, &tag, sizeof(tag));
            tag = lfs_fromle32(tag) ^ ptag;

            // next commit not yet programmed
            if (lfs_tag_type(ptag) == LFS_TYPE_CRC_ && !lfs_tag_valid(tag)) {
                dir->erased = true;
                return 0;
            }

            // check we're in valid range
            if (off + sizeof(tag)+lfs_tag_size(tag) >
                    lfs->cfg->block_size - 2*sizeof(uint32_t)) {
                break;
            }

            printf("tag r %#010x (%x:%x)\n", tag, dir->pair[0], off+sizeof(tag));
            if (lfs_tag_type(tag) == LFS_TYPE_CRC_) {
                // check the crc entry
                uint32_t dcrc;
                int err = lfs_bd_read(lfs, dir->pair[0],
                        off+sizeof(tag), &dcrc, sizeof(dcrc));
                if (err) {
                    return err;
                }

                if (crc != lfs_fromle32(dcrc)) {
                    if (off == sizeof(dir->rev)) {
                        // try other block
                        break;
                    } else {
                        // consider what we have good enough
                        dir->erased = false;
                        return 0;
                    }
                }

                dir->off = off + sizeof(tag)+lfs_tag_size(tag);
                dir->etag = tag;
                crc = 0xffffffff;
            } else {
                err = lfs_bd_crc(lfs, dir->pair[0],
                        off+sizeof(tag), lfs_tag_size(tag), &crc);
                if (err) {
                    return err;
                }

                // TODO handle deletes and stuff
                if (lfs_tag_id(tag) < 0x1ff && lfs_tag_id(tag) >= dir->count) {
                    dir->count = lfs_tag_id(tag)+1;
                }

                if (lfs_tag_type(tag) == LFS_TYPE_SOFTTAIL_ ||
                        lfs_tag_type(tag) == LFS_TYPE_HARDTAIL_) {
                    dir->split = lfs_tag_type(tag) == LFS_TYPE_HARDTAIL_;
                    err = lfs_bd_read(lfs, dir->pair[0], off+sizeof(tag),
                            dir->tail, sizeof(dir->tail));
                    if (err) {
                        return err;
                    }
                } else if (cb) {
                    err = cb(lfs, data, (lfs_entry_t_){
                            (tag | 0x80000000),
                            .u.d.block=dir->pair[0],
                            .u.d.off=off+sizeof(tag)});
                    if (err) {
                        return err;
                    }
                }
            }

            ptag = tag;
            off += sizeof(tag)+lfs_tag_size(tag);
        }

        // failed, try the other crc?
        lfs_pairswap(dir->pair);
        lfs_pairswap(rev);
    }

    LFS_ERROR("Corrupted dir pair at %d %d", dir->pair[0], dir->pair[1]);
    return LFS_ERR_CORRUPT;
}
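
// Fetch algorithm in brief (editorial summary, not from the original
// source): pick the half of the pair with the larger revision, then replay
// its commits tag by tag while accumulating a CRC; a matching CRC tag moves
// dir->off/etag forward, a mismatch on the very first commit falls back to
// the other block, and a mismatch later simply truncates to the last good
// commit. Only if both blocks fail is the pair reported as corrupted.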

/*static*/ int lfs_dir_fetch_(lfs_t *lfs,
        lfs_dir_t_ *dir, const lfs_block_t pair[2]) {
    return lfs_dir_fetchwith_(lfs, dir, pair, NULL, NULL);
}

static int lfs_dir_traverse_(lfs_t *lfs, lfs_dir_t_ *dir,
        int (*cb)(lfs_t *lfs, void *data, lfs_entry_t_ entry),
        void *data) {
    return lfs_commit_traverse(lfs, &(struct lfs_commit){
            .block=dir->pair[0], .off=dir->off, .ptag=dir->etag},
            cb, data);
}

struct lfs_dir_mover {
    // traversal things
    lfs_dir_t_ *dir;
    int (*cb)(lfs_t *lfs, void *data, struct lfs_commit *commit);
    void *data;

    // ids to iterate through
    uint16_t begin;
    uint16_t end;
    uint16_t ack;
};

static int lfs_dir_mover_commit(lfs_t *lfs, void *p,
        lfs_entry_t_ entry) {
    return lfs_commit_commit(lfs, p, entry);
}

int lfs_dir_mover(lfs_t *lfs, void *p, struct lfs_commit *commit) {
    struct lfs_dir_mover *mover = p;
    for (int i = mover->begin; i < mover->end; i++) {
        // tell the committer to check for duplicates
        uint16_t old = commit->compact.id;
        if (commit->compact.id < 0) {
            commit->compact.id = i;
        }

        // commit pending commits
        int err = mover->cb(lfs, mover->data, commit);
        if (err) {
            commit->compact.id = old;
            return err;
        }

        // iterate over on-disk regions
        err = lfs_dir_traverse_(lfs, mover->dir,
                lfs_dir_mover_commit, commit);
        if (err) {
            commit->compact.id = old;
            return err;
        }

        mover->ack = i;
        commit->compact.id = old;
    }

    return 0;
}

/*static*/ int lfs_dir_compact_(lfs_t *lfs, lfs_dir_t_ *dir,
        int (*cb)(lfs_t *lfs, void *data, struct lfs_commit *commit),
        void *data) {
    // save some state in case block is bad
    const lfs_block_t oldpair[2] = {dir->pair[1], dir->pair[0]};
    bool relocated = false;

    // increment revision count
    dir->rev += 1;

    while (true) {
        // setup mover
        struct lfs_dir_mover mover = {
            .dir = dir,
            .cb = cb,
            .data = data,

            .begin = 0,
            .end = dir->count,
            .ack = 0,
        };

        if (true) {
            // erase block to write to
            int err = lfs_bd_erase(lfs, dir->pair[1]);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // write out header
            uint32_t crc = 0xffffffff;
            uint32_t rev = lfs_tole32(dir->rev);
            lfs_crc(&crc, &rev, sizeof(rev));
            err = lfs_bd_prog(lfs, dir->pair[1], 0, &rev, sizeof(rev));
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // setup compaction
            struct lfs_commit commit = {
                .block = dir->pair[1],
                .off = sizeof(dir->rev),
                // leave space for tail pointer
                .begin = 0,
                .end = lfs_min(lfs->cfg->block_size - 5*sizeof(uint32_t),
                        lfs_alignup(lfs->cfg->block_size / 2,
                            lfs->cfg->prog_size)),
                .crc = crc,
                .ptag = 0,
                .compact.id = -1,
            };

            // run compaction over mover
            err = lfs_dir_mover(lfs, &mover, &commit);
            if (err) {
                if (err == LFS_ERR_NOSPC) {
                    goto split;
                } else if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            if (!lfs_pairisnull(dir->tail)) {
                // TODO le32
                commit.end = lfs->cfg->block_size - 2*sizeof(uint32_t),
                err = lfs_commit_commit(lfs, &commit, (lfs_entry_t_){
                        lfs_mktag(LFS_TYPE_SOFTTAIL_ + dir->split,
                            0x1ff, sizeof(dir->tail)),
                        .u.buffer=dir->tail});
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }
            }

            err = lfs_commit_crc(lfs, &commit);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // successful compaction, swap dir pair to indicate most recent
            lfs_pairswap(dir->pair);
            dir->off = commit.off;
            dir->etag = commit.ptag;
            dir->erased = true;
        }
        break;

split:
        // commit no longer fits, need to split dir
        dir->count = mover.ack;
        mover.begin = mover.ack+1;

        // drop caches and create tail
        lfs->pcache.block = 0xffffffff;

        lfs_dir_t_ tail;
        int err = lfs_dir_alloc_(lfs, &tail, dir->tail);
        if (err) {
            return err;
        }

        err = lfs_dir_compact_(lfs, &tail, lfs_dir_mover, &mover);
        if (err) {
            return err;
        }

        dir->tail[0] = tail.pair[0];
        dir->tail[1] = tail.pair[1];
        dir->split = true;
        continue;

relocate:
        // commit was corrupted
        LFS_DEBUG("Bad block at %d", dir->pair[1]);

        // drop caches and prepare to relocate block
        relocated = true;
        lfs->pcache.block = 0xffffffff;

        // can't relocate superblock, filesystem is now frozen
        if (lfs_paircmp(oldpair, (const lfs_block_t[2]){0, 1}) == 0) {
            LFS_WARN("Superblock %d has become unwritable", oldpair[1]);
            return LFS_ERR_CORRUPT;
        }

        // relocate half of pair
        err = lfs_alloc(lfs, &dir->pair[1]);
        if (err) {
            return err;
        }

        continue;
    }

    if (relocated) {
        // update references if we relocated
        LFS_DEBUG("Relocating %d %d to %d %d",
                oldpair[0], oldpair[1], dir->pair[0], dir->pair[1]);
        int err = lfs_relocate(lfs, oldpair, dir->pair);
        if (err) {
            return err;
        }
    }

    // shift over any directories that are affected
    for (lfs_dir_t *d = lfs->dirs; d; d = d->next) {
        if (lfs_paircmp(d->pair, dir->pair) == 0) {
            d->pair[0] = dir->pair[0];
            d->pair[1] = dir->pair[1];
        }
    }

    return 0;
}
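
// Compaction flow in brief (editorial summary, not from the original
// source): entries are re-committed id by id into the freshly erased half
// of the pair; if they stop fitting within roughly half a block, the
// remaining ids are split off into a newly allocated tail directory, and if
// a program fails verification the half being written is relocated to a
// fresh block (except for the superblock pair, which cannot move).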

/*static*/ int lfs_dir_commitwith_(lfs_t *lfs, lfs_dir_t_ *dir,
        int (*cb)(lfs_t *lfs, void *data, struct lfs_commit *commit),
        void *data) {
    if (!dir->erased) {
        // not erased, must compact
        return lfs_dir_compact_(lfs, dir, cb, data);
    }

    struct lfs_commit commit = {
        .block = dir->pair[0],
        .begin = dir->off,
        .off = dir->off,
        .end = lfs->cfg->block_size - 2*sizeof(uint32_t),
        .crc = 0xffffffff,
        .ptag = dir->etag,
        .compact.id = -1,
    };

    int err = cb(lfs, data, &commit);
    if (err) {
        if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
            return lfs_dir_compact_(lfs, dir, cb, data);
        }
        return err;
    }

    err = lfs_commit_crc(lfs, &commit);
    if (err) {
        if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
            return lfs_dir_compact_(lfs, dir, cb, data);
        }
        return err;
    }

    // successful commit, let's update dir
    dir->off = commit.off;
    dir->etag = commit.ptag;
    return 0;
}

struct lfs_dir_committer {
    const lfs_entry_t_ *regions;
    int count;
};

int lfs_dir_committer(lfs_t *lfs, void *p, struct lfs_commit *commit) {
    struct lfs_dir_committer *set = p;
    for (int i = 0; i < set->count; i++) {
        int err = lfs_commit_commit(lfs, commit, set->regions[i]);
        if (err) {
            return err;
        }
    }

    return 0;
}

/*static*/ int lfs_dir_commit_(lfs_t *lfs, lfs_dir_t_ *dir,
        const lfs_entry_t_ *regions, int count) {
    return lfs_dir_commitwith_(lfs, dir, lfs_dir_committer,
            &(struct lfs_dir_committer){regions, count});
}
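
// Usage sketch (an illustration, not from the original source): callers
// bundle one or more tagged regions into a single atomic commit, e.g.
//
//     err = lfs_dir_commit_(lfs, dir, (lfs_entry_t_[]){
//             {lfs_mktag(LFS_TYPE_NAME_, id, namelen), .u.buffer=name},
//             {lfs_mktag(LFS_TYPE_DIR_, id, sizeof(pair)), .u.buffer=pair}},
//             2);
//
// where the tag types, id, and buffers are hypothetical; either all regions
// land behind one CRC tag or none do.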

/*static*/ int lfs_dir_add(lfs_t *lfs, lfs_dir_t_ *dir, uint16_t *id) {
    *id = dir->count;
    dir->count += 1;
    return 0;
}

/*static*/ int lfs_dir_drop(lfs_t *lfs, lfs_dir_t_ *dir, uint16_t id) {
    dir->count -= 1;
    // TODO compact during traverse when compacting?
    return lfs_dir_commit_(lfs, dir, (lfs_entry_t_[]){{
            lfs_mktag(LFS_TYPE_DROP_, id, 0)}}, 1);
}

struct lfs_dir_getter {
    uint32_t mask;
    lfs_tag_t tag;
    lfs_entry_t_ *entry;
};

static int lfs_dir_getter(lfs_t *lfs, void *p, lfs_entry_t_ entry) {
    struct lfs_dir_getter *get = p;
    if ((entry.tag & get->mask) == (get->tag & get->mask)) {
        if (get->entry) {
            *get->entry = entry;
        }
        return true;
    }

    return false;
}

/*static*/ int lfs_dir_get_(lfs_t *lfs, lfs_dir_t_ *dir,
        uint32_t mask, lfs_tag_t tag, lfs_entry_t_ *entry) {
    int res = lfs_dir_traverse_(lfs, dir, lfs_dir_getter,
            &(struct lfs_dir_getter){mask, tag, entry});
    if (res < 0) {
        return res;
    }

    if (!res) {
        return LFS_ERR_NOENT;
    }

    return 0;
}

/*static*/ int lfs_dir_getbuffer_(lfs_t *lfs, lfs_dir_t_ *dir,
        uint32_t mask, lfs_tag_t tag, lfs_entry_t_ *entry) {
    void *buffer = entry->u.buffer;
    lfs_size_t size = lfs_tag_size(tag);
    int err = lfs_dir_get_(lfs, dir, mask, tag, entry);
    if (err) {
        return err;
    }

    lfs_size_t diff = lfs_min(size, lfs_tag_size(entry->tag));
    memset((uint8_t*)buffer + diff, 0, size - diff);
    err = lfs_bd_read(lfs, entry->u.d.block, entry->u.d.off, buffer, diff);
    if (err) {
        return err;
    }

    if (lfs_tag_size(entry->tag) > size) {
        return LFS_ERR_RANGE;
    }

    return 0;
}

/*static*/ int lfs_dir_getentry_(lfs_t *lfs, lfs_dir_t_ *dir,
        uint32_t mask, lfs_tag_t tag, lfs_entry_t_ *entry) {
    entry->u.buffer = &entry->u;
    return lfs_dir_getbuffer_(lfs, dir, mask, tag, entry);
}
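
// Mask usage sketch (an illustration, not from the original source): the
// mask selects which tag bits must match, so a lookup by full type and id
// might pass mask = 0x7fc00000 | 0x001ff000 = 0x7fdff000 with
// tag = lfs_mktag(type, id, 0), while a lookup that only cares about the id
// would mask off the type bits. Because the traversal walks the log
// backwards, the newest matching entry is returned.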

struct lfs_dir_finder {
    const char *name;
    lfs_size_t len;

    int16_t id;
    lfs_entry_t_ *entry;
};

static int lfs_dir_finder(lfs_t *lfs, void *p, lfs_entry_t_ entry) {
    struct lfs_dir_finder *find = p;

    if (lfs_tag_type(entry.tag) == LFS_TYPE_NAME_ &&
            lfs_tag_size(entry.tag) == find->len) {
        int res = lfs_bd_cmp(lfs, entry.u.d.block, entry.u.d.off,
                find->name, find->len);
        if (res < 0) {
            return res;
        }

        if (res) {
            // found a match
            find->id = lfs_tag_id(entry.tag);
            find->entry->tag = 0xffffffff;
        }
    }

    if (find->id >= 0 && lfs_tag_id(entry.tag) == find->id &&
            lfs_tag_supertype(entry.tag) == LFS_TYPE_REG_) {
        *find->entry = entry;
    }

    return 0;
}

/*static*/ int lfs_dir_find_(lfs_t *lfs, lfs_dir_t_ *dir,
        const char **path, lfs_entry_t_ *entry) {
    struct lfs_dir_finder find = {
        .name = *path,
        .entry = entry,
    };

    // TODO make superblock
    entry->u.pair[0] = lfs->root[0];
    entry->u.pair[1] = lfs->root[1];

    while (true) {
nextname:
        // skip slashes
        find.name += strspn(find.name, "/");
        find.len = strcspn(find.name, "/");

        // special case for root dir
        if (find.name[0] == '\0') {
            // TODO set up root?
            entry->tag = LFS_STRUCT_DIR | LFS_TYPE_DIR;
            entry->u.pair[0] = lfs->root[0];
            entry->u.pair[1] = lfs->root[1];
            return lfs_mktag(LFS_TYPE_DIR_, 0x1ff, 0);
        }

        // skip '.' and root '..'
        if ((find.len == 1 && memcmp(find.name, ".", 1) == 0) ||
            (find.len == 2 && memcmp(find.name, "..", 2) == 0)) {
            find.name += find.len;
            goto nextname;
        }

        // skip if matched by '..' in name
        const char *suffix = find.name + find.len;
        lfs_size_t sufflen;
        int depth = 1;
        while (true) {
            suffix += strspn(suffix, "/");
            sufflen = strcspn(suffix, "/");
            if (sufflen == 0) {
                break;
            }

            if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
                depth -= 1;
                if (depth == 0) {
                    find.name = suffix + sufflen;
                    goto nextname;
                }
            } else {
                depth += 1;
            }

            suffix += sufflen;
        }

        // update what we've found
        *path = find.name;

        // find path
        while (true) {
            find.id = -1;
            int err = lfs_dir_fetchwith_(lfs, dir, entry->u.pair,
                    lfs_dir_finder, &find);
            if (err) {
                return err;
            }

            if (find.id >= 0) {
                // found it
                break;
            }

            if (lfs_pairisnull(dir->tail)) {
                return LFS_ERR_NOENT;
            }

            entry->u.pair[0] = dir->tail[0];
            entry->u.pair[1] = dir->tail[1];
        }

        // TODO handle moves
        // // check that entry has not been moved
        // if (entry->d.type & LFS_STRUCT_MOVED) {
        //     int moved = lfs_moved(lfs, &entry->d.u);
        //     if (moved < 0 || moved) {
        //         return (moved < 0) ? moved : LFS_ERR_NOENT;
        //     }
        //
        //     entry->d.type &= ~LFS_STRUCT_MOVED;
        // }

        find.name += find.len;
        find.name += strspn(find.name, "/");
        if (find.name[0] == '\0') {
            return 0;
        }

        // continue on if we hit a directory
        // TODO update with what's on master?
        if (lfs_tag_type(entry->tag) != LFS_TYPE_DIR_) {
            return LFS_ERR_NOTDIR;
        }
    }
}
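
// Path-walking note (editorial summary, not from the original source): each
// component is normalized first ('.', a leading '..', and "a/../" pairs are
// skipped lexically), then looked up by name across the directory and its
// tail chain; the search only descends further when the matched entry is a
// directory, otherwise LFS_ERR_NOTDIR is returned for a non-final component.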

/*static*/ int lfs_dir_findbuffer_(lfs_t *lfs, lfs_dir_t_ *dir,
        const char **path, lfs_entry_t_ *entry) {
    void *buffer = entry->u.buffer;
    lfs_size_t size = lfs_tag_size(entry->tag);
    int err = lfs_dir_find_(lfs, dir, path, entry);
    if (err) {
        return err;
    }

    lfs_size_t diff = lfs_min(size, lfs_tag_size(entry->tag));
    memset((uint8_t*)buffer + diff, 0, size - diff);
    err = lfs_bd_read(lfs, entry->u.d.block, entry->u.d.off, buffer, diff);
    if (err) {
        return err;
    }

    if (lfs_tag_size(entry->tag) > size) {
        return LFS_ERR_RANGE;
    }

    return 0;
}

/*static*/ int lfs_dir_findentry_(lfs_t *lfs, lfs_dir_t_ *dir,
        const char **path, lfs_entry_t_ *entry) {
    entry->tag = sizeof(entry->u);
    entry->u.buffer = &entry->u;
    return lfs_dir_findbuffer_(lfs, dir, path, entry);
}
//////////////////////////////////////////////////////////
|
|
|
|
|
2017-04-18 03:27:06 +00:00
|
|
|
static int lfs_dir_alloc(lfs_t *lfs, lfs_dir_t *dir) {
|
2017-04-29 17:41:53 +00:00
|
|
|
// allocate pair of dir blocks
|
2017-04-18 03:27:06 +00:00
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
int err = lfs_alloc(lfs, &dir->pair[i]);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-29 17:41:53 +00:00
|
|
|
// rather than clobbering one of the blocks we just pretend
|
2017-04-18 03:27:06 +00:00
|
|
|
// the revision may be valid
|
2017-06-24 05:43:05 +00:00
|
|
|
int err = lfs_bd_read(lfs, dir->pair[0], 0, &dir->d.rev, 4);
|
2018-02-02 11:58:43 +00:00
|
|
|
dir->d.rev = lfs_fromle32(dir->d.rev);
|
2017-04-18 03:27:06 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2017-03-05 20:11:52 +00:00
|
|
|
|
2017-04-29 17:41:53 +00:00
|
|
|
// set defaults
|
2017-04-18 03:27:06 +00:00
|
|
|
dir->d.rev += 1;
|
2017-06-24 01:03:44 +00:00
|
|
|
dir->d.size = sizeof(dir->d)+4;
|
2017-10-17 00:31:56 +00:00
|
|
|
dir->d.tail[0] = 0xffffffff;
|
|
|
|
dir->d.tail[1] = 0xffffffff;
|
2017-04-18 03:27:06 +00:00
|
|
|
dir->off = sizeof(dir->d);
|
|
|
|
|
2017-04-29 17:41:53 +00:00
|
|
|
// don't write out yet, let caller take care of that
|
2017-04-18 03:27:06 +00:00
|
|
|
return 0;
|
|
|
|
}

static int lfs_dir_fetch(lfs_t *lfs,
        lfs_dir_t *dir, const lfs_block_t pair[2]) {
    // copy out pair, otherwise may be aliasing dir
    const lfs_block_t tpair[2] = {pair[0], pair[1]};
    bool valid = false;

    // check both blocks for the most recent revision
    for (int i = 0; i < 2; i++) {
        struct lfs_disk_dir test;
        int err = lfs_bd_read(lfs, tpair[i], 0, &test, sizeof(test));
        lfs_dir_fromle32(&test);
        if (err) {
            return err;
        }

        if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) {
            continue;
        }

        if ((0x7fffffff & test.size) < sizeof(test)+4 ||
                (0x7fffffff & test.size) > lfs->cfg->block_size) {
            continue;
        }

        uint32_t crc = 0xffffffff;
        lfs_dir_tole32(&test);
        lfs_crc(&crc, &test, sizeof(test));
        lfs_dir_fromle32(&test);
        err = lfs_bd_crc(lfs, tpair[i], sizeof(test),
                (0x7fffffff & test.size) - sizeof(test), &crc);
        if (err) {
            return err;
        }

        if (crc != 0) {
            continue;
        }

        valid = true;

        // setup dir in case it's valid
        dir->pair[0] = tpair[(i+0) % 2];
        dir->pair[1] = tpair[(i+1) % 2];
        dir->off = sizeof(dir->d);
        dir->d = test;
    }

    if (!valid) {
        LFS_ERROR("Corrupted dir pair at %d %d", tpair[0], tpair[1]);
        return LFS_ERR_CORRUPT;
    }

    return 0;
}
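
// Illustrative sketch (assumption, not taken from this file): fetching an
// existing metadata pair before reading or updating it, for example the
// root pair kept in lfs->root:
//
//     lfs_dir_t cwd;
//     int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
//     if (err) {
//         return err;
//     }
//
// On success cwd.pair[0] refers to the block holding the most recent
// revision with a valid crc, and cwd.d holds its decoded header.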

struct lfs_region {
    enum {
        LFS_FROM_MEM,
        LFS_FROM_REGION,
        LFS_FROM_ATTRS,
    } type;

    lfs_off_t oldoff;
    lfs_size_t oldsize;
    const void *buffer;
    lfs_size_t newsize;
};
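
// Illustrative sketch (hypothetical values): a region describes how a span
// of the old metadata block maps onto the new one. Appending 16 new bytes
// at old offset `off` without replacing anything could be written as:
//
//     struct lfs_region region = {
//         .type = LFS_FROM_MEM,    // take the new bytes from RAM
//         .oldoff = off,           // where the change lands in the old block
//         .oldsize = 0,            // no old bytes are consumed
//         .buffer = &entry_d,      // hypothetical 16-byte entry image
//         .newsize = 16,           // bytes programmed into the new block
//     };
//
// Updating in place would use oldsize == newsize, and removing an entry
// would use newsize == 0.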

struct lfs_region_attrs {
    const struct lfs_attr *attrs;
    int count;
};

struct lfs_region_region {
    lfs_block_t block;
    lfs_off_t off;
    struct lfs_region *regions;
    int count;
};
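
// Illustrative sketch (hypothetical): LFS_FROM_REGION lets a commit pull a
// span from another block, with its own nested region list, instead of from
// RAM. A region that copies `len` bytes starting at `srcoff` of block
// `srcblock` unchanged might look like:
//
//     struct lfs_region_region disk = {
//         .block = srcblock,
//         .off = srcoff,
//         .regions = NULL,     // no nested edits, plain copy
//         .count = 0,
//     };
//     struct lfs_region region = {
//         .type = LFS_FROM_REGION,
//         .oldoff = off,       // hypothetical offset in the old block
//         .oldsize = 0,
//         .buffer = &disk,     // nested source description
//         .newsize = len,
//     };
//
// lfs_commit_region below recurses into the nested list when it hits this
// region type.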

static int lfs_commit_region(lfs_t *lfs, uint32_t *crc,
        lfs_block_t oldblock, lfs_off_t oldoff,
        lfs_block_t newblock, lfs_off_t newoff,
        lfs_off_t regionoff, lfs_size_t regionsize,
        const struct lfs_region *regions, int count) {
    int i = 0;
    lfs_size_t newend = newoff + regionsize;
    while (newoff < newend) {
        // commit from different types of regions
        if (i < count && regions[i].oldoff == oldoff - regionoff) {
            switch (regions[i].type) {
                case LFS_FROM_MEM: {
                    lfs_crc(crc, regions[i].buffer, regions[i].newsize);
                    int err = lfs_bd_prog(lfs, newblock, newoff,
                            regions[i].buffer, regions[i].newsize);
                    if (err) {
                        return err;
                    }
                    newoff += regions[i].newsize;
                    oldoff += regions[i].oldsize;
                    break;
                }
                case LFS_FROM_REGION: {
                    const struct lfs_region_region *disk = regions[i].buffer;
                    int err = lfs_commit_region(lfs, crc,
                            disk->block, disk->off,
                            newblock, newoff,
                            disk->off, regions[i].newsize,
                            disk->regions, disk->count);
                    if (err) {
                        return err;
                    }
                    newoff += regions[i].newsize;
                    oldoff -= regions[i].oldsize;
                    break;
                }
                case LFS_FROM_ATTRS: {
                    const struct lfs_region_attrs *attrs = regions[i].buffer;

                    // order doesn't matter, so we write new attrs first. this
                    // is still O(n^2) but only O(n) disk access
                    for (int j = 0; j < attrs->count; j++) {
                        if (attrs->attrs[j].size == 0) {
                            continue;
                        }

                        lfs_entry_attr_t attr;
                        attr.d.type = attrs->attrs[j].type;
                        attr.d.len = attrs->attrs[j].size;

                        lfs_crc(crc, &attr.d, sizeof(attr.d));
                        int err = lfs_bd_prog(lfs, newblock, newoff,
                                &attr.d, sizeof(attr.d));
                        if (err) {
                            return err;
                        }

                        lfs_crc(crc,
                                attrs->attrs[j].buffer, attrs->attrs[j].size);
                        err = lfs_bd_prog(lfs, newblock, newoff+sizeof(attr.d),
                                attrs->attrs[j].buffer, attrs->attrs[j].size);
                        if (err) {
                            return err;
                        }

                        newoff += 2+attrs->attrs[j].size;
                    }

                    // copy over attributes without updates
                    lfs_off_t oldend = oldoff + regions[i].oldsize;
                    while (oldoff < oldend) {
                        lfs_entry_attr_t attr;
                        int err = lfs_bd_read(lfs, oldblock, oldoff,
                                &attr.d, sizeof(attr.d));
                        if (err) {
                            return err;
                        }

                        bool updating = false;
                        for (int j = 0; j < attrs->count; j++) {
                            if (attr.d.type == attrs->attrs[j].type) {
                                updating = true;
                            }
                        }

                        if (!updating) {
                            err = lfs_commit_region(lfs, crc,
                                    oldblock, oldoff,
                                    newblock, newoff,
                                    0, 2+attr.d.len,
                                    NULL, 0);
                            if (err) {
                                return err;
                            }

                            newoff += 2+attr.d.len;
                        }

                        oldoff += 2+attr.d.len;
                    }

                    break;
                }
            }

            i += 1;
        } else {
            // copy data from old block if not covered by entry
            uint8_t data;
            int err = lfs_bd_read(lfs, oldblock, oldoff, &data, 1);
            if (err) {
                return err;
            }

            lfs_crc(crc, &data, 1);
            err = lfs_bd_prog(lfs, newblock, newoff, &data, 1);
            if (err) {
                return err;
            }

            oldoff += 1;
            newoff += 1;
        }
    }

    // sanity check our commit math
    LFS_ASSERT(newoff == newend);
    return 0;
}
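
// Note on the commit math above: crc accumulates over every byte that ends
// up in the new block, whether it came from a region or was copied from the
// old block, so the caller can append a single crc word that makes the whole
// block check out. As a hypothetical example, growing one entry from 12 to
// 16 bytes in a block with 40 bytes of entry data would be driven with
// regionsize = 44 on the new side while only 40 bytes are consumed from the
// old side; the final LFS_ASSERT(newoff == newend) checks exactly this
// bookkeeping.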

static int lfs_dir_commit(lfs_t *lfs, lfs_dir_t *dir,
        const struct lfs_region *regions, int count) {
    // state for copying over
    const lfs_block_t oldpair[2] = {dir->pair[1], dir->pair[0]};
    bool relocated = false;

    // increment revision count
    dir->d.rev += 1;

    // keep pairs in order such that pair[0] is most recent
    lfs_pairswap(dir->pair);
    for (int i = 0; i < count; i++) {
        dir->d.size += regions[i].newsize;
        dir->d.size -= regions[i].oldsize;
    }

    while (true) {
        if (true) {
            int err = lfs_bd_erase(lfs, dir->pair[0]);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // commit header
            uint32_t crc = 0xffffffff;
            lfs_dir_tole32(&dir->d);
            lfs_crc(&crc, &dir->d, sizeof(dir->d));
            err = lfs_bd_prog(lfs, dir->pair[0], 0, &dir->d, sizeof(dir->d));
            lfs_dir_fromle32(&dir->d);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // commit entry
            err = lfs_commit_region(lfs, &crc,
                    dir->pair[1], sizeof(dir->d),
                    dir->pair[0], sizeof(dir->d),
                    0, (0x7fffffff & dir->d.size)-sizeof(dir->d)-4,
                    regions, count);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // commit crc
            crc = lfs_tole32(crc);
            err = lfs_bd_prog(lfs, dir->pair[0],
                    (0x7fffffff & dir->d.size)-4, &crc, 4);
            crc = lfs_fromle32(crc);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            err = lfs_bd_sync(lfs);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // successful commit, check checksum to make sure
            uint32_t ncrc = 0xffffffff;
            err = lfs_bd_crc(lfs, dir->pair[0], 0,
                    (0x7fffffff & dir->d.size)-4, &ncrc);
            if (err) {
                return err;
            }

            if (ncrc != crc) {
                goto relocate;
            }
        }

        break;

relocate:
        // commit was corrupted
        LFS_DEBUG("Bad block at %d", dir->pair[0]);

        // drop caches and prepare to relocate block
        relocated = true;
        lfs->pcache.block = 0xffffffff;

        // can't relocate superblock, filesystem is now frozen
        if (lfs_paircmp(oldpair, (const lfs_block_t[2]){0, 1}) == 0) {
            LFS_WARN("Superblock %d has become unwritable", oldpair[0]);
            return LFS_ERR_CORRUPT;
        }

        // relocate half of pair
        int err = lfs_alloc(lfs, &dir->pair[0]);
        if (err) {
            return err;
        }
    }

    if (relocated) {
        // update references if we relocated
        LFS_DEBUG("Relocating %d %d to %d %d",
                oldpair[0], oldpair[1], dir->pair[0], dir->pair[1]);
        int err = lfs_relocate(lfs, oldpair, dir->pair);
        if (err) {
            return err;
        }
    }

    // shift over any directories that are affected
    for (lfs_dir_t *d = lfs->dirs; d; d = d->next) {
        if (lfs_paircmp(d->pair, dir->pair) == 0) {
            d->pair[0] = dir->pair[0];
            d->pair[1] = dir->pair[1];
        }
    }

    return 0;
}
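
// Illustrative sketch (assumption): a commit that only bumps the revision
// and rewrites the existing entries passes no regions at all:
//
//     int err = lfs_dir_commit(lfs, &dir, NULL, 0);
//
// while an append on this branch would be expressed as a region list with
// oldsize == 0, leaving the copying of untouched entries to
// lfs_commit_region.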

static int lfs_dir_get(lfs_t *lfs, const lfs_dir_t *dir,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    return lfs_bd_read(lfs, dir->pair[0], off, buffer, size);
}
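
// Illustrative sketch (hypothetical entry layout): lfs_dir_get is just a
// read from the most recent block of the pair, so loading an entry's disk
// header could look like:
//
//     int err = lfs_dir_get(lfs, &dir, entry.off, &entry.d, sizeof(entry.d));
//
// where entry.off is assumed to be the entry's offset within the block.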
|
|
|
|
|
Added internal lfs_dir_set, an umbrella to dir append/update/remove operations
This move was surprisingly complex, but offers the ultimate opportunity for
code reuse in terms of resizable entries. Instead of needing to provide
separate functions for adding and removing entries, adding and removing
entries can just be viewed as changing an entry's size to-and-from zero.
Unfortunately, it's not _quite_ that simple, since append and remove
hide some relatively complex operations for when directory blocks
overflow or need to be cleaned up.
However, with enough shoehorning, and a new committer type that allows
specifying recursive commit lists (is this now a push-down automata?),
it does seem to be possible to shove all of the entry update logic into
a single function.
Sidenote, I switched back to an enum-based DSL, since the addition of a
recursive region opcode breaks the consistency of what needs to be
passed to the DSL callback functions. It's much simpler to handle each
opcode explicitly inside a recursive lfs_commit_region function.
2018-03-27 22:57:07 +00:00
|
|
|
static int lfs_dir_set(lfs_t *lfs, lfs_dir_t *dir, lfs_entry_t *entry,
        struct lfs_region *regions, int count) {
    lfs_ssize_t diff = 0;
    for (int i = 0; i < count; i++) {
        diff += regions[i].newsize;
        diff -= regions[i].oldsize;
    }

    lfs_size_t oldsize = entry->size;
    if (entry->off == 0) {
        entry->off = (0x7fffffff & dir->d.size) - 4;
    }

    if ((0x7fffffff & dir->d.size) + diff > lfs->cfg->block_size) {
        lfs_dir_t olddir = *dir;
        lfs_off_t oldoff = entry->off;

        if (oldsize) {
            // mark as moving
            uint8_t type;
            int err = lfs_dir_get(lfs, &olddir, oldoff, &type, 1);
            if (err) {
                return err;
            }

            type |= LFS_STRUCT_MOVED;
            err = lfs_dir_commit(lfs, &olddir, (struct lfs_region[]){
                    {LFS_FROM_MEM, oldoff, 1, &type, 1}}, 1);
            if (err) {
                return err;
            }
        }

        lfs_dir_t pdir = olddir;

        // find available block or create a new one
        while ((0x7fffffff & dir->d.size) + oldsize + diff
                > lfs->cfg->block_size) {
            // we need to allocate a new dir block
            if (!(0x80000000 & dir->d.size)) {
                pdir = *dir;
                int err = lfs_dir_alloc(lfs, dir);
                if (err) {
                    return err;
                }

                dir->d.tail[0] = pdir.d.tail[0];
                dir->d.tail[1] = pdir.d.tail[1];

                break;
            }

            int err = lfs_dir_fetch(lfs, dir, dir->d.tail);
            if (err) {
                return err;
            }
        }

        // writing out new entry
        entry->off = dir->d.size - 4;
        entry->size += diff;
        int err = lfs_dir_commit(lfs, dir, (struct lfs_region[]){
                {LFS_FROM_REGION, entry->off, 0, &(struct lfs_region_region){
                    olddir.pair[0], oldoff,
                    regions, count}, entry->size}}, 1);
        if (err) {
            return err;
        }

        // update pred dir, unless pred == old we can coalesce
        if (!oldsize || lfs_paircmp(pdir.pair, olddir.pair) != 0) {
            pdir.d.size |= 0x80000000;
            pdir.d.tail[0] = dir->pair[0];
            pdir.d.tail[1] = dir->pair[1];

            err = lfs_dir_commit(lfs, &pdir, NULL, 0);
            if (err) {
                return err;
            }
        } else if (oldsize) {
            olddir.d.size |= 0x80000000;
            olddir.d.tail[0] = dir->pair[0];
            olddir.d.tail[1] = dir->pair[1];
        }

        // remove old entry
        if (oldsize) {
            lfs_entry_t oldentry;
            oldentry.off = oldoff;
            err = lfs_dir_set(lfs, &olddir, &oldentry, (struct lfs_region[]){
                    {LFS_FROM_MEM, 0, oldsize, NULL, 0}}, 1);
            if (err) {
                return err;
            }
        }

        goto shift;
    }

    if ((0x7fffffff & dir->d.size) + diff == sizeof(dir->d)+4) {
        lfs_dir_t pdir;
        int res = lfs_pred(lfs, dir->pair, &pdir);
        if (res < 0) {
            return res;
        }

        if (pdir.d.size & 0x80000000) {
            pdir.d.size &= dir->d.size | 0x7fffffff;
            pdir.d.tail[0] = dir->d.tail[0];
            pdir.d.tail[1] = dir->d.tail[1];
            int err = lfs_dir_commit(lfs, &pdir, NULL, 0);
            if (err) {
                return err;
            }
            goto shift;
        }
    }

    for (int i = 0; i < count; i++) {
        regions[i].oldoff += entry->off;
    }

    int err = lfs_dir_commit(lfs, dir, regions, count);
    if (err) {
        return err;
    }

    entry->size += diff;

shift:
    // shift over any files/directories that are affected
    for (lfs_file_t *f = lfs->files; f; f = f->next) {
        if (lfs_paircmp(f->pair, dir->pair) == 0) {
            if (f->pairoff == entry->off && entry->size == 0) {
                f->pair[0] = 0xffffffff;
                f->pair[1] = 0xffffffff;
            } else if (f->pairoff > entry->off) {
                f->pairoff += diff;
            }
        }
    }

    for (lfs_dir_t *d = lfs->dirs; d; d = d->next) {
        if (lfs_paircmp(d->pair, dir->pair) == 0) {
            if (d->off > entry->off) {
                d->off += diff;
                d->pos += diff;
            }
        }
    }

    return 0;
}
static int lfs_dir_next(lfs_t *lfs, lfs_dir_t *dir, lfs_entry_t *entry) {
    while (dir->off >= (0x7fffffff & dir->d.size)-4) {
        if (!(0x80000000 & dir->d.size)) {
            entry->off = dir->off;
            return LFS_ERR_NOENT;
        }

        int err = lfs_dir_fetch(lfs, dir, dir->d.tail);
        if (err) {
            return err;
        }

        dir->off = sizeof(dir->d);
        dir->pos += sizeof(dir->d) + 4;
    }

    int err = lfs_dir_get(lfs, dir, dir->off, &entry->d, sizeof(entry->d));
    lfs_entry_fromle32(&entry->d);
    if (err) {
        return err;
    }

    entry->off = dir->off;
    entry->size = lfs_entry_size(entry);
    dir->off += entry->size;
    dir->pos += entry->size;
    return 0;
}
static int lfs_dir_find(lfs_t *lfs, lfs_dir_t *dir,
        lfs_entry_t *entry, const char **path) {
    const char *pathname = *path;
    lfs_size_t pathlen;

    while (true) {
nextname:
        // skip slashes
        pathname += strspn(pathname, "/");
        pathlen = strcspn(pathname, "/");

        // special case for root dir
        if (pathname[0] == '\0') {
            *entry = (lfs_entry_t){
                .d.type = LFS_STRUCT_DIR | LFS_TYPE_DIR,
                .d.u.dir[0] = lfs->root[0],
                .d.u.dir[1] = lfs->root[1],
            };
            return 0;
        }

        // skip '.' and root '..'
        if ((pathlen == 1 && memcmp(pathname, ".", 1) == 0) ||
                (pathlen == 2 && memcmp(pathname, "..", 2) == 0)) {
            pathname += pathlen;
            goto nextname;
        }

        // skip if matched by '..' in name
        const char *suffix = pathname + pathlen;
        lfs_size_t sufflen;
        int depth = 1;
        while (true) {
            suffix += strspn(suffix, "/");
            sufflen = strcspn(suffix, "/");
            if (sufflen == 0) {
                break;
            }

            if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
                depth -= 1;
                if (depth == 0) {
                    pathname = suffix + sufflen;
                    goto nextname;
                }
            } else {
                depth += 1;
            }

            suffix += sufflen;
        }

        // update what we've found
        *path = pathname;

        // find path
        while (true) {
            int err = lfs_dir_next(lfs, dir, entry);
            if (err) {
                return err;
            }

            if (((0xf & entry->d.type) != LFS_TYPE_REG &&
                    (0xf & entry->d.type) != LFS_TYPE_DIR) ||
                    entry->d.nlen != pathlen) {
                continue;
            }

            int res = lfs_bd_cmp(lfs, dir->pair[0],
                    entry->off + entry->size - pathlen,
                    pathname, pathlen);
            if (res < 0) {
                return res;
            }

            // found match
            if (res) {
                break;
            }
        }

        // check that entry has not been moved
        if (entry->d.type & LFS_STRUCT_MOVED) {
            int moved = lfs_moved(lfs, &entry->d.u);
            if (moved < 0 || moved) {
                return (moved < 0) ? moved : LFS_ERR_NOENT;
            }

            entry->d.type &= ~LFS_STRUCT_MOVED;
        }

        pathname += pathlen;
        pathname += strspn(pathname, "/");
        if (pathname[0] == '\0') {
            return 0;
        }

        // continue on if we hit a directory
        if ((0xf & entry->d.type) != LFS_TYPE_DIR) {
            return LFS_ERR_NOTDIR;
        }

        int err = lfs_dir_fetch(lfs, dir, entry->d.u.dir);
        if (err) {
            return err;
        }
    }
}
/// Internal attribute operations ///
static int lfs_dir_getinfo(lfs_t *lfs,
        lfs_dir_t *dir, const lfs_entry_t *entry, struct lfs_info *info) {
    memset(info, 0, sizeof(*info));
    info->type = 0xf & entry->d.type;
    if (entry->d.type == (LFS_STRUCT_CTZ | LFS_TYPE_REG)) {
        info->size = entry->d.u.file.size;
    } else if (entry->d.type == (LFS_STRUCT_INLINE | LFS_TYPE_REG)) {
        info->size = lfs_entry_elen(entry);
    }

    if (lfs_paircmp(entry->d.u.dir, lfs->root) == 0) {
        strcpy(info->name, "/");
    } else {
        int err = lfs_dir_get(lfs, dir,
                entry->off + entry->size - entry->d.nlen,
                info->name, entry->d.nlen);
        if (err) {
            return err;
        }
    }

    return 0;
}
Added support for atomically committing custom attributes.

Although it's simple and probably what most users expect, the previous custom
attributes API suffered from one problem: the inability to update attributes
atomically. If we consider our timestamp use case, updating a file would require:

1. Update the file
2. Update the timestamp

If a power loss occurs during this sequence of updates, we could end up with a file
with an incorrect timestamp. Is this a big deal? Probably not, but it could be a
surprise only found after a power loss, and littlefs was developed specifically to
avoid surprises during power loss.

littlefs is perfectly capable of bundling multiple attribute updates in a single
directory commit; that's kind of what it was designed to do. So all we need is a
new committer opcode for a list of attributes, and a way to poke that list of
attributes through the API. We could provide single-attribute functions, but don't,
because fewer functions make for a smaller codebase, and these are already the more
advanced functions, so we can expect more from users. This also changes the
semantics of a missing attribute: erroring out would throw away all of the other
attributes we're processing, so missing attributes are simply zeroed instead.

To atomically commit both custom attributes and file updates, we need a new API,
lfs_file_setattr. Unfortunately its semantics are a bit more confusing than
lfs_setattr, since the attributes aren't written out immediately.
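Before the code, a minimal standalone sketch of the record layout these functions
walk (this is not the littlefs API; find_attr and the buffer layout are assumptions
drawn from the off += 2+attr.d.len stepping and the type/len header fields used
below):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Illustrative only: a flat byte buffer standing in for the attribute
// region of a directory entry. Each record is assumed to be a 2-byte
// header (1-byte type, 1-byte length) followed by the payload, which is
// what the off += 2+len stepping in the functions below implies.
static int find_attr(const uint8_t *region, size_t len,
        uint8_t want_type, void *buffer, size_t size) {
    size_t off = 0;
    while (off + 2 <= len) {
        uint8_t type = region[off + 0];
        uint8_t alen = region[off + 1];
        if (off + 2 + alen > len) {
            return -1; // truncated record
        }

        if (type == want_type) {
            if (size < alen) {
                return -1; // caller's buffer too small (cf. LFS_ERR_RANGE)
            }
            memcpy(buffer, &region[off + 2], alen);
            return alen;
        }

        off += 2 + alen;
    }
    return 0; // not found: caller's (pre-zeroed) buffer is left as-is
}

int main(void) {
    // two records: type 0x74 with a 4-byte payload, type 0x75 with 2 bytes
    const uint8_t region[] = {
        0x74, 4, 0xde, 0xad, 0xbe, 0xef,
        0x75, 2, 0x12, 0x34,
    };
    uint8_t buf[8] = {0};
    int res = find_attr(region, sizeof(region), 0x75, buf, sizeof(buf));
    printf("found %d bytes: %02x %02x\n", res, buf[0], buf[1]);
    return 0;
}

lfs_dir_getattrs below performs the same scan against the block device via
lfs_dir_get, matching each stored record against the caller's attrs[] list, while
lfs_dir_checkattrs reuses the walk to size the post-commit attribute region.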
static int lfs_dir_getattrs(lfs_t *lfs,
|
2018-04-06 00:03:58 +00:00
|
|
|
lfs_dir_t *dir, const lfs_entry_t *entry,
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
const struct lfs_attr *attrs, int count) {
|
|
|
|
// set to zero in case we can't find the attributes or size mismatch
|
|
|
|
for (int j = 0; j < count; j++) {
|
|
|
|
memset(attrs[j].buffer, 0, attrs[j].size);
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
// search for attribute in attribute entry
|
2018-04-08 09:23:23 +00:00
|
|
|
lfs_off_t off = entry->off + 4+lfs_entry_elen(entry);
|
|
|
|
lfs_off_t end = off + lfs_entry_alen(entry);
|
|
|
|
while (off < end) {
|
|
|
|
lfs_entry_attr_t attr;
|
|
|
|
int err = lfs_dir_get(lfs, dir, off, &attr.d, sizeof(attr.d));
|
2018-04-06 00:03:58 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
for (int j = 0; j < count; j++) {
|
2018-04-08 21:58:12 +00:00
|
|
|
if (attrs[j].type == attr.d.type) {
|
|
|
|
if (attrs[j].size < attr.d.len) {
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
return LFS_ERR_RANGE;
|
|
|
|
}
|
2018-04-06 00:03:58 +00:00
|
|
|
|
2018-04-08 09:23:23 +00:00
|
|
|
err = lfs_dir_get(lfs, dir, off+sizeof(attr.d),
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
attrs[j].buffer, attr.d.len);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
2018-04-06 00:03:58 +00:00
|
|
|
}
|
2018-04-08 09:23:23 +00:00
|
|
|
|
|
|
|
off += 2+attr.d.len;
|
2018-04-06 00:03:58 +00:00
|
|
|
}
|
|
|
|
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
return 0;
|
2018-04-06 00:03:58 +00:00
|
|
|
}
|
|
|
|
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
static lfs_ssize_t lfs_dir_checkattrs(lfs_t *lfs,
|
2018-04-06 00:03:58 +00:00
|
|
|
lfs_dir_t *dir, lfs_entry_t *entry,
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
const struct lfs_attr *attrs, int count) {
|
|
|
|
// check that attributes fit
|
2018-04-08 09:23:23 +00:00
|
|
|
// two separate passes so disk access is O(n)
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
lfs_size_t nsize = 0;
|
|
|
|
for (int j = 0; j < count; j++) {
|
2018-04-08 09:23:23 +00:00
|
|
|
if (attrs[j].size > 0) {
|
|
|
|
nsize += 2+attrs[j].size;
|
|
|
|
}
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
}
|
|
|
|
|
2018-04-08 09:23:23 +00:00
|
|
|
lfs_off_t off = entry->off + 4+lfs_entry_elen(entry);
|
|
|
|
lfs_off_t end = off + lfs_entry_alen(entry);
|
|
|
|
while (off < end) {
|
|
|
|
lfs_entry_attr_t attr;
|
|
|
|
int err = lfs_dir_get(lfs, dir, off, &attr.d, sizeof(attr.d));
|
2018-04-06 00:03:58 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
bool updated = false;
|
|
|
|
for (int j = 0; j < count; j++) {
|
|
|
|
if (attr.d.type == attrs[j].type) {
|
|
|
|
updated = true;
|
|
|
|
}
|
2018-04-06 00:03:58 +00:00
|
|
|
}
|
|
|
|
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
if (!updated) {
|
|
|
|
nsize += 2+attr.d.len;
|
|
|
|
}
|
2018-04-08 09:23:23 +00:00
|
|
|
|
|
|
|
off += 2+attr.d.len;
|
2018-04-06 00:03:58 +00:00
|
|
|
}
|
|
|
|
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
if (nsize > lfs->attrs_size || (
|
2018-04-08 21:58:12 +00:00
|
|
|
lfs_entry_size(entry) - lfs_entry_alen(entry) + nsize
|
|
|
|
> lfs->cfg->block_size)) {
|
2018-04-06 00:03:58 +00:00
|
|
|
return LFS_ERR_NOSPC;
|
|
|
|
}
|
|
|
|
|
Added support for atomically committing custom attributes
Although it's simple and probably what most users expect, the previous
custom attributes API suffered from one problem: the inability to update
attributes atomically.
If we consider our timestamp use case, updating a file would require:
1. Update the file
2. Update the timestamp
If a power loss occurs during this sequence of updates, we could end up
with a file with an incorrect timestamp.
Is this a big deal? Probably not, but it could be a surprise only found
after a power-loss. And littlefs was developed with the _specifically_
to avoid suprises during power-loss.
The littlefs is perfectly capable of bundling multiple attribute updates
in a single directory commit. That's kind of what it was designed to do.
So all we need is a new committer opcode for list of attributes, and
then poking that list of attributes through the API.
We could provide the single-attribute functions, but don't, because the
fewer functions makes for a smaller codebase, and these are already the
more advanced functions so we can expect more from users. This also
changes semantics about what happens when we don't find an attribute,
since erroring would throw away all of the other attributes we're
processing.
To atomically commit both custom attributes and file updates, we need a
new API, lfs_file_setattr. Unfortunately the semantics are a bit more
confusing than lfs_setattr, since the attributes aren't written out
immediately.
2018-04-06 04:23:14 +00:00
|
|
|
return nsize;
|
2018-04-06 00:03:58 +00:00
|
|
|
}
|
|
|
|
|
2018-04-06 04:23:14 +00:00
|
|
|
static int lfs_dir_setattrs(lfs_t *lfs,
|
|
|
|
lfs_dir_t *dir, lfs_entry_t *entry,
|
|
|
|
const struct lfs_attr *attrs, int count) {
|
|
|
|
// make sure attributes fit
|
2018-04-08 09:23:23 +00:00
|
|
|
lfs_size_t oldlen = lfs_entry_alen(entry);
|
|
|
|
lfs_ssize_t newlen = lfs_dir_checkattrs(lfs, dir, entry, attrs, count);
|
|
|
|
if (newlen < 0) {
|
|
|
|
return newlen;
|
2018-04-06 00:03:58 +00:00
|
|
|
}
|
|
|
|
|
2018-04-06 04:23:14 +00:00
|
|
|
// commit to entry, majority of work is in LFS_FROM_ATTRS
|
2018-04-08 09:23:23 +00:00
|
|
|
entry->d.alen = (0xc0 & entry->d.alen) | newlen;
|
2018-04-06 04:23:14 +00:00
|
|
|
return lfs_dir_set(lfs, dir, entry, (struct lfs_region[]){
|
2018-04-08 09:23:23 +00:00
|
|
|
{LFS_FROM_MEM, 0, 4, &entry->d, 4},
|
|
|
|
{LFS_FROM_ATTRS, 4+lfs_entry_elen(entry), oldlen,
|
|
|
|
&(struct lfs_region_attrs){attrs, count}, newlen}}, 2);
|
2018-04-06 00:03:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-04-18 03:27:06 +00:00
|
|
|
/// Top level directory operations ///
|
2018-05-21 05:56:20 +00:00
|
|
|
int lfs_mkdir(lfs_t *lfs, const char *path) {
|
|
|
|
// deorphan if we haven't yet, needed at most once after poweron
|
|
|
|
if (!lfs->deorphaned) {
|
|
|
|
int err = lfs_deorphan_(lfs);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// fetch parent directory
|
|
|
|
lfs_dir_t_ cwd;
|
2018-05-22 22:43:39 +00:00
|
|
|
int err = lfs_dir_findentry_(lfs, &cwd, &path, &(lfs_entry_t_){0});
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err != LFS_ERR_NOENT || strchr(path, '/') != NULL) {
|
|
|
|
if (!err) {
|
|
|
|
return LFS_ERR_EXIST;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// check that name fits
|
|
|
|
lfs_size_t nlen = strlen(path);
|
|
|
|
if (nlen > lfs->name_size) {
|
|
|
|
return LFS_ERR_NAMETOOLONG;
|
|
|
|
}
|
|
|
|
|
|
|
|
// build up new directory
|
|
|
|
lfs_alloc_ack(lfs);
|
|
|
|
|
|
|
|
lfs_dir_t_ dir;
|
2018-05-22 22:43:39 +00:00
|
|
|
err = lfs_dir_alloc_(lfs, &dir, cwd.tail);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-22 22:43:39 +00:00
|
|
|
err = lfs_dir_commit_(lfs, &dir, NULL, 0);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// get next slot and commit
|
|
|
|
uint16_t id;
|
|
|
|
err = lfs_dir_add(lfs, &cwd, &id);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-22 22:43:39 +00:00
|
|
|
err = lfs_dir_commit_(lfs, &cwd, (lfs_entry_t_[]){
|
|
|
|
{lfs_mktag(LFS_TYPE_NAME_, id, nlen), .u.buffer=(void*)path},
|
2018-05-21 05:56:20 +00:00
|
|
|
{lfs_mktag(LFS_TYPE_DIR_ | LFS_STRUCT_DIR_, id,
|
|
|
|
sizeof(dir.pair)), .u.buffer=dir.pair}}, 2);
|
|
|
|
|
|
|
|
// TODO need ack here?
|
|
|
|
lfs_alloc_ack(lfs);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#if 0
|
2017-03-25 21:20:31 +00:00
|
|
|
int lfs_mkdir(lfs_t *lfs, const char *path) {
|
2017-10-07 21:56:00 +00:00
|
|
|
// deorphan if we haven't yet, needed at most once after poweron
|
|
|
|
if (!lfs->deorphaned) {
|
|
|
|
int err = lfs_deorphan(lfs);
|
2017-10-07 14:19:08 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-18 03:27:06 +00:00
|
|
|
// fetch parent directory
|
2017-03-13 00:41:08 +00:00
|
|
|
lfs_dir_t cwd;
|
2017-04-18 03:27:06 +00:00
|
|
|
int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
|
2017-03-13 00:41:08 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
lfs_entry_t entry;
|
2017-04-18 03:27:06 +00:00
|
|
|
err = lfs_dir_find(lfs, &cwd, &entry, &path);
|
2017-09-17 21:46:09 +00:00
|
|
|
if (err != LFS_ERR_NOENT || strchr(path, '/') != NULL) {
|
2017-11-16 23:50:14 +00:00
|
|
|
return err ? err : LFS_ERR_EXIST;
|
2017-03-13 00:41:08 +00:00
|
|
|
}
|
|
|
|
|
Added disk-backed limits on the name/attrs/inline sizes
Being a portable, microcontroller-scale embedded filesystem, littlefs is
presented with a relatively unique challenge. The amount of RAM
available is on completely different scales from machine to machine, and
what is normally a reasonable RAM assumption may break completely on an
embedded system.
A great example of this is file names. On almost every PC these days, the limit
for a file name is 255 bytes. It's a very convenient limit for a number
of reasons. However, on microcontrollers, allocating 255 bytes of RAM to
do a file search can be unreasonable.
The simplest solution (and one that has existed in littlefs for a
while) is to let this limit be redefined to a smaller value on devices
that need to save RAM. However, this presents an interesting portability
issue. If these devices are plugged into a PC with relatively infinite
RAM, nothing stops the PC from writing files with full 255-byte file
names, which can't be read on the small device.
One solution here is to store this limit on the superblock during format
time. When mounting a disk, the filesystem implementation is responsible for
checking this limit in the superblock. If it's larger than what can be
read, raise an error. If it's smaller, respect the limit on the
superblock and raise an error if the user attempts to exceed it.
In this commit, this strategy is adopted for file names, inline files,
and the size of all attributes, since these could impact the memory
consumption of the filesystem. (Recording the attribute's limit is
iffy, but is the only other arbitrary limit and could be used for disabling
support of custom attributes).
Note! This change makes it very important to configure littlefs
correctly at format time. If littlefs is formatted on a PC without
changing the limits appropriately, it will be rejected by a smaller
device.
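For concreteness, a hedged sketch of the format-time side of this; the
name_size/inline_size/attrs_size field names are inferred from the lfs->name_size,
lfs->inline_size, and lfs->attrs_size uses in this file and may not match the final
public config:

    // hedged sketch of a format-time configuration carrying the limits
    const struct lfs_config cfg = {
        // ... block device callbacks and geometry elided ...
        .name_size   = 63,   // recorded in the superblock at format time
        .inline_size = 512,  // max file size kept inline in the directory
        .attrs_size  = 64,   // total bytes reserved for custom attributes
    };

    // lfs_format() stores the limits on disk; a later lfs_mount() on a
    // device configured with smaller limits is expected to reject the
    // filesystem rather than silently truncate names or attributes
    int err = lfs_format(&lfs, &cfg);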
2018-04-01 20:36:29 +00:00
|
|
|
// check that name fits
|
|
|
|
lfs_size_t nlen = strlen(path);
|
|
|
|
if (nlen > lfs->name_size) {
|
|
|
|
return LFS_ERR_NAMETOOLONG;
|
|
|
|
}
|
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
// build up new directory
|
|
|
|
lfs_alloc_ack(lfs);
|
|
|
|
|
2017-03-13 00:41:08 +00:00
|
|
|
lfs_dir_t dir;
|
2017-04-18 03:27:06 +00:00
|
|
|
err = lfs_dir_alloc(lfs, &dir);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
dir.d.tail[0] = cwd.d.tail[0];
|
|
|
|
dir.d.tail[1] = cwd.d.tail[1];
|
|
|
|
|
2018-04-03 13:28:09 +00:00
|
|
|
err = lfs_dir_commit(lfs, &dir, NULL, 0);
|
2017-03-13 00:41:08 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-03-03 16:26:06 +00:00
|
|
|
entry.d.type = LFS_STRUCT_DIR | LFS_TYPE_DIR;
|
2017-07-18 07:09:35 +00:00
|
|
|
entry.d.elen = sizeof(entry.d) - 4;
|
|
|
|
entry.d.alen = 0;
|
2018-04-01 20:36:29 +00:00
|
|
|
entry.d.nlen = nlen;
|
2017-03-13 00:41:08 +00:00
|
|
|
entry.d.u.dir[0] = dir.pair[0];
|
|
|
|
entry.d.u.dir[1] = dir.pair[1];
|
Added internal lfs_dir_set, an umbrella to dir append/update/remove operations
This move was surprisingly complex, but offers the ultimate opportunity for
code reuse in terms of resizable entries. Instead of needing to provide
separate functions for adding and removing entries, adding and removing
entries can just be viewed as changing an entry's size to-and-from zero.
Unfortunately, it's not _quite_ that simple, since append and remove
hide some relatively complex operations for when directory blocks
overflow or need to be cleaned up.
However, with enough shoehorning, and a new committer type that allows
specifying recursive commit lists (is this now a push-down automaton?),
it does seem to be possible to shove all of the entry update logic into
a single function.
Sidenote, I switched back to an enum-based DSL, since the addition of a
recursive region opcode breaks the consistency of what needs to be
passed to the DSL callback functions. It's much simpler to handle each
opcode explicitly inside a recursive lfs_commit_region function.
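For reference, the shape of a region descriptor as it can be inferred from the
lfs_dir_set() call sites in this file; the field names below are illustrative, and
the real definition lives in the header:

    // hedged sketch, inferred from call sites such as
    //   {LFS_FROM_MEM, 0, 0, &entry.d, sizeof(entry.d)}
    struct lfs_region {
        int type;            // opcode: LFS_FROM_MEM, LFS_FROM_ATTRS, ...
        lfs_off_t off;       // offset of the region within the entry
        lfs_size_t oldlen;   // bytes the region currently occupies
        const void *buffer;  // source data, or a nested descriptor
                             // (struct lfs_region_attrs) for LFS_FROM_ATTRS
        lfs_size_t newlen;   // bytes the region occupies after the commit
    };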
2018-03-27 22:57:07 +00:00
|
|
|
entry.size = 0;
|
2017-03-13 00:41:08 +00:00
|
|
|
|
2017-04-01 15:44:17 +00:00
|
|
|
cwd.d.tail[0] = dir.pair[0];
|
|
|
|
cwd.d.tail[1] = dir.pair[1];
|
2018-04-10 21:35:29 +00:00
|
|
|
lfs_entry_tole32(&entry.d);
|
2018-03-27 22:57:07 +00:00
|
|
|
err = lfs_dir_set(lfs, &cwd, &entry, (struct lfs_region[]){
|
2018-04-08 09:23:23 +00:00
|
|
|
{LFS_FROM_MEM, 0, 0, &entry.d, sizeof(entry.d)},
|
|
|
|
{LFS_FROM_MEM, 0, 0, path, nlen}}, 2);
|
2017-05-14 17:01:45 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
lfs_alloc_ack(lfs);
|
|
|
|
return 0;
|
2017-03-13 00:41:08 +00:00
|
|
|
}
|
2018-05-21 05:56:20 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
int lfs_dir_open_(lfs_t *lfs, lfs_dir_t_ *dir, const char *path) {
|
|
|
|
lfs_entry_t_ entry;
|
2018-05-22 22:43:39 +00:00
|
|
|
int err = lfs_dir_findentry_(lfs, dir, &path, &entry);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((lfs_tag_type(entry.tag) & 0x1f0) != LFS_TYPE_DIR_) {
|
|
|
|
return LFS_ERR_NOTDIR;
|
|
|
|
}
|
|
|
|
|
2018-05-22 22:43:39 +00:00
|
|
|
err = lfs_dir_fetch_(lfs, dir, entry.u.pair);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// setup head dir
|
|
|
|
dir->head[0] = dir->pair[0];
|
|
|
|
dir->head[1] = dir->pair[1];
|
|
|
|
dir->pos = 0;
|
|
|
|
dir->id = 0;
|
|
|
|
|
|
|
|
// add to list of directories
|
|
|
|
dir->next = lfs->dirs;
|
|
|
|
lfs->dirs = dir;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int lfs_dir_close_(lfs_t *lfs, lfs_dir_t_ *dir) {
|
|
|
|
// remove from list of directories
|
|
|
|
for (lfs_dir_t_ **p = &lfs->dirs; *p; p = &(*p)->next) {
|
|
|
|
if (*p == dir) {
|
|
|
|
*p = dir->next;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO move me?
|
|
|
|
static int lfs_dir_getinfo_(lfs_t *lfs, lfs_dir_t_ *dir,
|
|
|
|
uint16_t id, struct lfs_info *info) {
|
2018-05-22 22:43:39 +00:00
|
|
|
lfs_entry_t_ entry;
|
|
|
|
int err = lfs_dir_getentry_(lfs, dir,
|
|
|
|
0x701ff000, lfs_mktag(LFS_TYPE_REG, id, 8), &entry);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err && err != LFS_ERR_RANGE) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
info->type = lfs_tag_subtype(entry.tag);
|
2018-05-22 22:43:39 +00:00
|
|
|
if (lfs_tag_type(entry.tag) == (LFS_TYPE_REG_ | LFS_STRUCT_CTZ_)) {
|
2018-05-21 05:56:20 +00:00
|
|
|
info->size = entry.u.ctz.size;
|
2018-05-22 22:43:39 +00:00
|
|
|
} else if (lfs_tag_type(entry.tag) == (LFS_TYPE_REG_ | LFS_STRUCT_INLINE_)) {
|
2018-05-21 05:56:20 +00:00
|
|
|
info->size = lfs_tag_size(entry.tag);
|
|
|
|
}
|
|
|
|
|
2018-05-22 22:43:39 +00:00
|
|
|
err = lfs_dir_getbuffer_(lfs, dir,
|
|
|
|
0x7ffff000, lfs_mktag(LFS_TYPE_NAME_, id, lfs->cfg->name_size+1),
|
|
|
|
&(lfs_entry_t_){.u.buffer=info->name});
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int lfs_dir_read_(lfs_t *lfs, lfs_dir_t_ *dir, struct lfs_info *info) {
|
|
|
|
memset(info, 0, sizeof(*info));
|
|
|
|
|
|
|
|
// special offset for '.' and '..'
|
|
|
|
if (dir->pos == 0) {
|
|
|
|
info->type = LFS_TYPE_DIR;
|
|
|
|
strcpy(info->name, ".");
|
|
|
|
dir->pos += 1;
|
|
|
|
return 1;
|
|
|
|
} else if (dir->pos == 1) {
|
|
|
|
info->type = LFS_TYPE_DIR;
|
|
|
|
strcpy(info->name, "..");
|
|
|
|
dir->pos += 1;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (true) {
|
|
|
|
if (dir->id == dir->count) {
|
|
|
|
if (!dir->split) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-05-22 22:43:39 +00:00
|
|
|
int err = lfs_dir_fetch_(lfs, dir, dir->tail);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
dir->id = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int err = lfs_dir_getinfo_(lfs, dir, dir->id, info);
|
|
|
|
if (err != LFS_ERR_NOENT) {
|
|
|
|
if (!err) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
dir->id += 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
dir->pos += 1;
|
|
|
|
return true;
|
|
|
|
}
|
2017-03-13 00:41:08 +00:00
|
|
|
|
2017-03-25 21:20:31 +00:00
|
|
|
int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
|
2017-04-18 03:27:06 +00:00
|
|
|
dir->pair[0] = lfs->root[0];
|
|
|
|
dir->pair[1] = lfs->root[1];
|
2017-03-25 23:11:45 +00:00
|
|
|
|
|
|
|
int err = lfs_dir_fetch(lfs, dir, dir->pair);
|
2017-03-25 21:20:31 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
2017-04-23 04:11:13 +00:00
|
|
|
}
|
|
|
|
|
2017-03-25 21:20:31 +00:00
|
|
|
lfs_entry_t entry;
|
2017-04-18 03:27:06 +00:00
|
|
|
err = lfs_dir_find(lfs, dir, &entry, &path);
|
2017-03-25 21:20:31 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
2018-03-03 16:26:06 +00:00
|
|
|
} else if (entry.d.type != (LFS_STRUCT_DIR | LFS_TYPE_DIR)) {
|
2017-04-24 03:10:16 +00:00
|
|
|
return LFS_ERR_NOTDIR;
|
2017-03-25 21:20:31 +00:00
|
|
|
}
|
|
|
|
|
2017-04-15 16:26:37 +00:00
|
|
|
err = lfs_dir_fetch(lfs, dir, entry.d.u.dir);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-04-23 04:11:13 +00:00
|
|
|
// setup head dir
|
2017-04-15 16:26:37 +00:00
|
|
|
// special offset for '.' and '..'
|
2017-04-23 04:11:13 +00:00
|
|
|
dir->head[0] = dir->pair[0];
|
|
|
|
dir->head[1] = dir->pair[1];
|
|
|
|
dir->pos = sizeof(dir->d) - 2;
|
|
|
|
dir->off = sizeof(dir->d);
|
2017-11-22 02:53:15 +00:00
|
|
|
|
|
|
|
// add to list of directories
|
|
|
|
dir->next = lfs->dirs;
|
|
|
|
lfs->dirs = dir;
|
|
|
|
|
2017-04-15 16:26:37 +00:00
|
|
|
return 0;
|
2017-03-25 21:20:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
|
2017-11-22 02:53:15 +00:00
|
|
|
// remove from list of directories
|
|
|
|
for (lfs_dir_t **p = &lfs->dirs; *p; p = &(*p)->next) {
|
|
|
|
if (*p == dir) {
|
|
|
|
*p = dir->next;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-25 21:20:31 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-03-25 23:11:45 +00:00
|
|
|
int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
|
|
|
|
memset(info, 0, sizeof(*info));
|
|
|
|
|
2017-04-23 04:11:13 +00:00
|
|
|
// special offset for '.' and '..'
|
|
|
|
if (dir->pos == sizeof(dir->d) - 2) {
|
2017-04-15 16:26:37 +00:00
|
|
|
info->type = LFS_TYPE_DIR;
|
|
|
|
strcpy(info->name, ".");
|
2017-04-23 04:11:13 +00:00
|
|
|
dir->pos += 1;
|
2017-04-15 16:26:37 +00:00
|
|
|
return 1;
|
2017-04-23 04:11:13 +00:00
|
|
|
} else if (dir->pos == sizeof(dir->d) - 1) {
|
2017-04-15 16:26:37 +00:00
|
|
|
info->type = LFS_TYPE_DIR;
|
|
|
|
strcpy(info->name, "..");
|
2017-04-23 04:11:13 +00:00
|
|
|
dir->pos += 1;
|
2017-04-15 16:26:37 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2017-03-25 23:11:45 +00:00
|
|
|
lfs_entry_t entry;
|
2017-05-14 17:01:45 +00:00
|
|
|
while (true) {
|
|
|
|
int err = lfs_dir_next(lfs, dir, &entry);
|
|
|
|
if (err) {
|
|
|
|
return (err == LFS_ERR_NOENT) ? 0 : err;
|
|
|
|
}
|
|
|
|
|
2018-03-17 15:28:14 +00:00
|
|
|
if ((0xf & entry.d.type) != LFS_TYPE_REG &&
|
|
|
|
(0xf & entry.d.type) != LFS_TYPE_DIR) {
|
2017-10-07 14:19:08 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// check that entry has not been moved
|
2018-03-03 16:26:06 +00:00
|
|
|
if (entry.d.type & LFS_STRUCT_MOVED) {
|
2017-10-07 14:19:08 +00:00
|
|
|
int moved = lfs_moved(lfs, &entry.d.u);
|
|
|
|
if (moved < 0) {
|
|
|
|
return moved;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (moved) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-03-03 16:26:06 +00:00
|
|
|
entry.d.type &= ~LFS_STRUCT_MOVED;
|
2017-05-14 17:01:45 +00:00
|
|
|
}
|
2017-10-07 14:19:08 +00:00
|
|
|
|
|
|
|
break;
|
2017-03-25 23:11:45 +00:00
|
|
|
}
|
|
|
|
|
2018-04-06 00:03:58 +00:00
|
|
|
int err = lfs_dir_getinfo(lfs, dir, &entry, info);
|
2017-03-25 23:11:45 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2017-04-23 04:11:13 +00:00
|
|
|
int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
|
|
|
|
// simply walk from head dir
|
|
|
|
int err = lfs_dir_rewind(lfs, dir);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
dir->pos = off;
|
|
|
|
|
|
|
|
while (off > (0x7fffffff & dir->d.size)) {
|
|
|
|
off -= 0x7fffffff & dir->d.size;
|
|
|
|
if (!(0x80000000 & dir->d.size)) {
|
2017-04-24 03:10:16 +00:00
|
|
|
return LFS_ERR_INVAL;
|
2017-04-23 04:11:13 +00:00
|
|
|
}
|
|
|
|
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_dir_fetch(lfs, dir, dir->d.tail);
|
2017-04-23 04:11:13 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
dir->off = off;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
|
2018-02-04 19:10:07 +00:00
|
|
|
(void)lfs;
|
2017-04-23 04:11:13 +00:00
|
|
|
return dir->pos;
|
|
|
|
}
|
|
|
|
|
|
|
|
int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
|
|
|
|
// reload the head dir
|
|
|
|
int err = lfs_dir_fetch(lfs, dir, dir->head);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
dir->pair[0] = dir->head[0];
|
|
|
|
dir->pair[1] = dir->head[1];
|
|
|
|
dir->pos = sizeof(dir->d) - 2;
|
|
|
|
dir->off = sizeof(dir->d);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-03-25 21:20:31 +00:00
|
|
|
|
2017-04-30 16:19:37 +00:00
|
|
|
/// File index list operations ///
|
2017-10-18 05:41:43 +00:00
|
|
|
static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
|
2017-10-17 00:08:47 +00:00
|
|
|
lfs_off_t size = *off;
|
2017-10-18 05:33:59 +00:00
|
|
|
lfs_off_t b = lfs->cfg->block_size - 2*4;
|
|
|
|
lfs_off_t i = size / b;
|
2017-10-17 00:08:47 +00:00
|
|
|
if (i == 0) {
|
|
|
|
return 0;
|
2017-04-23 00:48:31 +00:00
|
|
|
}
|
|
|
|
|
2017-10-18 05:33:59 +00:00
|
|
|
i = (size - 4*(lfs_popc(i-1)+2)) / b;
|
|
|
|
*off = size - b*i - 4*lfs_popc(i);
|
|
|
|
return i;
|
2017-04-23 00:48:31 +00:00
|
|
|
}
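// A worked example of the skip-list layout these helpers assume, based
// on the littlefs design notes (numbers here are illustrative): block
// n >= 1 starts with ctz(n)+1 4-byte back-pointers to blocks n-1, n-2,
// n-4, ..., n-2^ctz(n), while block 0 carries no pointers. For index 12
// (0b1100), ctz(12) = 2, so the block begins with three pointers, to
// blocks 11, 10, and 8. lfs_ctz_index() folds that per-block pointer
// overhead out of a raw file offset:
//
//     lfs_off_t off = pos;
//     lfs_off_t index = lfs_ctz_index(lfs, &off);
//     // index = how many blocks down the list the data lives
//     // off   = byte offset within that block, which necessarily
//     //         lands after the block's pointer prefix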
|
|
|
|
|
2017-10-18 05:41:43 +00:00
|
|
|
static int lfs_ctz_find(lfs_t *lfs,
|
2017-04-30 16:19:37 +00:00
|
|
|
lfs_cache_t *rcache, const lfs_cache_t *pcache,
|
|
|
|
lfs_block_t head, lfs_size_t size,
|
2017-04-23 00:48:31 +00:00
|
|
|
lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
|
|
|
|
if (size == 0) {
|
2017-10-17 00:31:56 +00:00
|
|
|
*block = 0xffffffff;
|
2017-04-23 00:48:31 +00:00
|
|
|
*off = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-10-18 05:41:43 +00:00
|
|
|
lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
|
|
|
|
lfs_off_t target = lfs_ctz_index(lfs, &pos);
|
2017-04-23 00:48:31 +00:00
|
|
|
|
2017-04-23 02:42:22 +00:00
|
|
|
while (current > target) {
|
2017-04-23 00:48:31 +00:00
|
|
|
lfs_size_t skip = lfs_min(
|
|
|
|
lfs_npw2(current-target+1) - 1,
|
2017-10-10 23:48:24 +00:00
|
|
|
lfs_ctz(current));
|
2017-04-23 00:48:31 +00:00
|
|
|
|
2017-04-30 16:19:37 +00:00
|
|
|
int err = lfs_cache_read(lfs, rcache, pcache, head, 4*skip, &head, 4);
|
2018-02-02 11:58:43 +00:00
|
|
|
head = lfs_fromle32(head);
|
2017-04-23 00:48:31 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-01-29 21:20:12 +00:00
|
|
|
LFS_ASSERT(head >= 2 && head <= lfs->cfg->block_count);
|
2017-04-23 00:48:31 +00:00
|
|
|
current -= 1 << skip;
|
|
|
|
}
|
|
|
|
|
|
|
|
*block = head;
|
|
|
|
*off = pos;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-10-18 05:41:43 +00:00
|
|
|
static int lfs_ctz_extend(lfs_t *lfs,
|
2017-04-30 16:19:37 +00:00
|
|
|
lfs_cache_t *rcache, lfs_cache_t *pcache,
|
2017-04-23 00:48:31 +00:00
|
|
|
lfs_block_t head, lfs_size_t size,
|
2017-11-16 21:10:17 +00:00
|
|
|
lfs_block_t *block, lfs_off_t *off) {
|
2017-05-14 17:01:45 +00:00
|
|
|
while (true) {
|
2017-11-16 21:10:17 +00:00
|
|
|
// go ahead and grab a block
|
|
|
|
lfs_block_t nblock;
|
|
|
|
int err = lfs_alloc(lfs, &nblock);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2018-01-29 21:20:12 +00:00
|
|
|
LFS_ASSERT(nblock >= 2 && nblock <= lfs->cfg->block_count);
|
2017-04-23 00:48:31 +00:00
|
|
|
|
2017-11-16 21:10:17 +00:00
|
|
|
if (true) {
|
|
|
|
err = lfs_bd_erase(lfs, nblock);
|
2017-10-17 00:31:56 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
2017-05-14 17:01:45 +00:00
|
|
|
}
|
2017-04-23 00:48:31 +00:00
|
|
|
|
2017-10-17 00:31:56 +00:00
|
|
|
if (size == 0) {
|
2017-11-16 21:10:17 +00:00
|
|
|
*block = nblock;
|
2017-10-17 00:31:56 +00:00
|
|
|
*off = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
2017-04-23 00:48:31 +00:00
|
|
|
|
2017-10-17 00:31:56 +00:00
|
|
|
size -= 1;
|
|
|
|
lfs_off_t index = lfs_ctz_index(lfs, &size);
|
|
|
|
size += 1;
|
|
|
|
|
|
|
|
// just copy out the last block if it is incomplete
|
|
|
|
if (size != lfs->cfg->block_size) {
|
|
|
|
for (lfs_off_t i = 0; i < size; i++) {
|
|
|
|
uint8_t data;
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_cache_read(lfs, rcache, NULL,
|
2017-10-17 00:31:56 +00:00
|
|
|
head, i, &data, 1);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2017-04-23 02:42:22 +00:00
|
|
|
|
2017-10-17 00:31:56 +00:00
|
|
|
err = lfs_cache_prog(lfs, pcache, rcache,
|
2017-11-16 21:10:17 +00:00
|
|
|
nblock, i, &data, 1);
|
2017-10-17 00:31:56 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
2017-05-14 17:01:45 +00:00
|
|
|
}
|
|
|
|
|
2017-11-16 21:10:17 +00:00
|
|
|
*block = nblock;
|
2017-10-17 00:31:56 +00:00
|
|
|
*off = size;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// append block
|
|
|
|
index += 1;
|
|
|
|
lfs_size_t skips = lfs_ctz(index) + 1;
|
|
|
|
|
|
|
|
for (lfs_off_t i = 0; i < skips; i++) {
|
2018-02-02 11:58:43 +00:00
|
|
|
head = lfs_tole32(head);
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_cache_prog(lfs, pcache, rcache,
|
2017-11-16 21:10:17 +00:00
|
|
|
nblock, 4*i, &head, 4);
|
2018-02-02 11:58:43 +00:00
|
|
|
head = lfs_fromle32(head);
|
2017-05-14 17:01:45 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-10-17 00:31:56 +00:00
|
|
|
if (i != skips-1) {
|
|
|
|
err = lfs_cache_read(lfs, rcache, NULL,
|
|
|
|
head, 4*i, &head, 4);
|
2018-02-02 11:58:43 +00:00
|
|
|
head = lfs_fromle32(head);
|
2017-10-17 00:31:56 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2017-05-14 17:01:45 +00:00
|
|
|
}
|
|
|
|
|
2018-01-29 21:20:12 +00:00
|
|
|
LFS_ASSERT(head >= 2 && head <= lfs->cfg->block_count);
|
2017-05-14 17:01:45 +00:00
|
|
|
}
|
2017-09-17 17:53:18 +00:00
|
|
|
|
2017-11-16 21:10:17 +00:00
|
|
|
*block = nblock;
|
2017-10-17 00:31:56 +00:00
|
|
|
*off = 4*skips;
|
|
|
|
return 0;
|
2017-04-23 02:42:22 +00:00
|
|
|
}
|
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
relocate:
|
2017-11-16 21:10:17 +00:00
|
|
|
LFS_DEBUG("Bad block at %d", nblock);
|
2017-04-23 00:48:31 +00:00
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
// just clear cache and try a new block
|
|
|
|
pcache->block = 0xffffffff;
|
2017-04-23 00:48:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-18 05:41:43 +00:00
|
|
|
static int lfs_ctz_traverse(lfs_t *lfs,
|
2017-04-30 16:19:37 +00:00
|
|
|
lfs_cache_t *rcache, const lfs_cache_t *pcache,
|
2017-04-23 00:48:31 +00:00
|
|
|
lfs_block_t head, lfs_size_t size,
|
|
|
|
int (*cb)(void*, lfs_block_t), void *data) {
|
|
|
|
if (size == 0) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-10-18 05:41:43 +00:00
|
|
|
lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
|
2017-04-23 00:48:31 +00:00
|
|
|
|
|
|
|
while (true) {
|
|
|
|
int err = cb(data, head);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (index == 0) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-12-27 18:30:01 +00:00
|
|
|
lfs_block_t heads[2];
|
|
|
|
int count = 2 - (index & 1);
|
|
|
|
err = lfs_cache_read(lfs, rcache, pcache, head, 0, &heads, count*4);
|
2018-02-02 11:58:43 +00:00
|
|
|
heads[0] = lfs_fromle32(heads[0]);
|
|
|
|
heads[1] = lfs_fromle32(heads[1]);
|
2017-04-23 00:48:31 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-12-27 18:30:01 +00:00
|
|
|
for (int i = 0; i < count-1; i++) {
|
|
|
|
err = cb(data, heads[i]);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
head = heads[count-1];
|
|
|
|
index -= count;
|
2017-04-23 00:48:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-04-18 03:27:06 +00:00
|
|
|
/// Top level file operations ///
|
2018-05-22 22:43:39 +00:00
|
|
|
int lfs_file_open_(lfs_t *lfs, lfs_file_t_ *file,
|
|
|
|
const char *path, int flags) {
|
|
|
|
// deorphan if we haven't yet, needed at most once after poweron
|
|
|
|
if ((flags & 3) != LFS_O_RDONLY && !lfs->deorphaned) {
|
|
|
|
int err = lfs_deorphan_(lfs);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// allocate entry for file if it doesn't exist
|
|
|
|
lfs_dir_t_ cwd;
|
|
|
|
lfs_entry_t_ entry;
|
|
|
|
int err = lfs_dir_find_(lfs, &cwd, &path, &entry);
|
|
|
|
if (err && (err != LFS_ERR_NOENT || strchr(path, '/') != NULL)) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (err == LFS_ERR_NOENT) {
|
|
|
|
if (!(flags & LFS_O_CREAT)) {
|
|
|
|
return LFS_ERR_NOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
// check that name fits
|
|
|
|
lfs_size_t nlen = strlen(path);
|
|
|
|
if (nlen > lfs->name_size) {
|
|
|
|
return LFS_ERR_NAMETOOLONG;
|
|
|
|
}
|
|
|
|
|
|
|
|
// get next slot and create entry to remember name
|
|
|
|
uint16_t id;
|
|
|
|
err = lfs_dir_add(lfs, &cwd, &id);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = lfs_dir_commit_(lfs, &cwd, (lfs_entry_t_[]){
|
|
|
|
{lfs_mktag(LFS_TYPE_NAME_, id, nlen), .u.buffer=(void*)path},
|
|
|
|
{lfs_mktag(LFS_TYPE_REG_ | LFS_STRUCT_INLINE_, id, 0)}}, 2);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
} else if (lfs_tag_subtype(entry.tag) != LFS_TYPE_REG_) {
|
|
|
|
return LFS_ERR_ISDIR;
|
|
|
|
} else if (flags & LFS_O_EXCL) {
|
|
|
|
return LFS_ERR_EXIST;
|
|
|
|
}
|
|
|
|
|
|
|
|
// allocate buffer if needed
|
|
|
|
file->cache.block = 0xffffffff;
|
|
|
|
if (lfs->cfg->file_buffer) {
|
|
|
|
file->cache.buffer = lfs->cfg->file_buffer;
|
|
|
|
} else if ((file->flags & 3) == LFS_O_RDONLY) {
|
|
|
|
file->cache.buffer = lfs_malloc(lfs->cfg->read_size);
|
|
|
|
if (!file->cache.buffer) {
|
|
|
|
return LFS_ERR_NOMEM;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
file->cache.buffer = lfs_malloc(lfs->cfg->prog_size);
|
|
|
|
if (!file->cache.buffer) {
|
|
|
|
return LFS_ERR_NOMEM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// setup file struct
|
|
|
|
file->pair[0] = cwd.pair[0];
|
|
|
|
file->pair[1] = cwd.pair[1];
|
|
|
|
file->id = lfs_tag_id(entry.tag);
|
|
|
|
file->flags = flags;
|
|
|
|
file->pos = 0;
|
|
|
|
|
|
|
|
if (lfs_tag_struct(entry.tag) == LFS_STRUCT_INLINE_) {
|
|
|
|
// load inline files
|
|
|
|
file->head = 0xfffffffe;
|
|
|
|
file->size = lfs_tag_size(entry.tag);
|
|
|
|
file->flags |= LFS_F_INLINE;
|
|
|
|
file->cache.block = file->head;
|
|
|
|
file->cache.off = 0;
|
|
|
|
err = lfs_bd_read(lfs, entry.u.d.block, entry.u.d.off,
|
|
|
|
file->cache.buffer, file->size);
|
|
|
|
if (err) {
|
|
|
|
lfs_free(file->cache.buffer);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// use ctz list from entry
|
|
|
|
err = lfs_bd_read(lfs, entry.u.d.block, entry.u.d.off,
|
|
|
|
&entry.u, sizeof(entry.u));
|
|
|
|
// TODO move to disk struct directly?
|
|
|
|
file->head = entry.u.ctz.head;
|
|
|
|
file->size = entry.u.ctz.size;
|
|
|
|
}
|
|
|
|
|
|
|
|
// truncate if requested
|
|
|
|
if (flags & LFS_O_TRUNC) {
|
|
|
|
if (file->size != 0) {
|
|
|
|
file->flags |= LFS_F_DIRTY;
|
|
|
|
}
|
|
|
|
|
|
|
|
file->head = 0xfffffffe;
|
|
|
|
file->size = 0;
|
|
|
|
file->flags |= LFS_F_INLINE;
|
|
|
|
file->cache.block = file->head;
|
|
|
|
file->cache.off = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// add to list of files
|
|
|
|
file->next = lfs->files;
|
|
|
|
lfs->files = file;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-03-25 21:20:31 +00:00
|
|
|
int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
|
2017-03-20 03:00:56 +00:00
|
|
|
const char *path, int flags) {
|
2017-10-07 21:56:00 +00:00
|
|
|
// deorphan if we haven't yet, needed at most once after poweron
|
|
|
|
if ((flags & 3) != LFS_O_RDONLY && !lfs->deorphaned) {
|
|
|
|
int err = lfs_deorphan(lfs);
|
2017-10-07 14:19:08 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
// allocate entry for file if it doesn't exist
|
2017-03-20 03:00:56 +00:00
|
|
|
lfs_dir_t cwd;
|
2017-04-18 03:27:06 +00:00
|
|
|
int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
|
2017-03-20 03:00:56 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-04-24 04:39:50 +00:00
|
|
|
lfs_entry_t entry;
|
|
|
|
err = lfs_dir_find(lfs, &cwd, &entry, &path);
|
2017-09-17 21:46:09 +00:00
|
|
|
if (err && (err != LFS_ERR_NOENT || strchr(path, '/') != NULL)) {
|
2017-04-18 03:27:06 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-04-24 03:10:16 +00:00
|
|
|
if (err == LFS_ERR_NOENT) {
|
2017-04-23 00:48:31 +00:00
|
|
|
if (!(flags & LFS_O_CREAT)) {
|
2017-04-24 03:10:16 +00:00
|
|
|
return LFS_ERR_NOENT;
|
2017-04-23 00:48:31 +00:00
|
|
|
}
|
|
|
|
|
2018-04-01 20:36:29 +00:00
|
|
|
// check that name fits
|
|
|
|
lfs_size_t nlen = strlen(path);
|
|
|
|
if (nlen > lfs->name_size) {
|
|
|
|
return LFS_ERR_NAMETOOLONG;
|
|
|
|
}
|
|
|
|
|
2017-04-18 03:27:06 +00:00
|
|
|
// create entry to remember name
|
2018-03-17 15:28:14 +00:00
|
|
|
entry.d.type = LFS_STRUCT_INLINE | LFS_TYPE_REG;
|
|
|
|
entry.d.elen = 0;
|
2017-07-18 07:09:35 +00:00
|
|
|
entry.d.alen = 0;
|
2018-04-01 20:36:29 +00:00
|
|
|
entry.d.nlen = nlen;
|
2018-03-27 22:57:07 +00:00
|
|
|
entry.size = 0;
|
2018-03-16 01:58:29 +00:00
|
|
|
|
2018-03-27 22:57:07 +00:00
|
|
|
err = lfs_dir_set(lfs, &cwd, &entry, (struct lfs_region[]){
|
2018-04-08 09:23:23 +00:00
|
|
|
{LFS_FROM_MEM, 0, 0, &entry.d, 4},
|
|
|
|
{LFS_FROM_MEM, 0, 0, path, nlen}}, 2);
|
2017-03-20 03:00:56 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2018-03-03 16:26:06 +00:00
|
|
|
} else if ((0xf & entry.d.type) == LFS_TYPE_DIR) {
|
2017-04-24 03:10:16 +00:00
|
|
|
return LFS_ERR_ISDIR;
|
2017-04-23 00:48:31 +00:00
|
|
|
} else if (flags & LFS_O_EXCL) {
|
2017-11-16 23:50:14 +00:00
|
|
|
return LFS_ERR_EXIST;
|
2017-03-20 03:00:56 +00:00
|
|
|
}
|
|
|
|
|
2017-04-30 16:19:37 +00:00
|
|
|
// allocate buffer if needed
|
2017-04-30 16:54:27 +00:00
|
|
|
file->cache.block = 0xffffffff;
|
2017-04-30 16:19:37 +00:00
|
|
|
if (lfs->cfg->file_buffer) {
|
|
|
|
file->cache.buffer = lfs->cfg->file_buffer;
|
|
|
|
} else if ((file->flags & 3) == LFS_O_RDONLY) {
|
2018-01-29 21:20:12 +00:00
|
|
|
file->cache.buffer = lfs_malloc(lfs->cfg->read_size);
|
2017-04-30 16:19:37 +00:00
|
|
|
if (!file->cache.buffer) {
|
|
|
|
return LFS_ERR_NOMEM;
|
|
|
|
}
|
|
|
|
} else {
|
2018-01-29 21:20:12 +00:00
|
|
|
file->cache.buffer = lfs_malloc(lfs->cfg->prog_size);
|
2017-04-30 16:19:37 +00:00
|
|
|
if (!file->cache.buffer) {
|
|
|
|
return LFS_ERR_NOMEM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-18 01:32:16 +00:00
|
|
|
// setup file struct
|
|
|
|
file->pair[0] = cwd.pair[0];
|
|
|
|
file->pair[1] = cwd.pair[1];
|
2018-04-08 21:58:12 +00:00
|
|
|
file->pairoff = entry.off;
|
2018-03-18 01:32:16 +00:00
|
|
|
file->flags = flags;
|
|
|
|
file->pos = 0;
|
|
|
|
|
2018-04-03 13:28:09 +00:00
|
|
|
// calculate max inline size based on the size of the entry
|
|
|
|
file->inline_size = lfs_min(lfs->inline_size,
|
|
|
|
lfs->cfg->block_size - (sizeof(cwd.d)+4) -
|
|
|
|
(lfs_entry_size(&entry) - lfs_entry_elen(&entry)));
|
2018-03-18 01:32:16 +00:00
|
|
|
|
2018-03-17 15:28:14 +00:00
|
|
|
if ((0x70 & entry.d.type) == LFS_STRUCT_INLINE) {
|
2018-04-03 13:28:09 +00:00
|
|
|
// load inline files
|
2018-03-17 15:28:14 +00:00
|
|
|
file->head = 0xfffffffe;
|
2018-04-03 13:28:09 +00:00
|
|
|
file->size = lfs_entry_elen(&entry);
|
2018-03-17 15:28:14 +00:00
|
|
|
file->flags |= LFS_F_INLINE;
|
|
|
|
file->cache.block = file->head;
|
|
|
|
file->cache.off = 0;
|
2018-03-23 23:35:55 +00:00
|
|
|
err = lfs_dir_get(lfs, &cwd,
|
2018-03-17 15:28:14 +00:00
|
|
|
entry.off + 4,
|
|
|
|
file->cache.buffer, file->size);
|
|
|
|
if (err) {
|
|
|
|
lfs_free(file->cache.buffer);
|
|
|
|
return err;
|
|
|
|
}
|
2018-04-03 13:28:09 +00:00
|
|
|
} else {
|
|
|
|
// use ctz list from entry
|
|
|
|
file->head = entry.d.u.file.head;
|
|
|
|
file->size = entry.d.u.file.size;
|
|
|
|
}
|
|
|
|
|
|
|
|
// truncate if requested
|
|
|
|
if (flags & LFS_O_TRUNC) {
|
|
|
|
if (file->size != 0) {
|
|
|
|
file->flags |= LFS_F_DIRTY;
|
|
|
|
}
|
|
|
|
|
|
|
|
file->head = 0xfffffffe;
|
|
|
|
file->size = 0;
|
|
|
|
file->flags |= LFS_F_INLINE;
|
|
|
|
file->cache.block = file->head;
|
|
|
|
file->cache.off = 0;
|
2018-03-17 15:28:14 +00:00
|
|
|
}
|
|
|
|
|
2017-04-29 15:22:01 +00:00
|
|
|
// add to list of files
|
|
|
|
file->next = lfs->files;
|
|
|
|
lfs->files = file;
|
|
|
|
|
2017-04-18 03:27:06 +00:00
|
|
|
return 0;
|
2017-03-20 03:00:56 +00:00
|
|
|
}
|
|
|
|
|
2017-03-25 21:20:31 +00:00
|
|
|
int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
|
2018-04-11 00:55:17 +00:00
|
|
|
int err = lfs_file_sync(lfs, file);
|
2017-04-29 15:22:01 +00:00
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
// remove from list of files
|
|
|
|
for (lfs_file_t **p = &lfs->files; *p; p = &(*p)->next) {
|
|
|
|
if (*p == file) {
|
|
|
|
*p = file->next;
|
|
|
|
break;
|
2017-04-29 15:22:01 +00:00
|
|
|
}
|
2018-04-11 00:55:17 +00:00
|
|
|
}
|
2017-04-23 02:42:22 +00:00
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
// clean up memory
|
|
|
|
if (!lfs->cfg->file_buffer) {
|
|
|
|
lfs_free(file->cache.buffer);
|
|
|
|
}
|
2018-04-08 21:58:12 +00:00
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-22 22:43:39 +00:00
|
|
|
static int lfs_file_relocate_(lfs_t *lfs, lfs_file_t_ *file) {
|
|
|
|
relocate:;
|
|
|
|
// just relocate what exists into new block
|
|
|
|
lfs_block_t nblock;
|
|
|
|
int err = lfs_alloc(lfs, &nblock);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = lfs_bd_erase(lfs, nblock);
|
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// either read from dirty cache or disk
|
|
|
|
for (lfs_off_t i = 0; i < file->off; i++) {
|
|
|
|
uint8_t data;
|
|
|
|
err = lfs_cache_read(lfs, &lfs->rcache, &file->cache,
|
|
|
|
file->block, i, &data, 1);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = lfs_cache_prog(lfs, &lfs->pcache, &lfs->rcache,
|
|
|
|
nblock, i, &data, 1);
|
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// copy over new state of file
|
|
|
|
memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->prog_size);
|
|
|
|
file->cache.block = lfs->pcache.block;
|
|
|
|
file->cache.off = lfs->pcache.off;
|
|
|
|
lfs->pcache.block = 0xffffffff;
|
|
|
|
|
|
|
|
file->block = nblock;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
|
|
|
|
relocate:;
|
|
|
|
// just relocate what exists into new block
|
|
|
|
lfs_block_t nblock;
|
|
|
|
int err = lfs_alloc(lfs, &nblock);
|
|
|
|
if (err) {
|
2017-06-25 21:56:12 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
err = lfs_bd_erase(lfs, nblock);
|
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// either read from dirty cache or disk
|
|
|
|
for (lfs_off_t i = 0; i < file->off; i++) {
|
|
|
|
uint8_t data;
|
|
|
|
err = lfs_cache_read(lfs, &lfs->rcache, &file->cache,
|
|
|
|
file->block, i, &data, 1);
|
2017-06-25 22:23:40 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
2017-06-25 19:01:33 +00:00
|
|
|
}
|
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
err = lfs_cache_prog(lfs, &lfs->pcache, &lfs->rcache,
|
|
|
|
nblock, i, &data, 1);
|
2017-06-25 19:01:33 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
2018-04-11 00:55:17 +00:00
|
|
|
}
|
2017-06-25 19:01:33 +00:00
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
// copy over new state of file
|
|
|
|
memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->prog_size);
|
|
|
|
file->cache.block = lfs->pcache.block;
|
|
|
|
file->cache.off = lfs->pcache.off;
|
|
|
|
lfs->pcache.block = 0xffffffff;
|
2017-06-25 19:01:33 +00:00
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
file->block = nblock;
|
|
|
|
return 0;
|
|
|
|
}
|
2018-04-08 21:58:12 +00:00
|
|
|
|
2018-05-22 22:43:39 +00:00
|
|
|
static int lfs_file_flush_(lfs_t *lfs, lfs_file_t_ *file) {
|
|
|
|
if (file->flags & LFS_F_READING) {
|
|
|
|
file->flags &= ~LFS_F_READING;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (file->flags & LFS_F_WRITING) {
|
|
|
|
lfs_off_t pos = file->pos;
|
|
|
|
|
|
|
|
if (!(file->flags & LFS_F_INLINE)) {
|
|
|
|
// copy over anything after current branch
|
|
|
|
lfs_file_t_ orig = {
|
|
|
|
.head = file->head,
|
|
|
|
.size = file->size,
|
|
|
|
.flags = LFS_O_RDONLY,
|
|
|
|
.pos = file->pos,
|
|
|
|
.cache = lfs->rcache,
|
|
|
|
};
|
|
|
|
lfs->rcache.block = 0xffffffff;
|
|
|
|
|
|
|
|
while (file->pos < file->size) {
|
|
|
|
// copy over a byte at a time, leave it up to caching
|
|
|
|
// to make this efficient
|
|
|
|
uint8_t data;
|
|
|
|
lfs_ssize_t res = lfs_file_read(lfs, &orig, &data, 1);
|
|
|
|
if (res < 0) {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
res = lfs_file_write(lfs, file, &data, 1);
|
|
|
|
if (res < 0) {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
// keep our reference to the rcache in sync
|
|
|
|
if (lfs->rcache.block != 0xffffffff) {
|
|
|
|
orig.cache.block = 0xffffffff;
|
|
|
|
lfs->rcache.block = 0xffffffff;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// write out what we have
|
|
|
|
while (true) {
|
|
|
|
int err = lfs_cache_flush(lfs, &file->cache, &lfs->rcache);
|
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
relocate:
|
|
|
|
LFS_DEBUG("Bad block at %d", file->block);
|
|
|
|
err = lfs_file_relocate_(lfs, file);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
file->size = lfs_max(file->pos, file->size);
|
|
|
|
}
|
|
|
|
|
|
|
|
// actual file updates
|
|
|
|
file->head = file->block;
|
|
|
|
file->size = file->pos;
|
|
|
|
file->flags &= ~LFS_F_WRITING;
|
|
|
|
file->flags |= LFS_F_DIRTY;
|
|
|
|
|
|
|
|
file->pos = pos;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
|
|
|
|
if (file->flags & LFS_F_READING) {
|
|
|
|
file->flags &= ~LFS_F_READING;
|
2017-04-23 02:42:22 +00:00
|
|
|
}
|
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
if (file->flags & LFS_F_WRITING) {
|
|
|
|
lfs_off_t pos = file->pos;
|
2018-04-08 21:58:12 +00:00
|
|
|
|
2018-04-11 00:55:17 +00:00
|
|
|
if (!(file->flags & LFS_F_INLINE)) {
|
|
|
|
// copy over anything after current branch
|
|
|
|
lfs_file_t orig = {
|
|
|
|
.head = file->head,
|
|
|
|
.size = file->size,
|
|
|
|
.flags = LFS_O_RDONLY,
|
|
|
|
.pos = file->pos,
|
|
|
|
.cache = lfs->rcache,
|
|
|
|
};
|
2018-03-19 01:36:48 +00:00
|
|
|
lfs->rcache.block = 0xffffffff;
|
2017-04-24 04:39:50 +00:00
|
|
|
|
2018-03-19 01:36:48 +00:00
|
|
|
while (file->pos < file->size) {
|
|
|
|
// copy over a byte at a time, leave it up to caching
|
|
|
|
// to make this efficient
|
|
|
|
uint8_t data;
|
|
|
|
lfs_ssize_t res = lfs_file_read(lfs, &orig, &data, 1);
|
|
|
|
if (res < 0) {
|
|
|
|
return res;
|
|
|
|
}
|
2017-04-23 02:42:22 +00:00
|
|
|
|
2018-03-19 01:36:48 +00:00
|
|
|
res = lfs_file_write(lfs, file, &data, 1);
|
|
|
|
if (res < 0) {
|
|
|
|
return res;
|
|
|
|
}
|
2017-04-30 16:19:37 +00:00
|
|
|
|
2018-03-19 01:36:48 +00:00
|
|
|
// keep our reference to the rcache in sync
|
|
|
|
if (lfs->rcache.block != 0xffffffff) {
|
|
|
|
orig.cache.block = 0xffffffff;
|
|
|
|
lfs->rcache.block = 0xffffffff;
|
|
|
|
}
|
2017-04-30 16:19:37 +00:00
|
|
|
}
|
|
|
|
|
2018-03-19 01:36:48 +00:00
|
|
|
// write out what we have
|
|
|
|
while (true) {
|
|
|
|
int err = lfs_cache_flush(lfs, &file->cache, &lfs->rcache);
|
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
goto relocate;
|
|
|
|
}
|
|
|
|
return err;
|
2017-06-25 19:01:33 +00:00
|
|
|
}
|
|
|
|
|
2018-03-19 01:36:48 +00:00
|
|
|
break;
|
2017-06-25 19:01:33 +00:00
|
|
|
relocate:
|
2018-03-19 01:36:48 +00:00
|
|
|
LFS_DEBUG("Bad block at %d", file->block);
|
|
|
|
err = lfs_file_relocate(lfs, file);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2017-06-25 19:01:33 +00:00
|
|
|
}
|
2018-03-19 01:36:48 +00:00
|
|
|
} else {
|
|
|
|
file->size = lfs_max(file->pos, file->size);
|
2017-04-23 02:42:22 +00:00
|
|
|
}
|
|
|
|
|
2017-04-24 04:39:50 +00:00
|
|
|
// actual file updates
|
2017-04-30 16:19:37 +00:00
|
|
|
file->head = file->block;
|
2017-04-24 04:39:50 +00:00
|
|
|
file->size = file->pos;
|
2017-04-30 16:19:37 +00:00
|
|
|
file->flags &= ~LFS_F_WRITING;
|
|
|
|
file->flags |= LFS_F_DIRTY;
|
2017-04-24 04:39:50 +00:00
|
|
|
|
|
|
|
file->pos = pos;
|
|
|
|
}
|
2017-04-23 02:42:22 +00:00
|
|
|
|
2017-04-23 04:11:13 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-22 22:43:39 +00:00
|
|
|
int lfs_file_sync_(lfs_t *lfs, lfs_file_t_ *file) {
|
|
|
|
int err = lfs_file_flush_(lfs, file);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((file->flags & LFS_F_DIRTY) &&
|
|
|
|
!(file->flags & LFS_F_ERRED) &&
|
|
|
|
!lfs_pairisnull(file->pair)) {
|
|
|
|
// update dir entry
|
|
|
|
// TODO keep list of dirs including these guys for no
|
|
|
|
// need of another reload?
|
|
|
|
lfs_dir_t_ cwd;
|
|
|
|
err = lfs_dir_fetch_(lfs, &cwd, file->pair);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// either update the references or inline the whole file
|
|
|
|
// TODO handle attributes
|
|
|
|
// if (!(file->flags & LFS_F_INLINE)) {
|
|
|
|
// entry.d.type = LFS_STRUCT_CTZ | LFS_TYPE_REG;
|
|
|
|
// entry.d.u.file.head = file->head;
|
|
|
|
// entry.d.u.file.size = file->size;
|
|
|
|
//
|
|
|
|
// lfs_entry_tole32(&entry.d);
|
|
|
|
// buffer = (const uint8_t *)&entry.d + 4;
|
|
|
|
// size = sizeof(entry.d) - 4;
|
|
|
|
// } else {
|
|
|
|
// entry.d.type = LFS_STRUCT_INLINE | LFS_TYPE_REG;
|
|
|
|
//
|
|
|
|
// buffer = file->cache.buffer;
|
|
|
|
// size = file->size;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// // get new alen from disk
|
|
|
|
// lfs_ssize_t newalen = lfs_dir_checkattrs(lfs, &cwd, &entry,
|
|
|
|
// file->attrs, file->attrcount);
|
|
|
|
// if (newalen < 0) {
|
|
|
|
// return newalen;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// entry.d.elen = size & 0xff;
|
|
|
|
// entry.d.alen = (newalen & 0x3f) | ((size >> 2) & 0xc0);
|
|
|
|
//
|
|
|
|
// // write out update
|
|
|
|
// err = lfs_dir_set(lfs, &cwd, &entry, (struct lfs_region[]){
|
|
|
|
// {LFS_FROM_MEM, 0, 4, &entry.d, 4},
|
|
|
|
// {LFS_FROM_MEM, 4, oldelen, buffer, size},
|
|
|
|
// {LFS_FROM_ATTRS, 4+oldelen, oldalen,
|
|
|
|
// &(struct lfs_region_attrs){file->attrs, file->attrcount},
|
|
|
|
// newalen}}, 3);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
|
|
|
|
file->flags &= ~LFS_F_DIRTY;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
    int err = lfs_file_flush(lfs, file);
    if (err) {
        return err;
    }

    if ((file->flags & LFS_F_DIRTY) &&
            !(file->flags & LFS_F_ERRED) &&
            !lfs_pairisnull(file->pair)) {
        // update dir entry
        lfs_dir_t cwd;
        err = lfs_dir_fetch(lfs, &cwd, file->pair);
        if (err) {
            return err;
        }

        lfs_entry_t entry = {.off = file->pairoff};
        err = lfs_dir_get(lfs, &cwd, entry.off, &entry.d, 4);
        if (err) {
            return err;
        }
        entry.size = lfs_entry_size(&entry);

        LFS_ASSERT((0xf & entry.d.type) == LFS_TYPE_REG);
        lfs_size_t oldelen = lfs_entry_elen(&entry);
        lfs_size_t oldalen = lfs_entry_alen(&entry);
        const void *buffer;
        lfs_size_t size;

        // either update the references or inline the whole file
        if (!(file->flags & LFS_F_INLINE)) {
            entry.d.type = LFS_STRUCT_CTZ | LFS_TYPE_REG;
            entry.d.u.file.head = file->head;
            entry.d.u.file.size = file->size;

            lfs_entry_tole32(&entry.d);
            buffer = (const uint8_t *)&entry.d + 4;
            size = sizeof(entry.d) - 4;
        } else {
            entry.d.type = LFS_STRUCT_INLINE | LFS_TYPE_REG;

            buffer = file->cache.buffer;
            size = file->size;
        }

        // get new alen from disk
        lfs_ssize_t newalen = lfs_dir_checkattrs(lfs, &cwd, &entry,
                file->attrs, file->attrcount);
        if (newalen < 0) {
            return newalen;
        }

        entry.d.elen = size & 0xff;
        entry.d.alen = (newalen & 0x3f) | ((size >> 2) & 0xc0);

        // write out update
        err = lfs_dir_set(lfs, &cwd, &entry, (struct lfs_region[]){
                {LFS_FROM_MEM, 0, 4, &entry.d, 4},
                {LFS_FROM_MEM, 4, oldelen, buffer, size},
                {LFS_FROM_ATTRS, 4+oldelen, oldalen,
                    &(struct lfs_region_attrs){file->attrs, file->attrcount},
                    newalen}}, 3);
        if (err) {
            return err;
        }

        file->flags &= ~LFS_F_DIRTY;
    }

    return 0;
}

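/*
 * Usage sketch (editor's addition, not part of littlefs itself): how sync is
 * typically driven from application code. Assumes a mounted lfs_t named `lfs`,
 * the lfs_file_open/lfs_file_close declarations from lfs.h, and a hypothetical
 * "log.txt" path; error handling is abbreviated.
 *
 *     lfs_file_t file;
 *     int err = lfs_file_open(&lfs, &file, "log.txt",
 *             LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND);
 *     if (err) {
 *         return err;
 *     }
 *
 *     lfs_file_write(&lfs, &file, "boot\n", 5);
 *     lfs_file_sync(&lfs, &file);   // data and dir entry committed here
 *     lfs_file_close(&lfs, &file);  // close is expected to sync as well
 */
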
lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if ((file->flags & 3) == LFS_O_WRONLY) {
        return LFS_ERR_BADF;
    }

    if (file->flags & LFS_F_WRITING) {
        // flush out any writes
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }
    }

    if (file->pos >= file->size) {
        // eof if past end
        return 0;
    }

    size = lfs_min(size, file->size - file->pos);
    nsize = size;

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_READING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                int err = lfs_ctz_find(lfs, &file->cache, NULL,
                        file->head, file->size,
                        file->pos, &file->block, &file->off);
                if (err) {
                    return err;
                }
            } else {
                file->block = 0xfffffffe;
                file->off = file->pos;
            }

            file->flags |= LFS_F_READING;
        }

        // read as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        int err = lfs_cache_read(lfs, &file->cache, NULL,
                file->block, file->off, data, diff);
        if (err) {
            return err;
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;
    }

    return size;
}

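/*
 * Usage sketch (editor's addition): reading a file in fixed-size chunks.
 * lfs_file_read returns the number of bytes read, 0 at end-of-file, or a
 * negative error code. Assumes a mounted lfs_t named `lfs` and an already
 * opened, readable file handle.
 *
 *     uint8_t chunk[64];
 *     lfs_ssize_t res;
 *     while ((res = lfs_file_read(&lfs, &file, chunk, sizeof(chunk))) > 0) {
 *         // process `res` bytes of chunk here
 *     }
 *     if (res < 0) {
 *         return res;  // propagate read error
 *     }
 */
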
lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if ((file->flags & 3) == LFS_O_RDONLY) {
        return LFS_ERR_BADF;
    }

    if (file->flags & LFS_F_READING) {
        // drop any reads
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }
    }

    if ((file->flags & LFS_O_APPEND) && file->pos < file->size) {
        file->pos = file->size;
    }

    if (!(file->flags & LFS_F_WRITING) && file->pos > file->size) {
        // fill with zeros
        lfs_off_t pos = file->pos;
        file->pos = file->size;

        while (file->pos < pos) {
            lfs_ssize_t res = lfs_file_write(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return res;
            }
        }
    }

    if ((file->flags & LFS_F_INLINE) &&
            file->pos + nsize >= file->inline_size) {
        // inline file doesn't fit anymore
        file->block = 0xfffffffe;
        file->off = file->pos;

        lfs_alloc_ack(lfs);
        int err = lfs_file_relocate(lfs, file);
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }

        file->flags &= ~LFS_F_INLINE;
        file->flags |= LFS_F_WRITING;
    }

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_WRITING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
                    // find out which block we're extending from
                    int err = lfs_ctz_find(lfs, &file->cache, NULL,
                            file->head, file->size,
                            file->pos-1, &file->block, &file->off);
                    if (err) {
                        file->flags |= LFS_F_ERRED;
                        return err;
                    }

                    // mark cache as dirty since we may have read data into it
                    file->cache.block = 0xffffffff;
                }

                // extend file with new blocks
                lfs_alloc_ack(lfs);
                int err = lfs_ctz_extend(lfs, &lfs->rcache, &file->cache,
                        file->block, file->pos,
                        &file->block, &file->off);
                if (err) {
                    file->flags |= LFS_F_ERRED;
                    return err;
                }
            } else {
                file->block = 0xfffffffe;
                file->off = file->pos;
            }

            file->flags |= LFS_F_WRITING;
        }

        // program as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        while (true) {
            int err = lfs_cache_prog(lfs, &file->cache, &lfs->rcache,
                    file->block, file->off, data, diff);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                file->flags |= LFS_F_ERRED;
                return err;
            }

            break;
relocate:
            err = lfs_file_relocate(lfs, file);
            if (err) {
                file->flags |= LFS_F_ERRED;
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;

        lfs_alloc_ack(lfs);
    }

    file->flags &= ~LFS_F_ERRED;
    return size;
}

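/*
 * Usage sketch (editor's addition): appending with LFS_O_APPEND. As the code
 * above shows, writes past the current end of file are zero-filled first, and
 * an inline file that outgrows inline_size is transparently relocated to its
 * own block chain. Assumes a mounted lfs_t named `lfs`; the "counter" path is
 * hypothetical.
 *
 *     lfs_file_t file;
 *     int err = lfs_file_open(&lfs, &file, "counter",
 *             LFS_O_RDWR | LFS_O_CREAT | LFS_O_APPEND);
 *     if (err) {
 *         return err;
 *     }
 *
 *     uint32_t tick = 42;
 *     lfs_ssize_t res = lfs_file_write(&lfs, &file, &tick, sizeof(tick));
 *     if (res < 0) {
 *         lfs_file_close(&lfs, &file);
 *         return res;
 *     }
 *
 *     err = lfs_file_close(&lfs, &file);
 */
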
lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
        lfs_soff_t off, int whence) {
    // write out everything beforehand, may be noop if rdonly
    int err = lfs_file_flush(lfs, file);
    if (err) {
        return err;
    }

    // update pos
    if (whence == LFS_SEEK_SET) {
        file->pos = off;
    } else if (whence == LFS_SEEK_CUR) {
        if (off < 0 && (lfs_off_t)-off > file->pos) {
            return LFS_ERR_INVAL;
        }

        file->pos = file->pos + off;
    } else if (whence == LFS_SEEK_END) {
        if (off < 0 && (lfs_off_t)-off > file->size) {
            return LFS_ERR_INVAL;
        }

        file->pos = file->size + off;
    }

    return file->pos;
}

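/*
 * Usage sketch (editor's addition): random access with seek/tell. lfs_file_seek
 * flushes any pending writes first and returns the new position (or a negative
 * error); seeking past the end is allowed, and the gap is only zero-filled once
 * something is actually written there.
 *
 *     lfs_soff_t pos = lfs_file_seek(&lfs, &file, 16, LFS_SEEK_SET);
 *     if (pos < 0) {
 *         return pos;
 *     }
 *
 *     pos = lfs_file_seek(&lfs, &file, -4, LFS_SEEK_END);  // 4 bytes from end
 *     lfs_soff_t here = lfs_file_tell(&lfs, &file);        // same value as pos
 */
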
int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
    if ((file->flags & 3) == LFS_O_RDONLY) {
        return LFS_ERR_BADF;
    }

    lfs_off_t oldsize = lfs_file_size(lfs, file);
    if (size < oldsize) {
        // need to flush since directly changing metadata
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }

        // lookup new head in ctz skip list
        err = lfs_ctz_find(lfs, &file->cache, NULL,
                file->head, file->size,
                size, &file->head, &(lfs_off_t){0});
        if (err) {
            return err;
        }

        file->size = size;
        file->flags |= LFS_F_DIRTY;
    } else if (size > oldsize) {
        lfs_off_t pos = file->pos;

        // flush+seek if not already at end
        if (file->pos != oldsize) {
            int err = lfs_file_seek(lfs, file, 0, LFS_SEEK_END);
            if (err < 0) {
                return err;
            }
        }

        // fill with zeros
        while (file->pos < size) {
            lfs_ssize_t res = lfs_file_write(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return res;
            }
        }

        // restore pos
        int err = lfs_file_seek(lfs, file, pos, LFS_SEEK_SET);
        if (err < 0) {
            return err;
        }
    }

    return 0;
}

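/*
 * Usage sketch (editor's addition): truncating and extending. Shrinking only
 * rewires the CTZ head, while growing writes explicit zeros through
 * lfs_file_write, so growing by a large amount costs real program operations.
 *
 *     // keep only the first 128 bytes
 *     int err = lfs_file_truncate(&lfs, &file, 128);
 *     if (err) {
 *         return err;
 *     }
 *
 *     // grow back to 1 KiB; the new tail reads as zeros
 *     err = lfs_file_truncate(&lfs, &file, 1024);
 */
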
lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
    (void)lfs;
    return file->pos;
}

int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
    lfs_soff_t res = lfs_file_seek(lfs, file, 0, LFS_SEEK_SET);
    if (res < 0) {
        return res;
    }

    return 0;
}

lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
    (void)lfs;
    if (file->flags & LFS_F_WRITING) {
        return lfs_max(file->pos, file->size);
    } else {
        return file->size;
    }
}

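/*
 * Usage sketch (editor's addition): querying a handle. lfs_file_tell and
 * lfs_file_size only look at in-RAM state (size accounts for writes that have
 * not been flushed yet), while lfs_file_rewind goes through seek and so may
 * flush pending writes.
 *
 *     lfs_soff_t size = lfs_file_size(&lfs, &file);
 *     lfs_soff_t pos  = lfs_file_tell(&lfs, &file);
 *     if (pos != 0) {
 *         int err = lfs_file_rewind(&lfs, &file);  // back to offset 0
 *         if (err) {
 *             return err;
 *         }
 *     }
 */
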
int lfs_file_getattrs(lfs_t *lfs, lfs_file_t *file,
        const struct lfs_attr *attrs, int count) {
    // set to null in case we can't find the attrs (missing file?)
    for (int j = 0; j < count; j++) {
        memset(attrs[j].buffer, 0, attrs[j].size);
    }

    // load from disk if we haven't already been deleted
    if (!lfs_pairisnull(file->pair)) {
        lfs_dir_t cwd;
        int err = lfs_dir_fetch(lfs, &cwd, file->pair);
        if (err) {
            return err;
        }

        lfs_entry_t entry = {.off = file->pairoff};
        err = lfs_dir_get(lfs, &cwd, entry.off, &entry.d, 4);
        if (err) {
            return err;
        }
        entry.size = lfs_entry_size(&entry);

        err = lfs_dir_getattrs(lfs, &cwd, &entry, attrs, count);
        if (err) {
            return err;
        }
    }

    // override any attrs we have stored locally
    for (int i = 0; i < file->attrcount; i++) {
        for (int j = 0; j < count; j++) {
            if (attrs[j].type == file->attrs[i].type) {
                if (attrs[j].size < file->attrs[i].size) {
                    return LFS_ERR_RANGE;
                }

                memset(attrs[j].buffer, 0, attrs[j].size);
                memcpy(attrs[j].buffer,
                        file->attrs[i].buffer, file->attrs[i].size);
            }
        }
    }

    return 0;
}

int lfs_file_setattrs(lfs_t *lfs, lfs_file_t *file,
        const struct lfs_attr *attrs, int count) {
    if ((file->flags & 3) == LFS_O_RDONLY) {
        return LFS_ERR_BADF;
    }

    // at least make sure attributes fit
    if (!lfs_pairisnull(file->pair)) {
        lfs_dir_t cwd;
        int err = lfs_dir_fetch(lfs, &cwd, file->pair);
        if (err) {
            return err;
        }

        lfs_entry_t entry = {.off = file->pairoff};
        err = lfs_dir_get(lfs, &cwd, entry.off, &entry.d, 4);
        if (err) {
            return err;
        }
        entry.size = lfs_entry_size(&entry);

        lfs_ssize_t res = lfs_dir_checkattrs(lfs, &cwd, &entry, attrs, count);
        if (res < 0) {
            return res;
        }
    }

    // just tack to the file, will be written at sync time
    file->attrs = attrs;
    file->attrcount = count;
    file->flags |= LFS_F_DIRTY;

    return 0;
}

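/*
 * Usage sketch (editor's addition): attaching a custom attribute to an open
 * file so it is committed atomically with the file data on the next
 * lfs_file_sync/close. The attribute type 't' and the timestamp payload are
 * hypothetical; note that only the attrs pointer is stored, so the array must
 * stay valid until the file is synced.
 *
 *     uint32_t timestamp = 1525212345;
 *     struct lfs_attr attrs[] = {
 *         {.type = 't', .buffer = &timestamp, .size = sizeof(timestamp)},
 *     };
 *
 *     int err = lfs_file_setattrs(&lfs, &file, attrs, 1);
 *     if (err) {
 *         return err;
 *     }
 *
 *     err = lfs_file_sync(&lfs, &file);  // data + attribute in one commit
 */
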

/// General fs operations ///
int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
    lfs_dir_t cwd;
    int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
    if (err) {
        return err;
    }

    lfs_entry_t entry;
    err = lfs_dir_find(lfs, &cwd, &entry, &path);
    if (err) {
        return err;
    }

    return lfs_dir_getinfo(lfs, &cwd, &entry, info);
}

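/*
 * Usage sketch (editor's addition): checking whether a path exists and what it
 * is. Assumes struct lfs_info carries at least type, size, and name fields as
 * declared in lfs.h; the path is hypothetical.
 *
 *     struct lfs_info info;
 *     int err = lfs_stat(&lfs, "data/config", &info);
 *     if (err == LFS_ERR_NOENT) {
 *         // not there yet
 *     } else if (err) {
 *         return err;
 *     } else if (info.type == LFS_TYPE_REG) {
 *         // regular file, info.size bytes long
 *     }
 */
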
int lfs_remove(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    if (!lfs->deorphaned) {
        int err = lfs_deorphan(lfs);
        if (err) {
            return err;
        }
    }

    lfs_dir_t cwd;
    int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
    if (err) {
        return err;
    }

    lfs_entry_t entry;
    err = lfs_dir_find(lfs, &cwd, &entry, &path);
    if (err) {
        return err;
    }

    lfs_dir_t dir;
    if ((0xf & entry.d.type) == LFS_TYPE_DIR) {
        // must be empty before removal, checking size
        // without masking top bit checks for any case where
        // dir is not empty
        err = lfs_dir_fetch(lfs, &dir, entry.d.u.dir);
        if (err) {
            return err;
        } else if (dir.d.size != sizeof(dir.d)+4) {
            return LFS_ERR_NOTEMPTY;
        }
    }

    // remove the entry
    err = lfs_dir_set(lfs, &cwd, &entry, (struct lfs_region[]){
            {LFS_FROM_MEM, 0, entry.size, NULL, 0}}, 1);
    if (err) {
        return err;
    }

    // if we were a directory, find pred, replace tail
    if ((0xf & entry.d.type) == LFS_TYPE_DIR) {
        int res = lfs_pred(lfs, dir.pair, &cwd);
        if (res < 0) {
            return res;
        }

        LFS_ASSERT(res); // must have pred
        cwd.d.tail[0] = dir.d.tail[0];
        cwd.d.tail[1] = dir.d.tail[1];

        err = lfs_dir_commit(lfs, &cwd, NULL, 0);
        if (err) {
            return err;
        }
    }

    return 0;
}

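/*
 * Usage sketch (editor's addition): removing a file and a directory. A
 * directory must be empty, otherwise LFS_ERR_NOTEMPTY is returned as checked
 * above; the paths are hypothetical.
 *
 *     int err = lfs_remove(&lfs, "old.log");
 *     if (err && err != LFS_ERR_NOENT) {
 *         return err;
 *     }
 *
 *     err = lfs_remove(&lfs, "tmp");  // a directory
 *     if (err == LFS_ERR_NOTEMPTY) {
 *         // still has entries; remove them first
 *     }
 */
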
int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
    // deorphan if we haven't yet, needed at most once after poweron
    if (!lfs->deorphaned) {
        int err = lfs_deorphan(lfs);
        if (err) {
            return err;
        }
    }

    // find old entry
    lfs_dir_t oldcwd;
    int err = lfs_dir_fetch(lfs, &oldcwd, lfs->root);
    if (err) {
        return err;
    }

    lfs_entry_t oldentry;
    err = lfs_dir_find(lfs, &oldcwd, &oldentry, &oldpath);
    if (err) {
        return err;
    }

    // allocate new entry
    lfs_dir_t newcwd;
    err = lfs_dir_fetch(lfs, &newcwd, lfs->root);
    if (err) {
        return err;
    }

    lfs_entry_t preventry;
    err = lfs_dir_find(lfs, &newcwd, &preventry, &newpath);
    if (err && (err != LFS_ERR_NOENT || strchr(newpath, '/') != NULL)) {
        return err;
    }

    bool prevexists = (err != LFS_ERR_NOENT);
    bool samepair = (lfs_paircmp(oldcwd.pair, newcwd.pair) == 0);

    // check that name fits
    lfs_size_t nlen = strlen(newpath);
    if (nlen > lfs->name_size) {
        return LFS_ERR_NAMETOOLONG;
    }

    if (oldentry.size - oldentry.d.nlen + nlen > lfs->cfg->block_size) {
        return LFS_ERR_NOSPC;
    }

    // must have same type
    if (prevexists && preventry.d.type != oldentry.d.type) {
        return LFS_ERR_ISDIR;
    }

    lfs_dir_t dir;
    if (prevexists && (0xf & preventry.d.type) == LFS_TYPE_DIR) {
        // must be empty before removal, checking size
        // without masking top bit checks for any case where
        // dir is not empty
        err = lfs_dir_fetch(lfs, &dir, preventry.d.u.dir);
        if (err) {
            return err;
        } else if (dir.d.size != sizeof(dir.d)+4) {
            return LFS_ERR_NOTEMPTY;
        }
    }

    // mark as moving
    oldentry.d.type |= LFS_STRUCT_MOVED;
    err = lfs_dir_set(lfs, &oldcwd, &oldentry, (struct lfs_region[]){
            {LFS_FROM_MEM, 0, 1, &oldentry.d.type, 1}}, 1);
    oldentry.d.type &= ~LFS_STRUCT_MOVED;
    if (err) {
        return err;
    }

    // update pair if newcwd == oldcwd
    if (samepair) {
        newcwd = oldcwd;
    }

    // move to new location
    lfs_entry_t newentry = preventry;
    newentry.d = oldentry.d;
    newentry.d.type &= ~LFS_STRUCT_MOVED;
    newentry.d.nlen = nlen;
    newentry.size = prevexists ? preventry.size : 0;

    lfs_size_t newsize = oldentry.size - oldentry.d.nlen + newentry.d.nlen;
    err = lfs_dir_set(lfs, &newcwd, &newentry, (struct lfs_region[]){
            {LFS_FROM_REGION, 0, prevexists ? preventry.size : 0,
                &(struct lfs_region_region){
                    oldcwd.pair[0], oldentry.off, (struct lfs_region[]){
                        {LFS_FROM_MEM, 0, 4, &newentry.d, 4},
                        {LFS_FROM_MEM, newsize-nlen, 0, newpath, nlen}}, 2},
                newsize}}, 1);
    if (err) {
        return err;
    }

    // update pair if newcwd == oldcwd
    if (samepair) {
        oldcwd = newcwd;
    }

    // remove old entry
    err = lfs_dir_set(lfs, &oldcwd, &oldentry, (struct lfs_region[]){
            {LFS_FROM_MEM, 0, oldentry.size, NULL, 0}}, 1);
    if (err) {
        return err;
    }

    // if we were a directory, find pred, replace tail
    if (prevexists && (0xf & preventry.d.type) == LFS_TYPE_DIR) {
        int res = lfs_pred(lfs, dir.pair, &newcwd);
        if (res < 0) {
            return res;
        }

        LFS_ASSERT(res); // must have pred
        newcwd.d.tail[0] = dir.d.tail[0];
        newcwd.d.tail[1] = dir.d.tail[1];

        err = lfs_dir_commit(lfs, &newcwd, NULL, 0);
        if (err) {
            return err;
        }
    }

    return 0;
}

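/*
 * Usage sketch (editor's addition): the usual atomic-update pattern built on
 * rename. The entry is first tagged LFS_STRUCT_MOVED, copied to its new
 * location, then removed, so a power loss at any point should leave either the
 * old or the new name resolvable after the next deorphan pass. Paths are
 * hypothetical.
 *
 *     // write the new contents to a temporary name first...
 *     // ...then atomically replace the old file
 *     int err = lfs_rename(&lfs, "config.tmp", "config");
 *     if (err) {
 *         return err;
 *     }
 */
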
int lfs_getattrs(lfs_t *lfs, const char *path,
        const struct lfs_attr *attrs, int count) {
    lfs_dir_t cwd;
    int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
    if (err) {
        return err;
    }

    lfs_entry_t entry;
    err = lfs_dir_find(lfs, &cwd, &entry, &path);
    if (err) {
        return err;
    }

    return lfs_dir_getattrs(lfs, &cwd, &entry, attrs, count);
}

int lfs_setattrs(lfs_t *lfs, const char *path,
        const struct lfs_attr *attrs, int count) {
    lfs_dir_t cwd;
    int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
    if (err) {
        return err;
    }

    lfs_entry_t entry;
    err = lfs_dir_find(lfs, &cwd, &entry, &path);
    if (err) {
        return err;
    }

    return lfs_dir_setattrs(lfs, &cwd, &entry, attrs, count);
}

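/*
 * Usage sketch (editor's addition): path-based attribute access. Both calls
 * take an array so several attributes can be read or committed in one
 * directory update. The attribute types 'v' and 'u' are hypothetical.
 *
 *     uint8_t version = 2;
 *     uint32_t uid = 0;
 *     struct lfs_attr attrs[] = {
 *         {.type = 'v', .buffer = &version, .size = sizeof(version)},
 *         {.type = 'u', .buffer = &uid,     .size = sizeof(uid)},
 *     };
 *
 *     int err = lfs_setattrs(&lfs, "config", attrs, 2);
 *     if (err) {
 *         return err;
 *     }
 *
 *     err = lfs_getattrs(&lfs, "config", attrs, 2);  // read them back
 */
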

/// Filesystem operations ///
static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
    lfs->cfg = cfg;

    // setup read cache
    lfs->rcache.block = 0xffffffff;
    if (lfs->cfg->read_buffer) {
        lfs->rcache.buffer = lfs->cfg->read_buffer;
    } else {
        lfs->rcache.buffer = lfs_malloc(lfs->cfg->read_size);
        if (!lfs->rcache.buffer) {
            return LFS_ERR_NOMEM;
        }
    }

    // setup program cache
    lfs->pcache.block = 0xffffffff;
    if (lfs->cfg->prog_buffer) {
        lfs->pcache.buffer = lfs->cfg->prog_buffer;
    } else {
        lfs->pcache.buffer = lfs_malloc(lfs->cfg->prog_size);
        if (!lfs->pcache.buffer) {
            return LFS_ERR_NOMEM;
        }
    }

    // setup lookahead, round down to nearest 32-bits
    LFS_ASSERT(lfs->cfg->lookahead % 32 == 0);
    LFS_ASSERT(lfs->cfg->lookahead > 0);
    if (lfs->cfg->lookahead_buffer) {
        lfs->free.buffer = lfs->cfg->lookahead_buffer;
    } else {
        lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead/8);
        if (!lfs->free.buffer) {
            return LFS_ERR_NOMEM;
        }
    }

    // check that the block size is a multiple of the program size,
    // and the program size is a multiple of the read size
    LFS_ASSERT(lfs->cfg->prog_size % lfs->cfg->read_size == 0);
    LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->prog_size == 0);

    // check that the block size is large enough to fit ctz pointers
    LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
            <= lfs->cfg->block_size);

    // check that the size limits are sane
    LFS_ASSERT(lfs->cfg->inline_size <= LFS_INLINE_MAX);
    LFS_ASSERT(lfs->cfg->inline_size <= lfs->cfg->read_size);
    lfs->inline_size = lfs->cfg->inline_size;
    if (!lfs->inline_size) {
        lfs->inline_size = lfs_min(LFS_INLINE_MAX, lfs->cfg->read_size);
    }

    LFS_ASSERT(lfs->cfg->attrs_size <= LFS_ATTRS_MAX);
    lfs->attrs_size = lfs->cfg->attrs_size;
    if (!lfs->attrs_size) {
        lfs->attrs_size = LFS_ATTRS_MAX;
    }

    LFS_ASSERT(lfs->cfg->name_size <= LFS_NAME_MAX);
    lfs->name_size = lfs->cfg->name_size;
    if (!lfs->name_size) {
        lfs->name_size = LFS_NAME_MAX;
    }

    // setup default state
    lfs->root[0] = 0xffffffff;
    lfs->root[1] = 0xffffffff;
    lfs->files = NULL;
    lfs->dirs = NULL;
    lfs->deorphaned = false;

    return 0;
}

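/*
 * Configuration sketch (editor's addition): a struct lfs_config consistent
 * with the checks in lfs_init above. The block device callbacks
 * (user_read/user_prog/user_erase/user_sync) are hypothetical user functions
 * whose signatures must match the declarations in lfs.h; the geometry is just
 * an example for a small flash part.
 *
 *     const struct lfs_config cfg = {
 *         .read  = user_read,
 *         .prog  = user_prog,
 *         .erase = user_erase,
 *         .sync  = user_sync,
 *
 *         .read_size   = 16,    // prog_size must be a multiple of this
 *         .prog_size   = 16,
 *         .block_size  = 4096,  // must be a multiple of prog_size
 *         .block_count = 128,
 *         .lookahead   = 128,   // must be a nonzero multiple of 32
 *     };
 *
 *     // leaving inline_size/attrs_size/name_size at 0 picks the defaults
 *     // derived above (read_size-bounded inline files, LFS_ATTRS_MAX,
 *     // LFS_NAME_MAX)
 */
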
static int lfs_deinit(lfs_t *lfs) {
    // free allocated memory
    if (!lfs->cfg->read_buffer) {
        lfs_free(lfs->rcache.buffer);
    }

    if (!lfs->cfg->prog_buffer) {
        lfs_free(lfs->pcache.buffer);
    }

    if (!lfs->cfg->lookahead_buffer) {
        lfs_free(lfs->free.buffer);
    }

    return 0;
}

int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_init(lfs, cfg);
    if (err) {
        return err;
    }

    // create free lookahead
    memset(lfs->free.buffer, 0, lfs->cfg->lookahead/8);
    lfs->free.off = 0;
    lfs->free.size = lfs_min(lfs->cfg->lookahead, lfs->cfg->block_count);
    lfs->free.i = 0;
    lfs_alloc_ack(lfs);

    // create superblock dir
    lfs_dir_t_ dir;
    err = lfs_dir_alloc_(lfs, &dir,
            (const lfs_block_t[2]){0xffffffff, 0xffffffff});
    if (err) {
        return err;
    }

    // write root directory
    lfs_dir_t_ root;
    err = lfs_dir_alloc_(lfs, &root,
            (const lfs_block_t[2]){0xffffffff, 0xffffffff});
    if (err) {
        return err;
    }

    err = lfs_dir_commit_(lfs, &root, NULL, 0);
    if (err) {
        return err;
    }

    lfs->root[0] = root.pair[0];
    lfs->root[1] = root.pair[1];
    dir.tail[0] = lfs->root[0];
    dir.tail[1] = lfs->root[1];

    // write one superblock
    lfs_superblock_t_ superblock = {
        .root[0] = lfs->root[0],
        .root[1] = lfs->root[1],
        .magic = {"littlefs"},
        .version = LFS_DISK_VERSION,

        .block_size = lfs->cfg->block_size,
        .block_count = lfs->cfg->block_count,
        .inline_size = lfs->cfg->inline_size,
        .attrs_size = lfs->cfg->attrs_size,
        .name_size = lfs->cfg->name_size,
    };

    dir.count += 1;
    err = lfs_dir_commit_(lfs, &dir, (lfs_entry_t_[]){
            {lfs_mktag(LFS_TYPE_SUPERBLOCK_ | LFS_STRUCT_DIR_, 0,
                sizeof(superblock)), .u.buffer=&superblock}}, 1);
    if (err) {
        return err;
    }

    // sanity check that fetch works
    err = lfs_dir_fetch_(lfs, &dir, (const lfs_block_t[2]){0, 1});
    if (err) {
        return err;
    }

    return lfs_deinit(lfs);
}

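/*
 * Usage sketch (editor's addition): the usual first-boot flow. Try to mount,
 * and only format when the mount fails, since formatting erases the
 * filesystem. Assumes the `cfg` from the configuration sketch above and the
 * lfs_unmount declaration from lfs.h.
 *
 *     lfs_t lfs;
 *     int err = lfs_mount(&lfs, &cfg);
 *     if (err) {
 *         // no (valid) filesystem yet; create one and mount again
 *         err = lfs_format(&lfs, &cfg);
 *         if (err) {
 *             return err;
 *         }
 *         err = lfs_mount(&lfs, &cfg);
 *     }
 *
 *     // ... use the filesystem ...
 *     lfs_unmount(&lfs);
 */
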
2017-04-22 18:30:40 +00:00
|
|
|
int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
|
|
|
|
int err = lfs_init(lfs, cfg);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2017-03-13 00:41:08 +00:00
|
|
|
|
2017-04-22 19:56:12 +00:00
|
|
|
// setup free lookahead
|
2018-02-08 07:30:21 +00:00
|
|
|
lfs->free.off = 0;
|
2018-04-10 20:14:27 +00:00
|
|
|
lfs->free.size = 0;
|
|
|
|
lfs->free.i = 0;
|
2018-02-08 07:30:21 +00:00
|
|
|
lfs_alloc_ack(lfs);
|
2017-04-22 19:56:12 +00:00
|
|
|
|
|
|
|
// load superblock
|
2018-05-21 05:56:20 +00:00
|
|
|
lfs_dir_t_ dir;
|
2018-05-22 22:43:39 +00:00
|
|
|
err = lfs_dir_fetch_(lfs, &dir, (const lfs_block_t[2]){0, 1});
|
2018-04-01 20:36:29 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_CORRUPT) {
|
|
|
|
LFS_ERROR("Invalid superblock at %d %d", 0, 1);
|
|
|
|
}
|
2017-10-07 21:56:00 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
lfs_superblock_t_ superblock;
|
2018-05-22 22:43:39 +00:00
|
|
|
err = lfs_dir_getbuffer_(lfs, &dir,
|
|
|
|
0x7ffff000, lfs_mktag(LFS_TYPE_SUPERBLOCK_ | LFS_STRUCT_DIR_, 0,
|
|
|
|
sizeof(superblock)), &(lfs_entry_t_){.u.buffer=&superblock});
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err && err != LFS_ERR_RANGE) {
|
2018-04-01 20:36:29 +00:00
|
|
|
return err;
|
|
|
|
}
|
2018-03-23 21:11:36 +00:00
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
if (memcmp(superblock.magic, "littlefs", 8) != 0) {
|
2018-04-01 20:36:29 +00:00
|
|
|
LFS_ERROR("Invalid superblock at %d %d", 0, 1);
|
2017-04-24 03:10:16 +00:00
|
|
|
return LFS_ERR_CORRUPT;
|
2017-03-13 00:41:08 +00:00
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
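// note: the on-disk major version must match exactly; a newer minor
// version may encode features this code can't parse, an older one is fine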
uint16_t major_version = (0xffff & (superblock.version >> 16));
|
|
|
|
uint16_t minor_version = (0xffff & (superblock.version >> 0));
|
2018-01-26 20:26:25 +00:00
|
|
|
if ((major_version != LFS_DISK_VERSION_MAJOR ||
|
|
|
|
minor_version > LFS_DISK_VERSION_MINOR)) {
|
|
|
|
LFS_ERROR("Invalid version %d.%d", major_version, minor_version);
|
2017-04-24 03:10:16 +00:00
|
|
|
return LFS_ERR_INVAL;
|
2017-04-18 03:27:06 +00:00
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
if (superblock.inline_size) {
|
|
|
|
if (superblock.inline_size > lfs->inline_size) {
|
2018-04-01 20:36:29 +00:00
|
|
|
LFS_ERROR("Unsupported inline size (%d > %d)",
|
2018-05-21 05:56:20 +00:00
|
|
|
superblock.inline_size, lfs->inline_size);
|
2018-04-01 20:36:29 +00:00
|
|
|
return LFS_ERR_INVAL;
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
lfs->inline_size = superblock.inline_size;
|
2018-04-01 20:36:29 +00:00
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
if (superblock.attrs_size) {
|
|
|
|
if (superblock.attrs_size > lfs->attrs_size) {
|
2018-04-01 20:36:29 +00:00
|
|
|
LFS_ERROR("Unsupported attrs size (%d > %d)",
|
2018-05-21 05:56:20 +00:00
|
|
|
superblock.attrs_size, lfs->attrs_size);
|
2018-04-01 20:36:29 +00:00
|
|
|
return LFS_ERR_INVAL;
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
lfs->attrs_size = superblock.attrs_size;
|
2018-04-01 20:36:29 +00:00
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
if (superblock.name_size) {
|
|
|
|
if (superblock.name_size > lfs->name_size) {
|
2018-04-01 20:36:29 +00:00
|
|
|
LFS_ERROR("Unsupported name size (%d > %d)",
|
2018-05-21 05:56:20 +00:00
|
|
|
superblock.name_size, lfs->name_size);
|
2018-04-01 20:36:29 +00:00
|
|
|
return LFS_ERR_INVAL;
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
lfs->name_size = superblock.name_size;
|
2018-04-01 20:36:29 +00:00
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
lfs->root[0] = superblock.root[0];
|
|
|
|
lfs->root[1] = superblock.root[1];
|
2018-04-01 20:36:29 +00:00
|
|
|
|
2017-10-07 21:56:00 +00:00
|
|
|
return 0;
|
2017-04-01 15:44:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int lfs_unmount(lfs_t *lfs) {
|
2017-04-22 18:30:40 +00:00
|
|
|
return lfs_deinit(lfs);
|
2017-04-01 15:44:17 +00:00
|
|
|
}
|
|
|
|
|
2017-04-24 02:40:03 +00:00
|
|
|
|
2018-04-08 21:58:12 +00:00
|
|
|
/// Internal filesystem operations ///
|
2018-05-21 05:56:20 +00:00
|
|
|
int lfs_traverse_(lfs_t *lfs,
|
|
|
|
int (*cb)(lfs_t *lfs, void *data, lfs_block_t block), void *data) {
|
|
|
|
if (lfs_pairisnull(lfs->root)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// iterate over metadata pairs
|
|
|
|
lfs_dir_t_ dir = {.tail = {0, 1}};
|
|
|
|
while (!lfs_pairisnull(dir.tail)) {
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
int err = cb(lfs, data, dir.tail[i]);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// iterate through ids in directory
|
2018-05-22 22:43:39 +00:00
|
|
|
int err = lfs_dir_fetch_(lfs, &dir, dir.tail);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (int i = 0; i < dir.count; i++) {
|
2018-05-22 22:43:39 +00:00
|
|
|
lfs_entry_t_ entry;
|
|
|
|
int err = lfs_dir_getentry_(lfs, &dir,
|
|
|
|
0x701ff000, lfs_mktag(LFS_TYPE_REG_, i, 8), &entry);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_NOENT) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (lfs_tag_struct(entry.tag) == LFS_STRUCT_CTZ_) {
|
|
|
|
// TODO
|
|
|
|
// err = lfs_ctz_traverse(lfs, &lfs->rcache, NULL,
|
|
|
|
// entry.d.u.file.head, entry.d.u.file.size, cb, data);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// iterate over any open files
|
|
|
|
for (lfs_file_t *f = lfs->files; f; f = f->next) {
|
|
|
|
if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
|
|
|
|
// int err = lfs_ctz_traverse(lfs, &lfs->rcache, &f->cache,
|
|
|
|
// f->head, f->size, cb, data);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
|
|
|
|
// int err = lfs_ctz_traverse(lfs, &lfs->rcache, &f->cache,
|
|
|
|
// f->block, f->pos, cb, data);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-14 22:33:36 +00:00
|
|
|
int lfs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) {
|
2017-05-14 17:01:45 +00:00
|
|
|
if (lfs_pairisnull(lfs->root)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-01 15:44:17 +00:00
|
|
|
// iterate over metadata pairs
|
|
|
|
lfs_block_t cwd[2] = {0, 1};
|
|
|
|
|
|
|
|
while (true) {
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
int err = cb(data, cwd[i]);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-16 01:58:29 +00:00
|
|
|
lfs_dir_t dir;
|
2017-04-01 15:44:17 +00:00
|
|
|
int err = lfs_dir_fetch(lfs, &dir, cwd);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-04-01 17:23:15 +00:00
|
|
|
// iterate over contents
|
2018-03-16 01:58:29 +00:00
|
|
|
lfs_entry_t entry;
|
2017-06-24 01:03:44 +00:00
|
|
|
while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) {
|
2018-03-23 23:35:55 +00:00
|
|
|
err = lfs_dir_get(lfs, &dir,
|
|
|
|
dir.off, &entry.d, sizeof(entry.d));
|
2018-02-02 11:58:43 +00:00
|
|
|
lfs_entry_fromle32(&entry.d);
|
2017-04-01 15:44:17 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-04-03 13:28:09 +00:00
|
|
|
dir.off += lfs_entry_size(&entry);
|
2018-03-03 16:26:06 +00:00
|
|
|
if ((0x70 & entry.d.type) == LFS_STRUCT_CTZ) {
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_ctz_traverse(lfs, &lfs->rcache, NULL,
|
2017-04-29 15:22:01 +00:00
|
|
|
entry.d.u.file.head, entry.d.u.file.size, cb, data);
|
2017-04-24 04:39:50 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
2017-04-01 15:44:17 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cwd[0] = dir.d.tail[0];
|
|
|
|
cwd[1] = dir.d.tail[1];
|
|
|
|
|
2017-04-29 15:22:01 +00:00
|
|
|
if (lfs_pairisnull(cwd)) {
|
|
|
|
break;
|
2017-04-01 15:44:17 +00:00
|
|
|
}
|
|
|
|
}
|
2017-04-29 15:22:01 +00:00
|
|
|
|
|
|
|
// iterate over any open files
|
|
|
|
for (lfs_file_t *f = lfs->files; f; f = f->next) {
|
2018-03-19 01:36:48 +00:00
|
|
|
if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
|
2017-10-18 05:41:43 +00:00
|
|
|
int err = lfs_ctz_traverse(lfs, &lfs->rcache, &f->cache,
|
2017-04-30 16:19:37 +00:00
|
|
|
f->head, f->size, cb, data);
|
2017-04-29 15:22:01 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-19 01:36:48 +00:00
|
|
|
if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
|
2017-10-18 05:41:43 +00:00
|
|
|
int err = lfs_ctz_traverse(lfs, &lfs->rcache, &f->cache,
|
2017-04-30 16:19:37 +00:00
|
|
|
f->block, f->pos, cb, data);
|
2017-04-29 15:22:01 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-02-02 11:58:43 +00:00
|
|
|
|
2017-04-29 15:22:01 +00:00
|
|
|
return 0;
|
2017-04-01 15:44:17 +00:00
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
static int lfs_pred_(lfs_t *lfs, const lfs_block_t pair[2], lfs_dir_t_ *pdir) {
|
|
|
|
pdir->tail[0] = 0;
|
|
|
|
pdir->tail[1] = 1;
|
|
|
|
|
|
|
|
// iterate over all directory entries
|
|
|
|
while (!lfs_pairisnull(pdir->tail)) {
|
|
|
|
if (lfs_paircmp(pdir->tail, pair) == 0) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-05-22 22:43:39 +00:00
|
|
|
int err = lfs_dir_fetch_(lfs, pdir, pdir->tail);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
static int lfs_pred(lfs_t *lfs, const lfs_block_t dir[2], lfs_dir_t *pdir) {
|
|
|
|
if (lfs_pairisnull(lfs->root)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
// iterate directories
|
2017-05-14 17:01:45 +00:00
|
|
|
int err = lfs_dir_fetch(lfs, pdir, (const lfs_block_t[2]){0, 1});
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2017-04-01 17:23:15 +00:00
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
while (!lfs_pairisnull(pdir->d.tail)) {
|
|
|
|
if (lfs_paircmp(pdir->d.tail, dir) == 0) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_dir_fetch(lfs, pdir, pdir->d.tail);
|
2017-05-14 17:01:45 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
static int lfs_parent_(lfs_t *lfs, const lfs_block_t pair[2],
|
|
|
|
lfs_dir_t_ *parent, lfs_entry_t_ *entry) {
|
|
|
|
parent->tail[0] = 0;
|
|
|
|
parent->tail[1] = 1;
|
|
|
|
|
|
|
|
// iterate over all directory entries
|
|
|
|
while (!lfs_pairisnull(parent->tail)) {
|
2018-05-22 22:43:39 +00:00
|
|
|
int err = lfs_dir_fetch_(lfs, parent, parent->tail);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (int i = 0; i < parent->count; i++) {
|
2018-05-22 22:43:39 +00:00
|
|
|
int err = lfs_dir_getentry_(lfs, parent,
|
|
|
|
0x43dff000, lfs_mktag(LFS_STRUCT_DIR_, i, 8), entry);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err && err != LFS_ERR_RANGE) {
|
|
|
|
if (err == LFS_ERR_NOENT) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (lfs_paircmp(entry->u.pair, pair) == 0) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
static int lfs_parent(lfs_t *lfs, const lfs_block_t dir[2],
|
|
|
|
lfs_dir_t *parent, lfs_entry_t *entry) {
|
|
|
|
if (lfs_pairisnull(lfs->root)) {
|
|
|
|
return 0;
|
|
|
|
}
|
2018-02-02 11:58:43 +00:00
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
parent->d.tail[0] = 0;
|
|
|
|
parent->d.tail[1] = 1;
|
|
|
|
|
|
|
|
// iterate over all directory entries
|
|
|
|
while (!lfs_pairisnull(parent->d.tail)) {
|
|
|
|
int err = lfs_dir_fetch(lfs, parent, parent->d.tail);
|
2017-04-01 17:23:15 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-04-14 22:33:36 +00:00
|
|
|
while (true) {
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_dir_next(lfs, parent, entry);
|
2017-04-24 03:10:16 +00:00
|
|
|
if (err && err != LFS_ERR_NOENT) {
|
2017-04-14 22:33:36 +00:00
|
|
|
return err;
|
|
|
|
}
|
2017-04-01 17:23:15 +00:00
|
|
|
|
2017-04-24 03:10:16 +00:00
|
|
|
if (err == LFS_ERR_NOENT) {
|
2017-04-14 22:33:36 +00:00
|
|
|
break;
|
|
|
|
}
|
2017-04-01 17:23:15 +00:00
|
|
|
|
2018-03-03 16:26:06 +00:00
|
|
|
if (((0x70 & entry->d.type) == LFS_STRUCT_DIR) &&
|
2017-05-14 17:01:45 +00:00
|
|
|
lfs_paircmp(entry->d.u.dir, dir) == 0) {
|
2017-04-14 22:33:36 +00:00
|
|
|
return true;
|
2017-04-01 17:23:15 +00:00
|
|
|
}
|
|
|
|
}
|
2017-05-14 17:01:45 +00:00
|
|
|
}
|
2017-04-01 17:23:15 +00:00
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-05-21 05:56:20 +00:00
|
|
|
static int lfs_moved_(lfs_t *lfs, const lfs_block_t pair[2]) {
|
|
|
|
// skip superblock
|
|
|
|
lfs_dir_t_ dir;
|
2018-05-22 22:43:39 +00:00
|
|
|
int err = lfs_dir_fetch_(lfs, &dir, (const lfs_block_t[2]){0, 1});
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// iterate over all directory entries
|
|
|
|
while (!lfs_pairisnull(dir.tail)) {
|
2018-05-22 22:43:39 +00:00
|
|
|
int err = lfs_dir_fetch_(lfs, &dir, dir.tail);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (int i = 0; i < dir.count; i++) {
|
2018-05-22 22:43:39 +00:00
|
|
|
lfs_entry_t_ entry;
|
|
|
|
int err = lfs_dir_getentry_(lfs, &dir,
|
|
|
|
0x43dff000, lfs_mktag(LFS_STRUCT_DIR_, i, 8), &entry);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err) {
|
|
|
|
if (err == LFS_ERR_NOENT) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-05-22 22:43:39 +00:00
|
|
|
err = lfs_dir_get_(lfs, &dir,
|
|
|
|
0x7ffff000, lfs_mktag(LFS_TYPE_MOVE_, i, 0), NULL);
|
2018-05-21 05:56:20 +00:00
|
|
|
if (err != LFS_ERR_NOENT) {
|
|
|
|
if (!err) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (lfs_paircmp(entry.u.pair, pair) == 0) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-10-07 14:19:08 +00:00
|
|
|
static int lfs_moved(lfs_t *lfs, const void *e) {
|
|
|
|
if (lfs_pairisnull(lfs->root)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// skip superblock
|
|
|
|
lfs_dir_t cwd;
|
|
|
|
int err = lfs_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1});
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// iterate over all directory entries
|
|
|
|
lfs_entry_t entry;
|
|
|
|
while (!lfs_pairisnull(cwd.d.tail)) {
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_dir_fetch(lfs, &cwd, cwd.d.tail);
|
2017-10-07 14:19:08 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (true) {
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_dir_next(lfs, &cwd, &entry);
|
2017-10-07 14:19:08 +00:00
|
|
|
if (err && err != LFS_ERR_NOENT) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (err == LFS_ERR_NOENT) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2018-03-03 16:26:06 +00:00
|
|
|
if (!(LFS_STRUCT_MOVED & entry.d.type) &&
|
2017-10-07 14:19:08 +00:00
|
|
|
memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
static int lfs_relocate(lfs_t *lfs,
|
|
|
|
const lfs_block_t oldpair[2], const lfs_block_t newpair[2]) {
|
|
|
|
// find parent
|
|
|
|
lfs_dir_t parent;
|
|
|
|
lfs_entry_t entry;
|
|
|
|
int res = lfs_parent(lfs, oldpair, &parent, &entry);
|
|
|
|
if (res < 0) {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (res) {
|
|
|
|
// update disk, this creates a desync
|
|
|
|
entry.d.u.dir[0] = newpair[0];
|
|
|
|
entry.d.u.dir[1] = newpair[1];
|
2018-04-10 21:35:29 +00:00
|
|
|
lfs_entry_tole32(&entry.d);
|
Added internal lfs_dir_set, an umbrella to dir append/update/remove operations
This move was surprisingly complex, but offers the ultimate opportunity for
code reuse in terms of resizable entries. Instead of needing to provide
separate functions for adding and removing entries, adding and removing
entries can just be viewed as changing an entry's size to-and-from zero.
Unfortunately, it's not _quite_ that simple, since append and remove
hide some relatively complex operations for when directory blocks
overflow or need to be cleaned up.
However, with enough shoehorning, and a new committer type that allows
specifying recursive commit lists (is this now a push-down automaton?),
it does seem to be possible to shove all of the entry update logic into
a single function.
Sidenote, I switched back to an enum-based DSL, since the addition of a
recursive region opcode breaks the consistency of what needs to be
passed to the DSL callback functions. It's much simpler to handle each
opcode explicitly inside a recursive lfs_commit_region function.
2018-03-27 22:57:07 +00:00
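(To make the resize idea concrete: the update form appears directly below in lfs_relocate, while removal is the same lfs_dir_set call with the entry shrunk to zero bytes, exactly as lfs_deorphan does further down:)
// remove: replace the entry's entry.size bytes with nothing, i.e. resize to zero
err = lfs_dir_set(lfs, &dir, &entry, (struct lfs_region[]){
        {LFS_FROM_MEM, 0, entry.size, NULL, 0}}, 1);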
|
|
|
int err = lfs_dir_set(lfs, &parent, &entry, (struct lfs_region[]){
|
2018-04-08 09:23:23 +00:00
|
|
|
{LFS_FROM_MEM, 0, sizeof(entry.d),
|
|
|
|
&entry.d, sizeof(entry.d)}}, 1);
|
2017-05-14 17:01:45 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
2017-04-29 17:41:53 +00:00
|
|
|
}
|
2017-05-14 17:01:45 +00:00
|
|
|
|
|
|
|
// update internal root
|
|
|
|
if (lfs_paircmp(oldpair, lfs->root) == 0) {
|
|
|
|
LFS_DEBUG("Relocating root %d %d", newpair[0], newpair[1]);
|
|
|
|
lfs->root[0] = newpair[0];
|
|
|
|
lfs->root[1] = newpair[1];
|
|
|
|
}
|
|
|
|
|
|
|
|
// clean up bad block, which should now be a desync
|
|
|
|
return lfs_deorphan(lfs);
|
|
|
|
}
|
|
|
|
|
|
|
|
// find pred
|
|
|
|
res = lfs_pred(lfs, oldpair, &parent);
|
|
|
|
if (res < 0) {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (res) {
|
|
|
|
// just replace bad pair, no desync can occur
|
|
|
|
parent.d.tail[0] = newpair[0];
|
2017-10-07 21:56:00 +00:00
|
|
|
parent.d.tail[1] = newpair[1];
|
2017-05-14 17:01:45 +00:00
|
|
|
|
2018-04-03 13:28:09 +00:00
|
|
|
return lfs_dir_commit(lfs, &parent, NULL, 0);
|
2017-04-29 17:41:53 +00:00
|
|
|
}
|
2017-05-14 17:01:45 +00:00
|
|
|
|
|
|
|
// couldn't find dir, must be new
|
|
|
|
return 0;
|
2017-04-14 22:33:36 +00:00
|
|
|
}
|
2017-04-01 17:23:15 +00:00
|
|
|
|
2018-05-22 22:43:39 +00:00
|
|
|
// TODO use this in lfs_move?
|
2018-05-21 05:56:20 +00:00
|
|
|
int lfs_deorphan_check(lfs_t *lfs, void *p, lfs_entry_t_ entry) {
|
|
|
|
int16_t *id = p;
|
|
|
|
|
|
|
|
// TODO is this fine for only grabbing the last one?
|
|
|
|
// TODO should I also grab deletes? I should, move will always be last yay
|
|
|
|
if (lfs_tag_type(entry.tag) == LFS_TYPE_MOVE_) {
|
|
|
|
*id = lfs_tag_id(entry.tag);
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO handle unrelated deletes
|
|
|
|
if (lfs_tag_type(entry.tag) == LFS_TYPE_DROP_ &&
|
|
|
|
lfs_tag_id(entry.tag) == *id) {
|
|
|
|
*id = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int lfs_deorphan_(lfs_t *lfs) {
|
|
|
|
lfs->deorphaned = true;
|
|
|
|
if (lfs_pairisnull(lfs->root)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
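// seed pdir with split set so the first pass skips the orphan check
// (the superblock pair {0, 1} has no parent to check against)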
lfs_dir_t_ pdir = {.split = true};
|
|
|
|
lfs_dir_t_ dir = {.tail = {0, 1}};
|
|
|
|
|
|
|
|
// iterate over all directory entries
|
|
|
|
while (!lfs_pairisnull(dir.tail)) {
|
|
|
|
int16_t moveid = -1;
|
2018-05-22 22:43:39 +00:00
|
|
|
int err = lfs_dir_fetchwith_(lfs, &dir, dir.tail,
|
2018-05-21 05:56:20 +00:00
|
|
|
lfs_deorphan_check, &moveid);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
// check head blocks for orphans
|
|
|
|
if (!pdir.split) {
|
|
|
|
// check if we have a parent
|
|
|
|
lfs_dir_t_ parent;
|
|
|
|
lfs_entry_t_ entry;
|
|
|
|
int res = lfs_parent_(lfs, pdir.tail, &parent, &entry);
|
|
|
|
if (res < 0) {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!res) {
|
|
|
|
// we are an orphan
|
|
|
|
LFS_DEBUG("Found orphan %d %d",
|
|
|
|
pdir.tail[0], pdir.tail[1]);
|
|
|
|
|
|
|
|
pdir.tail[0] = dir.tail[0];
|
|
|
|
pdir.tail[1] = dir.tail[1];
|
2018-05-22 22:43:39 +00:00
|
|
|
err = lfs_dir_commit_(lfs, &pdir, &(lfs_entry_t_){
|
|
|
|
lfs_mktag(LFS_TYPE_SOFTTAIL_, 0x1ff, sizeof(pdir.tail)),
|
2018-05-21 05:56:20 +00:00
|
|
|
.u.buffer=pdir.tail}, 1);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!lfs_pairsync(entry.u.pair, pdir.tail)) {
|
|
|
|
// we have desynced
|
|
|
|
LFS_DEBUG("Found desync %d %d",
|
|
|
|
entry.u.pair[0], entry.u.pair[1]);
|
|
|
|
|
|
|
|
pdir.tail[0] = entry.u.pair[0];
|
|
|
|
pdir.tail[1] = entry.u.pair[1];
|
2018-05-22 22:43:39 +00:00
|
|
|
err = lfs_dir_commit_(lfs, &pdir, &(lfs_entry_t_){
|
|
|
|
lfs_mktag(LFS_TYPE_SOFTTAIL_, 0x1ff, sizeof(pdir.tail)),
|
2018-05-21 05:56:20 +00:00
|
|
|
.u.buffer=pdir.tail}, 1);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// check entries for moves
|
|
|
|
if (moveid >= 0) {
|
|
|
|
// TODO moves and stuff
|
|
|
|
// TODO need to load entry to find it
|
|
|
|
// // found moved entry
|
|
|
|
// int moved = lfs_moved(lfs, &entry.u);
|
|
|
|
// if (moved < 0) {
|
|
|
|
// return moved;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// if (moved) {
|
|
|
|
// LFS_DEBUG("Found move %d %d",
|
|
|
|
// entry.d.u.dir[0], entry.d.u.dir[1]);
|
|
|
|
// err = lfs_dir_set(lfs, &dir, &entry, (struct lfs_region[]){
|
|
|
|
// {LFS_FROM_MEM, 0, entry.size, NULL, 0}}, 1);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
// } else {
|
|
|
|
// LFS_DEBUG("Found partial move %d %d",
|
|
|
|
// entry.d.u.dir[0], entry.d.u.dir[1]);
|
|
|
|
// entry.d.type &= ~LFS_STRUCT_MOVED;
|
|
|
|
// err = lfs_dir_set(lfs, &dir, &entry, (struct lfs_region[]){
|
|
|
|
// {LFS_FROM_MEM, 0, 1, &entry.d, 1}}, 1);
|
|
|
|
// if (err) {
|
|
|
|
// return err;
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(&pdir, &dir, sizeof(pdir));
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-14 22:33:36 +00:00
|
|
|
int lfs_deorphan(lfs_t *lfs) {
|
2017-05-14 17:01:45 +00:00
|
|
|
lfs->deorphaned = true;
|
2017-10-07 21:56:00 +00:00
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
if (lfs_pairisnull(lfs->root)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-10-07 21:56:00 +00:00
|
|
|
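// the high bit of d.size marks a continued pair; seeding it here makes
// the first pass skip the orphan check for the superblock pair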
lfs_dir_t pdir = {.d.size = 0x80000000};
|
|
|
|
lfs_dir_t cwd = {.d.tail[0] = 0, .d.tail[1] = 1};
|
2017-04-14 22:33:36 +00:00
|
|
|
|
2017-10-07 21:56:00 +00:00
|
|
|
// iterate over all directory entries
|
|
|
|
while (!lfs_pairisnull(cwd.d.tail)) {
|
|
|
|
int err = lfs_dir_fetch(lfs, &cwd, cwd.d.tail);
|
2017-04-14 22:33:36 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-10-07 21:56:00 +00:00
|
|
|
// check head blocks for orphans
|
2017-04-29 17:41:53 +00:00
|
|
|
if (!(0x80000000 & pdir.d.size)) {
|
|
|
|
// check if we have a parent
|
2017-05-14 17:01:45 +00:00
|
|
|
lfs_dir_t parent;
|
|
|
|
lfs_entry_t entry;
|
|
|
|
int res = lfs_parent(lfs, pdir.d.tail, &parent, &entry);
|
|
|
|
if (res < 0) {
|
|
|
|
return res;
|
2017-04-29 17:41:53 +00:00
|
|
|
}
|
2017-04-14 22:33:36 +00:00
|
|
|
|
2017-05-14 17:01:45 +00:00
|
|
|
if (!res) {
|
2017-04-29 17:41:53 +00:00
|
|
|
// we are an orphan
|
2017-10-07 21:56:00 +00:00
|
|
|
LFS_DEBUG("Found orphan %d %d",
|
|
|
|
pdir.d.tail[0], pdir.d.tail[1]);
|
2017-04-14 22:33:36 +00:00
|
|
|
|
2017-10-07 21:56:00 +00:00
|
|
|
pdir.d.tail[0] = cwd.d.tail[0];
|
|
|
|
pdir.d.tail[1] = cwd.d.tail[1];
|
2017-04-14 22:33:36 +00:00
|
|
|
|
2018-04-03 13:28:09 +00:00
|
|
|
err = lfs_dir_commit(lfs, &pdir, NULL, 0);
|
2017-05-14 17:01:45 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!lfs_pairsync(entry.d.u.dir, pdir.d.tail)) {
|
|
|
|
// we have desynced
|
2017-10-07 21:56:00 +00:00
|
|
|
LFS_DEBUG("Found desync %d %d",
|
|
|
|
entry.d.u.dir[0], entry.d.u.dir[1]);
|
2017-05-14 17:01:45 +00:00
|
|
|
|
|
|
|
pdir.d.tail[0] = entry.d.u.dir[0];
|
|
|
|
pdir.d.tail[1] = entry.d.u.dir[1];
|
|
|
|
|
2018-04-03 13:28:09 +00:00
|
|
|
err = lfs_dir_commit(lfs, &pdir, NULL, 0);
|
2017-04-29 17:41:53 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2017-04-14 22:33:36 +00:00
|
|
|
|
2017-04-29 17:41:53 +00:00
|
|
|
break;
|
|
|
|
}
|
2017-04-14 22:33:36 +00:00
|
|
|
}
|
|
|
|
|
2017-10-07 21:56:00 +00:00
|
|
|
// check entries for moves
|
|
|
|
lfs_entry_t entry;
|
2017-10-07 14:19:08 +00:00
|
|
|
while (true) {
|
2018-01-29 19:53:28 +00:00
|
|
|
err = lfs_dir_next(lfs, &cwd, &entry);
|
2017-10-07 14:19:08 +00:00
|
|
|
if (err && err != LFS_ERR_NOENT) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (err == LFS_ERR_NOENT) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// found moved entry
|
2018-03-03 16:26:06 +00:00
|
|
|
if (entry.d.type & LFS_STRUCT_MOVED) {
|
2017-10-07 14:19:08 +00:00
|
|
|
int moved = lfs_moved(lfs, &entry.d.u);
|
|
|
|
if (moved < 0) {
|
|
|
|
return moved;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (moved) {
|
|
|
|
LFS_DEBUG("Found move %d %d",
|
|
|
|
entry.d.u.dir[0], entry.d.u.dir[1]);
|
2018-03-27 22:57:07 +00:00
|
|
|
err = lfs_dir_set(lfs, &cwd, &entry, (struct lfs_region[]){
|
2018-04-08 09:23:23 +00:00
|
|
|
{LFS_FROM_MEM, 0, entry.size, NULL, 0}}, 1);
|
2017-10-07 14:19:08 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
LFS_DEBUG("Found partial move %d %d",
|
|
|
|
entry.d.u.dir[0], entry.d.u.dir[1]);
|
2018-03-03 16:26:06 +00:00
|
|
|
entry.d.type &= ~LFS_STRUCT_MOVED;
|
2018-03-27 22:57:07 +00:00
|
|
|
err = lfs_dir_set(lfs, &cwd, &entry, (struct lfs_region[]){
|
2018-04-10 21:35:29 +00:00
|
|
|
{LFS_FROM_MEM, 0, 1, &entry.d, 1}}, 1);
|
2017-10-07 14:19:08 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-10-07 21:56:00 +00:00
|
|
|
|
|
|
|
memcpy(&pdir, &cwd, sizeof(pdir));
|
2017-10-07 14:19:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-04-08 21:58:12 +00:00
|
|
|
|
|
|
|
/// External filesystem operations ///
|
|
|
|
int lfs_fs_getattrs(lfs_t *lfs, const struct lfs_attr *attrs, int count) {
|
|
|
|
lfs_dir_t dir;
|
|
|
|
int err = lfs_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
lfs_entry_t entry = {.off = sizeof(dir.d)};
|
2018-04-10 21:35:29 +00:00
|
|
|
err = lfs_dir_get(lfs, &dir, entry.off, &entry.d, 4);
|
2018-04-08 21:58:12 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
entry.size = lfs_entry_size(&entry);
|
|
|
|
|
|
|
|
return lfs_dir_getattrs(lfs, &dir, &entry, attrs, count);
|
|
|
|
}
|
|
|
|
|
|
|
|
int lfs_fs_setattrs(lfs_t *lfs, const struct lfs_attr *attrs, int count) {
|
|
|
|
lfs_dir_t dir;
|
|
|
|
int err = lfs_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
lfs_entry_t entry = {.off = sizeof(dir.d)};
|
2018-04-10 21:35:29 +00:00
|
|
|
err = lfs_dir_get(lfs, &dir, entry.off, &entry.d, 4);
|
2018-04-08 21:58:12 +00:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
entry.size = lfs_entry_size(&entry);
|
|
|
|
|
|
|
|
return lfs_dir_setattrs(lfs, &dir, &entry, attrs, count);
|
|
|
|
}
|
2018-04-09 03:25:58 +00:00
|
|
|
|
|
|
|
static int lfs_fs_size_count(void *p, lfs_block_t block) {
|
|
|
|
lfs_size_t *size = p;
|
|
|
|
*size += 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
|
|
|
|
lfs_size_t size = 0;
|
|
|
|
int err = lfs_traverse(lfs, lfs_fs_size_count, &size);
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return size;
|
|
|
|
}
|