feat: update to v2.8.0

Signed-off-by: JKANG94 <446326515@qq.com>
JKANG94 2024-04-17 19:05:36 +08:00
parent 5eb76d10cf
commit 9bb47943d7
59 changed files with 26821 additions and 3720 deletions

30 .gitignore vendored

@ -4,11 +4,31 @@
*.a
*.ci
*.csv
*.t.*
*.b.*
*.gcno
*.gcda
*.perf
lfs
liblfs.a
# Testing things
blocks/
lfs
test.c
tests/*.toml.*
scripts/__pycache__
runners/test_runner
runners/bench_runner
lfs.code.csv
lfs.data.csv
lfs.stack.csv
lfs.structs.csv
lfs.cov.csv
lfs.perf.csv
lfs.perfbd.csv
lfs.test.csv
lfs.bench.csv
# Misc
tags
.gdb_history
scripts/__pycache__
# Historical, probably should remove at some point
tests/*.toml.*

621 Makefile

@ -1,173 +1,582 @@
ifdef BUILDDIR
# make sure BUILDDIR ends with a slash
override BUILDDIR := $(BUILDDIR)/
# bit of a hack, but we want to make sure BUILDDIR directory structure
# is correct before any commands
$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
$(BUILDDIR) \
$(BUILDDIR)bd \
$(BUILDDIR)tests))
endif
# overridable build dir, default is in-place
BUILDDIR ?= .
# overridable target/src/tools/flags/etc
ifneq ($(wildcard test.c main.c),)
TARGET ?= $(BUILDDIR)lfs
TARGET ?= $(BUILDDIR)/lfs
else
TARGET ?= $(BUILDDIR)lfs.a
TARGET ?= $(BUILDDIR)/liblfs.a
endif
CC ?= gcc
AR ?= ar
SIZE ?= size
CTAGS ?= ctags
NM ?= nm
OBJDUMP ?= objdump
LCOV ?= lcov
CC ?= gcc
AR ?= ar
SIZE ?= size
CTAGS ?= ctags
NM ?= nm
OBJDUMP ?= objdump
VALGRIND ?= valgrind
GDB ?= gdb
PERF ?= perf
SRC ?= $(wildcard *.c)
OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
DEP := $(SRC:%.c=$(BUILDDIR)%.d)
ASM := $(SRC:%.c=$(BUILDDIR)%.s)
CGI := $(SRC:%.c=$(BUILDDIR)%.ci)
SRC ?= $(filter-out $(wildcard *.t.* *.b.*),$(wildcard *.c))
OBJ := $(SRC:%.c=$(BUILDDIR)/%.o)
DEP := $(SRC:%.c=$(BUILDDIR)/%.d)
ASM := $(SRC:%.c=$(BUILDDIR)/%.s)
CI := $(SRC:%.c=$(BUILDDIR)/%.ci)
GCDA := $(SRC:%.c=$(BUILDDIR)/%.t.gcda)
TESTS ?= $(wildcard tests/*.toml)
TEST_SRC ?= $(SRC) \
$(filter-out $(wildcard bd/*.t.* bd/*.b.*),$(wildcard bd/*.c)) \
runners/test_runner.c
TEST_RUNNER ?= $(BUILDDIR)/runners/test_runner
TEST_A := $(TESTS:%.toml=$(BUILDDIR)/%.t.a.c) \
$(TEST_SRC:%.c=$(BUILDDIR)/%.t.a.c)
TEST_C := $(TEST_A:%.t.a.c=%.t.c)
TEST_OBJ := $(TEST_C:%.t.c=%.t.o)
TEST_DEP := $(TEST_C:%.t.c=%.t.d)
TEST_CI := $(TEST_C:%.t.c=%.t.ci)
TEST_GCNO := $(TEST_C:%.t.c=%.t.gcno)
TEST_GCDA := $(TEST_C:%.t.c=%.t.gcda)
TEST_PERF := $(TEST_RUNNER:%=%.perf)
TEST_TRACE := $(TEST_RUNNER:%=%.trace)
TEST_CSV := $(TEST_RUNNER:%=%.csv)
BENCHES ?= $(wildcard benches/*.toml)
BENCH_SRC ?= $(SRC) \
$(filter-out $(wildcard bd/*.t.* bd/*.b.*),$(wildcard bd/*.c)) \
runners/bench_runner.c
BENCH_RUNNER ?= $(BUILDDIR)/runners/bench_runner
BENCH_A := $(BENCHES:%.toml=$(BUILDDIR)/%.b.a.c) \
$(BENCH_SRC:%.c=$(BUILDDIR)/%.b.a.c)
BENCH_C := $(BENCH_A:%.b.a.c=%.b.c)
BENCH_OBJ := $(BENCH_C:%.b.c=%.b.o)
BENCH_DEP := $(BENCH_C:%.b.c=%.b.d)
BENCH_CI := $(BENCH_C:%.b.c=%.b.ci)
BENCH_GCNO := $(BENCH_C:%.b.c=%.b.gcno)
BENCH_GCDA := $(BENCH_C:%.b.c=%.b.gcda)
BENCH_PERF := $(BENCH_RUNNER:%=%.perf)
BENCH_TRACE := $(BENCH_RUNNER:%=%.trace)
BENCH_CSV := $(BENCH_RUNNER:%=%.csv)
CFLAGS += -fcallgraph-info=su
CFLAGS += -g3
CFLAGS += -I.
CFLAGS += -std=c99 -Wall -Wextra -pedantic
CFLAGS += -ftrack-macro-expansion=0
ifdef DEBUG
override CFLAGS += -O0
CFLAGS += -O0
else
override CFLAGS += -Os
CFLAGS += -Os
endif
ifdef TRACE
override CFLAGS += -DLFS_YES_TRACE
CFLAGS += -DLFS_YES_TRACE
endif
ifdef YES_COV
CFLAGS += --coverage
endif
ifdef YES_PERF
CFLAGS += -fno-omit-frame-pointer
endif
ifdef YES_PERFBD
CFLAGS += -fno-omit-frame-pointer
endif
override CFLAGS += -g3
override CFLAGS += -I.
override CFLAGS += -std=c99 -Wall -pedantic
override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef
ifdef VERBOSE
override TESTFLAGS += -v
override CALLSFLAGS += -v
override CODEFLAGS += -v
override DATAFLAGS += -v
override STACKFLAGS += -v
override STRUCTSFLAGS += -v
override COVERAGEFLAGS += -v
endif
ifdef EXEC
override TESTFLAGS += --exec="$(EXEC)"
endif
ifdef COVERAGE
override TESTFLAGS += --coverage
endif
ifdef BUILDDIR
override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
override CALLSFLAGS += --build-dir="$(BUILDDIR:/=)"
override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
override DATAFLAGS += --build-dir="$(BUILDDIR:/=)"
override STACKFLAGS += --build-dir="$(BUILDDIR:/=)"
override STRUCTSFLAGS += --build-dir="$(BUILDDIR:/=)"
override COVERAGEFLAGS += --build-dir="$(BUILDDIR:/=)"
CODEFLAGS += -v
DATAFLAGS += -v
STACKFLAGS += -v
STRUCTSFLAGS += -v
COVFLAGS += -v
PERFFLAGS += -v
PERFBDFLAGS += -v
endif
# forward -j flag
PERFFLAGS += $(filter -j%,$(MAKEFLAGS))
PERFBDFLAGS += $(filter -j%,$(MAKEFLAGS))
ifneq ($(NM),nm)
override CODEFLAGS += --nm-tool="$(NM)"
override DATAFLAGS += --nm-tool="$(NM)"
CODEFLAGS += --nm-path="$(NM)"
DATAFLAGS += --nm-path="$(NM)"
endif
ifneq ($(OBJDUMP),objdump)
override STRUCTSFLAGS += --objdump-tool="$(OBJDUMP)"
CODEFLAGS += --objdump-path="$(OBJDUMP)"
DATAFLAGS += --objdump-path="$(OBJDUMP)"
STRUCTSFLAGS += --objdump-path="$(OBJDUMP)"
PERFFLAGS += --objdump-path="$(OBJDUMP)"
PERFBDFLAGS += --objdump-path="$(OBJDUMP)"
endif
ifneq ($(PERF),perf)
PERFFLAGS += --perf-path="$(PERF)"
endif
TESTFLAGS += -b
BENCHFLAGS += -b
# forward -j flag
TESTFLAGS += $(filter -j%,$(MAKEFLAGS))
BENCHFLAGS += $(filter -j%,$(MAKEFLAGS))
ifdef YES_PERF
TESTFLAGS += -p $(TEST_PERF)
BENCHFLAGS += -p $(BENCH_PERF)
endif
ifdef YES_PERFBD
TESTFLAGS += -t $(TEST_TRACE) --trace-backtrace --trace-freq=100
endif
ifndef NO_PERFBD
BENCHFLAGS += -t $(BENCH_TRACE) --trace-backtrace --trace-freq=100
endif
ifdef YES_TESTMARKS
TESTFLAGS += -o $(TEST_CSV)
endif
ifndef NO_BENCHMARKS
BENCHFLAGS += -o $(BENCH_CSV)
endif
ifdef VERBOSE
TESTFLAGS += -v
TESTCFLAGS += -v
BENCHFLAGS += -v
BENCHCFLAGS += -v
endif
ifdef EXEC
TESTFLAGS += --exec="$(EXEC)"
BENCHFLAGS += --exec="$(EXEC)"
endif
ifneq ($(GDB),gdb)
TESTFLAGS += --gdb-path="$(GDB)"
BENCHFLAGS += --gdb-path="$(GDB)"
endif
ifneq ($(VALGRIND),valgrind)
TESTFLAGS += --valgrind-path="$(VALGRIND)"
BENCHFLAGS += --valgrind-path="$(VALGRIND)"
endif
ifneq ($(PERF),perf)
TESTFLAGS += --perf-path="$(PERF)"
BENCHFLAGS += --perf-path="$(PERF)"
endif
# this is a bit of a hack, but we want to make sure the BUILDDIR
# directory structure is correct before we run any commands
ifneq ($(BUILDDIR),.)
$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
$(addprefix $(BUILDDIR)/,$(dir \
$(SRC) \
$(TESTS) \
$(TEST_SRC) \
$(BENCHES) \
$(BENCH_SRC)))))
endif
# commands
## Build littlefs
.PHONY: all build
all build: $(TARGET)
## Build assembly files
.PHONY: asm
asm: $(ASM)
## Find the total size
.PHONY: size
size: $(OBJ)
$(SIZE) -t $^
## Generate a ctags file
.PHONY: tags
tags:
$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)
.PHONY: calls
calls: $(CGI)
./scripts/calls.py $^ $(CALLSFLAGS)
.PHONY: test
test:
./scripts/test.py $(TESTFLAGS)
.SECONDEXPANSION:
test%: tests/test$$(firstword $$(subst \#, ,%)).toml
./scripts/test.py $@ $(TESTFLAGS)
## Show this help text
.PHONY: help
help:
@$(strip awk '/^## / { \
sub(/^## /,""); \
getline rule; \
while (rule ~ /^(#|\.PHONY|ifdef|ifndef)/) getline rule; \
gsub(/:.*/, "", rule); \
printf " "" %-25s %s\n", rule, $$0 \
}' $(MAKEFILE_LIST))
## Find the per-function code size
.PHONY: code
code: $(OBJ)
./scripts/code.py $^ -S $(CODEFLAGS)
code: CODEFLAGS+=-S
code: $(OBJ) $(BUILDDIR)/lfs.code.csv
./scripts/code.py $(OBJ) $(CODEFLAGS)
## Compare per-function code size
.PHONY: code-diff
code-diff: $(OBJ)
./scripts/code.py $^ $(CODEFLAGS) -d $(BUILDDIR)/lfs.code.csv
## Find the per-function data size
.PHONY: data
data: $(OBJ)
./scripts/data.py $^ -S $(DATAFLAGS)
data: DATAFLAGS+=-S
data: $(OBJ) $(BUILDDIR)/lfs.data.csv
./scripts/data.py $(OBJ) $(DATAFLAGS)
## Compare per-function data size
.PHONY: data-diff
data-diff: $(OBJ)
./scripts/data.py $^ $(DATAFLAGS) -d $(BUILDDIR)/lfs.data.csv
## Find the per-function stack usage
.PHONY: stack
stack: $(CGI)
./scripts/stack.py $^ -S $(STACKFLAGS)
stack: STACKFLAGS+=-S
stack: $(CI) $(BUILDDIR)/lfs.stack.csv
./scripts/stack.py $(CI) $(STACKFLAGS)
## Compare per-function stack usage
.PHONY: stack-diff
stack-diff: $(CI)
./scripts/stack.py $^ $(STACKFLAGS) -d $(BUILDDIR)/lfs.stack.csv
## Find function sizes
.PHONY: funcs
funcs: SUMMARYFLAGS+=-S
funcs: \
$(BUILDDIR)/lfs.code.csv \
$(BUILDDIR)/lfs.data.csv \
$(BUILDDIR)/lfs.stack.csv
$(strip ./scripts/summary.py $^ \
-bfunction \
-fcode=code_size \
-fdata=data_size \
-fstack=stack_limit --max=stack \
$(SUMMARYFLAGS))
## Compare function sizes
.PHONY: funcs-diff
funcs-diff: SHELL=/bin/bash
funcs-diff: $(OBJ) $(CI)
$(strip ./scripts/summary.py \
<(./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o-) \
<(./scripts/data.py $(OBJ) -q $(DATAFLAGS) -o-) \
<(./scripts/stack.py $(CI) -q $(STACKFLAGS) -o-) \
-bfunction \
-fcode=code_size \
-fdata=data_size \
-fstack=stack_limit --max=stack \
$(SUMMARYFLAGS) -d <(./scripts/summary.py \
$(BUILDDIR)/lfs.code.csv \
$(BUILDDIR)/lfs.data.csv \
$(BUILDDIR)/lfs.stack.csv \
-q $(SUMMARYFLAGS) -o-))
## Find struct sizes
.PHONY: structs
structs: $(OBJ)
./scripts/structs.py $^ -S $(STRUCTSFLAGS)
structs: STRUCTSFLAGS+=-S
structs: $(OBJ) $(BUILDDIR)/lfs.structs.csv
./scripts/structs.py $(OBJ) $(STRUCTSFLAGS)
.PHONY: coverage
coverage:
./scripts/coverage.py $(BUILDDIR)tests/*.toml.info -s $(COVERAGEFLAGS)
## Compare struct sizes
.PHONY: structs-diff
structs-diff: $(OBJ)
./scripts/structs.py $^ $(STRUCTSFLAGS) -d $(BUILDDIR)/lfs.structs.csv
## Find the line/branch coverage after a test run
.PHONY: cov
cov: COVFLAGS+=-s
cov: $(GCDA) $(BUILDDIR)/lfs.cov.csv
$(strip ./scripts/cov.py $(GCDA) \
$(patsubst %,-F%,$(SRC)) \
$(COVFLAGS))
## Compare line/branch coverage
.PHONY: cov-diff
cov-diff: $(GCDA)
$(strip ./scripts/cov.py $^ \
$(patsubst %,-F%,$(SRC)) \
$(COVFLAGS) -d $(BUILDDIR)/lfs.cov.csv)
## Find the perf results after a bench run with YES_PERF
.PHONY: perf
perf: PERFFLAGS+=-S
perf: $(BENCH_PERF) $(BUILDDIR)/lfs.perf.csv
$(strip ./scripts/perf.py $(BENCH_PERF) \
$(patsubst %,-F%,$(SRC)) \
$(PERFFLAGS))
## Compare perf results
.PHONY: perf-diff
perf-diff: $(BENCH_PERF)
$(strip ./scripts/perf.py $^ \
$(patsubst %,-F%,$(SRC)) \
$(PERFFLAGS) -d $(BUILDDIR)/lfs.perf.csv)
## Find the perfbd results after a bench run
.PHONY: perfbd
perfbd: PERFBDFLAGS+=-S
perfbd: $(BENCH_TRACE) $(BUILDDIR)/lfs.perfbd.csv
$(strip ./scripts/perfbd.py $(BENCH_RUNNER) $(BENCH_TRACE) \
$(patsubst %,-F%,$(SRC)) \
$(PERFBDFLAGS))
## Compare perfbd results
.PHONY: perfbd-diff
perfbd-diff: $(BENCH_TRACE)
$(strip ./scripts/perfbd.py $(BENCH_RUNNER) $^ \
$(patsubst %,-F%,$(SRC)) \
$(PERFBDFLAGS) -d $(BUILDDIR)/lfs.perfbd.csv)
## Find a summary of compile-time sizes
.PHONY: summary sizes
summary sizes: \
$(BUILDDIR)/lfs.code.csv \
$(BUILDDIR)/lfs.data.csv \
$(BUILDDIR)/lfs.stack.csv \
$(BUILDDIR)/lfs.structs.csv
$(strip ./scripts/summary.py $^ \
-fcode=code_size \
-fdata=data_size \
-fstack=stack_limit --max=stack \
-fstructs=struct_size \
-Y $(SUMMARYFLAGS))
## Compare compile-time sizes
.PHONY: summary-diff sizes-diff
summary-diff sizes-diff: SHELL=/bin/bash
summary-diff sizes-diff: $(OBJ) $(CI)
$(strip ./scripts/summary.py \
<(./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o-) \
<(./scripts/data.py $(OBJ) -q $(DATAFLAGS) -o-) \
<(./scripts/stack.py $(CI) -q $(STACKFLAGS) -o-) \
<(./scripts/structs.py $(OBJ) -q $(STRUCTSFLAGS) -o-) \
-fcode=code_size \
-fdata=data_size \
-fstack=stack_limit --max=stack \
-fstructs=struct_size \
-Y $(SUMMARYFLAGS) -d <(./scripts/summary.py \
$(BUILDDIR)/lfs.code.csv \
$(BUILDDIR)/lfs.data.csv \
$(BUILDDIR)/lfs.stack.csv \
$(BUILDDIR)/lfs.structs.csv \
-q $(SUMMARYFLAGS) -o-))
## Build the test-runner
.PHONY: test-runner build-test
ifndef NO_COV
test-runner build-test: CFLAGS+=--coverage
endif
ifdef YES_PERF
test-runner build-test: CFLAGS+=-fno-omit-frame-pointer
endif
ifdef YES_PERFBD
test-runner build-test: CFLAGS+=-fno-omit-frame-pointer
endif
# note we remove some binary-dependent files during compilation,
# otherwise it's way too easy to end up with outdated results
test-runner build-test: $(TEST_RUNNER)
ifndef NO_COV
rm -f $(TEST_GCDA)
endif
ifdef YES_PERF
rm -f $(TEST_PERF)
endif
ifdef YES_PERFBD
rm -f $(TEST_TRACE)
endif
## Run the tests, -j enables parallel tests
.PHONY: test
test: test-runner
./scripts/test.py $(TEST_RUNNER) $(TESTFLAGS)
## List the tests
.PHONY: test-list
test-list: test-runner
./scripts/test.py $(TEST_RUNNER) $(TESTFLAGS) -l
## Summarize the testmarks
.PHONY: testmarks
testmarks: SUMMARYFLAGS+=-spassed
testmarks: $(TEST_CSV) $(BUILDDIR)/lfs.test.csv
$(strip ./scripts/summary.py $(TEST_CSV) \
-bsuite \
-fpassed=test_passed \
$(SUMMARYFLAGS))
## Compare testmarks against a previous run
.PHONY: testmarks-diff
testmarks-diff: $(TEST_CSV)
$(strip ./scripts/summary.py $^ \
-bsuite \
-fpassed=test_passed \
$(SUMMARYFLAGS) -d $(BUILDDIR)/lfs.test.csv)
## Build the bench-runner
.PHONY: bench-runner build-bench
ifdef YES_COV
bench-runner build-bench: CFLAGS+=--coverage
endif
ifdef YES_PERF
bench-runner build-bench: CFLAGS+=-fno-omit-frame-pointer
endif
ifndef NO_PERFBD
bench-runner build-bench: CFLAGS+=-fno-omit-frame-pointer
endif
# note we remove some binary-dependent files during compilation,
# otherwise it's way too easy to end up with outdated results
bench-runner build-bench: $(BENCH_RUNNER)
ifdef YES_COV
rm -f $(BENCH_GCDA)
endif
ifdef YES_PERF
rm -f $(BENCH_PERF)
endif
ifndef NO_PERFBD
rm -f $(BENCH_TRACE)
endif
## Run the benchmarks, -j enables parallel benchmarks
.PHONY: bench
bench: bench-runner
./scripts/bench.py $(BENCH_RUNNER) $(BENCHFLAGS)
## List the benchmarks
.PHONY: bench-list
bench-list: bench-runner
./scripts/bench.py $(BENCH_RUNNER) $(BENCHFLAGS) -l
## Summarize the benchmarks
.PHONY: benchmarks
benchmarks: SUMMARYFLAGS+=-Serased -Sproged -Sreaded
benchmarks: $(BENCH_CSV) $(BUILDDIR)/lfs.bench.csv
$(strip ./scripts/summary.py $(BENCH_CSV) \
-bsuite \
-freaded=bench_readed \
-fproged=bench_proged \
-ferased=bench_erased \
$(SUMMARYFLAGS))
## Compare benchmarks against a previous run
.PHONY: benchmarks-diff
benchmarks-diff: $(BENCH_CSV)
$(strip ./scripts/summary.py $^ \
-bsuite \
-freaded=bench_readed \
-fproged=bench_proged \
-ferased=bench_erased \
$(SUMMARYFLAGS) -d $(BUILDDIR)/lfs.bench.csv)
.PHONY: summary
summary: $(BUILDDIR)lfs.csv
./scripts/summary.py -Y $^ $(SUMMARYFLAGS)
# rules
-include $(DEP)
-include $(TEST_DEP)
.SUFFIXES:
.SECONDARY:
$(BUILDDIR)lfs: $(OBJ)
$(BUILDDIR)/lfs: $(OBJ)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
$(BUILDDIR)lfs.a: $(OBJ)
$(BUILDDIR)/liblfs.a: $(OBJ)
$(AR) rcs $@ $^
$(BUILDDIR)lfs.csv: $(OBJ) $(CGI)
./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o $@
./scripts/data.py $(OBJ) -q -m $@ $(DATAFLAGS) -o $@
./scripts/stack.py $(CGI) -q -m $@ $(STACKFLAGS) -o $@
./scripts/structs.py $(OBJ) -q -m $@ $(STRUCTSFLAGS) -o $@
$(if $(COVERAGE),\
./scripts/coverage.py $(BUILDDIR)tests/*.toml.info \
-q -m $@ $(COVERAGEFLAGS) -o $@)
$(BUILDDIR)/lfs.code.csv: $(OBJ)
./scripts/code.py $^ -q $(CODEFLAGS) -o $@
$(BUILDDIR)%.o: %.c
$(CC) -c -MMD $(CFLAGS) $< -o $@
$(BUILDDIR)/lfs.data.csv: $(OBJ)
./scripts/data.py $^ -q $(DATAFLAGS) -o $@
$(BUILDDIR)%.s: %.c
$(BUILDDIR)/lfs.stack.csv: $(CI)
./scripts/stack.py $^ -q $(STACKFLAGS) -o $@
$(BUILDDIR)/lfs.structs.csv: $(OBJ)
./scripts/structs.py $^ -q $(STRUCTSFLAGS) -o $@
$(BUILDDIR)/lfs.cov.csv: $(GCDA)
$(strip ./scripts/cov.py $^ \
$(patsubst %,-F%,$(SRC)) \
-q $(COVFLAGS) -o $@)
$(BUILDDIR)/lfs.perf.csv: $(BENCH_PERF)
$(strip ./scripts/perf.py $^ \
$(patsubst %,-F%,$(SRC)) \
-q $(PERFFLAGS) -o $@)
$(BUILDDIR)/lfs.perfbd.csv: $(BENCH_TRACE)
$(strip ./scripts/perfbd.py $(BENCH_RUNNER) $^ \
$(patsubst %,-F%,$(SRC)) \
-q $(PERFBDFLAGS) -o $@)
$(BUILDDIR)/lfs.test.csv: $(TEST_CSV)
cp $^ $@
$(BUILDDIR)/lfs.bench.csv: $(BENCH_CSV)
cp $^ $@
$(BUILDDIR)/runners/test_runner: $(TEST_OBJ)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
$(BUILDDIR)/runners/bench_runner: $(BENCH_OBJ)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
# our main build rule generates .o, .d, and .ci files, the latter
# used for stack analysis
$(BUILDDIR)/%.o $(BUILDDIR)/%.ci: %.c
$(CC) -c -MMD $(CFLAGS) $< -o $(BUILDDIR)/$*.o
$(BUILDDIR)/%.o $(BUILDDIR)/%.ci: $(BUILDDIR)/%.c
$(CC) -c -MMD $(CFLAGS) $< -o $(BUILDDIR)/$*.o
$(BUILDDIR)/%.s: %.c
$(CC) -S $(CFLAGS) $< -o $@
# gcc depends on the output file for intermediate file names, so
# we can't omit the .o output. We also need to serialize with the
# normal .o rule because otherwise we can end up with multiprocess
# problems with two instances of gcc modifying the same .o
$(BUILDDIR)%.ci: %.c | $(BUILDDIR)%.o
$(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $|
$(BUILDDIR)/%.c: %.a.c
./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@
# clean everything
$(BUILDDIR)/%.c: $(BUILDDIR)/%.a.c
./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@
$(BUILDDIR)/%.t.a.c: %.toml
./scripts/test.py -c $< $(TESTCFLAGS) -o $@
$(BUILDDIR)/%.t.a.c: %.c $(TESTS)
./scripts/test.py -c $(TESTS) -s $< $(TESTCFLAGS) -o $@
$(BUILDDIR)/%.b.a.c: %.toml
./scripts/bench.py -c $< $(BENCHCFLAGS) -o $@
$(BUILDDIR)/%.b.a.c: %.c $(BENCHES)
./scripts/bench.py -c $(BENCHES) -s $< $(BENCHCFLAGS) -o $@
## Clean everything
.PHONY: clean
clean:
rm -f $(BUILDDIR)lfs
rm -f $(BUILDDIR)lfs.a
rm -f $(BUILDDIR)lfs.csv
rm -f $(BUILDDIR)/lfs
rm -f $(BUILDDIR)/liblfs.a
rm -f $(BUILDDIR)/lfs.code.csv
rm -f $(BUILDDIR)/lfs.data.csv
rm -f $(BUILDDIR)/lfs.stack.csv
rm -f $(BUILDDIR)/lfs.structs.csv
rm -f $(BUILDDIR)/lfs.cov.csv
rm -f $(BUILDDIR)/lfs.perf.csv
rm -f $(BUILDDIR)/lfs.perfbd.csv
rm -f $(BUILDDIR)/lfs.test.csv
rm -f $(BUILDDIR)/lfs.bench.csv
rm -f $(OBJ)
rm -f $(CGI)
rm -f $(DEP)
rm -f $(ASM)
rm -f $(BUILDDIR)tests/*.toml.*
rm -f $(CI)
rm -f $(TEST_RUNNER)
rm -f $(TEST_A)
rm -f $(TEST_C)
rm -f $(TEST_OBJ)
rm -f $(TEST_DEP)
rm -f $(TEST_CI)
rm -f $(TEST_GCNO)
rm -f $(TEST_GCDA)
rm -f $(TEST_PERF)
rm -f $(TEST_TRACE)
rm -f $(TEST_CSV)
rm -f $(BENCH_RUNNER)
rm -f $(BENCH_A)
rm -f $(BENCH_C)
rm -f $(BENCH_OBJ)
rm -f $(BENCH_DEP)
rm -f $(BENCH_CI)
rm -f $(BENCH_GCNO)
rm -f $(BENCH_GCDA)
rm -f $(BENCH_PERF)
rm -f $(BENCH_TRACE)
rm -f $(BENCH_CSV)


@ -3,7 +3,7 @@
"Name" : "littlefs",
"License" : "BSD 3-Clause License",
"License File" : "LICENSE.md",
"Version Number" : "V2.5",
"Version Number" : "V2.8",
"Owner" : "wangmihu@huawei.com",
"Upstream URL" : "https://github.com/littlefs-project/littlefs",
"Description" : "A little fail-safe filesystem designed for microcontrollers."


@ -226,6 +226,13 @@ License Identifiers that are here available: http://spdx.org/licenses/
to create images of the filesystem on your PC. Check if littlefs will fit
your needs, create images for a later download to the target memory or
inspect the content of a binary image of the target memory.
- [littlefs2-rust] - A Rust wrapper for littlefs. This project allows you
to use littlefs in a Rust-friendly API, reaping the benefits of Rust's memory
safety and other guarantees.
- [littlefs-disk-img-viewer] - A memory-efficient web application for viewing
littlefs disk images in your web browser.
- [mklfs] - A command line tool built by the [Lua RTOS] guys for making
littlefs images from a host PC. Supports Windows, Mac OS, and Linux.
@ -243,8 +250,16 @@ License Identifiers that are here available: http://spdx.org/licenses/
MCUs. It offers static wear-leveling and power-resilience with only a fixed
_O(|address|)_ pointer structure stored on each block and in RAM.
- [ChaN's FatFs] - A lightweight reimplementation of the infamous FAT filesystem
for microcontroller-scale devices. Due to limitations of FAT it can't provide
power-loss resilience, but it does allow easy interop with PCs.
- [chamelon] - A pure-OCaml implementation of (most of) littlefs, designed for
use with the MirageOS library operating system project. It is interoperable
with the reference implementation, with some caveats.
[BSD-3-Clause]: https://spdx.org/licenses/BSD-3-Clause.html
[littlefs-disk-img-viewer]: https://github.com/tniessen/littlefs-disk-img-viewer
[littlefs-fuse]: https://github.com/geky/littlefs-fuse
[FUSE]: https://github.com/libfuse/libfuse
[littlefs-js]: https://github.com/geky/littlefs-js
@ -252,7 +267,10 @@ License Identifiers that are here available: http://spdx.org/licenses/
[mklfs]: https://github.com/whitecatboard/Lua-RTOS-ESP32/tree/master/components/mklfs/src
[Lua RTOS]: https://github.com/whitecatboard/Lua-RTOS-ESP32
[Mbed OS]: https://github.com/armmbed/mbed-os
[LittleFileSystem]: https://os.mbed.com/docs/mbed-os/v5.12/apis/littlefilesystem.html
[LittleFileSystem]: https://os.mbed.com/docs/mbed-os/latest/apis/littlefilesystem.html
[SPIFFS]: https://github.com/pellepl/spiffs
[Dhara]: https://github.com/dlbeer/dhara
[ChaN's FatFs]: http://elm-chan.org/fsw/ff/00index_e.html
[littlefs-python]: https://pypi.org/project/littlefs-python/
[littlefs2-rust]: https://crates.io/crates/littlefs2
[chamelon]: https://github.com/yomimono/chamelon

101 SPEC.md

@ -1,10 +1,10 @@
## littlefs technical specification
This is the technical specification of the little filesystem. This document
covers the technical details of how the littlefs is stored on disk for
introspection and tooling. This document assumes you are familiar with the
design of the littlefs, for more info on how littlefs works check
out [DESIGN.md](DESIGN.md).
This is the technical specification of the little filesystem with on-disk
version lfs2.1. This document covers the technical details of how the littlefs
is stored on disk for introspection and tooling. This document assumes you are
familiar with the design of the littlefs, for more info on how littlefs works
check out [DESIGN.md](DESIGN.md).
```
| | | .---._____
@ -133,12 +133,6 @@ tags XORed together, starting with `0xffffffff`.
'-------------------' '-------------------'
```
One last thing to note before we get into the details around tag encoding. Each
tag contains a valid bit used to indicate if the tag and containing commit is
valid. This valid bit is the first bit found in the tag and the commit and can
be used to tell if we've attempted to write to the remaining space in the
block.
Here's a more complete example of a metadata block containing 4 entries:
```
@ -191,6 +185,53 @@ Here's a more complete example of metadata block containing 4 entries:
'---- most recent D
```
Two things to note before we get into the details around tag encoding:
1. Each tag contains a valid bit used to indicate if the tag and containing
commit are valid. After XORing, this bit should always be zero.
At the end of each commit, the valid bit of the previous tag is XORed
with the lowest bit in the type field of the CRC tag. This allows
the CRC tag to force the next commit to fail the valid bit test if it
has not yet been written to.
2. The valid bit alone is not enough info to know if the next commit has been
erased. We don't know the order bits will be programmed in a program block,
so it's possible that the next commit had an attempted program that left the
valid bit unchanged.
To ensure we only ever program erased bytes, each commit can contain an
optional forward-CRC (FCRC). An FCRC contains a checksum of some amount of
bytes in the next commit at the time it was erased.
```
.-------------------. \ \
| revision count | | |
|-------------------| | |
| metadata | | |
| | +---. +-- current commit
| | | | |
|-------------------| | | |
| FCRC ---|-. | |
|-------------------| / | | |
| CRC -----|-' /
|-------------------| |
| padding | | padding (doesn't need CRC)
| | |
|-------------------| \ | \
| erased? | +-' |
| | | | +-- next commit
| v | / |
| | /
| |
'-------------------'
```
If the FCRC is missing or the checksum does not match, we must assume a
commit was attempted but failed due to power-loss.
Note that end-of-block commits do not need an FCRC.
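For illustration, here is a minimal sketch (not taken from the littlefs sources) of how a tool might use an FCRC to decide whether the space after a commit still looks erased. It reuses littlefs' `lfs_crc` helper (CRC-32, polynomial `0x04c11db7`, initialized with `0xffffffff`) and glosses over exactly where the CRC tag's padding ends; whether the stored FCRC is the raw running CRC or a finalized value is treated as an assumption here.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

// littlefs' CRC-32 helper (polynomial 0x04c11db7), as used elsewhere in lfs
uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size);

// Check whether the space following the current commit still matches the
// FCRC recorded in that commit. fcrc_size and fcrc_crc are the two fields
// of the FCRC tag's data; next points just past the commit's CRC tag and
// its padding. Treating the stored value as the raw running CRC is an
// assumption of this sketch, not something specified above.
static bool fcrc_matches(const uint8_t *next,
        uint32_t fcrc_size, uint32_t fcrc_crc) {
    uint32_t crc = lfs_crc(0xffffffff, next, fcrc_size);
    // a mismatch means a later commit was attempted but lost to power failure
    return crc == fcrc_crc;
}
```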
## Metadata tags
So in littlefs, 32-bit tags describe every type of metadata. And this means
@ -785,3 +826,41 @@ CRC fields:
are made about the contents.
---
#### `0x5ff` LFS_TYPE_FCRC
Added in lfs2.1, the optional FCRC tag contains a checksum of some amount of
bytes in the next commit at the time it was erased. This allows us to ensure
that we only ever program erased bytes, even if a previous commit failed due
to power-loss.
When programming a commit, the FCRC size must be at least as large as the
program block size. However, the program block size is not saved on disk and
can change between mounts, so the FCRC size on disk may differ from the
current program block size.
If the FCRC is missing or the checksum does not match, we must assume a
commit was attempted but failed due to power-loss.
Layout of the FCRC tag:
```
tag data
[-- 32 --][-- 32 --|-- 32 --]
[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --]
^ ^ ^ ^ ^- fcrc size ^- fcrc
| | | '- size (8)
| | '------ id (0x3ff)
| '------------ type (0x5ff)
'----------------- valid bit
```
FCRC fields:
1. **FCRC size (32-bits)** - Number of bytes after this commit's CRC tag's
padding to include in the FCRC.
2. **FCRC (32-bits)** - CRC of the bytes after this commit's CRC tag's padding
when erased. Like the CRC tag, this uses a CRC-32 with a polynomial of
`0x04c11db7` initialized with `0xffffffff`.
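To make the bit layout concrete, here is a hedged sketch of unpacking an already-decoded 32-bit tag into the fields shown above. The names `tag_fields_t` and `tag_unpack` are hypothetical; XOR-decoding against the previous tag and on-disk byte order are handled elsewhere and not shown.

```c
#include <stdbool.h>
#include <stdint.h>

// Fields of a 32-bit metadata tag, per the [1 | 11 | 10 | 10] layout above
typedef struct {
    bool valid;     // 1-bit valid flag (see note 1 earlier in this document)
    uint16_t type;  // 11-bit type, e.g. 0x5ff for LFS_TYPE_FCRC
    uint16_t id;    // 10-bit id, 0x3ff when the tag is not attached to an id
    uint16_t size;  // 10-bit size of the tag's data in bytes
} tag_fields_t;

static tag_fields_t tag_unpack(uint32_t tag) {
    return (tag_fields_t){
        .valid = (tag >> 31) & 0x1,
        .type  = (tag >> 20) & 0x7ff,
        .id    = (tag >> 10) & 0x3ff,
        .size  = (tag >>  0) & 0x3ff,
    };
}
```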
---

645 bd/lfs_emubd.c (new file)

@ -0,0 +1,645 @@
/*
* Emulating block device, wraps filebd and rambd while providing a bunch
* of hooks for testing littlefs in various conditions.
*
* Copyright (c) 2022, The littlefs authors.
* Copyright (c) 2017, Arm Limited. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 199309L
#endif
#include "bd/lfs_emubd.h"
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#ifdef _WIN32
#include <windows.h>
#endif
// access to lazily-allocated/copy-on-write blocks
//
// Note we can only modify a block if we have exclusive access to it (rc == 1)
//
static lfs_emubd_block_t *lfs_emubd_incblock(lfs_emubd_block_t *block) {
if (block) {
block->rc += 1;
}
return block;
}
static void lfs_emubd_decblock(lfs_emubd_block_t *block) {
if (block) {
block->rc -= 1;
if (block->rc == 0) {
free(block);
}
}
}
static lfs_emubd_block_t *lfs_emubd_mutblock(
const struct lfs_config *cfg,
lfs_emubd_block_t **block) {
lfs_emubd_t *bd = cfg->context;
lfs_emubd_block_t *block_ = *block;
if (block_ && block_->rc == 1) {
// rc == 1? can modify
return block_;
} else if (block_) {
// rc > 1? need to create a copy
lfs_emubd_block_t *nblock = malloc(
sizeof(lfs_emubd_block_t) + bd->cfg->erase_size);
if (!nblock) {
return NULL;
}
memcpy(nblock, block_,
sizeof(lfs_emubd_block_t) + bd->cfg->erase_size);
nblock->rc = 1;
lfs_emubd_decblock(block_);
*block = nblock;
return nblock;
} else {
// no block? need to allocate
lfs_emubd_block_t *nblock = malloc(
sizeof(lfs_emubd_block_t) + bd->cfg->erase_size);
if (!nblock) {
return NULL;
}
nblock->rc = 1;
nblock->wear = 0;
// zero for consistency
memset(nblock->data,
(bd->cfg->erase_value != -1) ? bd->cfg->erase_value : 0,
bd->cfg->erase_size);
*block = nblock;
return nblock;
}
}
// emubd create/destroy
int lfs_emubd_create(const struct lfs_config *cfg,
const struct lfs_emubd_config *bdcfg) {
LFS_EMUBD_TRACE("lfs_emubd_create(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p}, "
"%p {.read_size=%"PRIu32", .prog_size=%"PRIu32", "
".erase_size=%"PRIu32", .erase_count=%"PRIu32", "
".erase_value=%"PRId32", .erase_cycles=%"PRIu32", "
".badblock_behavior=%"PRIu8", .power_cycles=%"PRIu32", "
".powerloss_behavior=%"PRIu8", .powerloss_cb=%p, "
".powerloss_data=%p, .track_branches=%d})",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
(void*)bdcfg,
bdcfg->read_size, bdcfg->prog_size, bdcfg->erase_size,
bdcfg->erase_count, bdcfg->erase_value, bdcfg->erase_cycles,
bdcfg->badblock_behavior, bdcfg->power_cycles,
bdcfg->powerloss_behavior, (void*)(uintptr_t)bdcfg->powerloss_cb,
bdcfg->powerloss_data, bdcfg->track_branches);
lfs_emubd_t *bd = cfg->context;
bd->cfg = bdcfg;
// allocate our block array, all blocks start as uninitialized
bd->blocks = malloc(bd->cfg->erase_count * sizeof(lfs_emubd_block_t*));
if (!bd->blocks) {
LFS_EMUBD_TRACE("lfs_emubd_create -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
memset(bd->blocks, 0, bd->cfg->erase_count * sizeof(lfs_emubd_block_t*));
// setup testing things
bd->readed = 0;
bd->proged = 0;
bd->erased = 0;
bd->power_cycles = bd->cfg->power_cycles;
bd->disk = NULL;
if (bd->cfg->disk_path) {
bd->disk = malloc(sizeof(lfs_emubd_disk_t));
if (!bd->disk) {
LFS_EMUBD_TRACE("lfs_emubd_create -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
bd->disk->rc = 1;
bd->disk->scratch = NULL;
#ifdef _WIN32
bd->disk->fd = open(bd->cfg->disk_path,
O_RDWR | O_CREAT | O_BINARY, 0666);
#else
bd->disk->fd = open(bd->cfg->disk_path,
O_RDWR | O_CREAT, 0666);
#endif
if (bd->disk->fd < 0) {
int err = -errno;
LFS_EMUBD_TRACE("lfs_emubd_create -> %d", err);
return err;
}
// if we're emulating erase values, we can keep a block around in
// memory of just the erase state to speed up emulated erases
if (bd->cfg->erase_value != -1) {
bd->disk->scratch = malloc(bd->cfg->erase_size);
if (!bd->disk->scratch) {
LFS_EMUBD_TRACE("lfs_emubd_create -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
memset(bd->disk->scratch,
bd->cfg->erase_value,
bd->cfg->erase_size);
// go ahead and erase all of the disk, otherwise the file will not
// match our internal representation
for (size_t i = 0; i < bd->cfg->erase_count; i++) {
ssize_t res = write(bd->disk->fd,
bd->disk->scratch,
bd->cfg->erase_size);
if (res < 0) {
int err = -errno;
LFS_EMUBD_TRACE("lfs_emubd_create -> %d", err);
return err;
}
}
}
}
LFS_EMUBD_TRACE("lfs_emubd_create -> %d", 0);
return 0;
}
int lfs_emubd_destroy(const struct lfs_config *cfg) {
LFS_EMUBD_TRACE("lfs_emubd_destroy(%p)", (void*)cfg);
lfs_emubd_t *bd = cfg->context;
// decrement reference counts
for (lfs_block_t i = 0; i < bd->cfg->erase_count; i++) {
lfs_emubd_decblock(bd->blocks[i]);
}
free(bd->blocks);
// clean up other resources
if (bd->disk) {
bd->disk->rc -= 1;
if (bd->disk->rc == 0) {
close(bd->disk->fd);
free(bd->disk->scratch);
free(bd->disk);
}
}
LFS_EMUBD_TRACE("lfs_emubd_destroy -> %d", 0);
return 0;
}
// block device API
int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size) {
LFS_EMUBD_TRACE("lfs_emubd_read(%p, "
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
(void*)cfg, block, off, buffer, size);
lfs_emubd_t *bd = cfg->context;
// check if read is valid
LFS_ASSERT(block < bd->cfg->erase_count);
LFS_ASSERT(off % bd->cfg->read_size == 0);
LFS_ASSERT(size % bd->cfg->read_size == 0);
LFS_ASSERT(off+size <= bd->cfg->erase_size);
// get the block
const lfs_emubd_block_t *b = bd->blocks[block];
if (b) {
// block bad?
if (bd->cfg->erase_cycles && b->wear >= bd->cfg->erase_cycles &&
bd->cfg->badblock_behavior == LFS_EMUBD_BADBLOCK_READERROR) {
LFS_EMUBD_TRACE("lfs_emubd_read -> %d", LFS_ERR_CORRUPT);
return LFS_ERR_CORRUPT;
}
// read data
memcpy(buffer, &b->data[off], size);
} else {
// zero for consistency
memset(buffer,
(bd->cfg->erase_value != -1) ? bd->cfg->erase_value : 0,
size);
}
// track reads
bd->readed += size;
if (bd->cfg->read_sleep) {
int err = nanosleep(&(struct timespec){
.tv_sec=bd->cfg->read_sleep/1000000000,
.tv_nsec=bd->cfg->read_sleep%1000000000},
NULL);
if (err) {
err = -errno;
LFS_EMUBD_TRACE("lfs_emubd_read -> %d", err);
return err;
}
}
LFS_EMUBD_TRACE("lfs_emubd_read -> %d", 0);
return 0;
}
int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size) {
LFS_EMUBD_TRACE("lfs_emubd_prog(%p, "
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
(void*)cfg, block, off, buffer, size);
lfs_emubd_t *bd = cfg->context;
// check if write is valid
LFS_ASSERT(block < bd->cfg->erase_count);
LFS_ASSERT(off % bd->cfg->prog_size == 0);
LFS_ASSERT(size % bd->cfg->prog_size == 0);
LFS_ASSERT(off+size <= bd->cfg->erase_size);
// get the block
lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]);
if (!b) {
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
// block bad?
if (bd->cfg->erase_cycles && b->wear >= bd->cfg->erase_cycles) {
if (bd->cfg->badblock_behavior ==
LFS_EMUBD_BADBLOCK_PROGERROR) {
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", LFS_ERR_CORRUPT);
return LFS_ERR_CORRUPT;
} else if (bd->cfg->badblock_behavior ==
LFS_EMUBD_BADBLOCK_PROGNOOP ||
bd->cfg->badblock_behavior ==
LFS_EMUBD_BADBLOCK_ERASENOOP) {
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", 0);
return 0;
}
}
// were we erased properly?
if (bd->cfg->erase_value != -1) {
for (lfs_off_t i = 0; i < size; i++) {
LFS_ASSERT(b->data[off+i] == bd->cfg->erase_value);
}
}
// prog data
memcpy(&b->data[off], buffer, size);
// mirror to disk file?
if (bd->disk) {
off_t res1 = lseek(bd->disk->fd,
(off_t)block*bd->cfg->erase_size + (off_t)off,
SEEK_SET);
if (res1 < 0) {
int err = -errno;
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err);
return err;
}
ssize_t res2 = write(bd->disk->fd, buffer, size);
if (res2 < 0) {
int err = -errno;
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err);
return err;
}
}
// track progs
bd->proged += size;
if (bd->cfg->prog_sleep) {
int err = nanosleep(&(struct timespec){
.tv_sec=bd->cfg->prog_sleep/1000000000,
.tv_nsec=bd->cfg->prog_sleep%1000000000},
NULL);
if (err) {
err = -errno;
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err);
return err;
}
}
// lose power?
if (bd->power_cycles > 0) {
bd->power_cycles -= 1;
if (bd->power_cycles == 0) {
// simulate power loss
bd->cfg->powerloss_cb(bd->cfg->powerloss_data);
}
}
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", 0);
return 0;
}
int lfs_emubd_erase(const struct lfs_config *cfg, lfs_block_t block) {
LFS_EMUBD_TRACE("lfs_emubd_erase(%p, 0x%"PRIx32" (%"PRIu32"))",
(void*)cfg, block, ((lfs_emubd_t*)cfg->context)->cfg->erase_size);
lfs_emubd_t *bd = cfg->context;
// check if erase is valid
LFS_ASSERT(block < bd->cfg->erase_count);
// get the block
lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]);
if (!b) {
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
// block bad?
if (bd->cfg->erase_cycles) {
if (b->wear >= bd->cfg->erase_cycles) {
if (bd->cfg->badblock_behavior ==
LFS_EMUBD_BADBLOCK_ERASEERROR) {
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", LFS_ERR_CORRUPT);
return LFS_ERR_CORRUPT;
} else if (bd->cfg->badblock_behavior ==
LFS_EMUBD_BADBLOCK_ERASENOOP) {
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", 0);
return 0;
}
} else {
// mark wear
b->wear += 1;
}
}
// emulate an erase value?
if (bd->cfg->erase_value != -1) {
memset(b->data, bd->cfg->erase_value, bd->cfg->erase_size);
// mirror to disk file?
if (bd->disk) {
off_t res1 = lseek(bd->disk->fd,
(off_t)block*bd->cfg->erase_size,
SEEK_SET);
if (res1 < 0) {
int err = -errno;
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err);
return err;
}
ssize_t res2 = write(bd->disk->fd,
bd->disk->scratch,
bd->cfg->erase_size);
if (res2 < 0) {
int err = -errno;
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err);
return err;
}
}
}
// track erases
bd->erased += bd->cfg->erase_size;
if (bd->cfg->erase_sleep) {
int err = nanosleep(&(struct timespec){
.tv_sec=bd->cfg->erase_sleep/1000000000,
.tv_nsec=bd->cfg->erase_sleep%1000000000},
NULL);
if (err) {
err = -errno;
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err);
return err;
}
}
// lose power?
if (bd->power_cycles > 0) {
bd->power_cycles -= 1;
if (bd->power_cycles == 0) {
// simulate power loss
bd->cfg->powerloss_cb(bd->cfg->powerloss_data);
}
}
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", 0);
return 0;
}
int lfs_emubd_sync(const struct lfs_config *cfg) {
LFS_EMUBD_TRACE("lfs_emubd_sync(%p)", (void*)cfg);
// do nothing
(void)cfg;
LFS_EMUBD_TRACE("lfs_emubd_sync -> %d", 0);
return 0;
}
/// Additional extended API for driving test features ///
static int lfs_emubd_rawcrc(const struct lfs_config *cfg,
lfs_block_t block, uint32_t *crc) {
lfs_emubd_t *bd = cfg->context;
// check if crc is valid
LFS_ASSERT(block < cfg->block_count);
// crc the block
uint32_t crc_ = 0xffffffff;
const lfs_emubd_block_t *b = bd->blocks[block];
if (b) {
crc_ = lfs_crc(crc_, b->data, cfg->block_size);
} else {
uint8_t erase_value = (bd->cfg->erase_value != -1)
? bd->cfg->erase_value
: 0;
for (lfs_size_t i = 0; i < cfg->block_size; i++) {
crc_ = lfs_crc(crc_, &erase_value, 1);
}
}
*crc = 0xffffffff ^ crc_;
return 0;
}
int lfs_emubd_crc(const struct lfs_config *cfg,
lfs_block_t block, uint32_t *crc) {
LFS_EMUBD_TRACE("lfs_emubd_crc(%p, %"PRIu32", %p)",
(void*)cfg, block, crc);
int err = lfs_emubd_rawcrc(cfg, block, crc);
LFS_EMUBD_TRACE("lfs_emubd_crc -> %d", err);
return err;
}
int lfs_emubd_bdcrc(const struct lfs_config *cfg, uint32_t *crc) {
LFS_EMUBD_TRACE("lfs_emubd_bdcrc(%p, %p)", (void*)cfg, crc);
uint32_t crc_ = 0xffffffff;
for (lfs_block_t i = 0; i < cfg->block_count; i++) {
uint32_t i_crc;
int err = lfs_emubd_rawcrc(cfg, i, &i_crc);
if (err) {
LFS_EMUBD_TRACE("lfs_emubd_bdcrc -> %d", err);
return err;
}
crc_ = lfs_crc(crc_, &i_crc, sizeof(uint32_t));
}
*crc = 0xffffffff ^ crc_;
LFS_EMUBD_TRACE("lfs_emubd_bdcrc -> %d", 0);
return 0;
}
lfs_emubd_sio_t lfs_emubd_readed(const struct lfs_config *cfg) {
LFS_EMUBD_TRACE("lfs_emubd_readed(%p)", (void*)cfg);
lfs_emubd_t *bd = cfg->context;
LFS_EMUBD_TRACE("lfs_emubd_readed -> %"PRIu64, bd->readed);
return bd->readed;
}
lfs_emubd_sio_t lfs_emubd_proged(const struct lfs_config *cfg) {
LFS_EMUBD_TRACE("lfs_emubd_proged(%p)", (void*)cfg);
lfs_emubd_t *bd = cfg->context;
LFS_EMUBD_TRACE("lfs_emubd_proged -> %"PRIu64, bd->proged);
return bd->proged;
}
lfs_emubd_sio_t lfs_emubd_erased(const struct lfs_config *cfg) {
LFS_EMUBD_TRACE("lfs_emubd_erased(%p)", (void*)cfg);
lfs_emubd_t *bd = cfg->context;
LFS_EMUBD_TRACE("lfs_emubd_erased -> %"PRIu64, bd->erased);
return bd->erased;
}
int lfs_emubd_setreaded(const struct lfs_config *cfg, lfs_emubd_io_t readed) {
LFS_EMUBD_TRACE("lfs_emubd_setreaded(%p, %"PRIu64")", (void*)cfg, readed);
lfs_emubd_t *bd = cfg->context;
bd->readed = readed;
LFS_EMUBD_TRACE("lfs_emubd_setreaded -> %d", 0);
return 0;
}
int lfs_emubd_setproged(const struct lfs_config *cfg, lfs_emubd_io_t proged) {
LFS_EMUBD_TRACE("lfs_emubd_setproged(%p, %"PRIu64")", (void*)cfg, proged);
lfs_emubd_t *bd = cfg->context;
bd->proged = proged;
LFS_EMUBD_TRACE("lfs_emubd_setproged -> %d", 0);
return 0;
}
int lfs_emubd_seterased(const struct lfs_config *cfg, lfs_emubd_io_t erased) {
LFS_EMUBD_TRACE("lfs_emubd_seterased(%p, %"PRIu64")", (void*)cfg, erased);
lfs_emubd_t *bd = cfg->context;
bd->erased = erased;
LFS_EMUBD_TRACE("lfs_emubd_seterased -> %d", 0);
return 0;
}
lfs_emubd_swear_t lfs_emubd_wear(const struct lfs_config *cfg,
lfs_block_t block) {
LFS_EMUBD_TRACE("lfs_emubd_wear(%p, %"PRIu32")", (void*)cfg, block);
lfs_emubd_t *bd = cfg->context;
// check if block is valid
LFS_ASSERT(block < bd->cfg->erase_count);
// get the wear
lfs_emubd_wear_t wear;
const lfs_emubd_block_t *b = bd->blocks[block];
if (b) {
wear = b->wear;
} else {
wear = 0;
}
LFS_EMUBD_TRACE("lfs_emubd_wear -> %"PRIi32, wear);
return wear;
}
int lfs_emubd_setwear(const struct lfs_config *cfg,
lfs_block_t block, lfs_emubd_wear_t wear) {
LFS_EMUBD_TRACE("lfs_emubd_setwear(%p, %"PRIu32", %"PRIi32")",
(void*)cfg, block, wear);
lfs_emubd_t *bd = cfg->context;
// check if block is valid
LFS_ASSERT(block < bd->cfg->erase_count);
// set the wear
lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]);
if (!b) {
LFS_EMUBD_TRACE("lfs_emubd_setwear -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
b->wear = wear;
LFS_EMUBD_TRACE("lfs_emubd_setwear -> %d", 0);
return 0;
}
lfs_emubd_spowercycles_t lfs_emubd_powercycles(
const struct lfs_config *cfg) {
LFS_EMUBD_TRACE("lfs_emubd_powercycles(%p)", (void*)cfg);
lfs_emubd_t *bd = cfg->context;
LFS_EMUBD_TRACE("lfs_emubd_powercycles -> %"PRIi32, bd->power_cycles);
return bd->power_cycles;
}
int lfs_emubd_setpowercycles(const struct lfs_config *cfg,
lfs_emubd_powercycles_t power_cycles) {
LFS_EMUBD_TRACE("lfs_emubd_setpowercycles(%p, %"PRIi32")",
(void*)cfg, power_cycles);
lfs_emubd_t *bd = cfg->context;
bd->power_cycles = power_cycles;
LFS_EMUBD_TRACE("lfs_emubd_powercycles -> %d", 0);
return 0;
}
int lfs_emubd_copy(const struct lfs_config *cfg, lfs_emubd_t *copy) {
LFS_EMUBD_TRACE("lfs_emubd_copy(%p, %p)", (void*)cfg, (void*)copy);
lfs_emubd_t *bd = cfg->context;
// lazily copy over our block array
copy->blocks = malloc(bd->cfg->erase_count * sizeof(lfs_emubd_block_t*));
if (!copy->blocks) {
LFS_EMUBD_TRACE("lfs_emubd_copy -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
for (size_t i = 0; i < bd->cfg->erase_count; i++) {
copy->blocks[i] = lfs_emubd_incblock(bd->blocks[i]);
}
// other state
copy->readed = bd->readed;
copy->proged = bd->proged;
copy->erased = bd->erased;
copy->power_cycles = bd->power_cycles;
copy->disk = bd->disk;
if (copy->disk) {
copy->disk->rc += 1;
}
copy->cfg = bd->cfg;
LFS_EMUBD_TRACE("lfs_emubd_copy -> %d", 0);
return 0;
}

241 bd/lfs_emubd.h (new file)

@ -0,0 +1,241 @@
/*
* Emulating block device, wraps filebd and rambd while providing a bunch
* of hooks for testing littlefs in various conditions.
*
* Copyright (c) 2022, The littlefs authors.
* Copyright (c) 2017, Arm Limited. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef LFS_EMUBD_H
#define LFS_EMUBD_H
#include "lfs.h"
#include "lfs_util.h"
#include "bd/lfs_rambd.h"
#include "bd/lfs_filebd.h"
#ifdef __cplusplus
extern "C"
{
#endif
// Block device specific tracing
#ifndef LFS_EMUBD_TRACE
#ifdef LFS_EMUBD_YES_TRACE
#define LFS_EMUBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
#else
#define LFS_EMUBD_TRACE(...)
#endif
#endif
// Mode determining how "bad-blocks" behave during testing. This simulates
// some real-world circumstances such as progs not sticking (prog-noop),
// a readonly disk (erase-noop), and ECC failures (read-error).
//
// Note that read-noop is not allowed. Read _must_ return a consistent (but
// possibly arbitrary) value on every read.
typedef enum lfs_emubd_badblock_behavior {
LFS_EMUBD_BADBLOCK_PROGERROR,
LFS_EMUBD_BADBLOCK_ERASEERROR,
LFS_EMUBD_BADBLOCK_READERROR,
LFS_EMUBD_BADBLOCK_PROGNOOP,
LFS_EMUBD_BADBLOCK_ERASENOOP,
} lfs_emubd_badblock_behavior_t;
// Mode determining how power-loss behaves during testing. For now this
// only supports a noop behavior, leaving the data on-disk untouched.
typedef enum lfs_emubd_powerloss_behavior {
LFS_EMUBD_POWERLOSS_NOOP,
} lfs_emubd_powerloss_behavior_t;
// Type for measuring read/program/erase operations
typedef uint64_t lfs_emubd_io_t;
typedef int64_t lfs_emubd_sio_t;
// Type for measuring wear
typedef uint32_t lfs_emubd_wear_t;
typedef int32_t lfs_emubd_swear_t;
// Type for tracking power-cycles
typedef uint32_t lfs_emubd_powercycles_t;
typedef int32_t lfs_emubd_spowercycles_t;
// Type for delays in nanoseconds
typedef uint64_t lfs_emubd_sleep_t;
typedef int64_t lfs_emubd_ssleep_t;
// emubd config, this is required for testing
struct lfs_emubd_config {
// Minimum size of a read operation in bytes.
lfs_size_t read_size;
// Minimum size of a program operation in bytes.
lfs_size_t prog_size;
// Size of an erase operation in bytes.
lfs_size_t erase_size;
// Number of erase blocks on the device.
lfs_size_t erase_count;
// 8-bit erase value to use for simulating erases. -1 does not simulate
// erases, which can speed up testing by avoiding the extra block-device
// operations to store the erase value.
int32_t erase_value;
// Number of erase cycles before a block becomes "bad". The exact behavior
// of bad blocks is controlled by badblock_behavior.
uint32_t erase_cycles;
// The mode determining how bad-blocks fail
lfs_emubd_badblock_behavior_t badblock_behavior;
// Number of write operations (erase/prog) before triggering a power-loss.
// power_cycles=0 disables this. The exact behavior of power-loss is
// controlled by a combination of powerloss_behavior and powerloss_cb.
lfs_emubd_powercycles_t power_cycles;
// The mode determining how power-loss affects disk
lfs_emubd_powerloss_behavior_t powerloss_behavior;
// Function to call to emulate power-loss. The exact behavior of power-loss
// is up to the runner to provide.
void (*powerloss_cb)(void*);
// Data for power-loss callback
void *powerloss_data;
// True to track when power-loss could have occurred. Note this involves
// heavy memory usage!
bool track_branches;
// Path to file to use as a mirror of the disk. This provides a way to view
// the current state of the block device.
const char *disk_path;
// Artificial delay in nanoseconds, there is no purpose for this other
// than slowing down the simulation.
lfs_emubd_sleep_t read_sleep;
// Artificial delay in nanoseconds, there is no purpose for this other
// than slowing down the simulation.
lfs_emubd_sleep_t prog_sleep;
// Artificial delay in nanoseconds, there is no purpose for this other
// than slowing down the simulation.
lfs_emubd_sleep_t erase_sleep;
};
// A reference counted block
typedef struct lfs_emubd_block {
uint32_t rc;
lfs_emubd_wear_t wear;
uint8_t data[];
} lfs_emubd_block_t;
// Disk mirror
typedef struct lfs_emubd_disk {
uint32_t rc;
int fd;
uint8_t *scratch;
} lfs_emubd_disk_t;
// emubd state
typedef struct lfs_emubd {
// array of copy-on-write blocks
lfs_emubd_block_t **blocks;
// some other test state
lfs_emubd_io_t readed;
lfs_emubd_io_t proged;
lfs_emubd_io_t erased;
lfs_emubd_powercycles_t power_cycles;
lfs_emubd_disk_t *disk;
const struct lfs_emubd_config *cfg;
} lfs_emubd_t;
/// Block device API ///
// Create an emulating block device using the geometry in lfs_config
int lfs_emubd_create(const struct lfs_config *cfg,
const struct lfs_emubd_config *bdcfg);
// Clean up memory associated with block device
int lfs_emubd_destroy(const struct lfs_config *cfg);
// Read a block
int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size);
// Program a block
//
// The block must have previously been erased.
int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size);
// Erase a block
//
// A block must be erased before being programmed. The
// state of an erased block is undefined.
int lfs_emubd_erase(const struct lfs_config *cfg, lfs_block_t block);
// Sync the block device
int lfs_emubd_sync(const struct lfs_config *cfg);
/// Additional extended API for driving test features ///
// A CRC of a block for debugging purposes
int lfs_emubd_crc(const struct lfs_config *cfg,
lfs_block_t block, uint32_t *crc);
// A CRC of the entire block device for debugging purposes
int lfs_emubd_bdcrc(const struct lfs_config *cfg, uint32_t *crc);
// Get total amount of bytes read
lfs_emubd_sio_t lfs_emubd_readed(const struct lfs_config *cfg);
// Get total amount of bytes programmed
lfs_emubd_sio_t lfs_emubd_proged(const struct lfs_config *cfg);
// Get total amount of bytes erased
lfs_emubd_sio_t lfs_emubd_erased(const struct lfs_config *cfg);
// Manually set amount of bytes read
int lfs_emubd_setreaded(const struct lfs_config *cfg, lfs_emubd_io_t readed);
// Manually set amount of bytes programmed
int lfs_emubd_setproged(const struct lfs_config *cfg, lfs_emubd_io_t proged);
// Manually set amount of bytes erased
int lfs_emubd_seterased(const struct lfs_config *cfg, lfs_emubd_io_t erased);
// Get simulated wear on a given block
lfs_emubd_swear_t lfs_emubd_wear(const struct lfs_config *cfg,
lfs_block_t block);
// Manually set simulated wear on a given block
int lfs_emubd_setwear(const struct lfs_config *cfg,
lfs_block_t block, lfs_emubd_wear_t wear);
// Get the remaining power-cycles
lfs_emubd_spowercycles_t lfs_emubd_powercycles(
const struct lfs_config *cfg);
// Manually set the remaining power-cycles
int lfs_emubd_setpowercycles(const struct lfs_config *cfg,
lfs_emubd_powercycles_t power_cycles);
// Create a copy-on-write copy of the state of this block device
int lfs_emubd_copy(const struct lfs_config *cfg, lfs_emubd_t *copy);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif
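Taken together, here is a hedged sketch of how a test harness might wire this block device into an `lfs_config`. The geometry numbers are arbitrary examples, the name `emubd_setup` is hypothetical, and littlefs' own geometry, cache, and lookahead fields are omitted.

```c
#include "bd/lfs_emubd.h"

// emulated geometry; these numbers are arbitrary examples
static lfs_emubd_t bd;
static const struct lfs_emubd_config bdcfg = {
    .read_size    = 16,      // minimum read in bytes
    .prog_size    = 16,      // minimum program in bytes
    .erase_size   = 4096,    // size of an erase block in bytes
    .erase_count  = 256,     // number of erase blocks
    .erase_value  = 0xff,    // simulate NOR-style erases (-1 to skip)
    .erase_cycles = 0,       // 0 = never wear blocks out
    .power_cycles = 0,       // 0 = never inject power-loss
    .disk_path    = NULL,    // optionally mirror the disk to a file
};

// block device callbacks routed through emubd; littlefs' own geometry,
// cache, and lookahead fields still need to be filled in before mounting
static struct lfs_config cfg = {
    .context = &bd,
    .read    = lfs_emubd_read,
    .prog    = lfs_emubd_prog,
    .erase   = lfs_emubd_erase,
    .sync    = lfs_emubd_sync,
};

int emubd_setup(void) {
    // allocates the copy-on-write block array (and the disk mirror if
    // disk_path is set); returns 0 or a negative LFS_ERR_* code
    return lfs_emubd_create(&cfg, &bdcfg);
}
```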


@ -15,19 +15,20 @@
#include <windows.h>
#endif
int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
int lfs_filebd_create(const struct lfs_config *cfg, const char *path,
const struct lfs_filebd_config *bdcfg) {
LFS_FILEBD_TRACE("lfs_filebd_createcfg(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
LFS_FILEBD_TRACE("lfs_filebd_create(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p}, "
"\"%s\", "
"%p {.erase_value=%"PRId32"})",
"%p {.read_size=%"PRIu32", .prog_size=%"PRIu32", "
".erase_size=%"PRIu32", .erase_count=%"PRIu32"})",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
path, (void*)bdcfg, bdcfg->erase_value);
path,
(void*)bdcfg,
bdcfg->read_size, bdcfg->prog_size, bdcfg->erase_size,
bdcfg->erase_count);
lfs_filebd_t *bd = cfg->context;
bd->cfg = bdcfg;
@ -40,31 +41,14 @@ int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
if (bd->fd < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", err);
LFS_FILEBD_TRACE("lfs_filebd_create -> %d", err);
return err;
}
LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", 0);
LFS_FILEBD_TRACE("lfs_filebd_create -> %d", 0);
return 0;
}
int lfs_filebd_create(const struct lfs_config *cfg, const char *path) {
LFS_FILEBD_TRACE("lfs_filebd_create(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
"\"%s\")",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
path);
static const struct lfs_filebd_config defaults = {.erase_value=-1};
int err = lfs_filebd_createcfg(cfg, path, &defaults);
LFS_FILEBD_TRACE("lfs_filebd_create -> %d", err);
return err;
}
int lfs_filebd_destroy(const struct lfs_config *cfg) {
LFS_FILEBD_TRACE("lfs_filebd_destroy(%p)", (void*)cfg);
lfs_filebd_t *bd = cfg->context;
@ -86,18 +70,17 @@ int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_filebd_t *bd = cfg->context;
// check if read is valid
LFS_ASSERT(off % cfg->read_size == 0);
LFS_ASSERT(size % cfg->read_size == 0);
LFS_ASSERT(block < cfg->block_count);
LFS_ASSERT(block < bd->cfg->erase_count);
LFS_ASSERT(off % bd->cfg->read_size == 0);
LFS_ASSERT(size % bd->cfg->read_size == 0);
LFS_ASSERT(off+size <= bd->cfg->erase_size);
// zero for reproducibility (in case file is truncated)
if (bd->cfg->erase_value != -1) {
memset(buffer, bd->cfg->erase_value, size);
}
memset(buffer, 0, size);
// read
off_t res1 = lseek(bd->fd,
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
(off_t)block*bd->cfg->erase_size + (off_t)off, SEEK_SET);
if (res1 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err);
@ -117,41 +100,20 @@ int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size) {
LFS_FILEBD_TRACE("lfs_filebd_prog(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
LFS_FILEBD_TRACE("lfs_filebd_prog(%p, "
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
(void*)cfg, block, off, buffer, size);
lfs_filebd_t *bd = cfg->context;
// check if write is valid
LFS_ASSERT(off % cfg->prog_size == 0);
LFS_ASSERT(size % cfg->prog_size == 0);
LFS_ASSERT(block < cfg->block_count);
// check that data was erased? only needed for testing
if (bd->cfg->erase_value != -1) {
off_t res1 = lseek(bd->fd,
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
if (res1 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
return err;
}
for (lfs_off_t i = 0; i < size; i++) {
uint8_t c;
ssize_t res2 = read(bd->fd, &c, 1);
if (res2 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
return err;
}
LFS_ASSERT(c == bd->cfg->erase_value);
}
}
LFS_ASSERT(block < bd->cfg->erase_count);
LFS_ASSERT(off % bd->cfg->prog_size == 0);
LFS_ASSERT(size % bd->cfg->prog_size == 0);
LFS_ASSERT(off+size <= bd->cfg->erase_size);
// program data
off_t res1 = lseek(bd->fd,
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
(off_t)block*bd->cfg->erase_size + (off_t)off, SEEK_SET);
if (res1 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
@ -170,30 +132,15 @@ int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
}
int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) {
LFS_FILEBD_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
LFS_FILEBD_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32" (%"PRIu32"))",
(void*)cfg, block, ((lfs_filebd_t*)cfg->context)->cfg->erase_size);
lfs_filebd_t *bd = cfg->context;
// check if erase is valid
LFS_ASSERT(block < cfg->block_count);
LFS_ASSERT(block < bd->cfg->erase_count);
// erase, only needed for testing
if (bd->cfg->erase_value != -1) {
off_t res1 = lseek(bd->fd, (off_t)block*cfg->block_size, SEEK_SET);
if (res1 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err);
return err;
}
for (lfs_off_t i = 0; i < cfg->block_size; i++) {
ssize_t res2 = write(bd->fd, &(uint8_t){bd->cfg->erase_value}, 1);
if (res2 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err);
return err;
}
}
}
// erase is a noop
(void)block;
LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", 0);
return 0;
@ -201,10 +148,11 @@ int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) {
int lfs_filebd_sync(const struct lfs_config *cfg) {
LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)cfg);
// file sync
lfs_filebd_t *bd = cfg->context;
#ifdef _WIN32
int err = FlushFileBuffers((HANDLE) _get_osfhandle(fd)) ? 0 : -1;
int err = FlushFileBuffers((HANDLE) _get_osfhandle(bd->fd)) ? 0 : -1;
#else
int err = fsync(bd->fd);
#endif


@ -18,18 +18,27 @@ extern "C"
// Block device specific tracing
#ifndef LFS_FILEBD_TRACE
#ifdef LFS_FILEBD_YES_TRACE
#define LFS_FILEBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
#else
#define LFS_FILEBD_TRACE(...)
#endif
#endif
// filebd config (optional)
// filebd config
struct lfs_filebd_config {
// 8-bit erase value to use for simulating erases. -1 does not simulate
// erases, which can speed up testing by avoiding all the extra block-device
// operations to store the erase value.
int32_t erase_value;
// Minimum size of a read operation in bytes.
lfs_size_t read_size;
// Minimum size of a program operation in bytes.
lfs_size_t prog_size;
// Size of an erase operation in bytes.
lfs_size_t erase_size;
// Number of erase blocks on the device.
lfs_size_t erase_count;
};
// filebd state
@ -39,9 +48,8 @@ typedef struct lfs_filebd {
} lfs_filebd_t;
// Create a file block device using the geometry in lfs_config
int lfs_filebd_create(const struct lfs_config *cfg, const char *path);
int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
// Create a file block device
int lfs_filebd_create(const struct lfs_config *cfg, const char *path,
const struct lfs_filebd_config *bdcfg);
// Clean up memory associated with block device


@ -7,18 +7,19 @@
*/
#include "bd/lfs_rambd.h"
int lfs_rambd_createcfg(const struct lfs_config *cfg,
int lfs_rambd_create(const struct lfs_config *cfg,
const struct lfs_rambd_config *bdcfg) {
LFS_RAMBD_TRACE("lfs_rambd_createcfg(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
"%p {.erase_value=%"PRId32", .buffer=%p})",
LFS_RAMBD_TRACE("lfs_rambd_create(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p}, "
"%p {.read_size=%"PRIu32", .prog_size=%"PRIu32", "
".erase_size=%"PRIu32", .erase_count=%"PRIu32", "
".buffer=%p})",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
(void*)bdcfg, bdcfg->erase_value, bdcfg->buffer);
(void*)bdcfg,
bdcfg->read_size, bdcfg->prog_size, bdcfg->erase_size,
bdcfg->erase_count, bdcfg->buffer);
lfs_rambd_t *bd = cfg->context;
bd->cfg = bdcfg;
@ -26,40 +27,20 @@ int lfs_rambd_createcfg(const struct lfs_config *cfg,
if (bd->cfg->buffer) {
bd->buffer = bd->cfg->buffer;
} else {
bd->buffer = lfs_malloc(cfg->block_size * cfg->block_count);
bd->buffer = lfs_malloc(bd->cfg->erase_size * bd->cfg->erase_count);
if (!bd->buffer) {
LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", LFS_ERR_NOMEM);
LFS_RAMBD_TRACE("lfs_rambd_create -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
}
// zero for reproducibility?
if (bd->cfg->erase_value != -1) {
memset(bd->buffer, bd->cfg->erase_value,
cfg->block_size * cfg->block_count);
} else {
memset(bd->buffer, 0, cfg->block_size * cfg->block_count);
}
// zero for reproducibility
memset(bd->buffer, 0, bd->cfg->erase_size * bd->cfg->erase_count);
LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", 0);
LFS_RAMBD_TRACE("lfs_rambd_create -> %d", 0);
return 0;
}
int lfs_rambd_create(const struct lfs_config *cfg) {
LFS_RAMBD_TRACE("lfs_rambd_create(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"})",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count);
static const struct lfs_rambd_config defaults = {.erase_value=-1};
int err = lfs_rambd_createcfg(cfg, &defaults);
LFS_RAMBD_TRACE("lfs_rambd_create -> %d", err);
return err;
}
int lfs_rambd_destroy(const struct lfs_config *cfg) {
LFS_RAMBD_TRACE("lfs_rambd_destroy(%p)", (void*)cfg);
// clean up memory
@ -79,12 +60,13 @@ int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_rambd_t *bd = cfg->context;
// check if read is valid
LFS_ASSERT(off % cfg->read_size == 0);
LFS_ASSERT(size % cfg->read_size == 0);
LFS_ASSERT(block < cfg->block_count);
LFS_ASSERT(block < bd->cfg->erase_count);
LFS_ASSERT(off % bd->cfg->read_size == 0);
LFS_ASSERT(size % bd->cfg->read_size == 0);
LFS_ASSERT(off+size <= bd->cfg->erase_size);
// read data
memcpy(buffer, &bd->buffer[block*cfg->block_size + off], size);
memcpy(buffer, &bd->buffer[block*bd->cfg->erase_size + off], size);
LFS_RAMBD_TRACE("lfs_rambd_read -> %d", 0);
return 0;
@ -98,37 +80,28 @@ int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_rambd_t *bd = cfg->context;
// check if write is valid
LFS_ASSERT(off % cfg->prog_size == 0);
LFS_ASSERT(size % cfg->prog_size == 0);
LFS_ASSERT(block < cfg->block_count);
// check that data was erased? only needed for testing
if (bd->cfg->erase_value != -1) {
for (lfs_off_t i = 0; i < size; i++) {
LFS_ASSERT(bd->buffer[block*cfg->block_size + off + i] ==
bd->cfg->erase_value);
}
}
LFS_ASSERT(block < bd->cfg->erase_count);
LFS_ASSERT(off % bd->cfg->prog_size == 0);
LFS_ASSERT(size % bd->cfg->prog_size == 0);
LFS_ASSERT(off+size <= bd->cfg->erase_size);
// program data
memcpy(&bd->buffer[block*cfg->block_size + off], buffer, size);
memcpy(&bd->buffer[block*bd->cfg->erase_size + off], buffer, size);
LFS_RAMBD_TRACE("lfs_rambd_prog -> %d", 0);
return 0;
}
int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block) {
LFS_RAMBD_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
LFS_RAMBD_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32" (%"PRIu32"))",
(void*)cfg, block, ((lfs_rambd_t*)cfg->context)->cfg->erase_size);
lfs_rambd_t *bd = cfg->context;
// check if erase is valid
LFS_ASSERT(block < cfg->block_count);
LFS_ASSERT(block < bd->cfg->erase_count);
// erase, only needed for testing
if (bd->cfg->erase_value != -1) {
memset(&bd->buffer[block*cfg->block_size],
bd->cfg->erase_value, cfg->block_size);
}
// erase is a noop
(void)block;
LFS_RAMBD_TRACE("lfs_rambd_erase -> %d", 0);
return 0;
@ -136,8 +109,10 @@ int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block) {
int lfs_rambd_sync(const struct lfs_config *cfg) {
LFS_RAMBD_TRACE("lfs_rambd_sync(%p)", (void*)cfg);
// sync does nothing because we aren't backed by anything real
// sync is a noop
(void)cfg;
LFS_RAMBD_TRACE("lfs_rambd_sync -> %d", 0);
return 0;
}

bd/lfs_rambd.h

@ -18,17 +18,27 @@ extern "C"
// Block device specific tracing
#ifndef LFS_RAMBD_TRACE
#ifdef LFS_RAMBD_YES_TRACE
#define LFS_RAMBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
#else
#define LFS_RAMBD_TRACE(...)
#endif
#endif
// rambd config (optional)
// rambd config
struct lfs_rambd_config {
// 8-bit erase value to simulate erasing with. -1 indicates no erase
// occurs, which is still a valid block device
int32_t erase_value;
// Minimum size of a read operation in bytes.
lfs_size_t read_size;
// Minimum size of a program operation in bytes.
lfs_size_t prog_size;
// Size of an erase operation in bytes.
lfs_size_t erase_size;
// Number of erase blocks on the device.
lfs_size_t erase_count;
// Optional statically allocated buffer for the block device.
void *buffer;
@ -41,9 +51,8 @@ typedef struct lfs_rambd {
} lfs_rambd_t;
// Create a RAM block device using the geometry in lfs_config
int lfs_rambd_create(const struct lfs_config *cfg);
int lfs_rambd_createcfg(const struct lfs_config *cfg,
// Create a RAM block device
int lfs_rambd_create(const struct lfs_config *cfg,
const struct lfs_rambd_config *bdcfg);
// Clean up memory associated with block device
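
The RAM block device follows the same pattern; this sketch uses the optional buffer field to avoid a call to lfs_malloc. Sizes are illustrative, and cfg is assumed to be an lfs_config wired to the lfs_rambd_* callbacks the same way the filebd example above wires the filebd callbacks:

    static uint8_t rambd_buffer[4096*256];
    lfs_rambd_t rambd;
    const struct lfs_rambd_config rambd_cfg = {
        .read_size   = 16,
        .prog_size   = 16,
        .erase_size  = 4096,
        .erase_count = 256,
        .buffer      = rambd_buffer,  // optional static buffer, skips lfs_malloc
    };

    int err = lfs_rambd_create(&cfg, &rambd_cfg);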

benches/bench_dir.toml (new file, 270 lines)

@ -0,0 +1,270 @@
[cases.bench_dir_open]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
defines.N = 1024
defines.FILE_SIZE = 8
defines.CHUNK_SIZE = 8
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// first create the files
char name[256];
uint8_t buffer[CHUNK_SIZE];
for (lfs_size_t i = 0; i < N; i++) {
sprintf(name, "file%08x", i);
lfs_file_t file;
lfs_file_open(&lfs, &file, name,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
uint32_t file_prng = i;
for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
buffer[k] = BENCH_PRNG(&file_prng);
}
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
}
lfs_file_close(&lfs, &file) => 0;
}
// then read the files
BENCH_START();
uint32_t prng = 42;
for (lfs_size_t i = 0; i < N; i++) {
lfs_off_t i_
= (ORDER == 0) ? i
: (ORDER == 1) ? (N-1-i)
: BENCH_PRNG(&prng) % N;
sprintf(name, "file%08x", i_);
lfs_file_t file;
lfs_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0;
uint32_t file_prng = i_;
for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
lfs_file_read(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
assert(buffer[k] == BENCH_PRNG(&file_prng));
}
}
lfs_file_close(&lfs, &file) => 0;
}
BENCH_STOP();
lfs_unmount(&lfs) => 0;
'''
[cases.bench_dir_creat]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
defines.N = 1024
defines.FILE_SIZE = 8
defines.CHUNK_SIZE = 8
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
BENCH_START();
uint32_t prng = 42;
char name[256];
uint8_t buffer[CHUNK_SIZE];
for (lfs_size_t i = 0; i < N; i++) {
lfs_off_t i_
= (ORDER == 0) ? i
: (ORDER == 1) ? (N-1-i)
: BENCH_PRNG(&prng) % N;
sprintf(name, "file%08x", i_);
lfs_file_t file;
lfs_file_open(&lfs, &file, name,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
uint32_t file_prng = i_;
for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
buffer[k] = BENCH_PRNG(&file_prng);
}
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
}
lfs_file_close(&lfs, &file) => 0;
}
BENCH_STOP();
lfs_unmount(&lfs) => 0;
'''
[cases.bench_dir_remove]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
defines.N = 1024
defines.FILE_SIZE = 8
defines.CHUNK_SIZE = 8
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// first create the files
char name[256];
uint8_t buffer[CHUNK_SIZE];
for (lfs_size_t i = 0; i < N; i++) {
sprintf(name, "file%08x", i);
lfs_file_t file;
lfs_file_open(&lfs, &file, name,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
uint32_t file_prng = i;
for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
buffer[k] = BENCH_PRNG(&file_prng);
}
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
}
lfs_file_close(&lfs, &file) => 0;
}
// then remove the files
BENCH_START();
uint32_t prng = 42;
for (lfs_size_t i = 0; i < N; i++) {
lfs_off_t i_
= (ORDER == 0) ? i
: (ORDER == 1) ? (N-1-i)
: BENCH_PRNG(&prng) % N;
sprintf(name, "file%08x", i_);
int err = lfs_remove(&lfs, name);
assert(!err || err == LFS_ERR_NOENT);
}
BENCH_STOP();
lfs_unmount(&lfs) => 0;
'''
[cases.bench_dir_read]
defines.N = 1024
defines.FILE_SIZE = 8
defines.CHUNK_SIZE = 8
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// first create the files
char name[256];
uint8_t buffer[CHUNK_SIZE];
for (lfs_size_t i = 0; i < N; i++) {
sprintf(name, "file%08x", i);
lfs_file_t file;
lfs_file_open(&lfs, &file, name,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
uint32_t file_prng = i;
for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
buffer[k] = BENCH_PRNG(&file_prng);
}
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
}
lfs_file_close(&lfs, &file) => 0;
}
// then read the directory
BENCH_START();
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(name, "file%08x", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
assert(strcmp(info.name, name) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
BENCH_STOP();
lfs_unmount(&lfs) => 0;
'''
[cases.bench_dir_mkdir]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
defines.N = 8
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
BENCH_START();
uint32_t prng = 42;
char name[256];
for (lfs_size_t i = 0; i < N; i++) {
lfs_off_t i_
= (ORDER == 0) ? i
: (ORDER == 1) ? (N-1-i)
: BENCH_PRNG(&prng) % N;
printf("hm %d\n", i);
sprintf(name, "dir%08x", i_);
int err = lfs_mkdir(&lfs, name);
assert(!err || err == LFS_ERR_EXIST);
}
BENCH_STOP();
lfs_unmount(&lfs) => 0;
'''
[cases.bench_dir_rmdir]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
defines.N = 8
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// first create the dirs
char name[256];
for (lfs_size_t i = 0; i < N; i++) {
sprintf(name, "dir%08x", i);
lfs_mkdir(&lfs, name) => 0;
}
// then remove the dirs
BENCH_START();
uint32_t prng = 42;
for (lfs_size_t i = 0; i < N; i++) {
lfs_off_t i_
= (ORDER == 0) ? i
: (ORDER == 1) ? (N-1-i)
: BENCH_PRNG(&prng) % N;
sprintf(name, "dir%08x", i_);
int err = lfs_remove(&lfs, name);
assert(!err || err == LFS_ERR_NOENT);
}
BENCH_STOP();
lfs_unmount(&lfs) => 0;
'''

benches/bench_file.toml (new file, 95 lines)

@ -0,0 +1,95 @@
[cases.bench_file_read]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
defines.SIZE = '128*1024'
defines.CHUNK_SIZE = 64
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_size_t chunks = (SIZE+CHUNK_SIZE-1)/CHUNK_SIZE;
// first write the file
lfs_file_t file;
uint8_t buffer[CHUNK_SIZE];
lfs_file_open(&lfs, &file, "file",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
for (lfs_size_t i = 0; i < chunks; i++) {
uint32_t chunk_prng = i;
for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) {
buffer[j] = BENCH_PRNG(&chunk_prng);
}
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
}
lfs_file_close(&lfs, &file) => 0;
// then read the file
BENCH_START();
lfs_file_open(&lfs, &file, "file", LFS_O_RDONLY) => 0;
uint32_t prng = 42;
for (lfs_size_t i = 0; i < chunks; i++) {
lfs_off_t i_
= (ORDER == 0) ? i
: (ORDER == 1) ? (chunks-1-i)
: BENCH_PRNG(&prng) % chunks;
lfs_file_seek(&lfs, &file, i_*CHUNK_SIZE, LFS_SEEK_SET)
=> i_*CHUNK_SIZE;
lfs_file_read(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
uint32_t chunk_prng = i_;
for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) {
assert(buffer[j] == BENCH_PRNG(&chunk_prng));
}
}
lfs_file_close(&lfs, &file) => 0;
BENCH_STOP();
lfs_unmount(&lfs) => 0;
'''
[cases.bench_file_write]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
defines.SIZE = '128*1024'
defines.CHUNK_SIZE = 64
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_size_t chunks = (SIZE+CHUNK_SIZE-1)/CHUNK_SIZE;
BENCH_START();
lfs_file_t file;
lfs_file_open(&lfs, &file, "file",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
uint8_t buffer[CHUNK_SIZE];
uint32_t prng = 42;
for (lfs_size_t i = 0; i < chunks; i++) {
lfs_off_t i_
= (ORDER == 0) ? i
: (ORDER == 1) ? (chunks-1-i)
: BENCH_PRNG(&prng) % chunks;
uint32_t chunk_prng = i_;
for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) {
buffer[j] = BENCH_PRNG(&chunk_prng);
}
lfs_file_seek(&lfs, &file, i_*CHUNK_SIZE, LFS_SEEK_SET)
=> i_*CHUNK_SIZE;
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
}
lfs_file_close(&lfs, &file) => 0;
BENCH_STOP();
lfs_unmount(&lfs) => 0;
'''

benches/bench_superblocks.toml (new file, 56 lines)

@ -0,0 +1,56 @@
[cases.bench_superblocks_found]
# support benchmarking with files
defines.N = [0, 1024]
defines.FILE_SIZE = 8
defines.CHUNK_SIZE = 8
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// create files?
lfs_mount(&lfs, cfg) => 0;
char name[256];
uint8_t buffer[CHUNK_SIZE];
for (lfs_size_t i = 0; i < N; i++) {
sprintf(name, "file%08x", i);
lfs_file_t file;
lfs_file_open(&lfs, &file, name,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
buffer[k] = i+j+k;
}
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
BENCH_START();
lfs_mount(&lfs, cfg) => 0;
BENCH_STOP();
lfs_unmount(&lfs) => 0;
'''
[cases.bench_superblocks_missing]
code = '''
lfs_t lfs;
BENCH_START();
int err = lfs_mount(&lfs, cfg);
assert(err != 0);
BENCH_STOP();
'''
[cases.bench_superblocks_format]
code = '''
lfs_t lfs;
BENCH_START();
lfs_format(&lfs, cfg) => 0;
BENCH_STOP();
'''

lfs.c (1020 lines changed, diff suppressed because it is too large)

lfs.h (82 lines changed)

@ -8,8 +8,6 @@
#ifndef LFS_H
#define LFS_H
#include <stdint.h>
#include <stdbool.h>
#include "lfs_util.h"
#ifdef __cplusplus
@ -23,14 +21,14 @@ extern "C"
// Software library version
// Major (top 16 bits), incremented on backwards incompatible changes
// Minor (bottom 16 bits), incremented on feature additions
#define LFS_VERSION 0x00020005
#define LFS_VERSION 0x00020008
#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
// Version of On-disk data structures
// Major (top 16 bits), incremented on backwards incompatible changes
// Minor (bottom 16 bits), incremented on feature additions
#define LFS_DISK_VERSION 0x00020000
#define LFS_DISK_VERSION 0x00020001
#define LFS_DISK_VERSION_MAJOR (0xffff & (LFS_DISK_VERSION >> 16))
#define LFS_DISK_VERSION_MINOR (0xffff & (LFS_DISK_VERSION >> 0))
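
For example, plugging the new values into the macros above:

    LFS_VERSION      = 0x00020008  ->  LFS_VERSION_MAJOR = 2,      LFS_VERSION_MINOR = 8       (v2.8)
    LFS_DISK_VERSION = 0x00020001  ->  LFS_DISK_VERSION_MAJOR = 2, LFS_DISK_VERSION_MINOR = 1  (lfs2.1)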
@ -114,6 +112,8 @@ enum lfs_type {
LFS_TYPE_SOFTTAIL = 0x600,
LFS_TYPE_HARDTAIL = 0x601,
LFS_TYPE_MOVESTATE = 0x7ff,
LFS_TYPE_CCRC = 0x500,
LFS_TYPE_FCRC = 0x5ff,
// internal chip sources
LFS_FROM_NOOP = 0x000,
@ -263,6 +263,14 @@ struct lfs_config {
// can help bound the metadata compaction time. Must be <= block_size.
// Defaults to block_size when zero.
lfs_size_t metadata_max;
#ifdef LFS_MULTIVERSION
// On-disk version to use when writing, in the form of a 16-bit major version
// + 16-bit minor version. This limits metadata to what is supported by older
// minor versions. Note that some features will be lost. Defaults to the most
// recent minor version when zero.
uint32_t disk_version;
#endif
};
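
A short sketch of the multiversion knob, assuming the library is built with -DLFS_MULTIVERSION and that lfs and a non-const cfg are otherwise set up as usual:

    #ifdef LFS_MULTIVERSION
    // pin the on-disk format to lfs2.0 so older drivers can still mount the
    // image, at the cost of lfs2.1 features such as forward CRCs
    cfg.disk_version = 0x00020000;
    #endif
    int err = lfs_format(&lfs, &cfg);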
// File info structure
@ -280,6 +288,27 @@ struct lfs_info {
char name[LFS_NAME_MAX+1];
};
// Filesystem info structure
struct lfs_fsinfo {
// On-disk version.
uint32_t disk_version;
// Size of a logical block in bytes.
lfs_size_t block_size;
// Number of logical blocks in filesystem.
lfs_size_t block_count;
// Upper limit on the length of file names in bytes.
lfs_size_t name_max;
// Upper limit on the size of files in bytes.
lfs_size_t file_max;
// Upper limit on the size of custom attributes in bytes.
lfs_size_t attr_max;
};
// Custom attribute structure, used to describe custom attributes
// committed atomically during file writes.
struct lfs_attr {
@ -410,6 +439,7 @@ typedef struct lfs {
} free;
const struct lfs_config *cfg;
lfs_size_t block_count;
lfs_size_t name_max;
lfs_size_t file_max;
lfs_size_t attr_max;
@ -534,8 +564,8 @@ int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
// are values from the enum lfs_open_flags that are bitwise-ored together.
//
// The config struct provides additional config options per file as described
// above. The config struct must be allocated while the file is open, and the
// config struct must be zeroed for defaults and backwards compatibility.
// above. The config struct must remain allocated while the file is open, and
// the config struct must be zeroed for defaults and backwards compatibility.
//
// Returns a negative error code on failure.
int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
@ -659,6 +689,12 @@ int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir);
/// Filesystem-level filesystem operations
// Find on-disk info about the filesystem
//
// Fills out the fsinfo structure based on the filesystem found on-disk.
// Returns a negative error code on failure.
int lfs_fs_stat(lfs_t *lfs, struct lfs_fsinfo *fsinfo);
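
A quick sketch of querying a mounted filesystem; lfs is assumed mounted and the printf formatting is only illustrative:

    struct lfs_fsinfo fsinfo;
    int err = lfs_fs_stat(&lfs, &fsinfo);
    if (err) {
        return err;
    }
    printf("disk version: lfs%"PRIu32".%"PRIu32"\n",
            0xffff & (fsinfo.disk_version >> 16),
            0xffff & (fsinfo.disk_version >> 0));
    printf("geometry: %"PRIu32" blocks x %"PRIu32" bytes\n",
            fsinfo.block_count, fsinfo.block_size);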
// Finds the current size of the filesystem
//
// Note: Result is best effort. If files share COW structures, the returned
@ -676,6 +712,40 @@ lfs_ssize_t lfs_fs_size(lfs_t *lfs);
// Returns a negative error code on failure.
int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
// Attempt to proactively find free blocks
//
// Calling this function is not required, but may allow the expensive block
// allocation scan to be offloaded to a less time-critical code path.
//
// Note: littlefs currently does not persist any found free blocks to disk.
// This may change in the future.
//
// Returns a negative error code on failure. Finding no free blocks is
// not an error.
int lfs_fs_gc(lfs_t *lfs);
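
For example, a sketch of calling this from an idle or background hook, with lfs assumed mounted:

    // we have spare cycles, so pay for the allocator's block scan now rather
    // than during the next time-critical write
    int err = lfs_fs_gc(&lfs);
    if (err) {
        return err;
    }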
#ifndef LFS_READONLY
// Attempt to make the filesystem consistent and ready for writing
//
// Calling this function is not required, consistency will be implicitly
// enforced on the first operation that writes to the filesystem, but this
// function allows the work to be performed earlier and without other
// filesystem changes.
//
// Returns a negative error code on failure.
int lfs_fs_mkconsistent(lfs_t *lfs);
#endif
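
Similarly, a sketch of forcing consistency right after mount instead of on the first write; lfs is assumed mounted read-write:

    // complete any pending metadata fixups (e.g. a half-finished move) now,
    // so the first real write doesn't have to
    int err = lfs_fs_mkconsistent(&lfs);
    if (err) {
        return err;
    }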
#ifndef LFS_READONLY
// Grows the filesystem to a new size, updating the superblock with the new
// block count.
//
// Note: This is irreversible.
//
// Returns a negative error code on failure.
int lfs_fs_grow(lfs_t *lfs, lfs_size_t block_count);
#endif
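
And a sketch of growing after the backing storage has been enlarged; block counts here are illustrative:

    // storage was expanded from 256 to 512 blocks, grow the filesystem to
    // match (note this cannot be undone)
    int err = lfs_fs_grow(&lfs, 512);
    if (err) {
        return err;
    }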
#ifndef LFS_READONLY
#ifdef LFS_MIGRATE
// Attempts to migrate a previous version of littlefs

lfs_util.h

@ -167,10 +167,9 @@ static inline int lfs_scmp(uint32_t a, uint32_t b) {
// Convert between 32-bit little-endian and native order
static inline uint32_t lfs_fromle32(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && ( \
(defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \
#if (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \
(defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \
(defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
(defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
return a;
#elif !defined(LFS_NO_INTRINSICS) && ( \
(defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
@ -196,10 +195,9 @@ static inline uint32_t lfs_frombe32(uint32_t a) {
(defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \
(defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
return __builtin_bswap32(a);
#elif !defined(LFS_NO_INTRINSICS) && ( \
(defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
#elif (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
(defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \
(defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
(defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
return a;
#else
return (((uint8_t*)&a)[0] << 24) |

runners/bench_runner.c (new file, 2055 lines, diff suppressed because it is too large)

runners/bench_runner.h (new file, 137 lines)

@ -0,0 +1,137 @@
/*
* Runner for littlefs benchmarks
*
* Copyright (c) 2022, The littlefs authors.
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef BENCH_RUNNER_H
#define BENCH_RUNNER_H
// override LFS_TRACE
void bench_trace(const char *fmt, ...);
#define LFS_TRACE_(fmt, ...) \
bench_trace("%s:%d:trace: " fmt "%s\n", \
__FILE__, \
__LINE__, \
__VA_ARGS__)
#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
#define LFS_EMUBD_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
// provide BENCH_START/BENCH_STOP macros
void bench_start(void);
void bench_stop(void);
#define BENCH_START() bench_start()
#define BENCH_STOP() bench_stop()
// note these are indirectly included in any generated files
#include "bd/lfs_emubd.h"
#include <stdio.h>
// give source a chance to define feature macros
#undef _FEATURES_H
#undef _STDIO_H
// generated bench configurations
struct lfs_config;
enum bench_flags {
BENCH_REENTRANT = 0x1,
};
typedef uint8_t bench_flags_t;
typedef struct bench_define {
intmax_t (*cb)(void *data);
void *data;
} bench_define_t;
struct bench_case {
const char *name;
const char *path;
bench_flags_t flags;
size_t permutations;
const bench_define_t *defines;
bool (*filter)(void);
void (*run)(struct lfs_config *cfg);
};
struct bench_suite {
const char *name;
const char *path;
bench_flags_t flags;
const char *const *define_names;
size_t define_count;
const struct bench_case *cases;
size_t case_count;
};
// deterministic prng for pseudo-randomness in benches
uint32_t bench_prng(uint32_t *state);
#define BENCH_PRNG(state) bench_prng(state)
// access generated bench defines
intmax_t bench_define(size_t define);
#define BENCH_DEFINE(i) bench_define(i)
// a few preconfigured defines that control how benches run
#define READ_SIZE_i 0
#define PROG_SIZE_i 1
#define ERASE_SIZE_i 2
#define ERASE_COUNT_i 3
#define BLOCK_SIZE_i 4
#define BLOCK_COUNT_i 5
#define CACHE_SIZE_i 6
#define LOOKAHEAD_SIZE_i 7
#define BLOCK_CYCLES_i 8
#define ERASE_VALUE_i 9
#define ERASE_CYCLES_i 10
#define BADBLOCK_BEHAVIOR_i 11
#define POWERLOSS_BEHAVIOR_i 12
#define READ_SIZE bench_define(READ_SIZE_i)
#define PROG_SIZE bench_define(PROG_SIZE_i)
#define ERASE_SIZE bench_define(ERASE_SIZE_i)
#define ERASE_COUNT bench_define(ERASE_COUNT_i)
#define BLOCK_SIZE bench_define(BLOCK_SIZE_i)
#define BLOCK_COUNT bench_define(BLOCK_COUNT_i)
#define CACHE_SIZE bench_define(CACHE_SIZE_i)
#define LOOKAHEAD_SIZE bench_define(LOOKAHEAD_SIZE_i)
#define BLOCK_CYCLES bench_define(BLOCK_CYCLES_i)
#define ERASE_VALUE bench_define(ERASE_VALUE_i)
#define ERASE_CYCLES bench_define(ERASE_CYCLES_i)
#define BADBLOCK_BEHAVIOR bench_define(BADBLOCK_BEHAVIOR_i)
#define POWERLOSS_BEHAVIOR bench_define(POWERLOSS_BEHAVIOR_i)
#define BENCH_IMPLICIT_DEFINES \
BENCH_DEF(READ_SIZE, PROG_SIZE) \
BENCH_DEF(PROG_SIZE, ERASE_SIZE) \
BENCH_DEF(ERASE_SIZE, 0) \
BENCH_DEF(ERASE_COUNT, (1024*1024)/BLOCK_SIZE) \
BENCH_DEF(BLOCK_SIZE, ERASE_SIZE) \
BENCH_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1))\
BENCH_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
BENCH_DEF(LOOKAHEAD_SIZE, 16) \
BENCH_DEF(BLOCK_CYCLES, -1) \
BENCH_DEF(ERASE_VALUE, 0xff) \
BENCH_DEF(ERASE_CYCLES, 0) \
BENCH_DEF(BADBLOCK_BEHAVIOR, LFS_EMUBD_BADBLOCK_PROGERROR) \
BENCH_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP)
#define BENCH_GEOMETRY_DEFINE_COUNT 4
#define BENCH_IMPLICIT_DEFINE_COUNT 13
#endif
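
As a worked example of these defaults: if a geometry permutation sets READ_SIZE=16, PROG_SIZE=16, and ERASE_SIZE=4096, the remaining implicit defines resolve to roughly:

    BLOCK_SIZE     = ERASE_SIZE                       = 4096
    ERASE_COUNT    = (1024*1024)/BLOCK_SIZE           = 256   (a 1 MiB device)
    BLOCK_COUNT    = ERASE_COUNT/lfs_max(4096/4096,1) = 256
    CACHE_SIZE     = lfs_max(64, lfs_max(16, 16))     = 64
    LOOKAHEAD_SIZE = 16, BLOCK_CYCLES = -1, ERASE_VALUE = 0xff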

runners/test_runner.c (new file, 2798 lines, diff suppressed because it is too large)

runners/test_runner.h (new file, 133 lines)

@ -0,0 +1,133 @@
/*
* Runner for littlefs tests
*
* Copyright (c) 2022, The littlefs authors.
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef TEST_RUNNER_H
#define TEST_RUNNER_H
// override LFS_TRACE
void test_trace(const char *fmt, ...);
#define LFS_TRACE_(fmt, ...) \
test_trace("%s:%d:trace: " fmt "%s\n", \
__FILE__, \
__LINE__, \
__VA_ARGS__)
#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
#define LFS_EMUBD_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
// note these are indirectly included in any generated files
#include "bd/lfs_emubd.h"
#include <stdio.h>
// give source a chance to define feature macros
#undef _FEATURES_H
#undef _STDIO_H
// generated test configurations
struct lfs_config;
enum test_flags {
TEST_REENTRANT = 0x1,
};
typedef uint8_t test_flags_t;
typedef struct test_define {
intmax_t (*cb)(void *data);
void *data;
} test_define_t;
struct test_case {
const char *name;
const char *path;
test_flags_t flags;
size_t permutations;
const test_define_t *defines;
bool (*filter)(void);
void (*run)(struct lfs_config *cfg);
};
struct test_suite {
const char *name;
const char *path;
test_flags_t flags;
const char *const *define_names;
size_t define_count;
const struct test_case *cases;
size_t case_count;
};
// deterministic prng for pseudo-randomness in tests
uint32_t test_prng(uint32_t *state);
#define TEST_PRNG(state) test_prng(state)
// access generated test defines
intmax_t test_define(size_t define);
#define TEST_DEFINE(i) test_define(i)
// a few preconfigured defines that control how tests run
#define READ_SIZE_i 0
#define PROG_SIZE_i 1
#define ERASE_SIZE_i 2
#define ERASE_COUNT_i 3
#define BLOCK_SIZE_i 4
#define BLOCK_COUNT_i 5
#define CACHE_SIZE_i 6
#define LOOKAHEAD_SIZE_i 7
#define BLOCK_CYCLES_i 8
#define ERASE_VALUE_i 9
#define ERASE_CYCLES_i 10
#define BADBLOCK_BEHAVIOR_i 11
#define POWERLOSS_BEHAVIOR_i 12
#define DISK_VERSION_i 13
#define READ_SIZE TEST_DEFINE(READ_SIZE_i)
#define PROG_SIZE TEST_DEFINE(PROG_SIZE_i)
#define ERASE_SIZE TEST_DEFINE(ERASE_SIZE_i)
#define ERASE_COUNT TEST_DEFINE(ERASE_COUNT_i)
#define BLOCK_SIZE TEST_DEFINE(BLOCK_SIZE_i)
#define BLOCK_COUNT TEST_DEFINE(BLOCK_COUNT_i)
#define CACHE_SIZE TEST_DEFINE(CACHE_SIZE_i)
#define LOOKAHEAD_SIZE TEST_DEFINE(LOOKAHEAD_SIZE_i)
#define BLOCK_CYCLES TEST_DEFINE(BLOCK_CYCLES_i)
#define ERASE_VALUE TEST_DEFINE(ERASE_VALUE_i)
#define ERASE_CYCLES TEST_DEFINE(ERASE_CYCLES_i)
#define BADBLOCK_BEHAVIOR TEST_DEFINE(BADBLOCK_BEHAVIOR_i)
#define POWERLOSS_BEHAVIOR TEST_DEFINE(POWERLOSS_BEHAVIOR_i)
#define DISK_VERSION TEST_DEFINE(DISK_VERSION_i)
#define TEST_IMPLICIT_DEFINES \
TEST_DEF(READ_SIZE, PROG_SIZE) \
TEST_DEF(PROG_SIZE, ERASE_SIZE) \
TEST_DEF(ERASE_SIZE, 0) \
TEST_DEF(ERASE_COUNT, (1024*1024)/ERASE_SIZE) \
TEST_DEF(BLOCK_SIZE, ERASE_SIZE) \
TEST_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1)) \
TEST_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
TEST_DEF(LOOKAHEAD_SIZE, 16) \
TEST_DEF(BLOCK_CYCLES, -1) \
TEST_DEF(ERASE_VALUE, 0xff) \
TEST_DEF(ERASE_CYCLES, 0) \
TEST_DEF(BADBLOCK_BEHAVIOR, LFS_EMUBD_BADBLOCK_PROGERROR) \
TEST_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP) \
TEST_DEF(DISK_VERSION, 0)
#define TEST_GEOMETRY_DEFINE_COUNT 4
#define TEST_IMPLICIT_DEFINE_COUNT 14
#endif

scripts/bench.py (new file, 1430 lines, diff suppressed because it is too large)

scripts/changeprefix.py (new file, 181 lines)

@ -0,0 +1,181 @@
#!/usr/bin/env python3
#
# Change prefixes in files/filenames. Useful for creating different versions
# of a codebase that don't conflict at compile time.
#
# Example:
# $ ./scripts/changeprefix.py lfs lfs3
#
# Copyright (c) 2022, The littlefs authors.
# Copyright (c) 2019, Arm Limited. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
import glob
import itertools
import os
import os.path
import re
import shlex
import shutil
import subprocess
import tempfile
GIT_PATH = ['git']
def openio(path, mode='r', buffering=-1):
# allow '-' for stdin/stdout
if path == '-':
if mode == 'r':
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
else:
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
else:
return open(path, mode, buffering)
def changeprefix(from_prefix, to_prefix, line):
line, count1 = re.subn(
'\\b'+from_prefix,
to_prefix,
line)
line, count2 = re.subn(
'\\b'+from_prefix.upper(),
to_prefix.upper(),
line)
line, count3 = re.subn(
'\\B-D'+from_prefix.upper(),
'-D'+to_prefix.upper(),
line)
return line, count1+count2+count3
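
For instance, running the example invocation above ('lfs' -> 'lfs3') would rewrite identifiers roughly like this (sample names only):

    lfs_file_open    ->  lfs3_file_open
    LFS_ERR_NOSPC    ->  LFS3_ERR_NOSPC
    -DLFS_YES_TRACE  ->  -DLFS3_YES_TRACE
    lfs_util.h       ->  lfs3_util.h      (filenames are also renamed unless -R)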
def changefile(from_prefix, to_prefix, from_path, to_path, *,
no_replacements=False):
# rename any prefixes in file
count = 0
# create a temporary file to avoid overwriting ourself
if from_path == to_path and to_path != '-':
to_path_temp = tempfile.NamedTemporaryFile('w', delete=False)
to_path = to_path_temp.name
else:
to_path_temp = None
with openio(from_path) as from_f:
with openio(to_path, 'w') as to_f:
for line in from_f:
if not no_replacements:
line, n = changeprefix(from_prefix, to_prefix, line)
count += n
to_f.write(line)
if from_path != '-' and to_path != '-':
shutil.copystat(from_path, to_path)
if to_path_temp:
os.rename(to_path, from_path)
elif from_path != '-':
os.remove(from_path)
# Summary
print('%s: %d replacements' % (
'%s -> %s' % (from_path, to_path) if not to_path_temp else from_path,
count))
def main(from_prefix, to_prefix, paths=[], *,
verbose=False,
output=None,
no_replacements=False,
no_renames=False,
git=False,
no_stage=False,
git_path=GIT_PATH):
if not paths:
if git:
cmd = git_path + ['ls-tree', '-r', '--name-only', 'HEAD']
if verbose:
print(' '.join(shlex.quote(c) for c in cmd))
paths = subprocess.check_output(cmd, encoding='utf8').split()
else:
print('no paths?', file=sys.stderr)
sys.exit(1)
for from_path in paths:
# rename filename?
if output:
to_path = output
elif no_renames:
to_path = from_path
else:
to_path = os.path.join(
os.path.dirname(from_path),
changeprefix(from_prefix, to_prefix,
os.path.basename(from_path))[0])
# rename contents
changefile(from_prefix, to_prefix, from_path, to_path,
no_replacements=no_replacements)
# stage?
if git and not no_stage:
if from_path != to_path:
cmd = git_path + ['rm', '-q', from_path]
if verbose:
print(' '.join(shlex.quote(c) for c in cmd))
subprocess.check_call(cmd)
cmd = git_path + ['add', to_path]
if verbose:
print(' '.join(shlex.quote(c) for c in cmd))
subprocess.check_call(cmd)
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Change prefixes in files/filenames. Useful for creating "
"different versions of a codebase that don't conflict at compile "
"time.",
allow_abbrev=False)
parser.add_argument(
'from_prefix',
help="Prefix to replace.")
parser.add_argument(
'to_prefix',
help="Prefix to replace with.")
parser.add_argument(
'paths',
nargs='*',
help="Files to operate on.")
parser.add_argument(
'-v', '--verbose',
action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument(
'-o', '--output',
help="Output file.")
parser.add_argument(
'-N', '--no-replacements',
action='store_true',
help="Don't change prefixes in files")
parser.add_argument(
'-R', '--no-renames',
action='store_true',
help="Don't rename files")
parser.add_argument(
'--git',
action='store_true',
help="Use git to find/update files.")
parser.add_argument(
'--no-stage',
action='store_true',
help="Don't stage changes with git.")
parser.add_argument(
'--git-path',
type=lambda x: x.split(),
default=GIT_PATH,
help="Path to git executable, may include flags. "
"Defaults to %r." % GIT_PATH)
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

scripts/code.py

@ -1,42 +1,188 @@
#!/usr/bin/env python3
#
# Script to find code size at the function level. Basically just a bit wrapper
# Script to find code size at the function level. Basically just a big wrapper
# around nm with some extra conveniences for comparing builds. Heavily inspired
# by Linux's Bloat-O-Meter.
#
# Example:
# ./scripts/code.py lfs.o lfs_util.o -Ssize
#
# Copyright (c) 2022, The littlefs authors.
# Copyright (c) 2020, Arm Limited. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import glob
import itertools as it
import subprocess as sp
import shlex
import re
import csv
import collections as co
import csv
import difflib
import itertools as it
import math as m
import os
import re
import shlex
import subprocess as sp
OBJ_PATHS = ['*.o']
NM_PATH = ['nm']
NM_TYPES = 'tTrRdD'
OBJDUMP_PATH = ['objdump']
def collect(paths, **args):
results = co.defaultdict(lambda: 0)
pattern = re.compile(
# integer fields
class Int(co.namedtuple('Int', 'x')):
__slots__ = ()
def __new__(cls, x=0):
if isinstance(x, Int):
return x
if isinstance(x, str):
try:
x = int(x, 0)
except ValueError:
# also accept +-∞ and +-inf
if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
x = m.inf
elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
x = -m.inf
else:
raise
assert isinstance(x, int) or m.isinf(x), x
return super().__new__(cls, x)
def __str__(self):
if self.x == m.inf:
return '∞'
elif self.x == -m.inf:
return '-∞'
else:
return str(self.x)
def __int__(self):
assert not m.isinf(self.x)
return self.x
def __float__(self):
return float(self.x)
none = '%7s' % '-'
def table(self):
return '%7s' % (self,)
diff_none = '%7s' % '-'
diff_table = table
def diff_diff(self, other):
new = self.x if self else 0
old = other.x if other else 0
diff = new - old
if diff == +m.inf:
return '%7s' % '+∞'
elif diff == -m.inf:
return '%7s' % '-∞'
else:
return '%+7d' % diff
def ratio(self, other):
new = self.x if self else 0
old = other.x if other else 0
if m.isinf(new) and m.isinf(old):
return 0.0
elif m.isinf(new):
return +m.inf
elif m.isinf(old):
return -m.inf
elif not old and not new:
return 0.0
elif not old:
return 1.0
else:
return (new-old) / old
def __add__(self, other):
return self.__class__(self.x + other.x)
def __sub__(self, other):
return self.__class__(self.x - other.x)
def __mul__(self, other):
return self.__class__(self.x * other.x)
# code size results
class CodeResult(co.namedtuple('CodeResult', [
'file', 'function',
'size'])):
_by = ['file', 'function']
_fields = ['size']
_sort = ['size']
_types = {'size': Int}
__slots__ = ()
def __new__(cls, file='', function='', size=0):
return super().__new__(cls, file, function,
Int(size))
def __add__(self, other):
return CodeResult(self.file, self.function,
self.size + other.size)
def openio(path, mode='r', buffering=-1):
# allow '-' for stdin/stdout
if path == '-':
if mode == 'r':
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
else:
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
else:
return open(path, mode, buffering)
def collect(obj_paths, *,
nm_path=NM_PATH,
nm_types=NM_TYPES,
objdump_path=OBJDUMP_PATH,
sources=None,
everything=False,
**args):
size_pattern = re.compile(
'^(?P<size>[0-9a-fA-F]+)' +
' (?P<type>[%s])' % re.escape(args['type']) +
' (?P<type>[%s])' % re.escape(nm_types) +
' (?P<func>.+?)$')
for path in paths:
# note nm-tool may contain extra args
cmd = args['nm_tool'] + ['--size-sort', path]
line_pattern = re.compile(
'^\s+(?P<no>[0-9]+)'
'(?:\s+(?P<dir>[0-9]+))?'
'\s+.*'
'\s+(?P<path>[^\s]+)$')
info_pattern = re.compile(
'^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
'|.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
'|.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*)$')
results = []
for path in obj_paths:
# guess the source, if we have debug-info we'll replace this later
file = re.sub('(\.o)?$', '.c', path, 1)
# find symbol sizes
results_ = []
# note nm-path may contain extra args
cmd = nm_path + ['--size-sort', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace')
errors='replace',
close_fds=False)
for line in proc.stdout:
m = pattern.match(line)
m = size_pattern.match(line)
if m:
results[(path, m.group('func'))] += int(m.group('size'), 16)
func = m.group('func')
# discard internal functions
if not everything and func.startswith('__'):
continue
results_.append(CodeResult(
file, func,
int(m.group('size'), 16)))
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
@ -44,241 +190,518 @@ def collect(paths, **args):
sys.stdout.write(line)
sys.exit(-1)
flat_results = []
for (file, func), size in results.items():
# map to source files
if args.get('build_dir'):
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
# replace .o with .c, different scripts report .o/.c, we need to
# choose one if we want to deduplicate csv files
file = re.sub('\.o$', '.c', file)
# discard internal functions
if not args.get('everything'):
if func.startswith('__'):
continue
# discard .8449 suffixes created by optimizer
func = re.sub('\.[0-9]+', '', func)
flat_results.append((file, func, size))
# try to figure out the source file if we have debug-info
dirs = {}
files = {}
# note objdump-path may contain extra args
cmd = objdump_path + ['--dwarf=rawline', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace',
close_fds=False)
for line in proc.stdout:
# note that files contain references to dirs, which we
# dereference as soon as we see them as each file table follows a
# dir table
m = line_pattern.match(line)
if m:
if not m.group('dir'):
# found a directory entry
dirs[int(m.group('no'))] = m.group('path')
else:
# found a file entry
dir = int(m.group('dir'))
if dir in dirs:
files[int(m.group('no'))] = os.path.join(
dirs[dir],
m.group('path'))
else:
files[int(m.group('no'))] = m.group('path')
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
# do nothing on error, we don't need objdump to work, source files
# may just be inaccurate
pass
return flat_results
defs = {}
is_func = False
f_name = None
f_file = None
# note objdump-path may contain extra args
cmd = objdump_path + ['--dwarf=info', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace',
close_fds=False)
for line in proc.stdout:
# state machine here to find definitions
m = info_pattern.match(line)
if m:
if m.group('tag'):
if is_func:
defs[f_name] = files.get(f_file, '?')
is_func = (m.group('tag') == 'DW_TAG_subprogram')
elif m.group('name'):
f_name = m.group('name')
elif m.group('file'):
f_file = int(m.group('file'))
if is_func:
defs[f_name] = files.get(f_file, '?')
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
# do nothing on error, we don't need objdump to work, source files
# may just be inaccurate
pass
def main(**args):
def openio(path, mode='r'):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
for r in results_:
# find best matching debug symbol, this may be slightly different
# due to optimizations
if defs:
# exact match? avoid difflib if we can for speed
if r.function in defs:
file = defs[r.function]
else:
_, file = max(
defs.items(),
key=lambda d: difflib.SequenceMatcher(None,
d[0],
r.function, False).ratio())
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
else:
return open(path, mode)
file = r.file
# find sizes
if not args.get('use', None):
# find .o files
paths = []
for path in args['obj_paths']:
if os.path.isdir(path):
path = path + '/*.o'
# ignore filtered sources
if sources is not None:
if not any(
os.path.abspath(file) == os.path.abspath(s)
for s in sources):
continue
else:
# default to only cwd
if not everything and not os.path.commonpath([
os.getcwd(),
os.path.abspath(file)]) == os.getcwd():
continue
for path in glob.glob(path):
paths.append(path)
# simplify path
if os.path.commonpath([
os.getcwd(),
os.path.abspath(file)]) == os.getcwd():
file = os.path.relpath(file)
else:
file = os.path.abspath(file)
if not paths:
print('no .obj files found in %r?' % args['obj_paths'])
results.append(r._replace(file=file))
return results
def fold(Result, results, *,
by=None,
defines=None,
**_):
if by is None:
by = Result._by
for k in it.chain(by or [], (k for k, _ in defines or [])):
if k not in Result._by and k not in Result._fields:
print("error: could not find field %r?" % k)
sys.exit(-1)
results = collect(paths, **args)
# filter by matching defines
if defines is not None:
results_ = []
for r in results:
if all(getattr(r, k) in vs for k, vs in defines):
results_.append(r)
results = results_
# organize results into conflicts
folding = co.OrderedDict()
for r in results:
name = tuple(getattr(r, k) for k in by)
if name not in folding:
folding[name] = []
folding[name].append(r)
# merge conflicts
folded = []
for name, rs in folding.items():
folded.append(sum(rs[1:], start=rs[0]))
return folded
def table(Result, results, diff_results=None, *,
by=None,
fields=None,
sort=None,
summary=False,
all=False,
percent=False,
**_):
all_, all = all, __builtins__.all
if by is None:
by = Result._by
if fields is None:
fields = Result._fields
types = Result._types
# fold again
results = fold(Result, results, by=by)
if diff_results is not None:
diff_results = fold(Result, diff_results, by=by)
# organize by name
table = {
','.join(str(getattr(r, k) or '') for k in by): r
for r in results}
diff_table = {
','.join(str(getattr(r, k) or '') for k in by): r
for r in diff_results or []}
names = list(table.keys() | diff_table.keys())
# sort again, now with diff info, note that python's sort is stable
names.sort()
if diff_results is not None:
names.sort(key=lambda n: tuple(
types[k].ratio(
getattr(table.get(n), k, None),
getattr(diff_table.get(n), k, None))
for k in fields),
reverse=True)
if sort:
for k, reverse in reversed(sort):
names.sort(
key=lambda n: tuple(
(getattr(table[n], k),)
if getattr(table.get(n), k, None) is not None else ()
for k in ([k] if k else [
k for k in Result._sort if k in fields])),
reverse=reverse ^ (not k or k in Result._fields))
# build up our lines
lines = []
# header
header = []
header.append('%s%s' % (
','.join(by),
' (%d added, %d removed)' % (
sum(1 for n in table if n not in diff_table),
sum(1 for n in diff_table if n not in table))
if diff_results is not None and not percent else '')
if not summary else '')
if diff_results is None:
for k in fields:
header.append(k)
elif percent:
for k in fields:
header.append(k)
else:
for k in fields:
header.append('o'+k)
for k in fields:
header.append('n'+k)
for k in fields:
header.append('d'+k)
header.append('')
lines.append(header)
def table_entry(name, r, diff_r=None, ratios=[]):
entry = []
entry.append(name)
if diff_results is None:
for k in fields:
entry.append(getattr(r, k).table()
if getattr(r, k, None) is not None
else types[k].none)
elif percent:
for k in fields:
entry.append(getattr(r, k).diff_table()
if getattr(r, k, None) is not None
else types[k].diff_none)
else:
for k in fields:
entry.append(getattr(diff_r, k).diff_table()
if getattr(diff_r, k, None) is not None
else types[k].diff_none)
for k in fields:
entry.append(getattr(r, k).diff_table()
if getattr(r, k, None) is not None
else types[k].diff_none)
for k in fields:
entry.append(types[k].diff_diff(
getattr(r, k, None),
getattr(diff_r, k, None)))
if diff_results is None:
entry.append('')
elif percent:
entry.append(' (%s)' % ', '.join(
'+∞%' if t == +m.inf
else '-∞%' if t == -m.inf
else '%+.1f%%' % (100*t)
for t in ratios))
else:
entry.append(' (%s)' % ', '.join(
'+∞%' if t == +m.inf
else '-∞%' if t == -m.inf
else '%+.1f%%' % (100*t)
for t in ratios
if t)
if any(ratios) else '')
return entry
# entries
if not summary:
for name in names:
r = table.get(name)
if diff_results is None:
diff_r = None
ratios = None
else:
diff_r = diff_table.get(name)
ratios = [
types[k].ratio(
getattr(r, k, None),
getattr(diff_r, k, None))
for k in fields]
if not all_ and not any(ratios):
continue
lines.append(table_entry(name, r, diff_r, ratios))
# total
r = next(iter(fold(Result, results, by=[])), None)
if diff_results is None:
diff_r = None
ratios = None
else:
diff_r = next(iter(fold(Result, diff_results, by=[])), None)
ratios = [
types[k].ratio(
getattr(r, k, None),
getattr(diff_r, k, None))
for k in fields]
lines.append(table_entry('TOTAL', r, diff_r, ratios))
# find the best widths, note that column 0 contains the names and column -1
# the ratios, so those are handled a bit differently
widths = [
((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
for w, i in zip(
it.chain([23], it.repeat(7)),
range(len(lines[0])-1))]
# print our table
for line in lines:
print('%-*s %s%s' % (
widths[0], line[0],
' '.join('%*s' % (w, x)
for w, x in zip(widths[1:], line[1:-1])),
line[-1]))
def main(obj_paths, *,
by=None,
fields=None,
defines=None,
sort=None,
**args):
# find sizes
if not args.get('use', None):
results = collect(obj_paths, **args)
else:
results = []
with openio(args['use']) as f:
r = csv.DictReader(f)
results = [
( result['file'],
result['name'],
int(result['code_size']))
for result in r
if result.get('code_size') not in {None, ''}]
reader = csv.DictReader(f, restval='')
for r in reader:
if not any('code_'+k in r and r['code_'+k].strip()
for k in CodeResult._fields):
continue
try:
results.append(CodeResult(
**{k: r[k] for k in CodeResult._by
if k in r and r[k].strip()},
**{k: r['code_'+k] for k in CodeResult._fields
if 'code_'+k in r and r['code_'+k].strip()}))
except TypeError:
pass
total = 0
for _, _, size in results:
total += size
# fold
results = fold(CodeResult, results, by=by, defines=defines)
# find previous results?
if args.get('diff'):
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
prev_results = [
( result['file'],
result['name'],
int(result['code_size']))
for result in r
if result.get('code_size') not in {None, ''}]
except FileNotFoundError:
prev_results = []
prev_total = 0
for _, _, size in prev_results:
prev_total += size
# sort, note that python's sort is stable
results.sort()
if sort:
for k, reverse in reversed(sort):
results.sort(
key=lambda r: tuple(
(getattr(r, k),) if getattr(r, k) is not None else ()
for k in ([k] if k else CodeResult._sort)),
reverse=reverse ^ (not k or k in CodeResult._fields))
# write results to CSV
if args.get('output'):
merged_results = co.defaultdict(lambda: {})
other_fields = []
# merge?
if args.get('merge'):
try:
with openio(args['merge']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
func = result.pop('name', '')
result.pop('code_size', None)
merged_results[(file, func)] = result
other_fields = result.keys()
except FileNotFoundError:
pass
for file, func, size in results:
merged_results[(file, func)]['code_size'] = size
with openio(args['output'], 'w') as f:
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size'])
w.writeheader()
for (file, func), result in sorted(merged_results.items()):
w.writerow({'file': file, 'name': func, **result})
writer = csv.DictWriter(f,
(by if by is not None else CodeResult._by)
+ ['code_'+k for k in (
fields if fields is not None else CodeResult._fields)])
writer.writeheader()
for r in results:
writer.writerow(
{k: getattr(r, k) for k in (
by if by is not None else CodeResult._by)}
| {'code_'+k: getattr(r, k) for k in (
fields if fields is not None else CodeResult._fields)})
# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: 0)
for file, func, size in results:
entry = (file if by == 'file' else func)
entries[entry] += size
return entries
# find previous results?
if args.get('diff'):
diff_results = []
try:
with openio(args['diff']) as f:
reader = csv.DictReader(f, restval='')
for r in reader:
if not any('code_'+k in r and r['code_'+k].strip()
for k in CodeResult._fields):
continue
try:
diff_results.append(CodeResult(
**{k: r[k] for k in CodeResult._by
if k in r and r[k].strip()},
**{k: r['code_'+k] for k in CodeResult._fields
if 'code_'+k in r and r['code_'+k].strip()}))
except TypeError:
pass
except FileNotFoundError:
pass
def diff_entries(olds, news):
diff = co.defaultdict(lambda: (0, 0, 0, 0))
for name, new in news.items():
diff[name] = (0, new, new, 1.0)
for name, old in olds.items():
_, new, _, _ = diff[name]
diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
return diff
# fold
diff_results = fold(CodeResult, diff_results, by=by, defines=defines)
def sorted_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1], x))
else:
return sorted(entries)
# print table
if not args.get('quiet'):
table(CodeResult, results,
diff_results if args.get('diff') else None,
by=by if by is not None else ['function'],
fields=fields,
sort=sort,
**args)
def sorted_diff_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1][1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1][1], x))
else:
return sorted(entries, key=lambda x: (-x[1][3], x))
def print_header(by=''):
if not args.get('diff'):
print('%-36s %7s' % (by, 'size'))
else:
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
def print_entry(name, size):
print("%-36s %7d" % (name, size))
def print_diff_entry(name, old, new, diff, ratio):
print("%-36s %7s %7s %+7d%s" % (name,
old or "-",
new or "-",
diff,
' (%+.1f%%)' % (100*ratio) if ratio else ''))
def print_entries(by='name'):
entries = dedup_entries(results, by=by)
if not args.get('diff'):
print_header(by=by)
for name, size in sorted_entries(entries.items()):
print_entry(name, size)
else:
prev_entries = dedup_entries(prev_results, by=by)
diff = diff_entries(prev_entries, entries)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for old, _, _, _ in diff.values() if not old),
sum(1 for _, new, _, _ in diff.values() if not new)))
for name, (old, new, diff, ratio) in sorted_diff_entries(
diff.items()):
if ratio or args.get('all'):
print_diff_entry(name, old, new, diff, ratio)
def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total)
else:
ratio = (0.0 if not prev_total and not total
else 1.0 if not prev_total
else (total-prev_total)/prev_total)
print_diff_entry('TOTAL',
prev_total, total,
total-prev_total,
ratio)
if args.get('quiet'):
pass
elif args.get('summary'):
print_header()
print_totals()
elif args.get('files'):
print_entries(by='file')
print_totals()
else:
print_entries(by='name')
print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Find code size at the function level.")
parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
help="Description of where to find *.o files. May be a directory \
or a list of paths. Defaults to %r." % OBJ_PATHS)
parser.add_argument('-v', '--verbose', action='store_true',
description="Find code size at the function level.",
allow_abbrev=False)
parser.add_argument(
'obj_paths',
nargs='*',
help="Input *.o files.")
parser.add_argument(
'-v', '--verbose',
action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument('-q', '--quiet', action='store_true',
parser.add_argument(
'-q', '--quiet',
action='store_true',
help="Don't show anything, useful with -o.")
parser.add_argument('-o', '--output',
parser.add_argument(
'-o', '--output',
help="Specify CSV file to store results.")
parser.add_argument('-u', '--use',
help="Don't compile and find code sizes, instead use this CSV file.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff code size against.")
parser.add_argument('-m', '--merge',
help="Merge with an existing CSV file when writing to output.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all functions, not just the ones that changed.")
parser.add_argument('-A', '--everything', action='store_true',
parser.add_argument(
'-u', '--use',
help="Don't parse anything, use this CSV file.")
parser.add_argument(
'-d', '--diff',
help="Specify CSV file to diff against.")
parser.add_argument(
'-a', '--all',
action='store_true',
help="Show all, not just the ones that changed.")
parser.add_argument(
'-p', '--percent',
action='store_true',
help="Only show percentage change, not a full diff.")
parser.add_argument(
'-b', '--by',
action='append',
choices=CodeResult._by,
help="Group by this field.")
parser.add_argument(
'-f', '--field',
dest='fields',
action='append',
choices=CodeResult._fields,
help="Show this field.")
parser.add_argument(
'-D', '--define',
dest='defines',
action='append',
type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
help="Only include results where this field is this value.")
class AppendSort(argparse.Action):
def __call__(self, parser, namespace, value, option):
if namespace.sort is None:
namespace.sort = []
namespace.sort.append((value, True if option == '-S' else False))
parser.add_argument(
'-s', '--sort',
nargs='?',
action=AppendSort,
help="Sort by this field.")
parser.add_argument(
'-S', '--reverse-sort',
nargs='?',
action=AppendSort,
help="Sort by this field, but backwards.")
parser.add_argument(
'-Y', '--summary',
action='store_true',
help="Only show the total.")
parser.add_argument(
'-F', '--source',
dest='sources',
action='append',
help="Only consider definitions in this file. Defaults to anything "
"in the current directory.")
parser.add_argument(
'--everything',
action='store_true',
help="Include builtin and libc specific symbols.")
parser.add_argument('-s', '--size-sort', action='store_true',
help="Sort by size.")
parser.add_argument('-S', '--reverse-size-sort', action='store_true',
help="Sort by size, but backwards.")
parser.add_argument('-F', '--files', action='store_true',
help="Show file-level code sizes. Note this does not include padding! "
"So sizes may differ from other tools.")
parser.add_argument('-Y', '--summary', action='store_true',
help="Only show the total code size.")
parser.add_argument('--type', default='tTrRdD',
parser.add_argument(
'--nm-types',
default=NM_TYPES,
help="Type of symbols to report, this uses the same single-character "
"type-names emitted by nm. Defaults to %(default)r.")
parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
help="Path to the nm tool to use.")
parser.add_argument('--build-dir',
help="Specify the relative build directory. Used to map object files \
to the correct source files.")
sys.exit(main(**vars(parser.parse_args())))
"type-names emitted by nm. Defaults to %r." % NM_TYPES)
parser.add_argument(
'--nm-path',
type=lambda x: x.split(),
default=NM_PATH,
help="Path to the nm executable, may include flags. "
"Defaults to %r." % NM_PATH)
parser.add_argument(
'--objdump-path',
type=lambda x: x.split(),
default=OBJDUMP_PATH,
help="Path to the objdump executable, may include flags. "
"Defaults to %r." % OBJDUMP_PATH)
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

scripts/cov.py (new file, 828 lines)

@ -0,0 +1,828 @@
#!/usr/bin/env python3
#
# Script to find coverage info after running tests.
#
# Example:
# ./scripts/cov.py \
# lfs.t.a.gcda lfs_util.t.a.gcda \
# -Flfs.c -Flfs_util.c -slines
#
# Copyright (c) 2022, The littlefs authors.
# Copyright (c) 2020, Arm Limited. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
import collections as co
import csv
import itertools as it
import json
import math as m
import os
import re
import shlex
import subprocess as sp
# TODO use explode_asserts to avoid counting assert branches?
# TODO use dwarf=info to find functions for inline functions?
GCOV_PATH = ['gcov']
# integer fields
class Int(co.namedtuple('Int', 'x')):
__slots__ = ()
def __new__(cls, x=0):
if isinstance(x, Int):
return x
if isinstance(x, str):
try:
x = int(x, 0)
except ValueError:
# also accept +-∞ and +-inf
if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
x = m.inf
elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
x = -m.inf
else:
raise
assert isinstance(x, int) or m.isinf(x), x
return super().__new__(cls, x)
def __str__(self):
if self.x == m.inf:
return '∞'
elif self.x == -m.inf:
return '-∞'
else:
return str(self.x)
def __int__(self):
assert not m.isinf(self.x)
return self.x
def __float__(self):
return float(self.x)
none = '%7s' % '-'
def table(self):
return '%7s' % (self,)
diff_none = '%7s' % '-'
diff_table = table
def diff_diff(self, other):
new = self.x if self else 0
old = other.x if other else 0
diff = new - old
if diff == +m.inf:
return '%7s' % '+∞'
elif diff == -m.inf:
return '%7s' % '-∞'
else:
return '%+7d' % diff
def ratio(self, other):
new = self.x if self else 0
old = other.x if other else 0
if m.isinf(new) and m.isinf(old):
return 0.0
elif m.isinf(new):
return +m.inf
elif m.isinf(old):
return -m.inf
elif not old and not new:
return 0.0
elif not old:
return 1.0
else:
return (new-old) / old
def __add__(self, other):
return self.__class__(self.x + other.x)
def __sub__(self, other):
return self.__class__(self.x - other.x)
def __mul__(self, other):
return self.__class__(self.x * other.x)
# fractional fields, a/b
class Frac(co.namedtuple('Frac', 'a,b')):
__slots__ = ()
def __new__(cls, a=0, b=None):
if isinstance(a, Frac) and b is None:
return a
if isinstance(a, str) and b is None:
a, b = a.split('/', 1)
if b is None:
b = a
return super().__new__(cls, Int(a), Int(b))
def __str__(self):
return '%s/%s' % (self.a, self.b)
def __float__(self):
return float(self.a)
none = '%11s %7s' % ('-', '-')
def table(self):
t = self.a.x/self.b.x if self.b.x else 1.0
return '%11s %7s' % (
self,
'+∞%' if t == +m.inf
else '-∞%' if t == -m.inf
else '%.1f%%' % (100*t))
diff_none = '%11s' % '-'
def diff_table(self):
return '%11s' % (self,)
def diff_diff(self, other):
new_a, new_b = self if self else (Int(0), Int(0))
old_a, old_b = other if other else (Int(0), Int(0))
return '%11s' % ('%s/%s' % (
new_a.diff_diff(old_a).strip(),
new_b.diff_diff(old_b).strip()))
def ratio(self, other):
new_a, new_b = self if self else (Int(0), Int(0))
old_a, old_b = other if other else (Int(0), Int(0))
new = new_a.x/new_b.x if new_b.x else 1.0
old = old_a.x/old_b.x if old_b.x else 1.0
return new - old
def __add__(self, other):
return self.__class__(self.a + other.a, self.b + other.b)
def __sub__(self, other):
return self.__class__(self.a - other.a, self.b - other.b)
def __mul__(self, other):
return self.__class__(self.a * other.a, self.b + other.b)
def __lt__(self, other):
self_t = self.a.x/self.b.x if self.b.x else 1.0
other_t = other.a.x/other.b.x if other.b.x else 1.0
return (self_t, self.a.x) < (other_t, other.a.x)
def __gt__(self, other):
return self.__class__.__lt__(other, self)
def __le__(self, other):
return not self.__gt__(other)
def __ge__(self, other):
return not self.__lt__(other)
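# As a sketch of how Frac renders: Frac(7, 8) prints as "7/8" and its table()
# cell as roughly "7/8   87.5%", which is what the lines/branches columns show.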
# coverage results
class CovResult(co.namedtuple('CovResult', [
'file', 'function', 'line',
'calls', 'hits', 'funcs', 'lines', 'branches'])):
_by = ['file', 'function', 'line']
_fields = ['calls', 'hits', 'funcs', 'lines', 'branches']
_sort = ['funcs', 'lines', 'branches', 'hits', 'calls']
_types = {
'calls': Int, 'hits': Int,
'funcs': Frac, 'lines': Frac, 'branches': Frac}
__slots__ = ()
def __new__(cls, file='', function='', line=0,
calls=0, hits=0, funcs=0, lines=0, branches=0):
return super().__new__(cls, file, function, int(Int(line)),
Int(calls), Int(hits), Frac(funcs), Frac(lines), Frac(branches))
def __add__(self, other):
return CovResult(self.file, self.function, self.line,
max(self.calls, other.calls),
max(self.hits, other.hits),
self.funcs + other.funcs,
self.lines + other.lines,
self.branches + other.branches)
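# Note how __add__ merges duplicate results: calls/hits keep the max seen,
# while funcs/lines/branches accumulate as fractions. For example (sketch):
#   CovResult('lfs.c', 'lfs_mount', 123, hits=2, lines=Frac(1, 1))
#     + CovResult('lfs.c', 'lfs_mount', 123, hits=5, branches=Frac(1, 2))
#   -> hits=5, lines=1/1, branches=1/2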
def openio(path, mode='r', buffering=-1):
# allow '-' for stdin/stdout
if path == '-':
if mode == 'r':
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
else:
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
else:
return open(path, mode, buffering)
def collect(gcda_paths, *,
gcov_path=GCOV_PATH,
sources=None,
everything=False,
**args):
results = []
for path in gcda_paths:
# get coverage info through gcov's json output
# note, gcov-path may contain extra args
cmd = gcov_path + ['-b', '-t', '--json-format', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace',
close_fds=False)
data = json.load(proc.stdout)
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
sys.exit(-1)
# collect line/branch coverage
for file in data['files']:
# ignore filtered sources
if sources is not None:
if not any(
os.path.abspath(file['file']) == os.path.abspath(s)
for s in sources):
continue
else:
# default to only cwd
if not everything and not os.path.commonpath([
os.getcwd(),
os.path.abspath(file['file'])]) == os.getcwd():
continue
# simplify path
if os.path.commonpath([
os.getcwd(),
os.path.abspath(file['file'])]) == os.getcwd():
file_name = os.path.relpath(file['file'])
else:
file_name = os.path.abspath(file['file'])
for func in file['functions']:
func_name = func.get('name', '(inlined)')
# discard internal functions (this includes injected test cases)
if not everything:
if func_name.startswith('__'):
continue
# go ahead and add functions, later folding will merge this if
# there are other hits on this line
results.append(CovResult(
file_name, func_name, func['start_line'],
func['execution_count'], 0,
Frac(1 if func['execution_count'] > 0 else 0, 1),
0,
0))
for line in file['lines']:
func_name = line.get('function_name', '(inlined)')
# discard internal functions (this includes injected test cases)
if not everything:
if func_name.startswith('__'):
continue
# go ahead and add lines, later folding will merge this if
# there are other hits on this line
results.append(CovResult(
file_name, func_name, line['line_number'],
0, line['count'],
0,
Frac(1 if line['count'] > 0 else 0, 1),
Frac(
sum(1 if branch['count'] > 0 else 0
for branch in line['branches']),
len(line['branches']))))
return results
def fold(Result, results, *,
by=None,
defines=None,
**_):
if by is None:
by = Result._by
for k in it.chain(by or [], (k for k, _ in defines or [])):
if k not in Result._by and k not in Result._fields:
print("error: could not find field %r?" % k)
sys.exit(-1)
# filter by matching defines
if defines is not None:
results_ = []
for r in results:
if all(getattr(r, k) in vs for k, vs in defines):
results_.append(r)
results = results_
# organize results into conflicts
folding = co.OrderedDict()
for r in results:
name = tuple(getattr(r, k) for k in by)
if name not in folding:
folding[name] = []
folding[name].append(r)
# merge conflicts
folded = []
for name, rs in folding.items():
folded.append(sum(rs[1:], start=rs[0]))
return folded
def table(Result, results, diff_results=None, *,
by=None,
fields=None,
sort=None,
summary=False,
all=False,
percent=False,
**_):
all_, all = all, __builtins__.all
if by is None:
by = Result._by
if fields is None:
fields = Result._fields
types = Result._types
# fold again
results = fold(Result, results, by=by)
if diff_results is not None:
diff_results = fold(Result, diff_results, by=by)
# organize by name
table = {
','.join(str(getattr(r, k) or '') for k in by): r
for r in results}
diff_table = {
','.join(str(getattr(r, k) or '') for k in by): r
for r in diff_results or []}
names = list(table.keys() | diff_table.keys())
# sort again, now with diff info, note that python's sort is stable
names.sort()
if diff_results is not None:
names.sort(key=lambda n: tuple(
types[k].ratio(
getattr(table.get(n), k, None),
getattr(diff_table.get(n), k, None))
for k in fields),
reverse=True)
if sort:
for k, reverse in reversed(sort):
names.sort(
key=lambda n: tuple(
(getattr(table[n], k),)
if getattr(table.get(n), k, None) is not None else ()
for k in ([k] if k else [
k for k in Result._sort if k in fields])),
reverse=reverse ^ (not k or k in Result._fields))
# build up our lines
lines = []
# header
header = []
header.append('%s%s' % (
','.join(by),
' (%d added, %d removed)' % (
sum(1 for n in table if n not in diff_table),
sum(1 for n in diff_table if n not in table))
if diff_results is not None and not percent else '')
if not summary else '')
if diff_results is None:
for k in fields:
header.append(k)
elif percent:
for k in fields:
header.append(k)
else:
for k in fields:
header.append('o'+k)
for k in fields:
header.append('n'+k)
for k in fields:
header.append('d'+k)
header.append('')
lines.append(header)
def table_entry(name, r, diff_r=None, ratios=[]):
entry = []
entry.append(name)
if diff_results is None:
for k in fields:
entry.append(getattr(r, k).table()
if getattr(r, k, None) is not None
else types[k].none)
elif percent:
for k in fields:
entry.append(getattr(r, k).diff_table()
if getattr(r, k, None) is not None
else types[k].diff_none)
else:
for k in fields:
entry.append(getattr(diff_r, k).diff_table()
if getattr(diff_r, k, None) is not None
else types[k].diff_none)
for k in fields:
entry.append(getattr(r, k).diff_table()
if getattr(r, k, None) is not None
else types[k].diff_none)
for k in fields:
entry.append(types[k].diff_diff(
getattr(r, k, None),
getattr(diff_r, k, None)))
if diff_results is None:
entry.append('')
elif percent:
entry.append(' (%s)' % ', '.join(
'+∞%' if t == +m.inf
else '-∞%' if t == -m.inf
else '%+.1f%%' % (100*t)
for t in ratios))
else:
entry.append(' (%s)' % ', '.join(
'+∞%' if t == +m.inf
else '-∞%' if t == -m.inf
else '%+.1f%%' % (100*t)
for t in ratios
if t)
if any(ratios) else '')
return entry
# entries
if not summary:
for name in names:
r = table.get(name)
if diff_results is None:
diff_r = None
ratios = None
else:
diff_r = diff_table.get(name)
ratios = [
types[k].ratio(
getattr(r, k, None),
getattr(diff_r, k, None))
for k in fields]
if not all_ and not any(ratios):
continue
lines.append(table_entry(name, r, diff_r, ratios))
# total
r = next(iter(fold(Result, results, by=[])), None)
if diff_results is None:
diff_r = None
ratios = None
else:
diff_r = next(iter(fold(Result, diff_results, by=[])), None)
ratios = [
types[k].ratio(
getattr(r, k, None),
getattr(diff_r, k, None))
for k in fields]
lines.append(table_entry('TOTAL', r, diff_r, ratios))
# find the best widths, note that column 0 contains the names and column -1
# the ratios, so those are handled a bit differently
widths = [
((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
for w, i in zip(
it.chain([23], it.repeat(7)),
range(len(lines[0])-1))]
# print our table
for line in lines:
print('%-*s %s%s' % (
widths[0], line[0],
' '.join('%*s' % (w, x)
for w, x in zip(widths[1:], line[1:-1])),
line[-1]))
def annotate(Result, results, *,
annotate=False,
lines=False,
branches=False,
**args):
# if neither branches/lines specified, color both
if annotate and not lines and not branches:
lines, branches = True, True
for path in co.OrderedDict.fromkeys(r.file for r in results).keys():
# flatten to line info
results = fold(Result, results, by=['file', 'line'])
table = {r.line: r for r in results if r.file == path}
# calculate spans to show
if not annotate:
spans = []
last = None
func = None
for line, r in sorted(table.items()):
if ((lines and int(r.hits) == 0)
or (branches and r.branches.a < r.branches.b)):
if last is not None and line - last.stop <= args['context']:
last = range(
last.start,
line+1+args['context'])
else:
if last is not None:
spans.append((last, func))
last = range(
line-args['context'],
line+1+args['context'])
func = r.function
if last is not None:
spans.append((last, func))
with open(path) as f:
skipped = False
for i, line in enumerate(f):
# skip lines not in spans?
if not annotate and not any(i+1 in s for s, _ in spans):
skipped = True
continue
if skipped:
skipped = False
print('%s@@ %s:%d: %s @@%s' % (
'\x1b[36m' if args['color'] else '',
path,
i+1,
next(iter(f for _, f in spans)),
'\x1b[m' if args['color'] else ''))
# build line
if line.endswith('\n'):
line = line[:-1]
if i+1 in table:
r = table[i+1]
line = '%-*s // %s hits%s' % (
args['width'],
line,
r.hits,
', %s branches' % (r.branches,)
if int(r.branches.b) else '')
if args['color']:
if lines and int(r.hits) == 0:
line = '\x1b[1;31m%s\x1b[m' % line
elif branches and r.branches.a < r.branches.b:
line = '\x1b[35m%s\x1b[m' % line
print(line)
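# annotate() backs the -A/-L/-B options below: -L flags lines with zero hits,
# -B flags lines with untaken branches, and -c controls how many lines of
# surrounding context get printed (see the argparse defaults further down).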
def main(gcda_paths, *,
by=None,
fields=None,
defines=None,
sort=None,
hits=False,
**args):
# figure out what color should be
if args.get('color') == 'auto':
args['color'] = sys.stdout.isatty()
elif args.get('color') == 'always':
args['color'] = True
else:
args['color'] = False
# find sizes
if not args.get('use', None):
results = collect(gcda_paths, **args)
else:
results = []
with openio(args['use']) as f:
reader = csv.DictReader(f, restval='')
for r in reader:
if not any('cov_'+k in r and r['cov_'+k].strip()
for k in CovResult._fields):
continue
try:
results.append(CovResult(
**{k: r[k] for k in CovResult._by
if k in r and r[k].strip()},
**{k: r['cov_'+k]
for k in CovResult._fields
if 'cov_'+k in r
and r['cov_'+k].strip()}))
except TypeError:
pass
# fold
results = fold(CovResult, results, by=by, defines=defines)
# sort, note that python's sort is stable
results.sort()
if sort:
for k, reverse in reversed(sort):
results.sort(
key=lambda r: tuple(
(getattr(r, k),) if getattr(r, k) is not None else ()
for k in ([k] if k else CovResult._sort)),
reverse=reverse ^ (not k or k in CovResult._fields))
# write results to CSV
if args.get('output'):
with openio(args['output'], 'w') as f:
writer = csv.DictWriter(f,
(by if by is not None else CovResult._by)
+ ['cov_'+k for k in (
fields if fields is not None else CovResult._fields)])
writer.writeheader()
for r in results:
writer.writerow(
{k: getattr(r, k) for k in (
by if by is not None else CovResult._by)}
| {'cov_'+k: getattr(r, k) for k in (
fields if fields is not None else CovResult._fields)})
# find previous results?
if args.get('diff'):
diff_results = []
try:
with openio(args['diff']) as f:
reader = csv.DictReader(f, restval='')
for r in reader:
if not any('cov_'+k in r and r['cov_'+k].strip()
for k in CovResult._fields):
continue
try:
diff_results.append(CovResult(
**{k: r[k] for k in CovResult._by
if k in r and r[k].strip()},
**{k: r['cov_'+k]
for k in CovResult._fields
if 'cov_'+k in r
and r['cov_'+k].strip()}))
except TypeError:
pass
except FileNotFoundError:
pass
# fold
diff_results = fold(CovResult, diff_results,
by=by, defines=defines)
# print table
if not args.get('quiet'):
if (args.get('annotate')
or args.get('lines')
or args.get('branches')):
# annotate sources
annotate(CovResult, results, **args)
else:
# print table
table(CovResult, results,
diff_results if args.get('diff') else None,
by=by if by is not None else ['function'],
fields=fields if fields is not None
else ['lines', 'branches'] if not hits
else ['calls', 'hits'],
sort=sort,
**args)
# catch lack of coverage
if args.get('error_on_lines') and any(
r.lines.a < r.lines.b for r in results):
sys.exit(2)
elif args.get('error_on_branches') and any(
r.branches.a < r.branches.b for r in results):
sys.exit(3)
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Find coverage info after running tests.",
allow_abbrev=False)
parser.add_argument(
'gcda_paths',
nargs='*',
help="Input *.gcda files.")
parser.add_argument(
'-v', '--verbose',
action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument(
'-q', '--quiet',
action='store_true',
help="Don't show anything, useful with -o.")
parser.add_argument(
'-o', '--output',
help="Specify CSV file to store results.")
parser.add_argument(
'-u', '--use',
help="Don't parse anything, use this CSV file.")
parser.add_argument(
'-d', '--diff',
help="Specify CSV file to diff against.")
parser.add_argument(
'-a', '--all',
action='store_true',
help="Show all, not just the ones that changed.")
parser.add_argument(
'-p', '--percent',
action='store_true',
help="Only show percentage change, not a full diff.")
parser.add_argument(
'-b', '--by',
action='append',
choices=CovResult._by,
help="Group by this field.")
parser.add_argument(
'-f', '--field',
dest='fields',
action='append',
choices=CovResult._fields,
help="Show this field.")
parser.add_argument(
'-D', '--define',
dest='defines',
action='append',
type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
help="Only include results where this field is this value.")
class AppendSort(argparse.Action):
def __call__(self, parser, namespace, value, option):
if namespace.sort is None:
namespace.sort = []
namespace.sort.append((value, True if option == '-S' else False))
parser.add_argument(
'-s', '--sort',
nargs='?',
action=AppendSort,
help="Sort by this field.")
parser.add_argument(
'-S', '--reverse-sort',
nargs='?',
action=AppendSort,
help="Sort by this field, but backwards.")
parser.add_argument(
'-Y', '--summary',
action='store_true',
help="Only show the total.")
parser.add_argument(
'-F', '--source',
dest='sources',
action='append',
help="Only consider definitions in this file. Defaults to anything "
"in the current directory.")
parser.add_argument(
'--everything',
action='store_true',
help="Include builtin and libc specific symbols.")
parser.add_argument(
'--hits',
action='store_true',
help="Show total hits instead of coverage.")
parser.add_argument(
'-A', '--annotate',
action='store_true',
help="Show source files annotated with coverage info.")
parser.add_argument(
'-L', '--lines',
action='store_true',
help="Show uncovered lines.")
parser.add_argument(
'-B', '--branches',
action='store_true',
help="Show uncovered branches.")
parser.add_argument(
'-c', '--context',
type=lambda x: int(x, 0),
default=3,
help="Show n additional lines of context. Defaults to 3.")
parser.add_argument(
'-W', '--width',
type=lambda x: int(x, 0),
default=80,
help="Assume source is styled with this many columns. Defaults to 80.")
parser.add_argument(
'--color',
choices=['never', 'always', 'auto'],
default='auto',
help="When to use terminal colors. Defaults to 'auto'.")
parser.add_argument(
'-e', '--error-on-lines',
action='store_true',
help="Error if any lines are not covered.")
parser.add_argument(
'-E', '--error-on-branches',
action='store_true',
help="Error if any branches are not covered.")
parser.add_argument(
'--gcov-path',
default=GCOV_PATH,
type=lambda x: x.split(),
help="Path to the gcov executable, may include paths. "
"Defaults to %r." % GCOV_PATH)
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

scripts/data.py

@ -1,42 +1,188 @@
#!/usr/bin/env python3
#
# Script to find data size at the function level. Basically just a bit wrapper
# Script to find data size at the function level. Basically just a big wrapper
# around nm with some extra conveniences for comparing builds. Heavily inspired
# by Linux's Bloat-O-Meter.
#
# Example:
# ./scripts/data.py lfs.o lfs_util.o -Ssize
#
# Copyright (c) 2022, The littlefs authors.
# Copyright (c) 2020, Arm Limited. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import glob
import itertools as it
import subprocess as sp
import shlex
import re
import csv
import collections as co
import csv
import difflib
import itertools as it
import math as m
import os
import re
import shlex
import subprocess as sp
OBJ_PATHS = ['*.o']
NM_PATH = ['nm']
NM_TYPES = 'dDbB'
OBJDUMP_PATH = ['objdump']
def collect(paths, **args):
results = co.defaultdict(lambda: 0)
pattern = re.compile(
# integer fields
class Int(co.namedtuple('Int', 'x')):
__slots__ = ()
def __new__(cls, x=0):
if isinstance(x, Int):
return x
if isinstance(x, str):
try:
x = int(x, 0)
except ValueError:
# also accept +-∞ and +-inf
if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
x = m.inf
elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
x = -m.inf
else:
raise
assert isinstance(x, int) or m.isinf(x), x
return super().__new__(cls, x)
def __str__(self):
if self.x == m.inf:
return '∞'
elif self.x == -m.inf:
return '-∞'
else:
return str(self.x)
def __int__(self):
assert not m.isinf(self.x)
return self.x
def __float__(self):
return float(self.x)
none = '%7s' % '-'
def table(self):
return '%7s' % (self,)
diff_none = '%7s' % '-'
diff_table = table
def diff_diff(self, other):
new = self.x if self else 0
old = other.x if other else 0
diff = new - old
if diff == +m.inf:
return '%7s' % '+∞'
elif diff == -m.inf:
return '%7s' % '-∞'
else:
return '%+7d' % diff
def ratio(self, other):
new = self.x if self else 0
old = other.x if other else 0
if m.isinf(new) and m.isinf(old):
return 0.0
elif m.isinf(new):
return +m.inf
elif m.isinf(old):
return -m.inf
elif not old and not new:
return 0.0
elif not old:
return 1.0
else:
return (new-old) / old
def __add__(self, other):
return self.__class__(self.x + other.x)
def __sub__(self, other):
return self.__class__(self.x - other.x)
def __mul__(self, other):
return self.__class__(self.x * other.x)
# data size results
class DataResult(co.namedtuple('DataResult', [
'file', 'function',
'size'])):
_by = ['file', 'function']
_fields = ['size']
_sort = ['size']
_types = {'size': Int}
__slots__ = ()
def __new__(cls, file='', function='', size=0):
return super().__new__(cls, file, function,
Int(size))
def __add__(self, other):
return DataResult(self.file, self.function,
self.size + other.size)
def openio(path, mode='r', buffering=-1):
# allow '-' for stdin/stdout
if path == '-':
if mode == 'r':
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
else:
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
else:
return open(path, mode, buffering)
def collect(obj_paths, *,
nm_path=NM_PATH,
nm_types=NM_TYPES,
objdump_path=OBJDUMP_PATH,
sources=None,
everything=False,
**args):
size_pattern = re.compile(
'^(?P<size>[0-9a-fA-F]+)' +
' (?P<type>[%s])' % re.escape(args['type']) +
' (?P<type>[%s])' % re.escape(nm_types) +
' (?P<func>.+?)$')
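# With --size-sort, GNU nm prints "<size> <type> <symbol>" per line, e.g. a
# hypothetical "00000010 d lfs_crc_table"; size_pattern splits this into its
# size/type/func groups (size is parsed as hex further below).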
for path in paths:
# note nm-tool may contain extra args
cmd = args['nm_tool'] + ['--size-sort', path]
line_pattern = re.compile(
'^\s+(?P<no>[0-9]+)'
'(?:\s+(?P<dir>[0-9]+))?'
'\s+.*'
'\s+(?P<path>[^\s]+)$')
info_pattern = re.compile(
'^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
'|.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
'|.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*)$')
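# The two objdump patterns work together: line_pattern walks the
# --dwarf=rawline dir/file tables, and info_pattern picks the DW_TAG/DW_AT
# entries out of --dwarf=info so each symbol can be traced back to the file
# that defined it.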
results = []
for path in obj_paths:
# guess the source, if we have debug-info we'll replace this later
file = re.sub('(\.o)?$', '.c', path, 1)
# find symbol sizes
results_ = []
# note nm-path may contain extra args
cmd = nm_path + ['--size-sort', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace')
errors='replace',
close_fds=False)
for line in proc.stdout:
m = pattern.match(line)
m = size_pattern.match(line)
if m:
results[(path, m.group('func'))] += int(m.group('size'), 16)
func = m.group('func')
# discard internal functions
if not everything and func.startswith('__'):
continue
results_.append(DataResult(
file, func,
int(m.group('size'), 16)))
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
@ -44,240 +190,515 @@ def collect(paths, **args):
sys.stdout.write(line)
sys.exit(-1)
flat_results = []
for (file, func), size in results.items():
# map to source files
if args.get('build_dir'):
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
# replace .o with .c, different scripts report .o/.c, we need to
# choose one if we want to deduplicate csv files
file = re.sub('\.o$', '.c', file)
# discard internal functions
if not args.get('everything'):
if func.startswith('__'):
continue
# discard .8449 suffixes created by optimizer
func = re.sub('\.[0-9]+', '', func)
flat_results.append((file, func, size))
return flat_results
# try to figure out the source file if we have debug-info
dirs = {}
files = {}
# note objdump-path may contain extra args
cmd = objdump_path + ['--dwarf=rawline', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace',
close_fds=False)
for line in proc.stdout:
# note that files contain references to dirs, which we
# dereference as soon as we see them as each file table follows a
# dir table
m = line_pattern.match(line)
if m:
if not m.group('dir'):
# found a directory entry
dirs[int(m.group('no'))] = m.group('path')
else:
# found a file entry
dir = int(m.group('dir'))
if dir in dirs:
files[int(m.group('no'))] = os.path.join(
dirs[dir],
m.group('path'))
else:
files[int(m.group('no'))] = m.group('path')
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
# do nothing on error, we don't need objdump to work, source files
# may just be inaccurate
pass
def main(**args):
def openio(path, mode='r'):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
defs = {}
is_func = False
f_name = None
f_file = None
# note objdump-path may contain extra args
cmd = objdump_path + ['--dwarf=info', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace',
close_fds=False)
for line in proc.stdout:
# state machine here to find definitions
m = info_pattern.match(line)
if m:
if m.group('tag'):
if is_func:
defs[f_name] = files.get(f_file, '?')
is_func = (m.group('tag') == 'DW_TAG_subprogram')
elif m.group('name'):
f_name = m.group('name')
elif m.group('file'):
f_file = int(m.group('file'))
if is_func:
defs[f_name] = files.get(f_file, '?')
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
# do nothing on error, we don't need objdump to work, source files
# may just be inaccurate
pass
for r in results_:
# find best matching debug symbol, this may be slightly different
# due to optimizations
if defs:
# exact match? avoid difflib if we can for speed
if r.function in defs:
file = defs[r.function]
else:
_, file = max(
defs.items(),
key=lambda d: difflib.SequenceMatcher(None,
d[0],
r.function, False).ratio())
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
else:
return open(path, mode)
file = r.file
# find sizes
if not args.get('use', None):
# find .o files
paths = []
for path in args['obj_paths']:
if os.path.isdir(path):
path = path + '/*.o'
# ignore filtered sources
if sources is not None:
if not any(
os.path.abspath(file) == os.path.abspath(s)
for s in sources):
continue
else:
# default to only cwd
if not everything and not os.path.commonpath([
os.getcwd(),
os.path.abspath(file)]) == os.getcwd():
continue
for path in glob.glob(path):
paths.append(path)
# simplify path
if os.path.commonpath([
os.getcwd(),
os.path.abspath(file)]) == os.getcwd():
file = os.path.relpath(file)
else:
file = os.path.abspath(file)
if not paths:
print('no .obj files found in %r?' % args['obj_paths'])
results.append(r._replace(file=file))
return results
def fold(Result, results, *,
by=None,
defines=None,
**_):
if by is None:
by = Result._by
for k in it.chain(by or [], (k for k, _ in defines or [])):
if k not in Result._by and k not in Result._fields:
print("error: could not find field %r?" % k)
sys.exit(-1)
results = collect(paths, **args)
# filter by matching defines
if defines is not None:
results_ = []
for r in results:
if all(getattr(r, k) in vs for k, vs in defines):
results_.append(r)
results = results_
# organize results into conflicts
folding = co.OrderedDict()
for r in results:
name = tuple(getattr(r, k) for k in by)
if name not in folding:
folding[name] = []
folding[name].append(r)
# merge conflicts
folded = []
for name, rs in folding.items():
folded.append(sum(rs[1:], start=rs[0]))
return folded
def table(Result, results, diff_results=None, *,
by=None,
fields=None,
sort=None,
summary=False,
all=False,
percent=False,
**_):
all_, all = all, __builtins__.all
if by is None:
by = Result._by
if fields is None:
fields = Result._fields
types = Result._types
# fold again
results = fold(Result, results, by=by)
if diff_results is not None:
diff_results = fold(Result, diff_results, by=by)
# organize by name
table = {
','.join(str(getattr(r, k) or '') for k in by): r
for r in results}
diff_table = {
','.join(str(getattr(r, k) or '') for k in by): r
for r in diff_results or []}
names = list(table.keys() | diff_table.keys())
# sort again, now with diff info, note that python's sort is stable
names.sort()
if diff_results is not None:
names.sort(key=lambda n: tuple(
types[k].ratio(
getattr(table.get(n), k, None),
getattr(diff_table.get(n), k, None))
for k in fields),
reverse=True)
if sort:
for k, reverse in reversed(sort):
names.sort(
key=lambda n: tuple(
(getattr(table[n], k),)
if getattr(table.get(n), k, None) is not None else ()
for k in ([k] if k else [
k for k in Result._sort if k in fields])),
reverse=reverse ^ (not k or k in Result._fields))
# build up our lines
lines = []
# header
header = []
header.append('%s%s' % (
','.join(by),
' (%d added, %d removed)' % (
sum(1 for n in table if n not in diff_table),
sum(1 for n in diff_table if n not in table))
if diff_results is not None and not percent else '')
if not summary else '')
if diff_results is None:
for k in fields:
header.append(k)
elif percent:
for k in fields:
header.append(k)
else:
for k in fields:
header.append('o'+k)
for k in fields:
header.append('n'+k)
for k in fields:
header.append('d'+k)
header.append('')
lines.append(header)
def table_entry(name, r, diff_r=None, ratios=[]):
entry = []
entry.append(name)
if diff_results is None:
for k in fields:
entry.append(getattr(r, k).table()
if getattr(r, k, None) is not None
else types[k].none)
elif percent:
for k in fields:
entry.append(getattr(r, k).diff_table()
if getattr(r, k, None) is not None
else types[k].diff_none)
else:
for k in fields:
entry.append(getattr(diff_r, k).diff_table()
if getattr(diff_r, k, None) is not None
else types[k].diff_none)
for k in fields:
entry.append(getattr(r, k).diff_table()
if getattr(r, k, None) is not None
else types[k].diff_none)
for k in fields:
entry.append(types[k].diff_diff(
getattr(r, k, None),
getattr(diff_r, k, None)))
if diff_results is None:
entry.append('')
elif percent:
entry.append(' (%s)' % ', '.join(
'+∞%' if t == +m.inf
else '-∞%' if t == -m.inf
else '%+.1f%%' % (100*t)
for t in ratios))
else:
entry.append(' (%s)' % ', '.join(
'+∞%' if t == +m.inf
else '-∞%' if t == -m.inf
else '%+.1f%%' % (100*t)
for t in ratios
if t)
if any(ratios) else '')
return entry
# entries
if not summary:
for name in names:
r = table.get(name)
if diff_results is None:
diff_r = None
ratios = None
else:
diff_r = diff_table.get(name)
ratios = [
types[k].ratio(
getattr(r, k, None),
getattr(diff_r, k, None))
for k in fields]
if not all_ and not any(ratios):
continue
lines.append(table_entry(name, r, diff_r, ratios))
# total
r = next(iter(fold(Result, results, by=[])), None)
if diff_results is None:
diff_r = None
ratios = None
else:
diff_r = next(iter(fold(Result, diff_results, by=[])), None)
ratios = [
types[k].ratio(
getattr(r, k, None),
getattr(diff_r, k, None))
for k in fields]
lines.append(table_entry('TOTAL', r, diff_r, ratios))
# find the best widths, note that column 0 contains the names and column -1
# the ratios, so those are handled a bit differently
widths = [
((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
for w, i in zip(
it.chain([23], it.repeat(7)),
range(len(lines[0])-1))]
# print our table
for line in lines:
print('%-*s %s%s' % (
widths[0], line[0],
' '.join('%*s' % (w, x)
for w, x in zip(widths[1:], line[1:-1])),
line[-1]))
def main(obj_paths, *,
by=None,
fields=None,
defines=None,
sort=None,
**args):
# find sizes
if not args.get('use', None):
results = collect(obj_paths, **args)
else:
results = []
with openio(args['use']) as f:
r = csv.DictReader(f)
results = [
( result['file'],
result['name'],
int(result['data_size']))
for result in r
if result.get('data_size') not in {None, ''}]
reader = csv.DictReader(f, restval='')
for r in reader:
try:
results.append(DataResult(
**{k: r[k] for k in DataResult._by
if k in r and r[k].strip()},
**{k: r['data_'+k] for k in DataResult._fields
if 'data_'+k in r and r['data_'+k].strip()}))
except TypeError:
pass
total = 0
for _, _, size in results:
total += size
# fold
results = fold(DataResult, results, by=by, defines=defines)
# find previous results?
if args.get('diff'):
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
prev_results = [
( result['file'],
result['name'],
int(result['data_size']))
for result in r
if result.get('data_size') not in {None, ''}]
except FileNotFoundError:
prev_results = []
prev_total = 0
for _, _, size in prev_results:
prev_total += size
# sort, note that python's sort is stable
results.sort()
if sort:
for k, reverse in reversed(sort):
results.sort(
key=lambda r: tuple(
(getattr(r, k),) if getattr(r, k) is not None else ()
for k in ([k] if k else DataResult._sort)),
reverse=reverse ^ (not k or k in DataResult._fields))
# write results to CSV
if args.get('output'):
merged_results = co.defaultdict(lambda: {})
other_fields = []
# merge?
if args.get('merge'):
try:
with openio(args['merge']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
func = result.pop('name', '')
result.pop('data_size', None)
merged_results[(file, func)] = result
other_fields = result.keys()
except FileNotFoundError:
pass
for file, func, size in results:
merged_results[(file, func)]['data_size'] = size
with openio(args['output'], 'w') as f:
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
w.writeheader()
for (file, func), result in sorted(merged_results.items()):
w.writerow({'file': file, 'name': func, **result})
writer = csv.DictWriter(f,
(by if by is not None else DataResult._by)
+ ['data_'+k for k in (
fields if fields is not None else DataResult._fields)])
writer.writeheader()
for r in results:
writer.writerow(
{k: getattr(r, k) for k in (
by if by is not None else DataResult._by)}
| {'data_'+k: getattr(r, k) for k in (
fields if fields is not None else DataResult._fields)})
# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: 0)
for file, func, size in results:
entry = (file if by == 'file' else func)
entries[entry] += size
return entries
# find previous results?
if args.get('diff'):
diff_results = []
try:
with openio(args['diff']) as f:
reader = csv.DictReader(f, restval='')
for r in reader:
if not any('data_'+k in r and r['data_'+k].strip()
for k in DataResult._fields):
continue
try:
diff_results.append(DataResult(
**{k: r[k] for k in DataResult._by
if k in r and r[k].strip()},
**{k: r['data_'+k] for k in DataResult._fields
if 'data_'+k in r and r['data_'+k].strip()}))
except TypeError:
pass
except FileNotFoundError:
pass
def diff_entries(olds, news):
diff = co.defaultdict(lambda: (0, 0, 0, 0))
for name, new in news.items():
diff[name] = (0, new, new, 1.0)
for name, old in olds.items():
_, new, _, _ = diff[name]
diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
return diff
# fold
diff_results = fold(DataResult, diff_results, by=by, defines=defines)
def sorted_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1], x))
else:
return sorted(entries)
# print table
if not args.get('quiet'):
table(DataResult, results,
diff_results if args.get('diff') else None,
by=by if by is not None else ['function'],
fields=fields,
sort=sort,
**args)
def sorted_diff_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1][1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1][1], x))
else:
return sorted(entries, key=lambda x: (-x[1][3], x))
def print_header(by=''):
if not args.get('diff'):
print('%-36s %7s' % (by, 'size'))
else:
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
def print_entry(name, size):
print("%-36s %7d" % (name, size))
def print_diff_entry(name, old, new, diff, ratio):
print("%-36s %7s %7s %+7d%s" % (name,
old or "-",
new or "-",
diff,
' (%+.1f%%)' % (100*ratio) if ratio else ''))
def print_entries(by='name'):
entries = dedup_entries(results, by=by)
if not args.get('diff'):
print_header(by=by)
for name, size in sorted_entries(entries.items()):
print_entry(name, size)
else:
prev_entries = dedup_entries(prev_results, by=by)
diff = diff_entries(prev_entries, entries)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for old, _, _, _ in diff.values() if not old),
sum(1 for _, new, _, _ in diff.values() if not new)))
for name, (old, new, diff, ratio) in sorted_diff_entries(
diff.items()):
if ratio or args.get('all'):
print_diff_entry(name, old, new, diff, ratio)
def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total)
else:
ratio = (0.0 if not prev_total and not total
else 1.0 if not prev_total
else (total-prev_total)/prev_total)
print_diff_entry('TOTAL',
prev_total, total,
total-prev_total,
ratio)
if args.get('quiet'):
pass
elif args.get('summary'):
print_header()
print_totals()
elif args.get('files'):
print_entries(by='file')
print_totals()
else:
print_entries(by='name')
print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Find data size at the function level.")
parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
help="Description of where to find *.o files. May be a directory \
or a list of paths. Defaults to %r." % OBJ_PATHS)
parser.add_argument('-v', '--verbose', action='store_true',
description="Find data size at the function level.",
allow_abbrev=False)
parser.add_argument(
'obj_paths',
nargs='*',
help="Input *.o files.")
parser.add_argument(
'-v', '--verbose',
action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument('-q', '--quiet', action='store_true',
parser.add_argument(
'-q', '--quiet',
action='store_true',
help="Don't show anything, useful with -o.")
parser.add_argument('-o', '--output',
parser.add_argument(
'-o', '--output',
help="Specify CSV file to store results.")
parser.add_argument('-u', '--use',
help="Don't compile and find data sizes, instead use this CSV file.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff data size against.")
parser.add_argument('-m', '--merge',
help="Merge with an existing CSV file when writing to output.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all functions, not just the ones that changed.")
parser.add_argument('-A', '--everything', action='store_true',
parser.add_argument(
'-u', '--use',
help="Don't parse anything, use this CSV file.")
parser.add_argument(
'-d', '--diff',
help="Specify CSV file to diff against.")
parser.add_argument(
'-a', '--all',
action='store_true',
help="Show all, not just the ones that changed.")
parser.add_argument(
'-p', '--percent',
action='store_true',
help="Only show percentage change, not a full diff.")
parser.add_argument(
'-b', '--by',
action='append',
choices=DataResult._by,
help="Group by this field.")
parser.add_argument(
'-f', '--field',
dest='fields',
action='append',
choices=DataResult._fields,
help="Show this field.")
parser.add_argument(
'-D', '--define',
dest='defines',
action='append',
type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
help="Only include results where this field is this value.")
class AppendSort(argparse.Action):
def __call__(self, parser, namespace, value, option):
if namespace.sort is None:
namespace.sort = []
namespace.sort.append((value, True if option == '-S' else False))
parser.add_argument(
'-s', '--sort',
nargs='?',
action=AppendSort,
help="Sort by this field.")
parser.add_argument(
'-S', '--reverse-sort',
nargs='?',
action=AppendSort,
help="Sort by this field, but backwards.")
parser.add_argument(
'-Y', '--summary',
action='store_true',
help="Only show the total.")
parser.add_argument(
'-F', '--source',
dest='sources',
action='append',
help="Only consider definitions in this file. Defaults to anything "
"in the current directory.")
parser.add_argument(
'--everything',
action='store_true',
help="Include builtin and libc specific symbols.")
parser.add_argument('-s', '--size-sort', action='store_true',
help="Sort by size.")
parser.add_argument('-S', '--reverse-size-sort', action='store_true',
help="Sort by size, but backwards.")
parser.add_argument('-F', '--files', action='store_true',
help="Show file-level data sizes. Note this does not include padding! "
"So sizes may differ from other tools.")
parser.add_argument('-Y', '--summary', action='store_true',
help="Only show the total data size.")
parser.add_argument('--type', default='dDbB',
parser.add_argument(
'--nm-types',
default=NM_TYPES,
help="Type of symbols to report, this uses the same single-character "
"type-names emitted by nm. Defaults to %(default)r.")
parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
help="Path to the nm tool to use.")
parser.add_argument('--build-dir',
help="Specify the relative build directory. Used to map object files \
to the correct source files.")
sys.exit(main(**vars(parser.parse_args())))
"type-names emitted by nm. Defaults to %r." % NM_TYPES)
parser.add_argument(
'--nm-path',
type=lambda x: x.split(),
default=NM_PATH,
help="Path to the nm executable, may include flags. "
"Defaults to %r." % NM_PATH)
parser.add_argument(
'--objdump-path',
type=lambda x: x.split(),
default=OBJDUMP_PATH,
help="Path to the objdump executable, may include flags. "
"Defaults to %r." % OBJDUMP_PATH)
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

1344
scripts/perf.py Normal file

File diff suppressed because it is too large

1276
scripts/perfbd.py Normal file

File diff suppressed because it is too large

1592
scripts/plot.py Normal file

File diff suppressed because it is too large

1262
scripts/plotmpl.py Normal file

File diff suppressed because it is too large

452
scripts/prettyasserts.py Normal file

@ -0,0 +1,452 @@
#!/usr/bin/env python3
#
# Preprocessor that makes asserts easier to debug.
#
# Example:
# ./scripts/prettyasserts.py -p LFS_ASSERT lfs.c -o lfs.a.c
#
# Copyright (c) 2022, The littlefs authors.
# Copyright (c) 2020, Arm Limited. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
import re
import sys
# NOTE the use of macros here helps keep a consistent stack depth which
# tools may rely on.
#
# If compilation errors are noisy consider using -ftrack-macro-expansion=0.
#
LIMIT = 16
CMP = {
'==': 'eq',
'!=': 'ne',
'<=': 'le',
'>=': 'ge',
'<': 'lt',
'>': 'gt',
}
LEXEMES = {
'ws': [r'(?:\s|\n|#.*?\n|//.*?\n|/\*.*?\*/)+'],
'assert': ['assert'],
'arrow': ['=>'],
'string': [r'"(?:\\.|[^"])*"', r"'(?:\\.|[^'])\'"],
'paren': ['\(', '\)'],
'cmp': CMP.keys(),
'logic': ['\&\&', '\|\|'],
'sep': [':', ';', '\{', '\}', ','],
'op': ['->'], # specifically ops that conflict with cmp
}
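# Roughly, the rewrites produced by this script look like (a sketch, exact
# whitespace depends on the input):
#   assert(a == b);             ->  __PRETTY_ASSERT_INT_EQ(a, b);
#   assert(strcmp(a, b) == 0);  ->  __PRETTY_ASSERT_STR_EQ(a, b);
#   memcmp(buf, ref, 4) => 0;   ->  __PRETTY_ASSERT_MEM_EQ(buf, ref, 4);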
def openio(path, mode='r', buffering=-1):
# allow '-' for stdin/stdout
if path == '-':
if mode == 'r':
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
else:
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
else:
return open(path, mode, buffering)
def write_header(f, limit=LIMIT):
f.writeln("// Generated by %s:" % sys.argv[0])
f.writeln("//")
f.writeln("// %s" % ' '.join(sys.argv))
f.writeln("//")
f.writeln()
f.writeln("#include <stdbool.h>")
f.writeln("#include <stdint.h>")
f.writeln("#include <inttypes.h>")
f.writeln("#include <stdio.h>")
f.writeln("#include <string.h>")
f.writeln("#include <signal.h>")
# give source a chance to define feature macros
f.writeln("#undef _FEATURES_H")
f.writeln()
# write print macros
f.writeln("__attribute__((unused))")
f.writeln("static void __pretty_assert_print_bool(")
f.writeln(" const void *v, size_t size) {")
f.writeln(" (void)size;")
f.writeln(" printf(\"%s\", *(const bool*)v ? \"true\" : \"false\");")
f.writeln("}")
f.writeln()
f.writeln("__attribute__((unused))")
f.writeln("static void __pretty_assert_print_int(")
f.writeln(" const void *v, size_t size) {")
f.writeln(" (void)size;")
f.writeln(" printf(\"%\"PRIiMAX, *(const intmax_t*)v);")
f.writeln("}")
f.writeln()
f.writeln("__attribute__((unused))")
f.writeln("static void __pretty_assert_print_mem(")
f.writeln(" const void *v, size_t size) {")
f.writeln(" const uint8_t *v_ = v;")
f.writeln(" printf(\"\\\"\");")
f.writeln(" for (size_t i = 0; i < size && i < %d; i++) {" % limit)
f.writeln(" if (v_[i] >= ' ' && v_[i] <= '~') {")
f.writeln(" printf(\"%c\", v_[i]);")
f.writeln(" } else {")
f.writeln(" printf(\"\\\\x%02x\", v_[i]);")
f.writeln(" }")
f.writeln(" }")
f.writeln(" if (size > %d) {" % limit)
f.writeln(" printf(\"...\");")
f.writeln(" }")
f.writeln(" printf(\"\\\"\");")
f.writeln("}")
f.writeln()
f.writeln("__attribute__((unused))")
f.writeln("static void __pretty_assert_print_str(")
f.writeln(" const void *v, size_t size) {")
f.writeln(" __pretty_assert_print_mem(v, size);")
f.writeln("}")
f.writeln()
f.writeln("__attribute__((unused, noinline))")
f.writeln("static void __pretty_assert_fail(")
f.writeln(" const char *file, int line,")
f.writeln(" void (*type_print_cb)(const void*, size_t),")
f.writeln(" const char *cmp,")
f.writeln(" const void *lh, size_t lsize,")
f.writeln(" const void *rh, size_t rsize) {")
f.writeln(" printf(\"%s:%d:assert: assert failed with \", file, line);")
f.writeln(" type_print_cb(lh, lsize);")
f.writeln(" printf(\", expected %s \", cmp);")
f.writeln(" type_print_cb(rh, rsize);")
f.writeln(" printf(\"\\n\");")
f.writeln(" fflush(NULL);")
f.writeln(" raise(SIGABRT);")
f.writeln("}")
f.writeln()
# write assert macros
for op, cmp in sorted(CMP.items()):
f.writeln("#define __PRETTY_ASSERT_BOOL_%s(lh, rh) do { \\"
% cmp.upper())
f.writeln(" bool _lh = !!(lh); \\")
f.writeln(" bool _rh = !!(rh); \\")
f.writeln(" if (!(_lh %s _rh)) { \\" % op)
f.writeln(" __pretty_assert_fail( \\")
f.writeln(" __FILE__, __LINE__, \\")
f.writeln(" __pretty_assert_print_bool, \"%s\", \\"
% cmp)
f.writeln(" &_lh, 0, \\")
f.writeln(" &_rh, 0); \\")
f.writeln(" } \\")
f.writeln("} while (0)")
for op, cmp in sorted(CMP.items()):
f.writeln("#define __PRETTY_ASSERT_INT_%s(lh, rh) do { \\"
% cmp.upper())
f.writeln(" __typeof__(lh) _lh = lh; \\")
f.writeln(" __typeof__(lh) _rh = rh; \\")
f.writeln(" if (!(_lh %s _rh)) { \\" % op)
f.writeln(" __pretty_assert_fail( \\")
f.writeln(" __FILE__, __LINE__, \\")
f.writeln(" __pretty_assert_print_int, \"%s\", \\"
% cmp)
f.writeln(" &(intmax_t){_lh}, 0, \\")
f.writeln(" &(intmax_t){_rh}, 0); \\")
f.writeln(" } \\")
f.writeln("} while (0)")
for op, cmp in sorted(CMP.items()):
f.writeln("#define __PRETTY_ASSERT_MEM_%s(lh, rh, size) do { \\"
% cmp.upper())
f.writeln(" const void *_lh = lh; \\")
f.writeln(" const void *_rh = rh; \\")
f.writeln(" if (!(memcmp(_lh, _rh, size) %s 0)) { \\" % op)
f.writeln(" __pretty_assert_fail( \\")
f.writeln(" __FILE__, __LINE__, \\")
f.writeln(" __pretty_assert_print_mem, \"%s\", \\"
% cmp)
f.writeln(" _lh, size, \\")
f.writeln(" _rh, size); \\")
f.writeln(" } \\")
f.writeln("} while (0)")
for op, cmp in sorted(CMP.items()):
f.writeln("#define __PRETTY_ASSERT_STR_%s(lh, rh) do { \\"
% cmp.upper())
f.writeln(" const char *_lh = lh; \\")
f.writeln(" const char *_rh = rh; \\")
f.writeln(" if (!(strcmp(_lh, _rh) %s 0)) { \\" % op)
f.writeln(" __pretty_assert_fail( \\")
f.writeln(" __FILE__, __LINE__, \\")
f.writeln(" __pretty_assert_print_str, \"%s\", \\"
% cmp)
f.writeln(" _lh, strlen(_lh), \\")
f.writeln(" _rh, strlen(_rh)); \\")
f.writeln(" } \\")
f.writeln("} while (0)")
f.writeln()
f.writeln()
def mkassert(type, cmp, lh, rh, size=None):
if size is not None:
return ("__PRETTY_ASSERT_%s_%s(%s, %s, %s)"
% (type.upper(), cmp.upper(), lh, rh, size))
else:
return ("__PRETTY_ASSERT_%s_%s(%s, %s)"
% (type.upper(), cmp.upper(), lh, rh))
# simple recursive descent parser
class ParseFailure(Exception):
def __init__(self, expected, found):
self.expected = expected
self.found = found
def __str__(self):
return "expected %r, found %s..." % (
self.expected, repr(self.found)[:70])
class Parser:
def __init__(self, in_f, lexemes=LEXEMES):
p = '|'.join('(?P<%s>%s)' % (n, '|'.join(l))
for n, l in lexemes.items())
p = re.compile(p, re.DOTALL)
data = in_f.read()
tokens = []
line = 1
col = 0
while True:
m = p.search(data)
if m:
if m.start() > 0:
tokens.append((None, data[:m.start()], line, col))
tokens.append((m.lastgroup, m.group(), line, col))
data = data[m.end():]
else:
tokens.append((None, data, line, col))
break
self.tokens = tokens
self.off = 0
def lookahead(self, *pattern):
if self.off < len(self.tokens):
token = self.tokens[self.off]
if token[0] in pattern or token[1] in pattern:
self.m = token[1]
return self.m
self.m = None
return self.m
def accept(self, *patterns):
m = self.lookahead(*patterns)
if m is not None:
self.off += 1
return m
def expect(self, *patterns):
m = self.accept(*patterns)
if not m:
raise ParseFailure(patterns, self.tokens[self.off:])
return m
def push(self):
return self.off
def pop(self, state):
self.off = state
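# The p_* functions below form a tiny recursive-descent grammar: p_stmt
# handles the "expr => expr" shorthand, p_assert the assert(...) forms, and
# p_expr/p_exprs chew through balanced parens and operators in between.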
def p_assert(p):
state = p.push()
# assert(memcmp(a,b,size) cmp 0)?
try:
p.expect('assert') ; p.accept('ws')
p.expect('(') ; p.accept('ws')
p.expect('memcmp') ; p.accept('ws')
p.expect('(') ; p.accept('ws')
lh = p_expr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
rh = p_expr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
size = p_expr(p) ; p.accept('ws')
p.expect(')') ; p.accept('ws')
cmp = p.expect('cmp') ; p.accept('ws')
p.expect('0') ; p.accept('ws')
p.expect(')')
return mkassert('mem', CMP[cmp], lh, rh, size)
except ParseFailure:
p.pop(state)
# assert(strcmp(a,b) cmp 0)?
try:
p.expect('assert') ; p.accept('ws')
p.expect('(') ; p.accept('ws')
p.expect('strcmp') ; p.accept('ws')
p.expect('(') ; p.accept('ws')
lh = p_expr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
rh = p_expr(p) ; p.accept('ws')
p.expect(')') ; p.accept('ws')
cmp = p.expect('cmp') ; p.accept('ws')
p.expect('0') ; p.accept('ws')
p.expect(')')
return mkassert('str', CMP[cmp], lh, rh)
except ParseFailure:
p.pop(state)
# assert(a cmp b)?
try:
p.expect('assert') ; p.accept('ws')
p.expect('(') ; p.accept('ws')
lh = p_expr(p) ; p.accept('ws')
cmp = p.expect('cmp') ; p.accept('ws')
rh = p_expr(p) ; p.accept('ws')
p.expect(')')
return mkassert('int', CMP[cmp], lh, rh)
except ParseFailure:
p.pop(state)
# assert(a)?
p.expect('assert') ; p.accept('ws')
p.expect('(') ; p.accept('ws')
lh = p_exprs(p) ; p.accept('ws')
p.expect(')')
return mkassert('bool', 'eq', lh, 'true')
def p_expr(p):
res = []
while True:
if p.accept('('):
res.append(p.m)
while True:
res.append(p_exprs(p))
if p.accept('sep'):
res.append(p.m)
else:
break
res.append(p.expect(')'))
elif p.lookahead('assert'):
state = p.push()
try:
res.append(p_assert(p))
except ParseFailure:
p.pop(state)
res.append(p.expect('assert'))
elif p.accept('string', 'op', 'ws', None):
res.append(p.m)
else:
return ''.join(res)
def p_exprs(p):
res = []
while True:
res.append(p_expr(p))
if p.accept('cmp', 'logic', ','):
res.append(p.m)
else:
return ''.join(res)
def p_stmt(p):
ws = p.accept('ws') or ''
# memcmp(lh,rh,size) => 0?
if p.lookahead('memcmp'):
state = p.push()
try:
p.expect('memcmp') ; p.accept('ws')
p.expect('(') ; p.accept('ws')
lh = p_expr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
rh = p_expr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
size = p_expr(p) ; p.accept('ws')
p.expect(')') ; p.accept('ws')
p.expect('=>') ; p.accept('ws')
p.expect('0') ; p.accept('ws')
return ws + mkassert('mem', 'eq', lh, rh, size)
except ParseFailure:
p.pop(state)
# strcmp(lh,rh) => 0?
if p.lookahead('strcmp'):
state = p.push()
try:
p.expect('strcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = p_expr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
rh = p_expr(p) ; p.accept('ws')
p.expect(')') ; p.accept('ws')
p.expect('=>') ; p.accept('ws')
p.expect('0') ; p.accept('ws')
return ws + mkassert('str', 'eq', lh, rh)
except ParseFailure:
p.pop(state)
# lh => rh?
lh = p_exprs(p)
if p.accept('=>'):
rh = p_exprs(p)
return ws + mkassert('int', 'eq', lh, rh)
else:
return ws + lh
def main(input=None, output=None, pattern=[], limit=LIMIT):
with openio(input or '-', 'r') as in_f:
# create parser
lexemes = LEXEMES.copy()
lexemes['assert'] += pattern
p = Parser(in_f, lexemes)
with openio(output or '-', 'w') as f:
def writeln(s=''):
f.write(s)
f.write('\n')
f.writeln = writeln
# write extra verbose asserts
write_header(f, limit=limit)
if input is not None:
f.writeln("#line %d \"%s\"" % (1, input))
# parse and write out stmt at a time
try:
while True:
f.write(p_stmt(p))
if p.accept('sep'):
f.write(p.m)
else:
break
except ParseFailure as e:
print('warning: %s' % e)
pass
for i in range(p.off, len(p.tokens)):
f.write(p.tokens[i][1])
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Preprocessor that makes asserts easier to debug.",
allow_abbrev=False)
parser.add_argument(
'input',
help="Input C file.")
parser.add_argument(
'-o', '--output',
required=True,
help="Output C file.")
parser.add_argument(
'-p', '--pattern',
action='append',
help="Regex patterns to search for starting an assert statement. This"
" implicitly includes \"assert\" and \"=>\".")
parser.add_argument(
'-l', '--limit',
type=lambda x: int(x, 0),
default=LIMIT,
help="Maximum number of characters to display in strcmp and memcmp. "
"Defaults to %r." % LIMIT)
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

scripts/readmdir.py

@ -24,6 +24,8 @@ TAG_TYPES = {
'gstate': (0x700, 0x700),
'movestate': (0x7ff, 0x7ff),
'crc': (0x700, 0x500),
'ccrc': (0x780, 0x500),
'fcrc': (0x7ff, 0x5ff),
}
class Tag:
@ -99,7 +101,16 @@ class Tag:
return struct.unpack('b', struct.pack('B', self.chunk))[0]
def is_(self, type):
return (self.type & TAG_TYPES[type][0]) == TAG_TYPES[type][1]
try:
if ' ' in type:
type1, type3 = type.split()
return (self.is_(type1) and
(self.type & ~TAG_TYPES[type1][0]) == int(type3, 0))
return self.type == int(type, 0)
except (ValueError, KeyError):
return (self.type & TAG_TYPES[type][0]) == TAG_TYPES[type][1]
def mkmask(self):
return Tag(
@ -109,14 +120,20 @@ class Tag:
def chid(self, nid):
ntag = Tag(self.type, nid, self.size)
if hasattr(self, 'off'): ntag.off = self.off
if hasattr(self, 'data'): ntag.data = self.data
if hasattr(self, 'crc'): ntag.crc = self.crc
if hasattr(self, 'off'): ntag.off = self.off
if hasattr(self, 'data'): ntag.data = self.data
if hasattr(self, 'ccrc'): ntag.crc = self.crc
if hasattr(self, 'erased'): ntag.erased = self.erased
return ntag
def typerepr(self):
if self.is_('crc') and getattr(self, 'crc', 0xffffffff) != 0xffffffff:
return 'crc (bad)'
if (self.is_('ccrc')
and getattr(self, 'ccrc', 0xffffffff) != 0xffffffff):
crc_status = ' (bad)'
elif self.is_('fcrc') and getattr(self, 'erased', False):
crc_status = ' (era)'
else:
crc_status = ''
reverse_types = {v: k for k, v in TAG_TYPES.items()}
for prefix in range(12):
@ -124,12 +141,12 @@ class Tag:
if (mask, self.type & mask) in reverse_types:
type = reverse_types[mask, self.type & mask]
if prefix > 0:
return '%s %#0*x' % (
type, prefix//4, self.type & ((1 << prefix)-1))
return '%s %#x%s' % (
type, self.type & ((1 << prefix)-1), crc_status)
else:
return type
return '%s%s' % (type, crc_status)
else:
return '%02x' % self.type
return '%02x%s' % (self.type, crc_status)
def idrepr(self):
return repr(self.id) if self.id != 0x3ff else '.'
@ -172,6 +189,8 @@ class MetadataPair:
self.rev, = struct.unpack('<I', block[0:4])
crc = binascii.crc32(block[0:4])
fcrctag = None
fcrcdata = None
# parse tags
corrupt = False
@ -182,11 +201,11 @@ class MetadataPair:
while len(block) - off >= 4:
ntag, = struct.unpack('>I', block[off:off+4])
tag = Tag(int(tag) ^ ntag)
tag = Tag((int(tag) ^ ntag) & 0x7fffffff)
tag.off = off + 4
tag.data = block[off+4:off+tag.dsize]
if tag.is_('crc'):
crc = binascii.crc32(block[off:off+4+4], crc)
if tag.is_('ccrc'):
crc = binascii.crc32(block[off:off+2*4], crc)
else:
crc = binascii.crc32(block[off:off+tag.dsize], crc)
tag.crc = crc
@ -194,16 +213,29 @@ class MetadataPair:
self.all_.append(tag)
if tag.is_('crc'):
if tag.is_('fcrc') and len(tag.data) == 8:
fcrctag = tag
fcrcdata = struct.unpack('<II', tag.data)
elif tag.is_('ccrc'):
# is valid commit?
if crc != 0xffffffff:
corrupt = True
if not corrupt:
self.log = self.all_.copy()
# end of commit?
if fcrcdata:
fcrcsize, fcrc = fcrcdata
fcrc_ = 0xffffffff ^ binascii.crc32(
block[off:off+fcrcsize])
if fcrc_ == fcrc:
fcrctag.erased = True
corrupt = True
# reset tag parsing
crc = 0
tag = Tag(int(tag) ^ ((tag.type & 1) << 31))
fcrctag = None
fcrcdata = None
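# (An fcrc tag records the checksum of the erased region that should follow
#  the commit; if that checksum still matches, nothing was programmed after
#  this commit, so the erased flag is set and later tags are not treated as
#  part of the log.)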
# find active ids
self.ids = list(it.takewhile(
@ -280,7 +312,7 @@ class MetadataPair:
f.write('\n')
for tag in tags:
f.write("%08x: %08x %-13s %4s %4s" % (
f.write("%08x: %08x %-14s %3s %4s" % (
tag.off, tag,
tag.typerepr(), tag.idrepr(), tag.sizerepr()))
if truncate:

File diff suppressed because it is too large

scripts/structs.py

@ -2,49 +2,183 @@
#
# Script to find struct sizes.
#
# Example:
# ./scripts/structs.py lfs.o lfs_util.o -Ssize
#
# Copyright (c) 2022, The littlefs authors.
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import glob
import itertools as it
import subprocess as sp
import shlex
import re
import csv
import collections as co
import csv
import difflib
import itertools as it
import math as m
import os
import re
import shlex
import subprocess as sp
OBJ_PATHS = ['*.o']
OBJDUMP_PATH = ['objdump']
def collect(paths, **args):
decl_pattern = re.compile(
# integer fields
class Int(co.namedtuple('Int', 'x')):
__slots__ = ()
def __new__(cls, x=0):
if isinstance(x, Int):
return x
if isinstance(x, str):
try:
x = int(x, 0)
except ValueError:
# also accept +-∞ and +-inf
if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
x = m.inf
elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
x = -m.inf
else:
raise
assert isinstance(x, int) or m.isinf(x), x
return super().__new__(cls, x)
def __str__(self):
if self.x == m.inf:
return ''
elif self.x == -m.inf:
return '-∞'
else:
return str(self.x)
def __int__(self):
assert not m.isinf(self.x)
return self.x
def __float__(self):
return float(self.x)
none = '%7s' % '-'
def table(self):
return '%7s' % (self,)
diff_none = '%7s' % '-'
diff_table = table
def diff_diff(self, other):
new = self.x if self else 0
old = other.x if other else 0
diff = new - old
if diff == +m.inf:
return '%7s' % '+∞'
elif diff == -m.inf:
return '%7s' % '-∞'
else:
return '%+7d' % diff
def ratio(self, other):
new = self.x if self else 0
old = other.x if other else 0
if m.isinf(new) and m.isinf(old):
return 0.0
elif m.isinf(new):
return +m.inf
elif m.isinf(old):
return -m.inf
elif not old and not new:
return 0.0
elif not old:
return 1.0
else:
return (new-old) / old
def __add__(self, other):
return self.__class__(self.x + other.x)
def __sub__(self, other):
return self.__class__(self.x - other.x)
def __mul__(self, other):
return self.__class__(self.x * other.x)
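# a rough sketch of how the diff helpers behave (numbers are made up);
# these drive the old/new/diff and percent columns in the table output:
#   Int(64).diff_diff(Int(48))                 -> '    +16'
#   '%+.1f%%' % (100*Int(64).ratio(Int(48)))   -> '+33.3%'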
# struct size results
class StructResult(co.namedtuple('StructResult', ['file', 'struct', 'size'])):
_by = ['file', 'struct']
_fields = ['size']
_sort = ['size']
_types = {'size': Int}
__slots__ = ()
def __new__(cls, file='', struct='', size=0):
return super().__new__(cls, file, struct,
Int(size))
def __add__(self, other):
return StructResult(self.file, self.struct,
self.size + other.size)
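# results that share the same (file, struct) key merge by summing their
# sizes, which is what fold() below relies on; e.g. with made-up sizes:
#   StructResult('lfs.h', 'lfs_config', 240) \
#       + StructResult('lfs.h', 'lfs_config', 4)
#   -> StructResult('lfs.h', 'lfs_config', 244)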
def openio(path, mode='r', buffering=-1):
# allow '-' for stdin/stdout
if path == '-':
if mode == 'r':
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
else:
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
else:
return open(path, mode, buffering)
def collect(obj_paths, *,
objdump_path=OBJDUMP_PATH,
sources=None,
everything=False,
internal=False,
**args):
line_pattern = re.compile(
'^\s+(?P<no>[0-9]+)'
'\s+(?P<dir>[0-9]+)'
'(?:\s+(?P<dir>[0-9]+))?'
'\s+.*'
'\s+(?P<file>[^\s]+)$')
struct_pattern = re.compile(
'^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
'|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
'|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
'|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
'\s+(?P<path>[^\s]+)$')
info_pattern = re.compile(
'^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
'|.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
'|.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*'
'|.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
results = co.defaultdict(lambda: 0)
for path in paths:
# find decl, we want to filter by structs in .h files
decls = {}
# note objdump-tool may contain extra args
cmd = args['objdump_tool'] + ['--dwarf=rawline', path]
results = []
for path in obj_paths:
# find files, we want to filter by structs in .h files
dirs = {}
files = {}
# note objdump-path may contain extra args
cmd = objdump_path + ['--dwarf=rawline', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace')
errors='replace',
close_fds=False)
for line in proc.stdout:
# find file numbers
m = decl_pattern.match(line)
# note that files contain references to dirs, which we
# dereference as soon as we see them as each file table follows a
# dir table
m = line_pattern.match(line)
if m:
decls[int(m.group('no'))] = m.group('file')
if not m.group('dir'):
# found a directory entry
dirs[int(m.group('no'))] = m.group('path')
else:
# found a file entry
dir = int(m.group('dir'))
if dir in dirs:
files[int(m.group('no'))] = os.path.join(
dirs[dir],
m.group('path'))
else:
files[int(m.group('no'))] = m.group('path')
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
@ -53,40 +187,39 @@ def collect(paths, **args):
sys.exit(-1)
# collect structs as we parse dwarf info
found = False
name = None
decl = None
size = None
# note objdump-tool may contain extra args
cmd = args['objdump_tool'] + ['--dwarf=info', path]
results_ = []
is_struct = False
s_name = None
s_file = None
s_size = None
# note objdump-path may contain extra args
cmd = objdump_path + ['--dwarf=info', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace')
errors='replace',
close_fds=False)
for line in proc.stdout:
# state machine here to find structs
m = struct_pattern.match(line)
m = info_pattern.match(line)
if m:
if m.group('tag'):
if (name is not None
and decl is not None
and size is not None):
decl = decls.get(decl, '?')
results[(decl, name)] = size
found = (m.group('tag') == 'structure_type')
name = None
decl = None
size = None
elif found and m.group('name'):
name = m.group('name')
elif found and name and m.group('decl'):
decl = int(m.group('decl'))
elif found and name and m.group('size'):
size = int(m.group('size'))
if is_struct:
file = files.get(s_file, '?')
results_.append(StructResult(file, s_name, s_size))
is_struct = (m.group('tag') == 'DW_TAG_structure_type')
elif m.group('name'):
s_name = m.group('name')
elif m.group('file'):
s_file = int(m.group('file'))
elif m.group('size'):
s_size = int(m.group('size'))
if is_struct:
file = files.get(s_file, '?')
results_.append(StructResult(file, s_name, s_size))
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
@ -94,238 +227,426 @@ def collect(paths, **args):
sys.stdout.write(line)
sys.exit(-1)
flat_results = []
for (file, struct), size in results.items():
# map to source files
if args.get('build_dir'):
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
# only include structs declared in header files in the current
# directory, ignore internal-only structs (these are represented
# in other measurements)
if not args.get('everything'):
if not file.endswith('.h'):
continue
# replace .o with .c, different scripts report .o/.c, we need to
# choose one if we want to deduplicate csv files
file = re.sub('\.o$', '.c', file)
flat_results.append((file, struct, size))
return flat_results
def main(**args):
def openio(path, mode='r'):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
for r in results_:
# ignore filtered sources
if sources is not None:
if not any(
os.path.abspath(r.file) == os.path.abspath(s)
for s in sources):
continue
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
else:
return open(path, mode)
# default to only cwd
if not everything and not os.path.commonpath([
os.getcwd(),
os.path.abspath(r.file)]) == os.getcwd():
continue
# find sizes
if not args.get('use', None):
# find .o files
paths = []
for path in args['obj_paths']:
if os.path.isdir(path):
path = path + '/*.o'
# limit to .h files unless --internal
if not internal and not r.file.endswith('.h'):
continue
for path in glob.glob(path):
paths.append(path)
# simplify path
if os.path.commonpath([
os.getcwd(),
os.path.abspath(r.file)]) == os.getcwd():
file = os.path.relpath(r.file)
else:
file = os.path.abspath(r.file)
if not paths:
print('no .obj files found in %r?' % args['obj_paths'])
results.append(r._replace(file=file))
return results
def fold(Result, results, *,
by=None,
defines=None,
**_):
if by is None:
by = Result._by
for k in it.chain(by or [], (k for k, _ in defines or [])):
if k not in Result._by and k not in Result._fields:
print("error: could not find field %r?" % k)
sys.exit(-1)
results = collect(paths, **args)
# filter by matching defines
if defines is not None:
results_ = []
for r in results:
if all(getattr(r, k) in vs for k, vs in defines):
results_.append(r)
results = results_
# organize results into conflicts
folding = co.OrderedDict()
for r in results:
name = tuple(getattr(r, k) for k in by)
if name not in folding:
folding[name] = []
folding[name].append(r)
# merge conflicts
folded = []
for name, rs in folding.items():
folded.append(sum(rs[1:], start=rs[0]))
return folded
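# e.g. fold(StructResult, results, by=['file']) sums struct sizes per file,
# while fold(StructResult, results, by=[]) collapses everything into the
# single entry used for the TOTAL row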
def table(Result, results, diff_results=None, *,
by=None,
fields=None,
sort=None,
summary=False,
all=False,
percent=False,
**_):
all_, all = all, __builtins__.all
if by is None:
by = Result._by
if fields is None:
fields = Result._fields
types = Result._types
# fold again
results = fold(Result, results, by=by)
if diff_results is not None:
diff_results = fold(Result, diff_results, by=by)
# organize by name
table = {
','.join(str(getattr(r, k) or '') for k in by): r
for r in results}
diff_table = {
','.join(str(getattr(r, k) or '') for k in by): r
for r in diff_results or []}
names = list(table.keys() | diff_table.keys())
# sort again, now with diff info, note that python's sort is stable
names.sort()
if diff_results is not None:
names.sort(key=lambda n: tuple(
types[k].ratio(
getattr(table.get(n), k, None),
getattr(diff_table.get(n), k, None))
for k in fields),
reverse=True)
if sort:
for k, reverse in reversed(sort):
names.sort(
key=lambda n: tuple(
(getattr(table[n], k),)
if getattr(table.get(n), k, None) is not None else ()
for k in ([k] if k else [
k for k in Result._sort if k in fields])),
reverse=reverse ^ (not k or k in Result._fields))
# build up our lines
lines = []
# header
header = []
header.append('%s%s' % (
','.join(by),
' (%d added, %d removed)' % (
sum(1 for n in table if n not in diff_table),
sum(1 for n in diff_table if n not in table))
if diff_results is not None and not percent else '')
if not summary else '')
if diff_results is None:
for k in fields:
header.append(k)
elif percent:
for k in fields:
header.append(k)
else:
for k in fields:
header.append('o'+k)
for k in fields:
header.append('n'+k)
for k in fields:
header.append('d'+k)
header.append('')
lines.append(header)
def table_entry(name, r, diff_r=None, ratios=[]):
entry = []
entry.append(name)
if diff_results is None:
for k in fields:
entry.append(getattr(r, k).table()
if getattr(r, k, None) is not None
else types[k].none)
elif percent:
for k in fields:
entry.append(getattr(r, k).diff_table()
if getattr(r, k, None) is not None
else types[k].diff_none)
else:
for k in fields:
entry.append(getattr(diff_r, k).diff_table()
if getattr(diff_r, k, None) is not None
else types[k].diff_none)
for k in fields:
entry.append(getattr(r, k).diff_table()
if getattr(r, k, None) is not None
else types[k].diff_none)
for k in fields:
entry.append(types[k].diff_diff(
getattr(r, k, None),
getattr(diff_r, k, None)))
if diff_results is None:
entry.append('')
elif percent:
entry.append(' (%s)' % ', '.join(
'+∞%' if t == +m.inf
else '-∞%' if t == -m.inf
else '%+.1f%%' % (100*t)
for t in ratios))
else:
entry.append(' (%s)' % ', '.join(
'+∞%' if t == +m.inf
else '-∞%' if t == -m.inf
else '%+.1f%%' % (100*t)
for t in ratios
if t)
if any(ratios) else '')
return entry
# entries
if not summary:
for name in names:
r = table.get(name)
if diff_results is None:
diff_r = None
ratios = None
else:
diff_r = diff_table.get(name)
ratios = [
types[k].ratio(
getattr(r, k, None),
getattr(diff_r, k, None))
for k in fields]
if not all_ and not any(ratios):
continue
lines.append(table_entry(name, r, diff_r, ratios))
# total
r = next(iter(fold(Result, results, by=[])), None)
if diff_results is None:
diff_r = None
ratios = None
else:
diff_r = next(iter(fold(Result, diff_results, by=[])), None)
ratios = [
types[k].ratio(
getattr(r, k, None),
getattr(diff_r, k, None))
for k in fields]
lines.append(table_entry('TOTAL', r, diff_r, ratios))
# find the best widths, note that column 0 contains the names and column -1
# the ratios, so those are handled a bit differently
widths = [
((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
for w, i in zip(
it.chain([23], it.repeat(7)),
range(len(lines[0])-1))]
# print our table
for line in lines:
print('%-*s %s%s' % (
widths[0], line[0],
' '.join('%*s' % (w, x)
for w, x in zip(widths[1:], line[1:-1])),
line[-1]))
def main(obj_paths, *,
by=None,
fields=None,
defines=None,
sort=None,
**args):
# find sizes
if not args.get('use', None):
results = collect(obj_paths, **args)
else:
results = []
with openio(args['use']) as f:
r = csv.DictReader(f)
results = [
( result['file'],
result['name'],
int(result['struct_size']))
for result in r
if result.get('struct_size') not in {None, ''}]
reader = csv.DictReader(f, restval='')
for r in reader:
if not any('struct_'+k in r and r['struct_'+k].strip()
for k in StructResult._fields):
continue
try:
results.append(StructResult(
**{k: r[k] for k in StructResult._by
if k in r and r[k].strip()},
**{k: r['struct_'+k]
for k in StructResult._fields
if 'struct_'+k in r
and r['struct_'+k].strip()}))
except TypeError:
pass
total = 0
for _, _, size in results:
total += size
# fold
results = fold(StructResult, results, by=by, defines=defines)
# find previous results?
if args.get('diff'):
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
prev_results = [
( result['file'],
result['name'],
int(result['struct_size']))
for result in r
if result.get('struct_size') not in {None, ''}]
except FileNotFoundError:
prev_results = []
prev_total = 0
for _, _, size in prev_results:
prev_total += size
# sort, note that python's sort is stable
results.sort()
if sort:
for k, reverse in reversed(sort):
results.sort(
key=lambda r: tuple(
(getattr(r, k),) if getattr(r, k) is not None else ()
for k in ([k] if k else StructResult._sort)),
reverse=reverse ^ (not k or k in StructResult._fields))
# write results to CSV
if args.get('output'):
merged_results = co.defaultdict(lambda: {})
other_fields = []
# merge?
if args.get('merge'):
try:
with openio(args['merge']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
struct = result.pop('name', '')
result.pop('struct_size', None)
merged_results[(file, struct)] = result
other_fields = result.keys()
except FileNotFoundError:
pass
for file, struct, size in results:
merged_results[(file, struct)]['struct_size'] = size
with openio(args['output'], 'w') as f:
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
w.writeheader()
for (file, struct), result in sorted(merged_results.items()):
w.writerow({'file': file, 'name': struct, **result})
writer = csv.DictWriter(f,
(by if by is not None else StructResult._by)
+ ['struct_'+k for k in (
fields if fields is not None else StructResult._fields)])
writer.writeheader()
for r in results:
writer.writerow(
{k: getattr(r, k) for k in (
by if by is not None else StructResult._by)}
| {'struct_'+k: getattr(r, k) for k in (
fields if fields is not None else StructResult._fields)})
# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: 0)
for file, struct, size in results:
entry = (file if by == 'file' else struct)
entries[entry] += size
return entries
# find previous results?
if args.get('diff'):
diff_results = []
try:
with openio(args['diff']) as f:
reader = csv.DictReader(f, restval='')
for r in reader:
if not any('struct_'+k in r and r['struct_'+k].strip()
for k in StructResult._fields):
continue
try:
diff_results.append(StructResult(
**{k: r[k] for k in StructResult._by
if k in r and r[k].strip()},
**{k: r['struct_'+k]
for k in StructResult._fields
if 'struct_'+k in r
and r['struct_'+k].strip()}))
except TypeError:
pass
except FileNotFoundError:
pass
def diff_entries(olds, news):
diff = co.defaultdict(lambda: (0, 0, 0, 0))
for name, new in news.items():
diff[name] = (0, new, new, 1.0)
for name, old in olds.items():
_, new, _, _ = diff[name]
diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
return diff
# fold
diff_results = fold(StructResult, diff_results, by=by, defines=defines)
def sorted_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1], x))
else:
return sorted(entries)
# print table
if not args.get('quiet'):
table(StructResult, results,
diff_results if args.get('diff') else None,
by=by if by is not None else ['struct'],
fields=fields,
sort=sort,
**args)
def sorted_diff_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1][1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1][1], x))
else:
return sorted(entries, key=lambda x: (-x[1][3], x))
def print_header(by=''):
if not args.get('diff'):
print('%-36s %7s' % (by, 'size'))
else:
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
def print_entry(name, size):
print("%-36s %7d" % (name, size))
def print_diff_entry(name, old, new, diff, ratio):
print("%-36s %7s %7s %+7d%s" % (name,
old or "-",
new or "-",
diff,
' (%+.1f%%)' % (100*ratio) if ratio else ''))
def print_entries(by='name'):
entries = dedup_entries(results, by=by)
if not args.get('diff'):
print_header(by=by)
for name, size in sorted_entries(entries.items()):
print_entry(name, size)
else:
prev_entries = dedup_entries(prev_results, by=by)
diff = diff_entries(prev_entries, entries)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for old, _, _, _ in diff.values() if not old),
sum(1 for _, new, _, _ in diff.values() if not new)))
for name, (old, new, diff, ratio) in sorted_diff_entries(
diff.items()):
if ratio or args.get('all'):
print_diff_entry(name, old, new, diff, ratio)
def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total)
else:
ratio = (0.0 if not prev_total and not total
else 1.0 if not prev_total
else (total-prev_total)/prev_total)
print_diff_entry('TOTAL',
prev_total, total,
total-prev_total,
ratio)
if args.get('quiet'):
pass
elif args.get('summary'):
print_header()
print_totals()
elif args.get('files'):
print_entries(by='file')
print_totals()
else:
print_entries(by='name')
print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Find struct sizes.")
parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
help="Description of where to find *.o files. May be a directory \
or a list of paths. Defaults to %r." % OBJ_PATHS)
parser.add_argument('-v', '--verbose', action='store_true',
description="Find struct sizes.",
allow_abbrev=False)
parser.add_argument(
'obj_paths',
nargs='*',
help="Input *.o files.")
parser.add_argument(
'-v', '--verbose',
action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument('-q', '--quiet', action='store_true',
parser.add_argument(
'-q', '--quiet',
action='store_true',
help="Don't show anything, useful with -o.")
parser.add_argument('-o', '--output',
parser.add_argument(
'-o', '--output',
help="Specify CSV file to store results.")
parser.add_argument('-u', '--use',
help="Don't compile and find struct sizes, instead use this CSV file.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff struct size against.")
parser.add_argument('-m', '--merge',
help="Merge with an existing CSV file when writing to output.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all functions, not just the ones that changed.")
parser.add_argument('-A', '--everything', action='store_true',
parser.add_argument(
'-u', '--use',
help="Don't parse anything, use this CSV file.")
parser.add_argument(
'-d', '--diff',
help="Specify CSV file to diff against.")
parser.add_argument(
'-a', '--all',
action='store_true',
help="Show all, not just the ones that changed.")
parser.add_argument(
'-p', '--percent',
action='store_true',
help="Only show percentage change, not a full diff.")
parser.add_argument(
'-b', '--by',
action='append',
choices=StructResult._by,
help="Group by this field.")
parser.add_argument(
'-f', '--field',
dest='fields',
action='append',
choices=StructResult._fields,
help="Show this field.")
parser.add_argument(
'-D', '--define',
dest='defines',
action='append',
type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
help="Only include results where this field is this value.")
class AppendSort(argparse.Action):
def __call__(self, parser, namespace, value, option):
if namespace.sort is None:
namespace.sort = []
namespace.sort.append((value, True if option == '-S' else False))
parser.add_argument(
'-s', '--sort',
nargs='?',
action=AppendSort,
help="Sort by this field.")
parser.add_argument(
'-S', '--reverse-sort',
nargs='?',
action=AppendSort,
help="Sort by this field, but backwards.")
parser.add_argument(
'-Y', '--summary',
action='store_true',
help="Only show the total.")
parser.add_argument(
'-F', '--source',
dest='sources',
action='append',
help="Only consider definitions in this file. Defaults to anything "
"in the current directory.")
parser.add_argument(
'--everything',
action='store_true',
help="Include builtin and libc specific symbols.")
parser.add_argument('-s', '--size-sort', action='store_true',
help="Sort by size.")
parser.add_argument('-S', '--reverse-size-sort', action='store_true',
help="Sort by size, but backwards.")
parser.add_argument('-F', '--files', action='store_true',
help="Show file-level struct sizes.")
parser.add_argument('-Y', '--summary', action='store_true',
help="Only show the total struct size.")
parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(),
help="Path to the objdump tool to use.")
parser.add_argument('--build-dir',
help="Specify the relative build directory. Used to map object files \
to the correct source files.")
sys.exit(main(**vars(parser.parse_args())))
parser.add_argument(
'--internal',
action='store_true',
help="Also show structs in .c files.")
parser.add_argument(
'--objdump-path',
type=lambda x: x.split(),
default=OBJDUMP_PATH,
help="Path to the objdump executable, may include flags. "
"Defaults to %r." % OBJDUMP_PATH)
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

File diff suppressed because it is too large Load Diff

177
scripts/tailpipe.py Normal file
View File

@ -0,0 +1,177 @@
#!/usr/bin/env python3
#
# Efficiently displays the last n lines of a file/pipe.
#
# Example:
# ./scripts/tailpipe.py trace -n5
#
# Copyright (c) 2022, The littlefs authors.
# SPDX-License-Identifier: BSD-3-Clause
#
import collections as co
import io
import os
import select
import shutil
import sys
import threading as th
import time
def openio(path, mode='r', buffering=-1):
# allow '-' for stdin/stdout
if path == '-':
if mode == 'r':
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
else:
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
else:
return open(path, mode, buffering)
class LinesIO:
def __init__(self, maxlen=None):
self.maxlen = maxlen
self.lines = co.deque(maxlen=maxlen)
self.tail = io.StringIO()
# trigger automatic sizing
if maxlen == 0:
self.resize(0)
def write(self, s):
# note using split here ensures the trailing string has no newline
lines = s.split('\n')
if len(lines) > 1 and self.tail.getvalue():
self.tail.write(lines[0])
lines[0] = self.tail.getvalue()
self.tail = io.StringIO()
self.lines.extend(lines[:-1])
if lines[-1]:
self.tail.write(lines[-1])
def resize(self, maxlen):
self.maxlen = maxlen
if maxlen == 0:
maxlen = shutil.get_terminal_size((80, 5))[1]
if maxlen != self.lines.maxlen:
self.lines = co.deque(self.lines, maxlen=maxlen)
canvas_lines = 1
def draw(self):
# did terminal size change?
if self.maxlen == 0:
self.resize(0)
# first things first, give ourselves a canvas
while LinesIO.canvas_lines < len(self.lines):
sys.stdout.write('\n')
LinesIO.canvas_lines += 1
# clear the bottom of the canvas if we shrink
shrink = LinesIO.canvas_lines - len(self.lines)
if shrink > 0:
for i in range(shrink):
sys.stdout.write('\r')
if shrink-1-i > 0:
sys.stdout.write('\x1b[%dA' % (shrink-1-i))
sys.stdout.write('\x1b[K')
if shrink-1-i > 0:
sys.stdout.write('\x1b[%dB' % (shrink-1-i))
sys.stdout.write('\x1b[%dA' % shrink)
LinesIO.canvas_lines = len(self.lines)
for i, line in enumerate(self.lines):
# move cursor, clear line, disable/reenable line wrapping
sys.stdout.write('\r')
if len(self.lines)-1-i > 0:
sys.stdout.write('\x1b[%dA' % (len(self.lines)-1-i))
sys.stdout.write('\x1b[K')
sys.stdout.write('\x1b[?7l')
sys.stdout.write(line)
sys.stdout.write('\x1b[?7h')
if len(self.lines)-1-i > 0:
sys.stdout.write('\x1b[%dB' % (len(self.lines)-1-i))
sys.stdout.flush()
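# rough usage sketch of the ring buffer (assuming a 2-line ring):
#   ring = LinesIO(2)
#   ring.write('one\ntwo\nthr')   # 'thr' has no newline yet, kept in tail
#   list(ring.lines)              -> ['one', 'two']
#   ring.write('ee\n')
#   list(ring.lines)              -> ['two', 'three']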
def main(path='-', *, lines=5, cat=False, sleep=None, keep_open=False):
if cat:
ring = sys.stdout
else:
ring = LinesIO(lines)
# draw from a background thread so we don't get stuck in a blocking read call
event = th.Event()
lock = th.Lock()
if not cat:
done = False
def background():
while not done:
event.wait()
event.clear()
with lock:
ring.draw()
time.sleep(sleep or 0.01)
th.Thread(target=background, daemon=True).start()
try:
while True:
with openio(path) as f:
for line in f:
with lock:
ring.write(line)
event.set()
if not keep_open:
break
# don't just flood open calls
time.sleep(sleep or 0.1)
except FileNotFoundError as e:
print("error: file not found %r" % path)
sys.exit(-1)
except KeyboardInterrupt:
pass
if not cat:
done = True
lock.acquire() # avoids https://bugs.python.org/issue42717
sys.stdout.write('\n')
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser(
description="Efficiently displays the last n lines of a file/pipe.",
allow_abbrev=False)
parser.add_argument(
'path',
nargs='?',
help="Path to read from.")
parser.add_argument(
'-n', '--lines',
nargs='?',
type=lambda x: int(x, 0),
const=0,
help="Show this many lines of history. 0 uses the terminal height. "
"Defaults to 5.")
parser.add_argument(
'-z', '--cat',
action='store_true',
help="Pipe directly to stdout.")
parser.add_argument(
'-s', '--sleep',
type=float,
help="Seconds to sleep between reads. Defaults to 0.01.")
parser.add_argument(
'-k', '--keep-open',
action='store_true',
help="Reopen the pipe on EOF, useful when multiple "
"processes are writing.")
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

73
scripts/teepipe.py Normal file
View File

@ -0,0 +1,73 @@
#!/usr/bin/env python3
#
# tee, but for pipes
#
# Example:
# ./scripts/teepipe.py in_pipe out_pipe1 out_pipe2
#
# Copyright (c) 2022, The littlefs authors.
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import io
import time
import sys
def openio(path, mode='r', buffering=-1):
# allow '-' for stdin/stdout
if path == '-':
if mode == 'r':
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
else:
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
else:
return open(path, mode, buffering)
def main(in_path, out_paths, *, keep_open=False):
out_pipes = [openio(p, 'wb', 0) for p in out_paths]
try:
with openio(in_path, 'rb', 0) as f:
while True:
buf = f.read(io.DEFAULT_BUFFER_SIZE)
if not buf:
if not keep_open:
break
# don't just flood reads
time.sleep(0.1)
continue
for p in out_pipes:
try:
p.write(buf)
except BrokenPipeError:
pass
except FileNotFoundError as e:
print("error: file not found %r" % in_path)
sys.exit(-1)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser(
description="tee, but for pipes.",
allow_abbrev=False)
parser.add_argument(
'in_path',
help="Path to read from.")
parser.add_argument(
'out_paths',
nargs='+',
help="Path to write to.")
parser.add_argument(
'-k', '--keep-open',
action='store_true',
help="Reopen the pipe on EOF, useful when multiple "
"processes are writing.")
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

File diff suppressed because it is too large Load Diff

1002
scripts/tracebd.py Normal file

File diff suppressed because it is too large Load Diff

265
scripts/watch.py Normal file
View File

@ -0,0 +1,265 @@
#!/usr/bin/env python3
#
# Traditional watch command, but with higher resolution updates and a bit
# different options/output format
#
# Example:
# ./scripts/watch.py -s0.1 date
#
# Copyright (c) 2022, The littlefs authors.
# SPDX-License-Identifier: BSD-3-Clause
#
import collections as co
import errno
import fcntl
import io
import os
import pty
import re
import shutil
import struct
import subprocess as sp
import sys
import termios
import time
try:
import inotify_simple
except ModuleNotFoundError:
inotify_simple = None
def openio(path, mode='r', buffering=-1):
# allow '-' for stdin/stdout
if path == '-':
if mode == 'r':
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
else:
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
else:
return open(path, mode, buffering)
def inotifywait(paths):
# wait for interesting events
inotify = inotify_simple.INotify()
flags = (inotify_simple.flags.ATTRIB
| inotify_simple.flags.CREATE
| inotify_simple.flags.DELETE
| inotify_simple.flags.DELETE_SELF
| inotify_simple.flags.MODIFY
| inotify_simple.flags.MOVED_FROM
| inotify_simple.flags.MOVED_TO
| inotify_simple.flags.MOVE_SELF)
# recurse into directories
for path in paths:
if os.path.isdir(path):
for dir, _, files in os.walk(path):
inotify.add_watch(dir, flags)
for f in files:
inotify.add_watch(os.path.join(dir, f), flags)
else:
inotify.add_watch(path, flags)
# wait for event
inotify.read()
class LinesIO:
def __init__(self, maxlen=None):
self.maxlen = maxlen
self.lines = co.deque(maxlen=maxlen)
self.tail = io.StringIO()
# trigger automatic sizing
if maxlen == 0:
self.resize(0)
def write(self, s):
# note using split here ensures the trailing string has no newline
lines = s.split('\n')
if len(lines) > 1 and self.tail.getvalue():
self.tail.write(lines[0])
lines[0] = self.tail.getvalue()
self.tail = io.StringIO()
self.lines.extend(lines[:-1])
if lines[-1]:
self.tail.write(lines[-1])
def resize(self, maxlen):
self.maxlen = maxlen
if maxlen == 0:
maxlen = shutil.get_terminal_size((80, 5))[1]
if maxlen != self.lines.maxlen:
self.lines = co.deque(self.lines, maxlen=maxlen)
canvas_lines = 1
def draw(self):
# did terminal size change?
if self.maxlen == 0:
self.resize(0)
# first things first, give ourselves a canvas
while LinesIO.canvas_lines < len(self.lines):
sys.stdout.write('\n')
LinesIO.canvas_lines += 1
# clear the bottom of the canvas if we shrink
shrink = LinesIO.canvas_lines - len(self.lines)
if shrink > 0:
for i in range(shrink):
sys.stdout.write('\r')
if shrink-1-i > 0:
sys.stdout.write('\x1b[%dA' % (shrink-1-i))
sys.stdout.write('\x1b[K')
if shrink-1-i > 0:
sys.stdout.write('\x1b[%dB' % (shrink-1-i))
sys.stdout.write('\x1b[%dA' % shrink)
LinesIO.canvas_lines = len(self.lines)
for i, line in enumerate(self.lines):
# move cursor, clear line, disable/reenable line wrapping
sys.stdout.write('\r')
if len(self.lines)-1-i > 0:
sys.stdout.write('\x1b[%dA' % (len(self.lines)-1-i))
sys.stdout.write('\x1b[K')
sys.stdout.write('\x1b[?7l')
sys.stdout.write(line)
sys.stdout.write('\x1b[?7h')
if len(self.lines)-1-i > 0:
sys.stdout.write('\x1b[%dB' % (len(self.lines)-1-i))
sys.stdout.flush()
def main(command, *,
lines=0,
cat=False,
sleep=None,
keep_open=False,
keep_open_paths=None,
exit_on_error=False):
returncode = 0
try:
while True:
# reset ring each run
if cat:
ring = sys.stdout
else:
ring = LinesIO(lines)
try:
# run the command under a pseudoterminal
mpty, spty = pty.openpty()
# forward terminal size
w, h = shutil.get_terminal_size((80, 5))
if lines:
h = lines
fcntl.ioctl(spty, termios.TIOCSWINSZ,
struct.pack('HHHH', h, w, 0, 0))
proc = sp.Popen(command,
stdout=spty,
stderr=spty,
close_fds=False)
os.close(spty)
mpty = os.fdopen(mpty, 'r', 1)
while True:
try:
line = mpty.readline()
except OSError as e:
if e.errno != errno.EIO:
raise
break
if not line:
break
ring.write(line)
if not cat:
ring.draw()
mpty.close()
proc.wait()
if exit_on_error and proc.returncode != 0:
returncode = proc.returncode
break
except OSError as e:
if e.errno != errno.ETXTBSY:
raise
pass
# try to inotifywait
if keep_open and inotify_simple is not None:
if keep_open_paths:
paths = set(keep_open_paths)
else:
# guess inotify paths from command
paths = set()
for p in command:
for p in {
p,
re.sub('^-.', '', p),
re.sub('^--[^=]+=', '', p)}:
if p and os.path.exists(p):
paths.add(p)
ptime = time.time()
inotifywait(paths)
# sleep for a minimum amount of time, this helps avoid issues with
# rapidly updating files
time.sleep(max(0, (sleep or 0.1) - (time.time()-ptime)))
else:
time.sleep(sleep or 0.1)
except KeyboardInterrupt:
pass
if not cat:
sys.stdout.write('\n')
sys.exit(returncode)
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser(
description="Traditional watch command, but with higher resolution "
"updates and a bit different options/output format.",
allow_abbrev=False)
parser.add_argument(
'command',
nargs=argparse.REMAINDER,
help="Command to run.")
parser.add_argument(
'-n', '--lines',
nargs='?',
type=lambda x: int(x, 0),
const=0,
help="Show this many lines of history. 0 uses the terminal height. "
"Defaults to 0.")
parser.add_argument(
'-z', '--cat',
action='store_true',
help="Pipe directly to stdout.")
parser.add_argument(
'-s', '--sleep',
type=float,
help="Seconds to sleep between runs. Defaults to 0.1.")
parser.add_argument(
'-k', '--keep-open',
action='store_true',
help="Try to use inotify to wait for changes.")
parser.add_argument(
'-K', '--keep-open-path',
dest='keep_open_paths',
action='append',
help="Use this path for inotify. Defaults to guessing.")
parser.add_argument(
'-e', '--exit-on-error',
action='store_true',
help="Exit on error.")
sys.exit(main(**{k: v
for k, v in vars(parser.parse_args()).items()
if v is not None}))
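Running the child under a pseudoterminal rather than a plain pipe is what keeps tty-style output, e.g. color and line buffering, while still letting the script capture every line. A minimal sketch of the same trick, assuming a simple ls --color=auto command:

    import os
    import pty
    import subprocess as sp

    mpty, spty = pty.openpty()
    # the child writes to the slave end and thinks it's on a terminal
    proc = sp.Popen(['ls', '--color=auto'], stdout=spty, stderr=spty)
    os.close(spty)
    with os.fdopen(mpty, 'r', 1) as f:
        try:
            for line in f:
                print(line, end='')
        except OSError:
            pass  # EIO once the child exits and closes its end
    proc.wait()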

View File

@ -1,27 +1,34 @@
# allocator tests
# note for these to work there are a number of constraints on the device geometry
if = 'LFS_BLOCK_CYCLES == -1'
if = 'BLOCK_CYCLES == -1'
[[case]] # parallel allocation test
define.FILES = 3
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
# parallel allocation test
[cases.test_alloc_parallel]
defines.FILES = 3
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
defines.GC = [false, true]
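# note: list-valued defines such as GC expand into separate test permutations,
# so this case runs once with lfs_fs_gc disabled and once with it enabled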
code = '''
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
const char *names[] = {"bacon", "eggs", "pancakes"};
lfs_file_t files[FILES];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_file_open(&lfs, &files[n], path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
}
for (int n = 0; n < FILES; n++) {
size = strlen(names[n]);
if (GC) {
lfs_fs_gc(&lfs) => 0;
}
size_t size = strlen(names[n]);
for (lfs_size_t i = 0; i < SIZE; i += size) {
lfs_file_write(&lfs, &files[n], names[n], size) => size;
}
@ -31,12 +38,15 @@ code = '''
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
size = strlen(names[n]);
size_t size = strlen(names[n]);
for (lfs_size_t i = 0; i < SIZE; i += size) {
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, names[n], size) == 0);
}
@ -45,37 +55,49 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # serial allocation test
define.FILES = 3
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
# serial allocation test
[cases.test_alloc_serial]
defines.FILES = 3
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
defines.GC = [false, true]
code = '''
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
const char *names[] = {"bacon", "eggs", "pancakes"};
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
for (int n = 0; n < FILES; n++) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen(names[n]);
size_t size = strlen(names[n]);
uint8_t buffer[1024];
memcpy(buffer, names[n], size);
for (int i = 0; i < SIZE; i += size) {
if (GC) {
lfs_fs_gc(&lfs) => 0;
}
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
}
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
size = strlen(names[n]);
size_t size = strlen(names[n]);
for (int i = 0; i < SIZE; i += size) {
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, names[n], size) == 0);
}
@ -84,29 +106,32 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # parallel allocation reuse test
define.FILES = 3
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
define.CYCLES = [1, 10]
# parallel allocation reuse test
[cases.test_alloc_parallel_reuse]
defines.FILES = 3
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
defines.CYCLES = [1, 10]
code = '''
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
const char *names[] = {"bacon", "eggs", "pancakes"};
lfs_file_t files[FILES];
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
for (int c = 0; c < CYCLES; c++) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_file_open(&lfs, &files[n], path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
}
for (int n = 0; n < FILES; n++) {
size = strlen(names[n]);
size_t size = strlen(names[n]);
for (int i = 0; i < SIZE; i += size) {
lfs_file_write(&lfs, &files[n], names[n], size) => size;
}
@ -116,12 +141,15 @@ code = '''
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
size = strlen(names[n]);
size_t size = strlen(names[n]);
for (int i = 0; i < SIZE; i += size) {
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, names[n], size) == 0);
}
@ -129,8 +157,9 @@ code = '''
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_remove(&lfs, path) => 0;
}
@ -139,26 +168,31 @@ code = '''
}
'''
[[case]] # serial allocation reuse test
define.FILES = 3
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
define.CYCLES = [1, 10]
# serial allocation reuse test
[cases.test_alloc_serial_reuse]
defines.FILES = 3
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
defines.CYCLES = [1, 10]
code = '''
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
const char *names[] = {"bacon", "eggs", "pancakes"};
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
for (int c = 0; c < CYCLES; c++) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
for (int n = 0; n < FILES; n++) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen(names[n]);
size_t size = strlen(names[n]);
uint8_t buffer[1024];
memcpy(buffer, names[n], size);
for (int i = 0; i < SIZE; i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
@ -167,12 +201,15 @@ code = '''
lfs_unmount(&lfs) => 0;
}
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
size = strlen(names[n]);
size_t size = strlen(names[n]);
for (int i = 0; i < SIZE; i += size) {
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, names[n], size) == 0);
}
@ -180,8 +217,9 @@ code = '''
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int n = 0; n < FILES; n++) {
char path[1024];
sprintf(path, "breakfast/%s", names[n]);
lfs_remove(&lfs, path) => 0;
}
@ -190,12 +228,16 @@ code = '''
}
'''
[[case]] # exhaustion test
# exhaustion test
[cases.test_alloc_exhaustion]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
size = strlen("exhaustion");
size_t size = strlen("exhaustion");
uint8_t buffer[1024];
memcpy(buffer, "exhaustion", size);
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_sync(&lfs, &file) => 0;
@ -213,10 +255,13 @@ code = '''
}
res => LFS_ERR_NOSPC;
// note that lfs_fs_gc should not error here
lfs_fs_gc(&lfs) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
size = strlen("exhaustion");
lfs_file_size(&lfs, &file) => size;
@ -226,14 +271,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # exhaustion wraparound test
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / 3)'
# exhaustion wraparound test
[cases.test_alloc_exhaustion_wraparound]
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-4)) / 3)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "padding", LFS_O_WRONLY | LFS_O_CREAT);
size = strlen("buffering");
size_t size = strlen("buffering");
uint8_t buffer[1024];
memcpy(buffer, "buffering", size);
for (int i = 0; i < SIZE; i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
@ -260,10 +309,13 @@ code = '''
}
res => LFS_ERR_NOSPC;
// note that lfs_fs_gc should not error here
lfs_fs_gc(&lfs) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
size = strlen("exhaustion");
lfs_file_size(&lfs, &file) => size;
@ -274,17 +326,22 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # dir exhaustion test
# dir exhaustion test
[cases.test_alloc_dir_exhaustion]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// find out max file size
lfs_mkdir(&lfs, "exhaustiondir") => 0;
size = strlen("blahblahblahblah");
size_t size = strlen("blahblahblahblah");
uint8_t buffer[1024];
memcpy(buffer, "blahblahblahblah", size);
lfs_file_t file;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
int count = 0;
int err;
while (true) {
err = lfs_file_write(&lfs, &file, buffer, size);
if (err < 0) {
@ -294,6 +351,8 @@ code = '''
count += 1;
}
err => LFS_ERR_NOSPC;
// note that lfs_fs_gc should not error here
lfs_fs_gc(&lfs) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_remove(&lfs, "exhaustion") => 0;
@ -323,17 +382,21 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # what if we have a bad block during an allocation scan?
# what if we have a bad block during an allocation scan?
[cases.test_alloc_bad_blocks]
in = "lfs.c"
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_BADBLOCK_BEHAVIOR = 'LFS_TESTBD_BADBLOCK_READERROR'
defines.ERASE_CYCLES = 0xffffffff
defines.BADBLOCK_BEHAVIOR = 'LFS_EMUBD_BADBLOCK_READERROR'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// first fill to exhaustion to find available space
lfs_file_t file;
lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
uint8_t buffer[1024];
strcpy((char*)buffer, "waka");
size = strlen("waka");
size_t size = strlen("waka");
lfs_size_t filesize = 0;
while (true) {
lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
@ -345,7 +408,7 @@ code = '''
}
lfs_file_close(&lfs, &file) => 0;
// now fill all but a couple of blocks of the filesystem with data
filesize -= 3*LFS_BLOCK_SIZE;
filesize -= 3*BLOCK_SIZE;
lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "waka");
size = strlen("waka");
@ -358,11 +421,11 @@ code = '''
lfs_unmount(&lfs) => 0;
// remount to force an alloc scan
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// but mark the head of our file as a "bad block", this is force our
// scan to bail early
lfs_testbd_setwear(&cfg, fileblock, 0xffffffff) => 0;
lfs_emubd_setwear(cfg, fileblock, 0xffffffff) => 0;
lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "chomp");
size = strlen("chomp");
@ -377,7 +440,7 @@ code = '''
// now reverse the "bad block" and try to write the file again until we
// run out of space
lfs_testbd_setwear(&cfg, fileblock, 0) => 0;
lfs_emubd_setwear(cfg, fileblock, 0) => 0;
lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "chomp");
size = strlen("chomp");
@ -388,12 +451,14 @@ code = '''
break;
}
}
// note that lfs_fs_gc should not error here
lfs_fs_gc(&lfs) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// check that the disk isn't hurt
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "pacman", LFS_O_RDONLY) => 0;
strcpy((char*)buffer, "waka");
size = strlen("waka");
@ -411,24 +476,29 @@ code = '''
# on the geometry of the block device. But they are valuable. Eventually they
# should be removed and replaced with generalized tests.
[[case]] # chained dir exhaustion test
define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
# chained dir exhaustion test
[cases.test_alloc_chained_dir_exhaustion]
if = 'ERASE_SIZE == 512'
defines.ERASE_COUNT = 1024
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// find out max file size
lfs_mkdir(&lfs, "exhaustiondir") => 0;
for (int i = 0; i < 10; i++) {
char path[1024];
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
lfs_mkdir(&lfs, path) => 0;
}
size = strlen("blahblahblahblah");
size_t size = strlen("blahblahblahblah");
uint8_t buffer[1024];
memcpy(buffer, "blahblahblahblah", size);
lfs_file_t file;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
int count = 0;
int err;
while (true) {
err = lfs_file_write(&lfs, &file, buffer, size);
if (err < 0) {
@ -443,6 +513,7 @@ code = '''
lfs_remove(&lfs, "exhaustion") => 0;
lfs_remove(&lfs, "exhaustiondir") => 0;
for (int i = 0; i < 10; i++) {
char path[1024];
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
lfs_remove(&lfs, path) => 0;
}
@ -455,6 +526,7 @@ code = '''
lfs_file_sync(&lfs, &file) => 0;
for (int i = 0; i < 10; i++) {
char path[1024];
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
lfs_mkdir(&lfs, path) => 0;
}
@ -482,27 +554,31 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # split dir test
define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
# split dir test
[cases.test_alloc_split_dir]
if = 'ERASE_SIZE == 512'
defines.ERASE_COUNT = 1024
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// create one block hole for half a directory
lfs_file_t file;
lfs_file_open(&lfs, &file, "bump", LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
for (lfs_size_t i = 0; i < cfg->block_size; i += 2) {
uint8_t buffer[1024];
memcpy(&buffer[i], "hi", 2);
}
lfs_file_write(&lfs, &file, buffer, cfg.block_size) => cfg.block_size;
uint8_t buffer[1024];
lfs_file_write(&lfs, &file, buffer, cfg->block_size) => cfg->block_size;
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
size = strlen("blahblahblahblah");
size_t size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < (cfg.block_count-4)*(cfg.block_size-8);
i < (cfg->block_count-4)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@ -510,7 +586,7 @@ code = '''
// remount to force reset of lookahead
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// open hole
lfs_remove(&lfs, "bump") => 0;
@ -518,30 +594,33 @@ code = '''
lfs_mkdir(&lfs, "splitdir") => 0;
lfs_file_open(&lfs, &file, "splitdir/bump",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
for (lfs_size_t i = 0; i < cfg->block_size; i += 2) {
memcpy(&buffer[i], "hi", 2);
}
lfs_file_write(&lfs, &file, buffer, 2*cfg.block_size) => LFS_ERR_NOSPC;
lfs_file_write(&lfs, &file, buffer, 2*cfg->block_size) => LFS_ERR_NOSPC;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # outdated lookahead test
define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
# outdated lookahead test
[cases.test_alloc_outdated_lookahead]
if = 'ERASE_SIZE == 512'
defines.ERASE_COUNT = 1024
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// fill completely with two files
lfs_file_t file;
lfs_file_open(&lfs, &file, "exhaustion1",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
size = strlen("blahblahblahblah");
size_t size = strlen("blahblahblahblah");
uint8_t buffer[1024];
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
i < ((cfg->block_count-2)/2)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@ -552,7 +631,7 @@ code = '''
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
i < ((cfg->block_count-2+1)/2)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@ -560,7 +639,7 @@ code = '''
// remount to force reset of lookahead
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// rewrite one file
lfs_file_open(&lfs, &file, "exhaustion1",
@ -569,7 +648,7 @@ code = '''
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
i < ((cfg->block_count-2)/2)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@ -583,7 +662,7 @@ code = '''
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
i < ((cfg->block_count-2+1)/2)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@ -592,21 +671,24 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # outdated lookahead and split dir test
define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
# outdated lookahead and split dir test
[cases.test_alloc_outdated_lookahead_split_dir]
if = 'ERASE_SIZE == 512'
defines.ERASE_COUNT = 1024
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// fill completely with two files
lfs_file_t file;
lfs_file_open(&lfs, &file, "exhaustion1",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
size = strlen("blahblahblahblah");
size_t size = strlen("blahblahblahblah");
uint8_t buffer[1024];
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
i < ((cfg->block_count-2)/2)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@ -617,7 +699,7 @@ code = '''
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
i < ((cfg->block_count-2+1)/2)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@ -625,7 +707,7 @@ code = '''
// remount to force reset of lookahead
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// rewrite one file with a hole of one block
lfs_file_open(&lfs, &file, "exhaustion1",
@ -634,7 +716,7 @@ code = '''
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < ((cfg.block_count-2)/2 - 1)*(cfg.block_size-8);
i < ((cfg->block_count-2)/2 - 1)*(cfg->block_size-8);
i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}

View File

@ -1,14 +1,17 @@
[[case]] # set/get attribute
[cases.test_attrs_get_set]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
uint8_t buffer[1024];
memset(buffer, 0, sizeof(buffer));
lfs_setattr(&lfs, "hello", 'A', "aaaa", 4) => 0;
lfs_setattr(&lfs, "hello", 'B', "bbbbbb", 6) => 0;
@ -60,7 +63,7 @@ code = '''
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
memset(buffer, 0, sizeof(buffer));
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
lfs_getattr(&lfs, "hello", 'B', buffer+4, 9) => 9;
@ -76,17 +79,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # set/get root attribute
[cases.test_attrs_get_set_root]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
uint8_t buffer[1024];
memset(buffer, 0, sizeof(buffer));
lfs_setattr(&lfs, "/", 'A', "aaaa", 4) => 0;
lfs_setattr(&lfs, "/", 'B', "bbbbbb", 6) => 0;
@ -137,7 +143,7 @@ code = '''
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
memset(buffer, 0, sizeof(buffer));
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
lfs_getattr(&lfs, "/", 'B', buffer+4, 9) => 9;
@ -153,17 +159,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # set/get file attribute
[cases.test_attrs_get_set_file]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
uint8_t buffer[1024];
memset(buffer, 0, sizeof(buffer));
struct lfs_attr attrs1[] = {
{'A', buffer, 4},
@ -238,7 +247,7 @@ code = '''
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
memset(buffer, 0, sizeof(buffer));
struct lfs_attr attrs3[] = {
{'A', buffer, 4},
@ -260,20 +269,23 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # deferred file attributes
[cases.test_attrs_deferred_file]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_setattr(&lfs, "hello/hello", 'B', "fffffffff", 9) => 0;
lfs_setattr(&lfs, "hello/hello", 'C', "ccccc", 5) => 0;
uint8_t buffer[1024];
memset(buffer, 0, sizeof(buffer));
struct lfs_attr attrs1[] = {
{'B', "gggg", 4},

View File

@ -1,28 +1,30 @@
# bad blocks with block cycles should be tested in test_relocations
if = 'LFS_BLOCK_CYCLES == -1'
if = '(int32_t)BLOCK_CYCLES == -1'
[[case]] # single bad blocks
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_PROGERROR',
'LFS_TESTBD_BADBLOCK_ERASEERROR',
'LFS_TESTBD_BADBLOCK_READERROR',
'LFS_TESTBD_BADBLOCK_PROGNOOP',
'LFS_TESTBD_BADBLOCK_ERASENOOP',
[cases.test_badblocks_single]
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.ERASE_CYCLES = 0xffffffff
defines.ERASE_VALUE = [0x00, 0xff, -1]
defines.BADBLOCK_BEHAVIOR = [
'LFS_EMUBD_BADBLOCK_PROGERROR',
'LFS_EMUBD_BADBLOCK_ERASEERROR',
'LFS_EMUBD_BADBLOCK_READERROR',
'LFS_EMUBD_BADBLOCK_PROGNOOP',
'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
define.NAMEMULT = 64
define.FILEMULT = 1
defines.NAMEMULT = 64
defines.FILEMULT = 1
code = '''
for (lfs_block_t badblock = 2; badblock < LFS_BLOCK_COUNT; badblock++) {
lfs_testbd_setwear(&cfg, badblock-1, 0) => 0;
lfs_testbd_setwear(&cfg, badblock, 0xffffffff) => 0;
lfs_format(&lfs, &cfg) => 0;
for (lfs_block_t badblock = 2; badblock < BLOCK_COUNT; badblock++) {
lfs_emubd_setwear(cfg, badblock-1, 0) => 0;
lfs_emubd_setwear(cfg, badblock, 0xffffffff) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 1; i < 10; i++) {
uint8_t buffer[1024];
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
@ -34,10 +36,11 @@ code = '''
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
lfs_file_t file;
lfs_file_open(&lfs, &file, (char*)buffer,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
size = NAMEMULT;
lfs_size_t size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@ -46,12 +49,14 @@ code = '''
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 1; i < 10; i++) {
uint8_t buffer[1024];
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[NAMEMULT] = '\0';
struct lfs_info info;
lfs_stat(&lfs, (char*)buffer, &info) => 0;
info.type => LFS_TYPE_DIR;
@ -60,9 +65,10 @@ code = '''
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
lfs_file_t file;
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
size = NAMEMULT;
int size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
@ -75,28 +81,30 @@ code = '''
}
'''
[[case]] # region corruption (causes cascading failures)
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_PROGERROR',
'LFS_TESTBD_BADBLOCK_ERASEERROR',
'LFS_TESTBD_BADBLOCK_READERROR',
'LFS_TESTBD_BADBLOCK_PROGNOOP',
'LFS_TESTBD_BADBLOCK_ERASENOOP',
[cases.test_badblocks_region_corruption] # (causes cascading failures)
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.ERASE_CYCLES = 0xffffffff
defines.ERASE_VALUE = [0x00, 0xff, -1]
defines.BADBLOCK_BEHAVIOR = [
'LFS_EMUBD_BADBLOCK_PROGERROR',
'LFS_EMUBD_BADBLOCK_ERASEERROR',
'LFS_EMUBD_BADBLOCK_READERROR',
'LFS_EMUBD_BADBLOCK_PROGNOOP',
'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
define.NAMEMULT = 64
define.FILEMULT = 1
defines.NAMEMULT = 64
defines.FILEMULT = 1
code = '''
for (lfs_block_t i = 0; i < (LFS_BLOCK_COUNT-2)/2; i++) {
lfs_testbd_setwear(&cfg, i+2, 0xffffffff) => 0;
for (lfs_block_t i = 0; i < (BLOCK_COUNT-2)/2; i++) {
lfs_emubd_setwear(cfg, i+2, 0xffffffff) => 0;
}
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 1; i < 10; i++) {
uint8_t buffer[1024];
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
@ -108,10 +116,11 @@ code = '''
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
lfs_file_t file;
lfs_file_open(&lfs, &file, (char*)buffer,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
size = NAMEMULT;
lfs_size_t size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@ -120,12 +129,14 @@ code = '''
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 1; i < 10; i++) {
uint8_t buffer[1024];
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[NAMEMULT] = '\0';
struct lfs_info info;
lfs_stat(&lfs, (char*)buffer, &info) => 0;
info.type => LFS_TYPE_DIR;
@ -134,9 +145,10 @@ code = '''
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
lfs_file_t file;
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
size = NAMEMULT;
lfs_size_t size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
@ -148,28 +160,30 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # alternating corruption (causes cascading failures)
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_PROGERROR',
'LFS_TESTBD_BADBLOCK_ERASEERROR',
'LFS_TESTBD_BADBLOCK_READERROR',
'LFS_TESTBD_BADBLOCK_PROGNOOP',
'LFS_TESTBD_BADBLOCK_ERASENOOP',
[cases.test_badblocks_alternating_corruption] # (causes cascading failures)
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.ERASE_CYCLES = 0xffffffff
defines.ERASE_VALUE = [0x00, 0xff, -1]
defines.BADBLOCK_BEHAVIOR = [
'LFS_EMUBD_BADBLOCK_PROGERROR',
'LFS_EMUBD_BADBLOCK_ERASEERROR',
'LFS_EMUBD_BADBLOCK_READERROR',
'LFS_EMUBD_BADBLOCK_PROGNOOP',
'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
define.NAMEMULT = 64
define.FILEMULT = 1
defines.NAMEMULT = 64
defines.FILEMULT = 1
code = '''
for (lfs_block_t i = 0; i < (LFS_BLOCK_COUNT-2)/2; i++) {
lfs_testbd_setwear(&cfg, (2*i) + 2, 0xffffffff) => 0;
for (lfs_block_t i = 0; i < (BLOCK_COUNT-2)/2; i++) {
lfs_emubd_setwear(cfg, (2*i) + 2, 0xffffffff) => 0;
}
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 1; i < 10; i++) {
uint8_t buffer[1024];
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
@ -181,10 +195,11 @@ code = '''
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
lfs_file_t file;
lfs_file_open(&lfs, &file, (char*)buffer,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
size = NAMEMULT;
lfs_size_t size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@ -193,12 +208,14 @@ code = '''
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 1; i < 10; i++) {
uint8_t buffer[1024];
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[NAMEMULT] = '\0';
struct lfs_info info;
lfs_stat(&lfs, (char*)buffer, &info) => 0;
info.type => LFS_TYPE_DIR;
@ -207,9 +224,10 @@ code = '''
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
lfs_file_t file;
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
size = NAMEMULT;
lfs_size_t size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
@ -222,20 +240,21 @@ code = '''
'''
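# A rough summary of the wear patterns exercised above (a sketch mirroring the
# test code): setting a block's wear to 0xffffffff (== ERASE_CYCLES here) makes
# lfs_emubd treat it as a bad block per BADBLOCK_BEHAVIOR, while blocks 0/1
# (the superblock pair) are left good in each of these cases.
#
#   lfs_emubd_setwear(cfg, badblock, 0xffffffff);    // single: one bad block per pass
#   lfs_emubd_setwear(cfg, i+2, 0xffffffff);         // region: first half of the bd
#   lfs_emubd_setwear(cfg, (2*i) + 2, 0xffffffff);   // alternating: every other block
#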
# other corner cases
[[case]] # bad superblocks (corrupt 1 or 0)
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_PROGERROR',
'LFS_TESTBD_BADBLOCK_ERASEERROR',
'LFS_TESTBD_BADBLOCK_READERROR',
'LFS_TESTBD_BADBLOCK_PROGNOOP',
'LFS_TESTBD_BADBLOCK_ERASENOOP',
[cases.test_badblocks_superblocks] # (corrupt 1 or 0)
defines.ERASE_CYCLES = 0xffffffff
defines.ERASE_VALUE = [0x00, 0xff, -1]
defines.BADBLOCK_BEHAVIOR = [
'LFS_EMUBD_BADBLOCK_PROGERROR',
'LFS_EMUBD_BADBLOCK_ERASEERROR',
'LFS_EMUBD_BADBLOCK_READERROR',
'LFS_EMUBD_BADBLOCK_PROGNOOP',
'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
code = '''
lfs_testbd_setwear(&cfg, 0, 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, 1, 0xffffffff) => 0;
lfs_emubd_setwear(cfg, 0, 0xffffffff) => 0;
lfs_emubd_setwear(cfg, 1, 0xffffffff) => 0;
lfs_format(&lfs, &cfg) => LFS_ERR_NOSPC;
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
lfs_t lfs;
lfs_format(&lfs, cfg) => LFS_ERR_NOSPC;
lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''

248
tests/test_bd.toml Normal file

@ -0,0 +1,248 @@
# These tests don't really test littlefs at all; they are here only to make
# sure the underlying block device is working.
#
# Note we use 251, a prime, in places to avoid aliasing powers of 2.
#
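# As a rough illustration (not part of the tests): with a power-of-2 modulus the
# written pattern repeats in lock-step with the power-of-2 READ/PROG sizes, so
# data landing at the wrong aligned offset could still read back as the expected
# bytes. A prime modulus shares no period with any power of 2:
#
#   buffer[j] = (i+j) % 256;   // period divides every power-of-2 offset
#   buffer[j] = (i+j) % 251;   // prime period, misplaced data reads back wrong
#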
[cases.test_bd_one_block]
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
code = '''
uint8_t buffer[lfs_max(READ, PROG)];
// write data
cfg->erase(cfg, 0) => 0;
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
for (lfs_off_t j = 0; j < PROG; j++) {
buffer[j] = (i+j) % 251;
}
cfg->prog(cfg, 0, i, buffer, PROG) => 0;
}
// read data
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
cfg->read(cfg, 0, i, buffer, READ) => 0;
for (lfs_off_t j = 0; j < READ; j++) {
LFS_ASSERT(buffer[j] == (i+j) % 251);
}
}
'''
[cases.test_bd_two_block]
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
code = '''
uint8_t buffer[lfs_max(READ, PROG)];
lfs_block_t block;
// write block 0
block = 0;
cfg->erase(cfg, block) => 0;
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
for (lfs_off_t j = 0; j < PROG; j++) {
buffer[j] = (block+i+j) % 251;
}
cfg->prog(cfg, block, i, buffer, PROG) => 0;
}
// read block 0
block = 0;
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
cfg->read(cfg, block, i, buffer, READ) => 0;
for (lfs_off_t j = 0; j < READ; j++) {
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
}
}
// write block 1
block = 1;
cfg->erase(cfg, block) => 0;
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
for (lfs_off_t j = 0; j < PROG; j++) {
buffer[j] = (block+i+j) % 251;
}
cfg->prog(cfg, block, i, buffer, PROG) => 0;
}
// read block 1
block = 1;
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
cfg->read(cfg, block, i, buffer, READ) => 0;
for (lfs_off_t j = 0; j < READ; j++) {
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
}
}
// read block 0 again
block = 0;
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
cfg->read(cfg, block, i, buffer, READ) => 0;
for (lfs_off_t j = 0; j < READ; j++) {
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
}
}
'''
[cases.test_bd_last_block]
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
code = '''
uint8_t buffer[lfs_max(READ, PROG)];
lfs_block_t block;
// write block 0
block = 0;
cfg->erase(cfg, block) => 0;
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
for (lfs_off_t j = 0; j < PROG; j++) {
buffer[j] = (block+i+j) % 251;
}
cfg->prog(cfg, block, i, buffer, PROG) => 0;
}
// read block 0
block = 0;
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
cfg->read(cfg, block, i, buffer, READ) => 0;
for (lfs_off_t j = 0; j < READ; j++) {
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
}
}
// write block n-1
block = cfg->block_count-1;
cfg->erase(cfg, block) => 0;
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
for (lfs_off_t j = 0; j < PROG; j++) {
buffer[j] = (block+i+j) % 251;
}
cfg->prog(cfg, block, i, buffer, PROG) => 0;
}
// read block n-1
block = cfg->block_count-1;
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
cfg->read(cfg, block, i, buffer, READ) => 0;
for (lfs_off_t j = 0; j < READ; j++) {
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
}
}
// read block 0 again
block = 0;
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
cfg->read(cfg, block, i, buffer, READ) => 0;
for (lfs_off_t j = 0; j < READ; j++) {
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
}
}
'''
[cases.test_bd_powers_of_two]
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
code = '''
uint8_t buffer[lfs_max(READ, PROG)];
// write/read every power of 2
lfs_block_t block = 1;
while (block < cfg->block_count) {
// write
cfg->erase(cfg, block) => 0;
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
for (lfs_off_t j = 0; j < PROG; j++) {
buffer[j] = (block+i+j) % 251;
}
cfg->prog(cfg, block, i, buffer, PROG) => 0;
}
// read
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
cfg->read(cfg, block, i, buffer, READ) => 0;
for (lfs_off_t j = 0; j < READ; j++) {
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
}
}
block *= 2;
}
// read every power of 2 again
block = 1;
while (block < cfg->block_count) {
// read
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
cfg->read(cfg, block, i, buffer, READ) => 0;
for (lfs_off_t j = 0; j < READ; j++) {
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
}
}
block *= 2;
}
'''
[cases.test_bd_fibonacci]
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
code = '''
uint8_t buffer[lfs_max(READ, PROG)];
// write/read every fibonacci number on our device
lfs_block_t block = 1;
lfs_block_t block_ = 1;
while (block < cfg->block_count) {
// write
cfg->erase(cfg, block) => 0;
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
for (lfs_off_t j = 0; j < PROG; j++) {
buffer[j] = (block+i+j) % 251;
}
cfg->prog(cfg, block, i, buffer, PROG) => 0;
}
// read
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
cfg->read(cfg, block, i, buffer, READ) => 0;
for (lfs_off_t j = 0; j < READ; j++) {
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
}
}
lfs_block_t nblock = block + block_;
block_ = block;
block = nblock;
}
// read every fibonacci number again
block = 1;
block_ = 1;
while (block < cfg->block_count) {
// read
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
cfg->read(cfg, block, i, buffer, READ) => 0;
for (lfs_off_t j = 0; j < READ; j++) {
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
}
}
lfs_block_t nblock = block + block_;
block_ = block;
block = nblock;
}
'''

1453
tests/test_compat.toml Normal file

File diff suppressed because it is too large


@ -1,8 +1,11 @@
[[case]] # root
[cases.test_dirs_root]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@ -14,20 +17,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # many directory creation
define.N = 'range(0, 100, 3)'
[cases.test_dirs_many_creation]
defines.N = 'range(3, 100, 3)'
if = 'N < BLOCK_COUNT/2'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "dir%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@ -35,6 +43,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "dir%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -45,20 +54,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # many directory removal
define.N = 'range(3, 100, 11)'
[cases.test_dirs_many_removal]
defines.N = 'range(3, 100, 11)'
if = 'N < BLOCK_COUNT/2'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "removeme%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@ -66,6 +80,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "removeme%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -75,14 +90,15 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "removeme%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -95,20 +111,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # many directory rename
define.N = 'range(3, 100, 11)'
[cases.test_dirs_many_rename]
defines.N = 'range(3, 100, 11)'
if = 'N < BLOCK_COUNT/2'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "test%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@ -116,6 +137,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "test%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -125,7 +147,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
char oldpath[128];
char newpath[128];
@ -135,7 +157,7 @@ code = '''
}
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -144,6 +166,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "tedd%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -154,29 +177,35 @@ code = '''
lfs_unmount(&lfs);
'''
[[case]] # reentrant many directory creation/rename/removal
define.N = [5, 11]
[cases.test_dirs_many_reentrant]
defines.N = [5, 11]
if = 'BLOCK_COUNT >= 4*N'
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "hi%03d", i);
err = lfs_mkdir(&lfs, path);
assert(err == 0 || err == LFS_ERR_EXIST);
}
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "hello%03d", i);
err = lfs_remove(&lfs, path);
assert(err == 0 || err == LFS_ERR_NOENT);
}
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@ -184,6 +213,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "hi%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -209,6 +239,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "hello%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -218,6 +249,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "hello%03d", i);
lfs_remove(&lfs, path) => 0;
}
@ -234,22 +266,28 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
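# Note on the reentrant case above: with reentrant = true the runner, as we
# understand it, replays the body under simulated power-loss, so an earlier run
# may have stopped part-way through. Hence the mount-or-format idiom and the
# tolerance for already-done/not-yet-done results (sketch of the pattern):
#
#   int err = lfs_mount(&lfs, cfg);
#   if (err) {                        // first boot or interrupted format
#       lfs_format(&lfs, cfg) => 0;
#       lfs_mount(&lfs, cfg) => 0;
#   }
#   err = lfs_mkdir(&lfs, path);
#   assert(err == 0 || err == LFS_ERR_EXIST);   // may survive from a replay
#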
[[case]] # file creation
define.N = 'range(3, 100, 11)'
[cases.test_dirs_file_creation]
defines.N = 'range(3, 100, 11)'
if = 'N < BLOCK_COUNT/2'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "file%03d", i);
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@ -257,6 +295,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "file%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
@ -267,22 +306,28 @@ code = '''
lfs_unmount(&lfs);
'''
[[case]] # file removal
define.N = 'range(0, 100, 3)'
[cases.test_dirs_file_removal]
defines.N = 'range(3, 100, 11)'
if = 'N < BLOCK_COUNT/2'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "removeme%03d", i);
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@ -290,6 +335,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "removeme%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
@ -299,14 +345,15 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "removeme%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -319,22 +366,28 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # file rename
define.N = 'range(0, 100, 3)'
[cases.test_dirs_file_rename]
defines.N = 'range(3, 100, 11)'
if = 'N < BLOCK_COUNT/2'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "test%03d", i);
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@ -342,6 +395,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "test%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
@ -351,7 +405,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
char oldpath[128];
char newpath[128];
@ -361,7 +415,7 @@ code = '''
}
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -370,6 +424,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "tedd%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
@ -380,29 +435,36 @@ code = '''
lfs_unmount(&lfs);
'''
[[case]] # reentrant file creation/rename/removal
define.N = [5, 25]
[cases.test_dirs_file_reentrant]
defines.N = [5, 25]
if = 'N < BLOCK_COUNT/2'
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "hi%03d", i);
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
}
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "hello%03d", i);
err = lfs_remove(&lfs, path);
assert(err == 0 || err == LFS_ERR_NOENT);
}
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@ -410,6 +472,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "hi%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
@ -435,6 +498,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "hello%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
@ -444,6 +508,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "hello%03d", i);
lfs_remove(&lfs, path) => 0;
}
@ -460,24 +525,28 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # nested directories
[cases.test_dirs_nested]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "potato") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "burito",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "potato/baked") => 0;
lfs_mkdir(&lfs, "potato/sweet") => 0;
lfs_mkdir(&lfs, "potato/fried") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "potato") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
info.type => LFS_TYPE_DIR;
@ -498,21 +567,21 @@ code = '''
lfs_unmount(&lfs) => 0;
// try removing?
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_remove(&lfs, "potato") => LFS_ERR_NOTEMPTY;
lfs_unmount(&lfs) => 0;
// try renaming?
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "potato", "coldpotato") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "coldpotato", "warmpotato") => 0;
lfs_rename(&lfs, "warmpotato", "hotpotato") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_remove(&lfs, "potato") => LFS_ERR_NOENT;
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOENT;
lfs_remove(&lfs, "warmpotato") => LFS_ERR_NOENT;
@ -520,7 +589,7 @@ code = '''
lfs_unmount(&lfs) => 0;
// try cross-directory renaming
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "coldpotato") => 0;
lfs_rename(&lfs, "hotpotato/baked", "coldpotato/baked") => 0;
lfs_rename(&lfs, "coldpotato", "hotpotato") => LFS_ERR_NOTEMPTY;
@ -536,7 +605,7 @@ code = '''
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "hotpotato") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -558,7 +627,7 @@ code = '''
lfs_unmount(&lfs) => 0;
// final remove
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "hotpotato/baked") => 0;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
@ -568,7 +637,7 @@ code = '''
lfs_remove(&lfs, "hotpotato") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -584,17 +653,22 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # recursive remove
define.N = [10, 100]
[cases.test_dirs_recursive_remove]
defines.N = [10, 100]
if = 'N < BLOCK_COUNT/2'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "prickly-pear") => 0;
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "prickly-pear/cactus%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@ -602,6 +676,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "cactus%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -611,7 +686,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOTEMPTY;
lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
@ -622,6 +697,7 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
char path[1024];
sprintf(path, "cactus%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -636,22 +712,24 @@ code = '''
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;
'''
[[case]] # other error cases
[cases.test_dirs_other_errors]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "potato") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "burito",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "potato") => LFS_ERR_EXIST;
lfs_mkdir(&lfs, "burito") => LFS_ERR_EXIST;
@ -659,6 +737,7 @@ code = '''
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
lfs_file_open(&lfs, &file, "potato",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "tomato") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "burito") => LFS_ERR_NOTDIR;
lfs_file_open(&lfs, &file, "tomato", LFS_O_RDONLY) => LFS_ERR_NOENT;
@ -678,6 +757,7 @@ code = '''
// check that errors did not corrupt directory
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
@ -696,7 +776,7 @@ code = '''
lfs_unmount(&lfs) => 0;
// or on disk
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@ -715,21 +795,27 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # directory seek
define.COUNT = [4, 128, 132]
[cases.test_dirs_seek]
defines.COUNT = [4, 128, 132]
if = 'COUNT < BLOCK_COUNT/2'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
for (int i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "hello/kitty%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
for (int j = 2; j < COUNT; j++) {
lfs_mount(&lfs, &cfg) => 0;
// try seeking to each dir entry
for (int j = 0; j < COUNT; j++) {
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "hello") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@ -737,24 +823,25 @@ code = '''
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_soff_t pos;
for (int i = 0; i < j; i++) {
char path[1024];
sprintf(path, "kitty%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
pos = lfs_dir_tell(&lfs, &dir);
assert(pos >= 0);
}
lfs_soff_t pos = lfs_dir_tell(&lfs, &dir);
assert(pos >= 0);
lfs_dir_seek(&lfs, &dir, pos) => 0;
char path[1024];
sprintf(path, "kitty%03d", j);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_rewind(&lfs, &dir) => 0;
sprintf(path, "kitty%03d", 0);
sprintf(path, "kitty%03u", 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@ -774,22 +861,73 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
}
// try seeking to end of dir
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "hello") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
for (int i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "kitty%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
}
lfs_soff_t pos = lfs_dir_tell(&lfs, &dir);
assert(pos >= 0);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_seek(&lfs, &dir, pos) => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_rewind(&lfs, &dir) => 0;
char path[1024];
sprintf(path, "kitty%03d", 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_seek(&lfs, &dir, pos) => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # root seek
define.COUNT = [4, 128, 132]
[cases.test_dirs_root_seek]
defines.COUNT = [4, 128, 132]
if = 'COUNT < BLOCK_COUNT/2'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "hi%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
for (int j = 2; j < COUNT; j++) {
lfs_mount(&lfs, &cfg) => 0;
for (int j = 0; j < COUNT; j++) {
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@ -797,24 +935,25 @@ code = '''
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_soff_t pos;
for (int i = 0; i < j; i++) {
char path[1024];
sprintf(path, "hi%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
pos = lfs_dir_tell(&lfs, &dir);
assert(pos >= 0);
}
lfs_soff_t pos = lfs_dir_tell(&lfs, &dir);
assert(pos >= 0);
lfs_dir_seek(&lfs, &dir, pos) => 0;
char path[1024];
sprintf(path, "hi%03d", j);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_rewind(&lfs, &dir) => 0;
sprintf(path, "hi%03d", 0);
sprintf(path, "hi%03u", 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@ -834,5 +973,51 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
}
// try seeking to end of dir
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
for (int i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "hi%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
}
lfs_soff_t pos = lfs_dir_tell(&lfs, &dir);
assert(pos >= 0);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_seek(&lfs, &dir, pos) => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_rewind(&lfs, &dir) => 0;
char path[1024];
sprintf(path, "hi%03d", 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_seek(&lfs, &dir, pos) => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
'''


@ -2,19 +2,23 @@
# Note that these tests are intended for 512 byte inline sizes. They should
# still pass with other inline sizes but wouldn't be testing anything.
define.LFS_CACHE_SIZE = 512
if = 'LFS_CACHE_SIZE % LFS_PROG_SIZE == 0 && LFS_CACHE_SIZE == 512'
defines.CACHE_SIZE = 512
if = 'CACHE_SIZE % PROG_SIZE == 0 && CACHE_SIZE == 512'
[[case]] # entry grow test
[cases.test_entries_grow]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// write hi0 20
char path[1024];
lfs_size_t size;
sprintf(path, "hi0"); size = 20;
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
@ -94,16 +98,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # entry shrink test
[cases.test_entries_shrink]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// write hi0 20
char path[1024];
lfs_size_t size;
sprintf(path, "hi0"); size = 20;
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
@ -183,16 +191,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # entry spill test
[cases.test_entries_spill]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// write hi0 200
char path[1024];
lfs_size_t size;
sprintf(path, "hi0"); size = 200;
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
@ -256,16 +268,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # entry push spill test
[cases.test_entries_push_spill]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// write hi0 200
char path[1024];
lfs_size_t size;
sprintf(path, "hi0"); size = 200;
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
@ -345,16 +361,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # entry push spill two test
[cases.test_entries_push_spill_two]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// write hi0 200
char path[1024];
lfs_size_t size;
sprintf(path, "hi0"); size = 200;
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
@ -449,16 +469,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # entry drop test
[cases.test_entries_drop]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// write hi0 200
char path[1024];
lfs_size_t size;
sprintf(path, "hi0"); size = 200;
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
@ -491,6 +515,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_remove(&lfs, "hi1") => 0;
struct lfs_info info;
lfs_stat(&lfs, "hi1", &info) => LFS_ERR_NOENT;
// read hi0 200
sprintf(path, "hi0"); size = 200;
@ -547,15 +572,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # create too big
[cases.test_entries_create_too_big]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
char path[1024];
memset(path, 'm', 200);
path[200] = '\0';
size = 400;
lfs_size_t size = 400;
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
uint8_t wbuffer[1024];
@ -572,15 +600,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # resize too big
[cases.test_entries_resize_too_big]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
char path[1024];
memset(path, 'm', 200);
path[200] = '\0';
size = 40;
lfs_size_t size = 40;
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
uint8_t wbuffer[1024];


@ -3,16 +3,17 @@
# invalid pointer tests (outside of block_count)
[[case]] # invalid tail-pointer test
define.TAIL_TYPE = ['LFS_TYPE_HARDTAIL', 'LFS_TYPE_SOFTTAIL']
define.INVALSET = [0x3, 0x1, 0x2]
[cases.test_evil_invalid_tail_pointer]
defines.TAIL_TYPE = ['LFS_TYPE_HARDTAIL', 'LFS_TYPE_SOFTTAIL']
defines.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// change tail-pointer to invalid pointers
lfs_init(&lfs, &cfg) => 0;
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
@ -23,25 +24,27 @@ code = '''
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''
[[case]] # invalid dir pointer test
define.INVALSET = [0x3, 0x1, 0x2]
[cases.test_evil_invalid_dir_pointer]
defines.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// make a dir
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "dir_here") => 0;
lfs_unmount(&lfs) => 0;
// change the dir pointer to be invalid
lfs_init(&lfs, &cfg) => 0;
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our directory
uint8_t buffer[1024];
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("dir_here")), buffer)
@ -57,14 +60,17 @@ code = '''
// test that accessing our bad dir fails, note there's a number
// of ways to access the dir, some can fail, but some don't
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "dir_here", &info) => 0;
assert(strcmp(info.name, "dir_here") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "dir_here") => LFS_ERR_CORRUPT;
lfs_stat(&lfs, "dir_here/file_here", &info) => LFS_ERR_CORRUPT;
lfs_dir_open(&lfs, &dir, "dir_here/dir_here") => LFS_ERR_CORRUPT;
lfs_file_t file;
lfs_file_open(&lfs, &file, "dir_here/file_here",
LFS_O_RDONLY) => LFS_ERR_CORRUPT;
lfs_file_open(&lfs, &file, "dir_here/file_here",
@ -72,24 +78,27 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # invalid file pointer test
[cases.test_evil_invalid_file_pointer]
in = "lfs.c"
define.SIZE = [10, 1000, 100000] # faked file size
defines.SIZE = [10, 1000, 100000] # faked file size
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// make a file
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "file_here",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// change the file pointer to be invalid
lfs_init(&lfs, &cfg) => 0;
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our file
uint8_t buffer[1024];
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
@ -103,7 +112,8 @@ code = '''
// test that accessing our bad file fails, note there's a number
// of ways to access the dir, some can fail, but some don't
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "file_here", &info) => 0;
assert(strcmp(info.name, "file_here") == 0);
assert(info.type == LFS_TYPE_REG);
@ -114,20 +124,22 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
// any allocs that traverse CTZ must unfortunately fail
if (SIZE > 2*LFS_BLOCK_SIZE) {
if (SIZE > 2*BLOCK_SIZE) {
lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # invalid pointer in CTZ skip-list test
define.SIZE = ['2*LFS_BLOCK_SIZE', '3*LFS_BLOCK_SIZE', '4*LFS_BLOCK_SIZE']
[cases.test_evil_invalid_ctz_pointer] # invalid pointer in CTZ skip-list test
defines.SIZE = ['2*BLOCK_SIZE', '3*BLOCK_SIZE', '4*BLOCK_SIZE']
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// make a file
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "file_here",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (int i = 0; i < SIZE; i++) {
@ -137,10 +149,11 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// change pointer in CTZ skip-list to be invalid
lfs_init(&lfs, &cfg) => 0;
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our file and get our CTZ structure
uint8_t buffer[4*BLOCK_SIZE];
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
@ -153,18 +166,19 @@ code = '''
=> LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz));
lfs_ctz_fromle32(&ctz);
// rewrite block to contain bad pointer
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
uint8_t bbuffer[BLOCK_SIZE];
cfg->read(cfg, ctz.head, 0, bbuffer, BLOCK_SIZE) => 0;
uint32_t bad = lfs_tole32(0xcccccccc);
memcpy(&bbuffer[0], &bad, sizeof(bad));
memcpy(&bbuffer[4], &bad, sizeof(bad));
cfg.erase(&cfg, ctz.head) => 0;
cfg.prog(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg->erase(cfg, ctz.head) => 0;
cfg->prog(cfg, ctz.head, 0, bbuffer, BLOCK_SIZE) => 0;
lfs_deinit(&lfs) => 0;
// test that accessing our bad file fails, note there's a number
// of ways to access the dir, some can fail, but some don't
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "file_here", &info) => 0;
assert(strcmp(info.name, "file_here") == 0);
assert(info.type == LFS_TYPE_REG);
@ -175,22 +189,23 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
// any allocs that traverse CTZ must unfortunately fail
if (SIZE > 2*LFS_BLOCK_SIZE) {
if (SIZE > 2*BLOCK_SIZE) {
lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # invalid gstate pointer
define.INVALSET = [0x3, 0x1, 0x2]
[cases.test_evil_invalid_gstate_pointer]
defines.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// create an invalid gstate
lfs_init(&lfs, &cfg) => 0;
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_fs_prepmove(&lfs, 1, (lfs_block_t [2]){
@ -202,21 +217,22 @@ code = '''
// test that mount fails gracefully
// mount may not fail, but our first alloc should fail when
// we try to fix the gstate
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "should_fail") => LFS_ERR_CORRUPT;
lfs_unmount(&lfs) => 0;
'''
# cycle detection/recovery tests
[[case]] # metadata-pair threaded-list loop test
[cases.test_evil_mdir_loop] # metadata-pair threaded-list loop test
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// change tail-pointer to point to ourself
lfs_init(&lfs, &cfg) => 0;
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
@ -225,20 +241,21 @@ code = '''
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''
[[case]] # metadata-pair threaded-list 2-length loop test
[cases.test_evil_mdir_loop2] # metadata-pair threaded-list 2-length loop test
in = "lfs.c"
code = '''
// create littlefs with child dir
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;
// find child
lfs_init(&lfs, &cfg) => 0;
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_block_t pair[2];
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
@ -255,20 +272,21 @@ code = '''
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''
[[case]] # metadata-pair threaded-list 1-length child loop test
[cases.test_evil_mdir_loop_child] # metadata-pair threaded-list 1-length child loop test
in = "lfs.c"
code = '''
// create littlefs with child dir
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;
// find child
lfs_init(&lfs, &cfg) => 0;
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_block_t pair[2];
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
@ -284,5 +302,5 @@ code = '''
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''


@ -1,46 +1,50 @@
[[case]] # test running a filesystem to exhaustion
define.LFS_ERASE_CYCLES = 10
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_PROGERROR',
'LFS_TESTBD_BADBLOCK_ERASEERROR',
'LFS_TESTBD_BADBLOCK_READERROR',
'LFS_TESTBD_BADBLOCK_PROGNOOP',
'LFS_TESTBD_BADBLOCK_ERASENOOP',
# test running a filesystem to exhaustion
[cases.test_exhaustion_normal]
defines.ERASE_CYCLES = 10
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.BLOCK_CYCLES = 'ERASE_CYCLES / 2'
defines.BADBLOCK_BEHAVIOR = [
'LFS_EMUBD_BADBLOCK_PROGERROR',
'LFS_EMUBD_BADBLOCK_ERASEERROR',
'LFS_EMUBD_BADBLOCK_READERROR',
'LFS_EMUBD_BADBLOCK_PROGNOOP',
'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
define.FILES = 10
defines.FILES = 10
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "roadrunner") => 0;
lfs_unmount(&lfs) => 0;
uint32_t cycle = 0;
while (true) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// choose name, roughly random seed, and random 2^n size
char path[1024];
sprintf(path, "roadrunner/test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
err = lfs_file_close(&lfs, &file);
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
err = lfs_file_close(&lfs, &file);
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
@ -50,13 +54,15 @@ code = '''
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
sprintf(path, "roadrunner/test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
@ -71,10 +77,12 @@ code = '''
exhausted:
// should still be readable
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
sprintf(path, "roadrunner/test%d", i);
struct lfs_info info;
lfs_stat(&lfs, path, &info) => 0;
}
lfs_unmount(&lfs) => 0;
@ -82,47 +90,51 @@ exhausted:
LFS_WARN("completed %d cycles", cycle);
'''
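# The exhaustion tests seed a deterministic PRNG from (cycle * i) so the exact
# byte stream written to each file can be regenerated when reading it back.
# TEST_PRNG is provided by the test runner; a hypothetical stand-in (not the
# runner's actual implementation) could be a plain xorshift32:
#
#   static uint32_t sketch_prng(uint32_t *state) {
#       uint32_t x = *state ? *state : 0x9e3779b9;  // avoid the all-zero fixed point
#       x ^= x << 13; x ^= x >> 17; x ^= x << 5;
#       return *state = x;
#   }
#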
[[case]] # test running a filesystem to exhaustion
# which also requires expanding superblocks
define.LFS_ERASE_CYCLES = 10
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_PROGERROR',
'LFS_TESTBD_BADBLOCK_ERASEERROR',
'LFS_TESTBD_BADBLOCK_READERROR',
'LFS_TESTBD_BADBLOCK_PROGNOOP',
'LFS_TESTBD_BADBLOCK_ERASENOOP',
# test running a filesystem to exhaustion
# which also requires expanding superblocks
[cases.test_exhaustion_superblocks]
defines.ERASE_CYCLES = 10
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.BLOCK_CYCLES = 'ERASE_CYCLES / 2'
defines.BADBLOCK_BEHAVIOR = [
'LFS_EMUBD_BADBLOCK_PROGERROR',
'LFS_EMUBD_BADBLOCK_ERASEERROR',
'LFS_EMUBD_BADBLOCK_READERROR',
'LFS_EMUBD_BADBLOCK_PROGNOOP',
'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
define.FILES = 10
defines.FILES = 10
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
uint32_t cycle = 0;
while (true) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// chose name, roughly random seed, and random 2^n size
char path[1024];
sprintf(path, "test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
err = lfs_file_close(&lfs, &file);
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
err = lfs_file_close(&lfs, &file);
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
@ -132,13 +144,15 @@ code = '''
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
sprintf(path, "test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
@ -153,9 +167,11 @@ code = '''
exhausted:
// should still be readable
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
struct lfs_info info;
sprintf(path, "test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
@ -169,51 +185,55 @@ exhausted:
# into increasing the block device's lifetime. This is something we can actually
# check for.
[[case]] # wear-level test running a filesystem to exhaustion
define.LFS_ERASE_CYCLES = 20
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
define.FILES = 10
# wear-level test running a filesystem to exhaustion
[cases.test_exhuastion_wear_leveling]
defines.ERASE_CYCLES = 20
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.BLOCK_CYCLES = 'ERASE_CYCLES / 2'
defines.FILES = 10
code = '''
uint32_t run_cycles[2];
const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT};
const uint32_t run_block_count[2] = {BLOCK_COUNT/2, BLOCK_COUNT};
for (int run = 0; run < 2; run++) {
for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
lfs_testbd_setwear(&cfg, b,
(b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
for (lfs_block_t b = 0; b < BLOCK_COUNT; b++) {
lfs_emubd_setwear(cfg, b,
(b < run_block_count[run]) ? 0 : ERASE_CYCLES) => 0;
}
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "roadrunner") => 0;
lfs_unmount(&lfs) => 0;
uint32_t cycle = 0;
while (true) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// choose name, roughly random seed, and random 2^n size
char path[1024];
sprintf(path, "roadrunner/test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
err = lfs_file_close(&lfs, &file);
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
err = lfs_file_close(&lfs, &file);
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
@ -223,13 +243,15 @@ code = '''
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
sprintf(path, "roadrunner/test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
@ -244,9 +266,11 @@ code = '''
exhausted:
// should still be readable
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
struct lfs_info info;
sprintf(path, "roadrunner/test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
@ -261,48 +285,52 @@ exhausted:
LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
'''
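
For readers new to the updated test runner used in these cases: the srand()/rand() pairs are replaced by TEST_PRNG over an explicitly seeded uint32_t, so each pass can regenerate the exact byte stream it wrote. A minimal sketch of such a generator, assuming a xorshift32-style step (the real helper in the test runner may differ), looks like:

#include <stdint.h>

// Hypothetical stand-in for TEST_PRNG: a xorshift32 step over caller-owned
// state, so seeding with `cycle * i` gives a reproducible byte stream for
// both the write pass and the later read-back pass.
static uint32_t test_prng_sketch(uint32_t *state) {
    uint32_t x = *state;
    x ^= x << 13;
    x ^= x >> 17;
    x ^= x << 5;
    *state = x;
    return x;
}
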
[[case]] # wear-level test + expanding superblock
define.LFS_ERASE_CYCLES = 20
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
define.FILES = 10
# wear-level test + expanding superblock
[cases.test_exhaustion_wear_leveling_superblocks]
defines.ERASE_CYCLES = 20
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.BLOCK_CYCLES = 'ERASE_CYCLES / 2'
defines.FILES = 10
code = '''
uint32_t run_cycles[2];
const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT};
const uint32_t run_block_count[2] = {BLOCK_COUNT/2, BLOCK_COUNT};
for (int run = 0; run < 2; run++) {
for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
lfs_testbd_setwear(&cfg, b,
(b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
for (lfs_block_t b = 0; b < BLOCK_COUNT; b++) {
lfs_emubd_setwear(cfg, b,
(b < run_block_count[run]) ? 0 : ERASE_CYCLES) => 0;
}
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
uint32_t cycle = 0;
while (true) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// choose name, roughly random seed, and random 2^n size
char path[1024];
sprintf(path, "test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
err = lfs_file_close(&lfs, &file);
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
err = lfs_file_close(&lfs, &file);
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
@ -312,13 +340,15 @@ code = '''
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
sprintf(path, "test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
@ -333,9 +363,11 @@ code = '''
exhausted:
// should still be readable
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
struct lfs_info info;
sprintf(path, "test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
@ -350,44 +382,48 @@ exhausted:
LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
'''
[[case]] # test that we wear blocks roughly evenly
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_BLOCK_CYCLES = [5, 4, 3, 2, 1]
define.CYCLES = 100
define.FILES = 10
if = 'LFS_BLOCK_CYCLES < CYCLES/10'
# test that we wear blocks roughly evenly
[cases.test_exhaustion_wear_distribution]
defines.ERASE_CYCLES = 0xffffffff
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.BLOCK_CYCLES = [5, 4, 3, 2, 1]
defines.CYCLES = 100
defines.FILES = 10
if = 'BLOCK_CYCLES < CYCLES/10'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "roadrunner") => 0;
lfs_unmount(&lfs) => 0;
uint32_t cycle = 0;
while (cycle < CYCLES) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// choose name, roughly random seed, and random 2^n size
char path[1024];
sprintf(path, "roadrunner/test%d", i);
srand(cycle * i);
size = 1 << 4; //((rand() % 10)+2);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << 4; //((TEST_PRNG(&prng) % 10)+2);
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
err = lfs_file_close(&lfs, &file);
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
err = lfs_file_close(&lfs, &file);
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
@ -397,13 +433,15 @@ code = '''
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
sprintf(path, "roadrunner/test%d", i);
srand(cycle * i);
size = 1 << 4; //((rand() % 10)+2);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << 4; //((TEST_PRNG(&prng) % 10)+2);
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
@ -418,9 +456,11 @@ code = '''
exhausted:
// should still be readable
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
struct lfs_info info;
sprintf(path, "roadrunner/test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
@ -429,12 +469,12 @@ exhausted:
LFS_WARN("completed %d cycles", cycle);
// check the wear on our block device
lfs_testbd_wear_t minwear = -1;
lfs_testbd_wear_t totalwear = 0;
lfs_testbd_wear_t maxwear = 0;
lfs_emubd_wear_t minwear = -1;
lfs_emubd_wear_t totalwear = 0;
lfs_emubd_wear_t maxwear = 0;
// skip 0 and 1 as superblock movement is intentionally avoided
for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
for (lfs_block_t b = 2; b < BLOCK_COUNT; b++) {
lfs_emubd_wear_t wear = lfs_emubd_wear(cfg, b);
printf("%08x: wear %d\n", b, wear);
assert(wear >= 0);
if (wear < minwear) {
@ -445,17 +485,17 @@ exhausted:
}
totalwear += wear;
}
lfs_testbd_wear_t avgwear = totalwear / LFS_BLOCK_COUNT;
lfs_emubd_wear_t avgwear = totalwear / BLOCK_COUNT;
LFS_WARN("max wear: %d cycles", maxwear);
LFS_WARN("avg wear: %d cycles", totalwear / LFS_BLOCK_COUNT);
LFS_WARN("avg wear: %d cycles", totalwear / (int)BLOCK_COUNT);
LFS_WARN("min wear: %d cycles", minwear);
// find standard deviation^2
lfs_testbd_wear_t dev2 = 0;
for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
lfs_emubd_wear_t dev2 = 0;
for (lfs_block_t b = 2; b < BLOCK_COUNT; b++) {
lfs_emubd_wear_t wear = lfs_emubd_wear(cfg, b);
assert(wear >= 0);
lfs_testbd_swear_t diff = wear - avgwear;
lfs_emubd_swear_t diff = wear - avgwear;
dev2 += diff*diff;
}
dev2 /= totalwear;
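
The statistics above are simple bookkeeping over the per-block wear counters reported by the emulated block device. As a standalone illustration of the same computation (the wear[] array and the function name here are illustrative, not part of the lfs_emubd API):

#include <stdint.h>
#include <stdio.h>

// Illustrative min/avg/max and spread computation over a wear[] array,
// mirroring the checks in the test above. Blocks 0 and 1 are skipped
// since superblock movement is intentionally avoided.
static void wear_stats(const uint32_t *wear, uint32_t block_count) {
    uint32_t minwear = UINT32_MAX, maxwear = 0;
    uint64_t totalwear = 0;
    for (uint32_t b = 2; b < block_count; b++) {
        if (wear[b] < minwear) { minwear = wear[b]; }
        if (wear[b] > maxwear) { maxwear = wear[b]; }
        totalwear += wear[b];
    }
    // note: divided by the full block count, as in the test
    uint32_t avgwear = (uint32_t)(totalwear / block_count);
    uint64_t dev2 = 0;
    for (uint32_t b = 2; b < block_count; b++) {
        int64_t diff = (int64_t)wear[b] - (int64_t)avgwear;
        dev2 += (uint64_t)(diff*diff);
    }
    printf("min %u avg %u max %u dev2/total %llu\n",
            (unsigned)minwear, (unsigned)avgwear, (unsigned)maxwear,
            (unsigned long long)(totalwear ? dev2/totalwear : 0));
}
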


@ -1,17 +1,20 @@
[[case]] # simple file test
[cases.test_files_simple]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "hello",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
size = strlen("Hello World!")+1;
lfs_size_t size = strlen("Hello World!")+1;
uint8_t buffer[1024];
strcpy((char*)buffer, "Hello World!");
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "hello", LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(strcmp((char*)buffer, "Hello World!") == 0);
@ -19,21 +22,24 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # larger files
define.SIZE = [32, 8192, 262144, 0, 7, 8193]
define.CHUNKSIZE = [31, 16, 33, 1, 1023]
[cases.test_files_large]
defines.SIZE = [32, 8192, 262144, 0, 7, 8193]
defines.CHUNKSIZE = [31, 16, 33, 1, 1023]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// write
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
srand(1);
uint32_t prng = 1;
uint8_t buffer[1024];
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@ -41,15 +47,15 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE;
srand(1);
prng = 1;
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@ -57,22 +63,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # rewriting files
define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
define.CHUNKSIZE = [31, 16, 1]
[cases.test_files_rewrite]
defines.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
defines.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
defines.CHUNKSIZE = [31, 16, 1]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// write
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
uint8_t buffer[1024];
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
srand(1);
uint32_t prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@ -80,15 +89,15 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1;
srand(1);
prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@ -96,13 +105,13 @@ code = '''
lfs_unmount(&lfs) => 0;
// rewrite
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY) => 0;
srand(2);
prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@ -110,27 +119,27 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => lfs_max(SIZE1, SIZE2);
srand(2);
prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
if (SIZE1 > SIZE2) {
srand(1);
prng = 1;
for (lfs_size_t b = 0; b < SIZE2; b++) {
rand();
TEST_PRNG(&prng);
}
for (lfs_size_t i = SIZE2; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
}
@ -139,22 +148,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # appending files
define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
define.CHUNKSIZE = [31, 16, 1]
[cases.test_files_append]
defines.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
defines.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
defines.CHUNKSIZE = [31, 16, 1]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// write
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
uint8_t buffer[1024];
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
srand(1);
uint32_t prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@ -162,15 +174,15 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1;
srand(1);
prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@ -178,13 +190,13 @@ code = '''
lfs_unmount(&lfs) => 0;
// append
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_APPEND) => 0;
srand(2);
prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@ -192,23 +204,23 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1 + SIZE2;
srand(1);
prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
srand(2);
prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@ -216,22 +228,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # truncating files
define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
define.CHUNKSIZE = [31, 16, 1]
[cases.test_files_truncate]
defines.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
defines.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
defines.CHUNKSIZE = [31, 16, 1]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// write
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
uint8_t buffer[1024];
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
srand(1);
uint32_t prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@ -239,15 +254,15 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1;
srand(1);
prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@ -255,13 +270,13 @@ code = '''
lfs_unmount(&lfs) => 0;
// truncate
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_TRUNC) => 0;
srand(2);
prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@ -269,15 +284,15 @@ code = '''
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE2;
srand(2);
prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@ -285,33 +300,36 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant file writing
define.SIZE = [32, 0, 7, 2049]
define.CHUNKSIZE = [31, 16, 65]
[cases.test_files_reentrant_write]
defines.SIZE = [32, 0, 7, 2049]
defines.CHUNKSIZE = [31, 16, 65]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
lfs_file_t file;
uint8_t buffer[1024];
err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
assert(err == LFS_ERR_NOENT || err == 0);
if (err == 0) {
// can only be 0 (new file) or full size
size = lfs_file_size(&lfs, &file);
lfs_size_t size = lfs_file_size(&lfs, &file);
assert(size == 0 || size == SIZE);
lfs_file_close(&lfs, &file) => 0;
}
// write
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_CREAT) => 0;
srand(1);
uint32_t prng = 1;
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
@ -320,12 +338,12 @@ code = '''
// read
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE;
srand(1);
prng = 1;
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@ -333,8 +351,8 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
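
All of the reentrant cases open with the same mount-or-format preamble: under simulated power loss the image may be blank or only half-written when the test re-enters, so it formats only when mounting fails. Outside the test DSL (where `a => b` asserts that a evaluates to b), the equivalent plain-C preamble is roughly the following sketch:

#include <assert.h>
#include "lfs.h"

// Plain-C equivalent of the reentrant preamble: try to mount, and only
// format (then mount again) if the image isn't a valid littlefs yet.
// `cfg` is assumed to point at an already-configured struct lfs_config.
static void mount_or_format(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_mount(lfs, cfg);
    if (err) {
        err = lfs_format(lfs, cfg);
        assert(err == 0);
        err = lfs_mount(lfs, cfg);
        assert(err == 0);
    }
}
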
[[case]] # reentrant file writing with syncs
define = [
[cases.test_files_reentrant_write_sync]
defines = [
# append (O(n))
{MODE='LFS_O_APPEND', SIZE=[32, 0, 7, 2049], CHUNKSIZE=[31, 16, 65]},
# truncate (O(n^2))
@ -344,24 +362,27 @@ define = [
]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
lfs_file_t file;
uint8_t buffer[1024];
err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
assert(err == LFS_ERR_NOENT || err == 0);
if (err == 0) {
// with syncs we could be any size, but it at least must be valid data
size = lfs_file_size(&lfs, &file);
lfs_size_t size = lfs_file_size(&lfs, &file);
assert(size <= SIZE);
srand(1);
uint32_t prng = 1;
for (lfs_size_t i = 0; i < size; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, size-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_close(&lfs, &file) => 0;
@ -370,17 +391,17 @@ code = '''
// write
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | MODE) => 0;
size = lfs_file_size(&lfs, &file);
lfs_size_t size = lfs_file_size(&lfs, &file);
assert(size <= SIZE);
srand(1);
uint32_t prng = 1;
lfs_size_t skip = (MODE == LFS_O_APPEND) ? size : 0;
for (lfs_size_t b = 0; b < skip; b++) {
rand();
TEST_PRNG(&prng);
}
for (lfs_size_t i = skip; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
lfs_file_sync(&lfs, &file) => 0;
@ -390,12 +411,12 @@ code = '''
// read
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE;
srand(1);
prng = 1;
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
@ -403,19 +424,22 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # many files
define.N = 300
[cases.test_files_many]
defines.N = 300
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// create N files of 7 bytes
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
lfs_file_t file;
char path[1024];
sprintf(path, "file_%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
char wbuffer[1024];
size = 7;
snprintf(wbuffer, size, "Hi %03d", i);
lfs_size_t size = 7;
sprintf(wbuffer, "Hi %03d", i);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
@ -428,25 +452,28 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # many files with power cycle
define.N = 300
[cases.test_files_many_power_cycle]
defines.N = 300
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// create N files of 7 bytes
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
lfs_file_t file;
char path[1024];
sprintf(path, "file_%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
char wbuffer[1024];
size = 7;
snprintf(wbuffer, size, "Hi %03d", i);
lfs_size_t size = 7;
sprintf(wbuffer, "Hi %03d", i);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
char rbuffer[1024];
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(strcmp(rbuffer, wbuffer) == 0);
@ -455,22 +482,25 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # many files with power loss
define.N = 300
[cases.test_files_many_power_loss]
defines.N = 300
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
// create N files of 7 bytes
for (int i = 0; i < N; i++) {
lfs_file_t file;
char path[1024];
sprintf(path, "file_%03d", i);
err = lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT);
char wbuffer[1024];
size = 7;
snprintf(wbuffer, size, "Hi %03d", i);
lfs_size_t size = 7;
sprintf(wbuffer, "Hi %03d", i);
if ((lfs_size_t)lfs_file_size(&lfs, &file) != size) {
lfs_file_write(&lfs, &file, wbuffer, size) => size;
}


@ -1,13 +1,15 @@
[[case]] # interspersed file test
define.SIZE = [10, 100]
define.FILES = [4, 10, 26]
[cases.test_interspersed_files]
defines.SIZE = [10, 100]
defines.FILES = [4, 10, 26]
code = '''
lfs_t lfs;
lfs_file_t files[FILES];
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int j = 0; j < FILES; j++) {
char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &files[j], path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
@ -23,7 +25,9 @@ code = '''
lfs_file_close(&lfs, &files[j]);
}
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@ -31,6 +35,7 @@ code = '''
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
for (int j = 0; j < FILES; j++) {
char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
@ -41,12 +46,14 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
for (int j = 0; j < FILES; j++) {
char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &files[j], path, LFS_O_RDONLY) => 0;
}
for (int i = 0; i < 10; i++) {
for (int j = 0; j < FILES; j++) {
uint8_t buffer[1024];
lfs_file_read(&lfs, &files[j], buffer, 1) => 1;
assert(buffer[0] == alphas[j]);
}
@ -59,15 +66,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # interspersed remove file test
define.SIZE = [10, 100]
define.FILES = [4, 10, 26]
[cases.test_interspersed_remove_files]
defines.SIZE = [10, 100]
defines.FILES = [4, 10, 26]
code = '''
lfs_t lfs;
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int j = 0; j < FILES; j++) {
char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
for (int i = 0; i < SIZE; i++) {
@ -77,18 +87,22 @@ code = '''
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "zzz", LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (int j = 0; j < FILES; j++) {
lfs_file_write(&lfs, &file, (const void*)"~", 1) => 1;
lfs_file_sync(&lfs, &file) => 0;
char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_remove(&lfs, path) => 0;
}
lfs_file_close(&lfs, &file);
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@ -104,6 +118,7 @@ code = '''
lfs_file_open(&lfs, &file, "zzz", LFS_O_RDONLY) => 0;
for (int i = 0; i < FILES; i++) {
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 1) => 1;
assert(buffer[0] == '~');
}
@ -112,11 +127,12 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # remove inconveniently test
define.SIZE = [10, 100]
[cases.test_interspersed_remove_inconveniently]
defines.SIZE = [10, 100]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t files[3];
lfs_file_open(&lfs, &files[0], "e", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_open(&lfs, &files[1], "f", LFS_O_WRONLY | LFS_O_CREAT) => 0;
@ -140,7 +156,9 @@ code = '''
lfs_file_close(&lfs, &files[1]);
lfs_file_close(&lfs, &files[2]);
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@ -161,6 +179,7 @@ code = '''
lfs_file_open(&lfs, &files[0], "e", LFS_O_RDONLY) => 0;
lfs_file_open(&lfs, &files[1], "g", LFS_O_RDONLY) => 0;
for (int i = 0; i < SIZE; i++) {
uint8_t buffer[1024];
lfs_file_read(&lfs, &files[0], buffer, 1) => 1;
assert(buffer[0] == 'e');
lfs_file_read(&lfs, &files[1], buffer, 1) => 1;
@ -172,21 +191,23 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant interspersed file test
define.SIZE = [10, 100]
define.FILES = [4, 10, 26]
[cases.test_interspersed_reentrant_files]
defines.SIZE = [10, 100]
defines.FILES = [4, 10, 26]
reentrant = true
code = '''
lfs_t lfs;
lfs_file_t files[FILES];
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
err = lfs_mount(&lfs, &cfg);
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
for (int j = 0; j < FILES; j++) {
char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &files[j], path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
@ -194,8 +215,8 @@ code = '''
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < FILES; j++) {
size = lfs_file_size(&lfs, &files[j]);
assert((int)size >= 0);
lfs_ssize_t size = lfs_file_size(&lfs, &files[j]);
assert(size >= 0);
if ((int)size <= i) {
lfs_file_write(&lfs, &files[j], &alphas[j], 1) => 1;
lfs_file_sync(&lfs, &files[j]) => 0;
@ -207,7 +228,9 @@ code = '''
lfs_file_close(&lfs, &files[j]);
}
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/") => 0;
struct lfs_info info;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
@ -215,6 +238,7 @@ code = '''
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
for (int j = 0; j < FILES; j++) {
char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
@ -225,12 +249,14 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
for (int j = 0; j < FILES; j++) {
char path[1024];
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &files[j], path, LFS_O_RDONLY) => 0;
}
for (int i = 0; i < 10; i++) {
for (int j = 0; j < FILES; j++) {
uint8_t buffer[1024];
lfs_file_read(&lfs, &files[j], buffer, 1) => 1;
assert(buffer[0] == alphas[j]);
}


@ -1,11 +1,13 @@
[[case]] # move file
[cases.test_move_file]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
lfs_mkdir(&lfs, "d") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
@ -13,11 +15,13 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -44,6 +48,7 @@ code = '''
lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => 0;
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 5) => 5;
memcmp(buffer, "hola\n", 5) => 0;
lfs_file_read(&lfs, &file, buffer, 8) => 8;
@ -55,31 +60,35 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # noop move, yes this is legal
[cases.test_move_nop] # yes this is legal
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "hi") => 0;
lfs_rename(&lfs, "hi", "hi") => 0;
lfs_mkdir(&lfs, "hi/hi") => 0;
lfs_rename(&lfs, "hi/hi", "hi/hi") => 0;
lfs_mkdir(&lfs, "hi/hi/hi") => 0;
lfs_rename(&lfs, "hi/hi/hi", "hi/hi/hi") => 0;
struct lfs_info info;
lfs_stat(&lfs, "hi/hi/hi", &info) => 0;
assert(strcmp(info.name, "hi") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_unmount(&lfs) => 0;
'''
[[case]] # move file corrupt source
[cases.test_move_file_corrupt_source]
in = "lfs.c"
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
lfs_mkdir(&lfs, "d") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
@ -87,28 +96,30 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the source
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
int off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
uint8_t buffer[BLOCK_SIZE];
cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
int off = BLOCK_SIZE-1;
while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
memset(&buffer[off-3], BLOCK_SIZE, 3);
cfg->erase(cfg, block) => 0;
cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
cfg->sync(cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -146,16 +157,19 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
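
The corruption step in this case, and in the corrupt-source/dest cases that follow, always uses the same recipe: read the directory's metadata block back through the block device callbacks, scan backwards past the erased tail to the last programmed byte, clobber the few bytes just before it so the newest commit's CRC no longer matches, then erase and reprogram the block. A standalone sketch of that recipe (BLOCK_SIZE and ERASE_VALUE are the test-runner defines used above, not lfs.h symbols):

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include "lfs.h"

// Sketch of the corruption pattern used in these cases: invalidate the
// most recent commit in a metadata block by overwriting its trailing
// bytes. Assumes the block holds at least a few programmed bytes.
static void corrupt_last_commit(const struct lfs_config *cfg, lfs_block_t block) {
    uint8_t buffer[BLOCK_SIZE];
    assert(cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) == 0);
    // find the last programmed (non-erased) byte
    int off = BLOCK_SIZE-1;
    while (off >= 0 && buffer[off] == ERASE_VALUE) {
        off -= 1;
    }
    // any value that differs from what was programmed works here; the
    // tests reuse BLOCK_SIZE (truncated to a byte by memset)
    memset(&buffer[off-3], BLOCK_SIZE, 3);
    assert(cfg->erase(cfg, block) == 0);
    assert(cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) == 0);
    assert(cfg->sync(cfg) == 0);
}
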
[[case]] # move file corrupt source and dest
# move file corrupt source and dest
[cases.test_move_file_corrupt_source_dest]
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
if = 'PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
lfs_mkdir(&lfs, "d") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
@ -163,44 +177,46 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the source
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
int off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
uint8_t buffer[BLOCK_SIZE];
cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
int off = BLOCK_SIZE-1;
while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
memset(&buffer[off-3], BLOCK_SIZE, 3);
cfg->erase(cfg, block) => 0;
cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
cfg->sync(cfg) => 0;
// corrupt the destination
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
off = BLOCK_SIZE-1;
while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
memset(&buffer[off-3], BLOCK_SIZE, 3);
cfg->erase(cfg, block) => 0;
cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
cfg->sync(cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -238,16 +254,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # move file after corrupt
[cases.test_move_file_after_corrupt]
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
if = 'PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
lfs_mkdir(&lfs, "d") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
@ -255,49 +273,51 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the source
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
int off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
uint8_t buffer[BLOCK_SIZE];
cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
int off = BLOCK_SIZE-1;
while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
memset(&buffer[off-3], BLOCK_SIZE, 3);
cfg->erase(cfg, block) => 0;
cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
cfg->sync(cfg) => 0;
// corrupt the destination
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
off = BLOCK_SIZE-1;
while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
memset(&buffer[off-3], BLOCK_SIZE, 3);
cfg->erase(cfg, block) => 0;
cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
cfg->sync(cfg) => 0;
// continue move
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -335,13 +355,14 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # simple reentrant move file
[cases.test_move_reentrant_file]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
err = lfs_mkdir(&lfs, "a");
assert(!err || err == LFS_ERR_EXIST);
@ -354,9 +375,10 @@ code = '''
lfs_unmount(&lfs) => 0;
while (true) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// there should never exist _2_ hello files
int count = 0;
struct lfs_info info;
if (lfs_stat(&lfs, "a/hello", &info) == 0) {
assert(strcmp(info.name, "hello") == 0);
assert(info.type == LFS_TYPE_REG);
@ -384,7 +406,7 @@ code = '''
assert(count <= 1);
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
if (lfs_stat(&lfs, "a/hello", &info) == 0 && info.size > 0) {
lfs_rename(&lfs, "a/hello", "b/hello") => 0;
} else if (lfs_stat(&lfs, "b/hello", &info) == 0) {
@ -397,6 +419,7 @@ code = '''
break;
} else {
// create file
lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
@ -407,7 +430,9 @@ code = '''
lfs_unmount(&lfs) => 0;
}
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -431,10 +456,12 @@ code = '''
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => 0;
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 5) => 5;
memcmp(buffer, "hola\n", 5) => 0;
lfs_file_read(&lfs, &file, buffer, 8) => 8;
@ -445,10 +472,11 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # move dir
[cases.test_move_dir]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
@ -459,11 +487,13 @@ code = '''
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -510,11 +540,12 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # move dir corrupt source
[cases.test_move_dir_corrupt_source]
in = "lfs.c"
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
@ -525,28 +556,30 @@ code = '''
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the source
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
int off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
uint8_t buffer[BLOCK_SIZE];
cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
int off = BLOCK_SIZE-1;
while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
memset(&buffer[off-3], BLOCK_SIZE, 3);
cfg->erase(cfg, block) => 0;
cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
cfg->sync(cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -593,12 +626,13 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # move dir corrupt source and dest
[cases.test_move_dir_corrupt_source_dest]
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
if = 'PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
@ -609,44 +643,46 @@ code = '''
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the source
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
int off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
uint8_t buffer[BLOCK_SIZE];
cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
int off = BLOCK_SIZE-1;
while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
memset(&buffer[off-3], BLOCK_SIZE, 3);
cfg->erase(cfg, block) => 0;
cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
cfg->sync(cfg) => 0;
// corrupt the destination
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
off = BLOCK_SIZE-1;
while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
memset(&buffer[off-3], BLOCK_SIZE, 3);
cfg->erase(cfg, block) => 0;
cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
cfg->sync(cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -693,12 +729,13 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # move dir after corrupt
[cases.test_move_dir_after_corrupt]
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
if = 'PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
@ -709,49 +746,51 @@ code = '''
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the source
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
int off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
uint8_t buffer[BLOCK_SIZE];
cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
int off = BLOCK_SIZE-1;
while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
memset(&buffer[off-3], BLOCK_SIZE, 3);
cfg->erase(cfg, block) => 0;
cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
cfg->sync(cfg) => 0;
// corrupt the destination
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
off = BLOCK_SIZE-1;
while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
memset(&buffer[off-3], BLOCK_SIZE, 3);
cfg->erase(cfg, block) => 0;
cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
cfg->sync(cfg) => 0;
// continue move
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -798,13 +837,14 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # simple reentrant move dir
[cases.test_reentrant_dir]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
err = lfs_mkdir(&lfs, "a");
assert(!err || err == LFS_ERR_EXIST);
@ -817,9 +857,10 @@ code = '''
lfs_unmount(&lfs) => 0;
while (true) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// there should never exist _2_ hi directories
int count = 0;
struct lfs_info info;
if (lfs_stat(&lfs, "a/hi", &info) == 0) {
assert(strcmp(info.name, "hi") == 0);
assert(info.type == LFS_TYPE_DIR);
@ -843,7 +884,7 @@ code = '''
assert(count <= 1);
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
if (lfs_stat(&lfs, "a/hi", &info) == 0) {
lfs_rename(&lfs, "a/hi", "b/hi") => 0;
} else if (lfs_stat(&lfs, "b/hi", &info) == 0) {
@ -868,7 +909,9 @@ code = '''
lfs_unmount(&lfs) => 0;
}
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -915,14 +958,16 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # move state stealing
[cases.test_move_state_stealing]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
lfs_mkdir(&lfs, "d") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
@ -930,21 +975,22 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "a/hello", "b/hello") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "b/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_rename(&lfs, "c/hello", "d/hello") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => 0;
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 5) => 5;
memcmp(buffer, "hola\n", 5) => 0;
lfs_file_read(&lfs, &file, buffer, 8) => 8;
@ -954,12 +1000,13 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_remove(&lfs, "b") => 0;
lfs_remove(&lfs, "c") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "a", &info) => 0;
lfs_stat(&lfs, "b", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "c", &info) => LFS_ERR_NOENT;
@ -979,12 +1026,16 @@ code = '''
'''
# Other specific corner cases
[[case]] # create + delete in same commit with neighbors
# create + delete in same commit with neighbors
[cases.test_move_create_delete_same]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// littlefs keeps files sorted, so we know the order these will be in
lfs_file_t file;
lfs_file_open(&lfs, &file, "/1.move_me",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
@ -1024,6 +1075,8 @@ code = '''
lfs_file_close(&lfs, &files[2]) => 0;
// check that nothing was corrupted
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -1051,6 +1104,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &file, "/0.before", LFS_O_RDONLY) => 0;
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 7) => 7;
assert(strcmp((char*)buffer, "test.4") == 0);
lfs_file_close(&lfs, &file) => 0;
@ -1124,13 +1178,15 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
# Other specific corner cases
[[case]] # create + delete + delete in same commit with neighbors
# create + delete + delete in same commit with neighbors
[cases.test_move_create_delete_delete_same]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// littlefs keeps files sorted, so we know the order these will be in
lfs_file_t file;
lfs_file_open(&lfs, &file, "/1.move_me",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
@ -1175,6 +1231,8 @@ code = '''
lfs_file_close(&lfs, &files[2]) => 0;
// check that nothing was corrupted
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -1202,6 +1260,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &file, "/0.before", LFS_O_RDONLY) => 0;
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 7) => 7;
assert(strcmp((char*)buffer, "test.4") == 0);
lfs_file_close(&lfs, &file) => 0;
@ -1281,14 +1340,17 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # create + delete in different dirs with neighbors
# create + delete in different dirs with neighbors
[cases.test_move_create_delete_different]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// littlefs keeps files sorted, so we know the order these will be in
lfs_mkdir(&lfs, "/dir.1") => 0;
lfs_mkdir(&lfs, "/dir.2") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "/dir.1/1.move_me",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
@ -1340,6 +1402,8 @@ code = '''
lfs_file_close(&lfs, &files[3]) => 0;
// check that nothing was corrupted
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -1397,6 +1461,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &file, "/dir.1/0.before", LFS_O_RDONLY) => 0;
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 7) => 7;
assert(strcmp((char*)buffer, "test.5") == 0);
lfs_file_close(&lfs, &file) => 0;
@ -1518,17 +1583,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # move fix in relocation
# move fix in relocation
[cases.test_move_fix_relocation]
in = "lfs.c"
define.RELOCATIONS = 'range(0x3+1)'
define.LFS_ERASE_CYCLES = 0xffffffff
defines.RELOCATIONS = 'range(4)'
defines.ERASE_CYCLES = 0xffffffff
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "/parent") => 0;
lfs_mkdir(&lfs, "/parent/child") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "/parent/1.move_me",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "move me",
@ -1568,15 +1636,17 @@ code = '''
// force specific directories to relocate
if (RELOCATIONS & 0x1) {
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/parent");
lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_emubd_setwear(cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_emubd_setwear(cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
if (RELOCATIONS & 0x2) {
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/parent/child");
lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_emubd_setwear(cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_emubd_setwear(cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
@ -1593,6 +1663,8 @@ code = '''
lfs_file_close(&lfs, &files[3]) => 0;
// check that nothing was corrupted
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "/parent") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -1637,6 +1709,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &file, "/parent/0.before", LFS_O_RDONLY) => 0;
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 7) => 7;
assert(strcmp((char*)buffer, "test.5") == 0);
lfs_file_close(&lfs, &file) => 0;
@ -1655,18 +1728,21 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # move fix in relocation with predecessor
# move fix in relocation with predecessor
[cases.test_move_fix_relocation_predecessor]
in = "lfs.c"
define.RELOCATIONS = 'range(0x7+1)'
define.LFS_ERASE_CYCLES = 0xffffffff
defines.RELOCATIONS = 'range(8)'
defines.ERASE_CYCLES = 0xffffffff
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "/parent") => 0;
lfs_mkdir(&lfs, "/parent/child") => 0;
lfs_mkdir(&lfs, "/parent/sibling") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "/parent/sibling/1.move_me",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "move me",
@ -1706,21 +1782,24 @@ code = '''
// force specific directories to relocate
if (RELOCATIONS & 0x1) {
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/parent");
lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_emubd_setwear(cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_emubd_setwear(cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
if (RELOCATIONS & 0x2) {
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/parent/sibling");
lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_emubd_setwear(cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_emubd_setwear(cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
if (RELOCATIONS & 0x4) {
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "/parent/child");
lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_emubd_setwear(cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_emubd_setwear(cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
@ -1739,6 +1818,8 @@ code = '''
lfs_file_close(&lfs, &files[3]) => 0;
// check that nothing was corrupted
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "/parent") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@ -1796,6 +1877,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &file, "/parent/sibling/0.before", LFS_O_RDONLY) => 0;
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, 7) => 7;
assert(strcmp((char*)buffer, "test.5") == 0);
lfs_file_close(&lfs, &file) => 0;


@ -1,9 +1,10 @@
[[case]] # orphan test
[cases.test_orphans_normal]
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
if = 'PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "parent") => 0;
lfs_mkdir(&lfs, "parent/orphan") => 0;
lfs_mkdir(&lfs, "parent/child") => 0;
@ -13,29 +14,31 @@ code = '''
// corrupt the child's most recent commit, this should be the update
// to the linked-list entry, which should orphan the orphan. Note this
// makes a lot of assumptions about the remove operation.
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "parent/child") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
int off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
uint8_t buffer[BLOCK_SIZE];
cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
int off = BLOCK_SIZE-1;
while (off >= 0 && buffer[off] == ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
memset(&buffer[off-3], BLOCK_SIZE, 3);
cfg->erase(cfg, block) => 0;
cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
cfg->sync(cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_fs_size(&lfs) => 8;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_fs_size(&lfs) => 8;
@ -48,7 +51,7 @@ code = '''
lfs_fs_size(&lfs) => 8;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_stat(&lfs, "parent/otherchild", &info) => 0;
@ -56,43 +59,192 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant testing for orphans, basically just spam mkdir/remove
# test that we only run deorphan once per power-cycle
[cases.test_orphans_no_orphans]
in = 'lfs.c'
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// mark the filesystem as having orphans
lfs_fs_preporphans(&lfs, +1) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_commit(&lfs, &mdir, NULL, 0) => 0;
    // we should have orphans at this point
assert(lfs_gstate_hasorphans(&lfs.gstate));
lfs_unmount(&lfs) => 0;
// mount
lfs_mount(&lfs, cfg) => 0;
// we should detect orphans
assert(lfs_gstate_hasorphans(&lfs.gstate));
// force consistency
lfs_fs_forceconsistency(&lfs) => 0;
// we should no longer have orphans
assert(!lfs_gstate_hasorphans(&lfs.gstate));
lfs_unmount(&lfs) => 0;
'''
[cases.test_orphans_one_orphan]
in = 'lfs.c'
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// create an orphan
lfs_mdir_t orphan;
lfs_alloc_ack(&lfs);
lfs_dir_alloc(&lfs, &orphan) => 0;
lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;
// append our orphan and mark the filesystem as having orphans
lfs_fs_preporphans(&lfs, +1) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_pair_tole32(orphan.pair);
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), orphan.pair})) => 0;
    // we should have orphans at this point
assert(lfs_gstate_hasorphans(&lfs.gstate));
lfs_unmount(&lfs) => 0;
// mount
lfs_mount(&lfs, cfg) => 0;
// we should detect orphans
assert(lfs_gstate_hasorphans(&lfs.gstate));
// force consistency
lfs_fs_forceconsistency(&lfs) => 0;
// we should no longer have orphans
assert(!lfs_gstate_hasorphans(&lfs.gstate));
lfs_unmount(&lfs) => 0;
'''
# test that we can persist gstate with lfs_fs_mkconsistent
[cases.test_orphans_mkconsistent_no_orphans]
in = 'lfs.c'
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// mark the filesystem as having orphans
lfs_fs_preporphans(&lfs, +1) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_commit(&lfs, &mdir, NULL, 0) => 0;
    // we should have orphans at this point
assert(lfs_gstate_hasorphans(&lfs.gstate));
lfs_unmount(&lfs) => 0;
// mount
lfs_mount(&lfs, cfg) => 0;
// we should detect orphans
assert(lfs_gstate_hasorphans(&lfs.gstate));
// force consistency
lfs_fs_mkconsistent(&lfs) => 0;
// we should no longer have orphans
assert(!lfs_gstate_hasorphans(&lfs.gstate));
// remount
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, cfg) => 0;
// we should still have no orphans
assert(!lfs_gstate_hasorphans(&lfs.gstate));
lfs_unmount(&lfs) => 0;
'''
[cases.test_orphans_mkconsistent_one_orphan]
in = 'lfs.c'
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// create an orphan
lfs_mdir_t orphan;
lfs_alloc_ack(&lfs);
lfs_dir_alloc(&lfs, &orphan) => 0;
lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;
// append our orphan and mark the filesystem as having orphans
lfs_fs_preporphans(&lfs, +1) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_pair_tole32(orphan.pair);
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), orphan.pair})) => 0;
    // we should have orphans at this point
assert(lfs_gstate_hasorphans(&lfs.gstate));
lfs_unmount(&lfs) => 0;
// mount
lfs_mount(&lfs, cfg) => 0;
// we should detect orphans
assert(lfs_gstate_hasorphans(&lfs.gstate));
// force consistency
lfs_fs_mkconsistent(&lfs) => 0;
// we should no longer have orphans
assert(!lfs_gstate_hasorphans(&lfs.gstate));
// remount
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, cfg) => 0;
// we should still have no orphans
assert(!lfs_gstate_hasorphans(&lfs.gstate));
lfs_unmount(&lfs) => 0;
'''
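The four cases above exercise lfs_fs_mkconsistent through internal hooks (lfs_fs_preporphans, lfs_dir_commit). For contrast, a minimal sketch of how application code might call it, assuming a struct lfs_config cfg already set up for the target device; the point is only that the deorphan/move-fix work can be paid at a chosen time instead of on the first write:

    lfs_t lfs;
    int err = lfs_mount(&lfs, &cfg);
    if (err) {
        return err;
    }
    // fix any pending orphans/moves now, so the first write after an
    // unclean power-off doesn't absorb the cleanup cost
    err = lfs_fs_mkconsistent(&lfs);
    if (err) {
        lfs_unmount(&lfs);
        return err;
    }
    // ... normal filesystem work ...
    lfs_unmount(&lfs);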
# reentrant testing for orphans, basically just spam mkdir/remove
[cases.test_orphans_reentrant]
reentrant = true
# TODO fix this case, caused by non-DAG trees
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [
if = '!(DEPTH == 3 && CACHE_SIZE != 64)'
defines = [
{FILES=6, DEPTH=1, CYCLES=20},
{FILES=26, DEPTH=1, CYCLES=20},
{FILES=3, DEPTH=3, CYCLES=20},
]
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
srand(1);
uint32_t prng = 1;
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
for (int i = 0; i < CYCLES; i++) {
for (unsigned i = 0; i < CYCLES; i++) {
// create random path
char full_path[256];
for (int d = 0; d < DEPTH; d++) {
sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
for (unsigned d = 0; d < DEPTH; d++) {
sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
}
// if it does not exist, we create it, else we destroy
struct lfs_info info;
int res = lfs_stat(&lfs, full_path, &info);
if (res == LFS_ERR_NOENT) {
// create each directory in turn, ignore if dir already exists
for (int d = 0; d < DEPTH; d++) {
for (unsigned d = 0; d < DEPTH; d++) {
char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_mkdir(&lfs, path);
assert(!err || err == LFS_ERR_EXIST);
}
for (int d = 0; d < DEPTH; d++) {
for (unsigned d = 0; d < DEPTH; d++) {
char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
lfs_stat(&lfs, path, &info) => 0;
@ -106,6 +258,7 @@ code = '''
// try to delete path in reverse order, ignore if dir is not empty
for (int d = DEPTH-1; d >= 0; d--) {
char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_remove(&lfs, path);


@ -1,13 +1,16 @@
[[case]] # simple path test
# simple path test
[cases.test_paths_normal]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
struct lfs_info info;
lfs_stat(&lfs, "tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "/tea/hottea", &info) => 0;
@ -21,15 +24,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # redundant slashes
# redundant slashes
[cases.test_paths_redundant_slashes]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
struct lfs_info info;
lfs_stat(&lfs, "/tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "//tea//hottea", &info) => 0;
@ -45,15 +51,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # dot path test
# dot path test
[cases.test_paths_dot]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
struct lfs_info info;
lfs_stat(&lfs, "./tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "/./tea/hottea", &info) => 0;
@ -71,10 +80,12 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # dot dot path test
# dot dot path test
[cases.test_paths_dot_dot]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
@ -84,6 +95,7 @@ code = '''
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
struct lfs_info info;
lfs_stat(&lfs, "coffee/../tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "tea/coldtea/../hottea", &info) => 0;
@ -101,15 +113,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # trailing dot path test
# trailing dot path test
[cases.test_paths_trailing_dot]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
struct lfs_info info;
lfs_stat(&lfs, "tea/hottea/", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "tea/hottea/.", &info) => 0;
@ -123,11 +138,14 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # leading dot path test
# leading dot path test
[cases.test_paths_leading_dot]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, ".milk") => 0;
struct lfs_info info;
lfs_stat(&lfs, ".milk", &info) => 0;
strcmp(info.name, ".milk") => 0;
lfs_stat(&lfs, "tea/.././.milk", &info) => 0;
@ -135,10 +153,12 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # root dot dot path test
# root dot dot path test
[cases.test_paths_root_dot_dot]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
@ -148,6 +168,7 @@ code = '''
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
struct lfs_info info;
lfs_stat(&lfs, "coffee/../../../../../../tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
@ -159,10 +180,13 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # invalid path tests
# invalid path tests
[cases.test_paths_invalid]
code = '''
lfs_format(&lfs, &cfg);
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg);
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "dirt", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "dirt/ground", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "dirt/ground/earth", &info) => LFS_ERR_NOENT;
@ -172,6 +196,7 @@ code = '''
lfs_remove(&lfs, "dirt/ground/earth") => LFS_ERR_NOENT;
lfs_mkdir(&lfs, "dirt/ground") => LFS_ERR_NOENT;
lfs_file_t file;
lfs_file_open(&lfs, &file, "dirt/ground", LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_NOENT;
lfs_mkdir(&lfs, "dirt/ground/earth") => LFS_ERR_NOENT;
@ -180,15 +205,19 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # root operations
# root operations
[cases.test_paths_root]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "/", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_mkdir(&lfs, "/") => LFS_ERR_EXIST;
lfs_file_t file;
lfs_file_open(&lfs, &file, "/", LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_ISDIR;
@ -196,10 +225,13 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # root representations
# root representations
[cases.test_paths_root_reprs]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "/", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
@ -221,10 +253,13 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # superblock conflict test
# superblock conflict test
[cases.test_paths_superblock_conflict]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "littlefs", &info) => LFS_ERR_NOENT;
lfs_remove(&lfs, "littlefs") => LFS_ERR_NOENT;
@ -237,18 +272,22 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # max path test
# max path test
[cases.test_paths_max]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "coffee") => 0;
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
char path[1024];
memset(path, 'w', LFS_NAME_MAX+1);
path[LFS_NAME_MAX+1] = '\0';
lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_NAMETOOLONG;
@ -261,19 +300,23 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # really big path test
# really big path test
[cases.test_paths_really_big]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "coffee") => 0;
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
char path[1024];
memset(path, 'w', LFS_NAME_MAX);
path[LFS_NAME_MAX] = '\0';
lfs_mkdir(&lfs, path) => 0;
lfs_remove(&lfs, path) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;

tests/test_powerloss.toml Normal file

@ -0,0 +1,185 @@
# There are already a number of tests that test general operations under
# power-loss (see the reentrant attribute). These tests are for explicitly
# testing specific corner cases.
# only a revision count
[cases.test_powerloss_only_rev]
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "notebook") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "notebook/paper",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
char buffer[256];
strcpy(buffer, "hello");
lfs_size_t size = strlen("hello");
for (int i = 0; i < 5; i++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_sync(&lfs, &file) => 0;
}
lfs_file_close(&lfs, &file) => 0;
char rbuffer[256];
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// get pair/rev count
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "notebook") => 0;
lfs_block_t pair[2] = {dir.m.pair[0], dir.m.pair[1]};
uint32_t rev = dir.m.rev;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
// write just the revision count
uint8_t bbuffer[BLOCK_SIZE];
cfg->read(cfg, pair[1], 0, bbuffer, BLOCK_SIZE) => 0;
memcpy(bbuffer, &(uint32_t){lfs_tole32(rev+1)}, sizeof(uint32_t));
cfg->erase(cfg, pair[1]) => 0;
cfg->prog(cfg, pair[1], 0, bbuffer, BLOCK_SIZE) => 0;
lfs_mount(&lfs, cfg) => 0;
// can read?
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
// can write?
lfs_file_open(&lfs, &file, "notebook/paper",
LFS_O_WRONLY | LFS_O_APPEND) => 0;
strcpy(buffer, "goodbye");
size = strlen("goodbye");
for (int i = 0; i < 5; i++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_sync(&lfs, &file) => 0;
}
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
strcpy(buffer, "hello");
size = strlen("hello");
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
strcpy(buffer, "goodbye");
size = strlen("goodbye");
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
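A side note on what the case above relies on: each block in a metadata pair begins with a 32-bit little-endian revision count, which is why rewriting only the first sizeof(uint32_t) bytes is enough to make the backup block look newer. A condensed sketch of that bump, assuming the emulated block device hooks used by the test runner (BLOCK_SIZE comes from the test defines; the helper name is hypothetical):

    // hypothetical helper: bump the revision count of one metadata block,
    // leaving the rest of its contents untouched
    static int bump_rev(const struct lfs_config *cfg, lfs_block_t block) {
        uint8_t buf[BLOCK_SIZE];
        int err = cfg->read(cfg, block, 0, buf, BLOCK_SIZE);
        if (err) {
            return err;
        }
        uint32_t rev;
        memcpy(&rev, buf, sizeof(uint32_t));
        rev = lfs_fromle32(rev) + 1;        // revision counts are little-endian
        uint32_t rev_le = lfs_tole32(rev);
        memcpy(buf, &rev_le, sizeof(uint32_t));
        err = cfg->erase(cfg, block);
        if (err) {
            return err;
        }
        return cfg->prog(cfg, block, 0, buf, BLOCK_SIZE);
    }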
# partial prog, bytes may not be programmed in order!
[cases.test_powerloss_partial_prog]
if = '''
PROG_SIZE < BLOCK_SIZE
&& (DISK_VERSION == 0 || DISK_VERSION >= 0x00020001)
'''
defines.BYTE_OFF = ["0", "PROG_SIZE-1", "PROG_SIZE/2"]
defines.BYTE_VALUE = [0x33, 0xcc]
in = "lfs.c"
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "notebook") => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "notebook/paper",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
char buffer[256];
strcpy(buffer, "hello");
lfs_size_t size = strlen("hello");
for (int i = 0; i < 5; i++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_sync(&lfs, &file) => 0;
}
lfs_file_close(&lfs, &file) => 0;
char rbuffer[256];
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
    // imitate a partial prog; the value should not matter, but if littlefs
    // doesn't notice the partial prog, the emulated bd will assert
// get offset to next prog
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "notebook") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_off_t off = dir.m.off;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
// tweak byte
uint8_t bbuffer[BLOCK_SIZE];
cfg->read(cfg, block, 0, bbuffer, BLOCK_SIZE) => 0;
bbuffer[off + BYTE_OFF] = BYTE_VALUE;
cfg->erase(cfg, block) => 0;
cfg->prog(cfg, block, 0, bbuffer, BLOCK_SIZE) => 0;
lfs_mount(&lfs, cfg) => 0;
// can read?
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
// can write?
lfs_file_open(&lfs, &file, "notebook/paper",
LFS_O_WRONLY | LFS_O_APPEND) => 0;
strcpy(buffer, "goodbye");
size = strlen("goodbye");
for (int i = 0; i < 5; i++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_sync(&lfs, &file) => 0;
}
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
strcpy(buffer, "hello");
size = strlen("hello");
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
strcpy(buffer, "goodbye");
size = strlen("goodbye");
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
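Both power-loss cases in this file share the same fault-injection pattern: read a whole block through the config hooks, tweak it in RAM, then erase and reprogram it. A condensed sketch of that pattern, assuming the emulated block device used by the test runner (BLOCK_SIZE comes from the test defines; the helper name is hypothetical):

    // hypothetical helper: overwrite a single byte of a block in place
    static void corrupt_byte(const struct lfs_config *cfg,
            lfs_block_t block, lfs_off_t off, uint8_t value) {
        uint8_t buf[BLOCK_SIZE];
        assert(cfg->read(cfg, block, 0, buf, BLOCK_SIZE) == 0);
        buf[off] = value;
        assert(cfg->erase(cfg, block) == 0);
        assert(cfg->prog(cfg, block, 0, buf, BLOCK_SIZE) == 0);
    }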


@ -1,15 +1,18 @@
# specific corner cases worth explicitly testing for
[[case]] # dangling split dir test
define.ITERATIONS = 20
define.COUNT = 10
define.LFS_BLOCK_CYCLES = [8, 1]
[cases.test_relocations_dangling_split_dir]
defines.ITERATIONS = 20
defines.COUNT = 10
defines.BLOCK_CYCLES = [8, 1]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// fill up filesystem so only ~16 blocks are left
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
uint8_t buffer[512];
memset(buffer, 0, 512);
while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
while (BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
lfs_file_write(&lfs, &file, buffer, 512) => 512;
}
lfs_file_close(&lfs, &file) => 0;
@ -17,18 +20,22 @@ code = '''
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int j = 0; j < ITERATIONS; j++) {
for (int i = 0; i < COUNT; i++) {
lfs_mount(&lfs, cfg) => 0;
for (unsigned j = 0; j < ITERATIONS; j++) {
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "child") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
for (int i = 0; i < COUNT; i++) {
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
@ -36,46 +43,54 @@ code = '''
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
if (j == ITERATIONS-1) {
if (j == (unsigned)ITERATIONS-1) {
break;
}
for (int i = 0; i < COUNT; i++) {
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_remove(&lfs, path) => 0;
}
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "child") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
for (int i = 0; i < COUNT; i++) {
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
for (int i = 0; i < COUNT; i++) {
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # outdated head test
define.ITERATIONS = 20
define.COUNT = 10
define.LFS_BLOCK_CYCLES = [8, 1]
[cases.test_relocations_outdated_head]
defines.ITERATIONS = 20
defines.COUNT = 10
defines.BLOCK_CYCLES = [8, 1]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// fill up filesystem so only ~16 blocks are left
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
uint8_t buffer[512];
memset(buffer, 0, 512);
while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
while (BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
lfs_file_write(&lfs, &file, buffer, 512) => 512;
}
lfs_file_close(&lfs, &file) => 0;
@ -83,18 +98,22 @@ code = '''
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int j = 0; j < ITERATIONS; j++) {
for (int i = 0; i < COUNT; i++) {
lfs_mount(&lfs, cfg) => 0;
for (unsigned j = 0; j < ITERATIONS; j++) {
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_dir_t dir;
struct lfs_info info;
lfs_dir_open(&lfs, &dir, "child") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
for (int i = 0; i < COUNT; i++) {
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
@ -110,7 +129,8 @@ code = '''
lfs_dir_rewind(&lfs, &dir) => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
for (int i = 0; i < COUNT; i++) {
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
@ -126,7 +146,8 @@ code = '''
lfs_dir_rewind(&lfs, &dir) => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
for (int i = 0; i < COUNT; i++) {
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
@ -135,7 +156,8 @@ code = '''
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
for (int i = 0; i < COUNT; i++) {
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_remove(&lfs, path) => 0;
}
@ -143,45 +165,51 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant testing for relocations, this is the same as the
# orphan testing, except here we also set block_cycles so that
# almost every tree operation needs a relocation
# reentrant testing for relocations, this is the same as the
# orphan testing, except here we also set block_cycles so that
# almost every tree operation needs a relocation
[cases.test_relocations_reentrant]
reentrant = true
# TODO fix this case, caused by non-DAG trees
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [
{FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=3, DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1},
# NOTE the second condition is required
if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
defines = [
{FILES=6, DEPTH=1, CYCLES=20, BLOCK_CYCLES=1},
{FILES=26, DEPTH=1, CYCLES=20, BLOCK_CYCLES=1},
{FILES=3, DEPTH=3, CYCLES=20, BLOCK_CYCLES=1},
]
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
srand(1);
uint32_t prng = 1;
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
for (int i = 0; i < CYCLES; i++) {
for (unsigned i = 0; i < CYCLES; i++) {
// create random path
char full_path[256];
for (int d = 0; d < DEPTH; d++) {
sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
for (unsigned d = 0; d < DEPTH; d++) {
sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
}
// if it does not exist, we create it, else we destroy
struct lfs_info info;
int res = lfs_stat(&lfs, full_path, &info);
if (res == LFS_ERR_NOENT) {
// create each directory in turn, ignore if dir already exists
for (int d = 0; d < DEPTH; d++) {
for (unsigned d = 0; d < DEPTH; d++) {
char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_mkdir(&lfs, path);
assert(!err || err == LFS_ERR_EXIST);
}
for (int d = 0; d < DEPTH; d++) {
for (unsigned d = 0; d < DEPTH; d++) {
char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
lfs_stat(&lfs, path, &info) => 0;
@ -194,7 +222,8 @@ code = '''
assert(info.type == LFS_TYPE_DIR);
// try to delete path in reverse order, ignore if dir is not empty
for (int d = DEPTH-1; d >= 0; d--) {
for (unsigned d = DEPTH-1; d+1 > 0; d--) {
char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_remove(&lfs, path);
@ -207,44 +236,50 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
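The relocation pressure in these cases comes entirely from BLOCK_CYCLES=1: littlefs evicts a metadata pair after a single erase cycle, so almost every tree update relocates. Outside the test harness the same knob is the block_cycles field of lfs_config; a sketch of a deliberately aggressive configuration, where the geometry values are placeholders rather than anything mandated by littlefs:

    const struct lfs_config cfg = {
        // device geometry (placeholder values)
        .read_size      = 16,
        .prog_size      = 16,
        .block_size     = 4096,
        .block_count    = 128,
        .cache_size     = 64,
        .lookahead_size = 16,
        // wear-leveling knob: evict metadata pairs after this many erase
        // cycles; 1 forces a relocation on nearly every commit (as in the
        // tests above), production values are usually in the 100-1000 range
        .block_cycles   = 1,
        // .read/.prog/.erase/.sync callbacks and static buffers omitted
    };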
[[case]] # reentrant testing for relocations, but now with random renames!
# reentrant testing for relocations, but now with random renames!
[cases.test_relocations_reentrant_renames]
reentrant = true
# TODO fix this case, caused by non-DAG trees
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [
{FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=3, DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1},
# NOTE the second condition is required
if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
defines = [
{FILES=6, DEPTH=1, CYCLES=20, BLOCK_CYCLES=1},
{FILES=26, DEPTH=1, CYCLES=20, BLOCK_CYCLES=1},
{FILES=3, DEPTH=3, CYCLES=20, BLOCK_CYCLES=1},
]
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
srand(1);
uint32_t prng = 1;
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
for (int i = 0; i < CYCLES; i++) {
for (unsigned i = 0; i < CYCLES; i++) {
// create random path
char full_path[256];
for (int d = 0; d < DEPTH; d++) {
sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
for (unsigned d = 0; d < DEPTH; d++) {
sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
}
// if it does not exist, we create it, else we destroy
struct lfs_info info;
int res = lfs_stat(&lfs, full_path, &info);
assert(!res || res == LFS_ERR_NOENT);
if (res == LFS_ERR_NOENT) {
// create each directory in turn, ignore if dir already exists
for (int d = 0; d < DEPTH; d++) {
for (unsigned d = 0; d < DEPTH; d++) {
char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_mkdir(&lfs, path);
assert(!err || err == LFS_ERR_EXIST);
}
for (int d = 0; d < DEPTH; d++) {
for (unsigned d = 0; d < DEPTH; d++) {
char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
lfs_stat(&lfs, path, &info) => 0;
@ -257,8 +292,8 @@ code = '''
// create new random path
char new_path[256];
for (int d = 0; d < DEPTH; d++) {
sprintf(&new_path[2*d], "/%c", alpha[rand() % FILES]);
for (unsigned d = 0; d < DEPTH; d++) {
sprintf(&new_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
}
// if new path does not exist, rename, otherwise destroy
@ -266,7 +301,8 @@ code = '''
assert(!res || res == LFS_ERR_NOENT);
if (res == LFS_ERR_NOENT) {
// stop once some dir is renamed
for (int d = 0; d < DEPTH; d++) {
for (unsigned d = 0; d < DEPTH; d++) {
char path[1024];
strcpy(&path[2*d], &full_path[2*d]);
path[2*d+2] = '\0';
strcpy(&path[128+2*d], &new_path[2*d]);
@ -278,7 +314,8 @@ code = '''
}
}
for (int d = 0; d < DEPTH; d++) {
for (unsigned d = 0; d < DEPTH; d++) {
char path[1024];
strcpy(path, new_path);
path[2*d+2] = '\0';
lfs_stat(&lfs, path, &info) => 0;
@ -290,7 +327,8 @@ code = '''
} else {
// try to delete path in reverse order,
// ignore if dir is not empty
for (int d = DEPTH-1; d >= 0; d--) {
for (unsigned d = DEPTH-1; d+1 > 0; d--) {
char path[1024];
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_remove(&lfs, path);


@ -1,6 +1,7 @@
[[case]] # simple file seek
define = [
# simple file seek
[cases.test_seek_read]
defines = [
{COUNT=132, SKIP=4},
{COUNT=132, SKIP=128},
{COUNT=200, SKIP=10},
@ -9,11 +10,14 @@ define = [
{COUNT=4, SKIP=2},
]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen("kittycatcat");
size_t size = strlen("kittycatcat");
uint8_t buffer[1024];
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < COUNT; j++) {
lfs_file_write(&lfs, &file, buffer, size);
@ -21,7 +25,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY) => 0;
lfs_soff_t pos = -1;
@ -68,8 +72,9 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # simple file seek and write
define = [
# simple file seek and write
[cases.test_seek_write]
defines = [
{COUNT=132, SKIP=4},
{COUNT=132, SKIP=128},
{COUNT=200, SKIP=10},
@ -78,11 +83,14 @@ define = [
{COUNT=4, SKIP=2},
]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen("kittycatcat");
size_t size = strlen("kittycatcat");
uint8_t buffer[1024];
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < COUNT; j++) {
lfs_file_write(&lfs, &file, buffer, size);
@ -90,7 +98,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
lfs_soff_t pos = -1;
@ -129,15 +137,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # boundary seek and writes
define.COUNT = 132
define.OFFSETS = '"{512, 1020, 513, 1021, 511, 1019, 1441}"'
# boundary seek and writes
[cases.test_seek_boundary_write]
defines.COUNT = 132
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen("kittycatcat");
size_t size = strlen("kittycatcat");
uint8_t buffer[1024];
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < COUNT; j++) {
lfs_file_write(&lfs, &file, buffer, size);
@ -145,11 +156,11 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
size = strlen("hedgehoghog");
const lfs_soff_t offsets[] = OFFSETS;
const lfs_soff_t offsets[] = {512, 1020, 513, 1021, 511, 1019, 1441};
for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
lfs_soff_t off = offsets[i];
@ -183,8 +194,9 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # out of bounds seek
define = [
# out of bounds seek
[cases.test_seek_out_of_bounds]
defines = [
{COUNT=132, SKIP=4},
{COUNT=132, SKIP=128},
{COUNT=200, SKIP=10},
@ -193,18 +205,21 @@ define = [
{COUNT=4, SKIP=3},
]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen("kittycatcat");
size_t size = strlen("kittycatcat");
uint8_t buffer[1024];
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < COUNT; j++) {
lfs_file_write(&lfs, &file, buffer, size);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
size = strlen("kittycatcat");
@ -238,16 +253,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # inline write and seek
define.SIZE = [2, 4, 128, 132]
# inline write and seek
[cases.test_seek_inline_write]
defines.SIZE = [2, 4, 128, 132]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "tinykitty",
LFS_O_RDWR | LFS_O_CREAT) => 0;
int j = 0;
int k = 0;
uint8_t buffer[1024];
memcpy(buffer, "abcdefghijklmnopqrstuvwxyz", 26);
for (unsigned i = 0; i < SIZE; i++) {
lfs_file_write(&lfs, &file, &buffer[j++ % 26], 1) => 1;
@ -305,16 +324,20 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # file seek and write with power-loss
# file seek and write with power-loss
[cases.test_seek_reentrant_write]
# COUNT must be a power of 2 for quadratic probing to be exhaustive
define.COUNT = [4, 64, 128]
defines.COUNT = [4, 64, 128]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
lfs_file_t file;
uint8_t buffer[1024];
err = lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY);
assert(!err || err == LFS_ERR_NOENT);
if (!err) {
@ -334,14 +357,14 @@ code = '''
if (lfs_file_size(&lfs, &file) == 0) {
for (int j = 0; j < COUNT; j++) {
strcpy((char*)buffer, "kittycatcat");
size = strlen((char*)buffer);
size_t size = strlen((char*)buffer);
lfs_file_write(&lfs, &file, buffer, size) => size;
}
}
lfs_file_close(&lfs, &file) => 0;
strcpy((char*)buffer, "doggodogdog");
size = strlen((char*)buffer);
size_t size = strlen((char*)buffer);
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => COUNT*size;


@ -1,41 +1,116 @@
[[case]] # simple formatting test
# simple formatting test
[cases.test_superblocks_format]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
'''
[[case]] # mount/unmount
# mount/unmount
[cases.test_superblocks_mount]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant format
# mount/unmount, interpreting the block_count from a previous superblock
[cases.test_superblocks_mount_unknown_block_count]
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
memset(&lfs, 0, sizeof(lfs));
struct lfs_config tweaked_cfg = *cfg;
tweaked_cfg.block_count = 0;
lfs_mount(&lfs, &tweaked_cfg) => 0;
assert(lfs.block_count == cfg->block_count);
lfs_unmount(&lfs) => 0;
'''
# reentrant format
[cases.test_superblocks_reentrant_format]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # invalid mount
# invalid mount
[cases.test_superblocks_invalid_mount]
code = '''
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
lfs_t lfs;
lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''
[[case]] # expanding superblock
define.LFS_BLOCK_CYCLES = [32, 33, 1]
define.N = [10, 100, 1000]
# test we can read superblock info through lfs_fs_stat
[cases.test_superblocks_stat]
if = 'DISK_VERSION == 0'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// test we can mount and read fsinfo
lfs_mount(&lfs, cfg) => 0;
struct lfs_fsinfo fsinfo;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.disk_version == LFS_DISK_VERSION);
assert(fsinfo.name_max == LFS_NAME_MAX);
assert(fsinfo.file_max == LFS_FILE_MAX);
assert(fsinfo.attr_max == LFS_ATTR_MAX);
lfs_unmount(&lfs) => 0;
'''
[cases.test_superblocks_stat_tweaked]
if = 'DISK_VERSION == 0'
defines.TWEAKED_NAME_MAX = 63
defines.TWEAKED_FILE_MAX = '(1 << 16)-1'
defines.TWEAKED_ATTR_MAX = 512
code = '''
// create filesystem with tweaked params
struct lfs_config tweaked_cfg = *cfg;
tweaked_cfg.name_max = TWEAKED_NAME_MAX;
tweaked_cfg.file_max = TWEAKED_FILE_MAX;
tweaked_cfg.attr_max = TWEAKED_ATTR_MAX;
lfs_t lfs;
lfs_format(&lfs, &tweaked_cfg) => 0;
// test we can mount and read these params with the original config
lfs_mount(&lfs, cfg) => 0;
struct lfs_fsinfo fsinfo;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.disk_version == LFS_DISK_VERSION);
assert(fsinfo.name_max == TWEAKED_NAME_MAX);
assert(fsinfo.file_max == TWEAKED_FILE_MAX);
assert(fsinfo.attr_max == TWEAKED_ATTR_MAX);
lfs_unmount(&lfs) => 0;
'''
# expanding superblock
[cases.test_superblocks_expand]
defines.BLOCK_CYCLES = [32, 33, 1]
defines.N = [10, 100, 1000]
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
lfs_file_t file;
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
struct lfs_info info;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
@ -44,25 +119,30 @@ code = '''
lfs_unmount(&lfs) => 0;
// one last check after power-cycle
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
struct lfs_info info;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
lfs_unmount(&lfs) => 0;
'''
[[case]] # expanding superblock with power cycle
define.LFS_BLOCK_CYCLES = [32, 33, 1]
define.N = [10, 100, 1000]
# expanding superblock with power cycle
[cases.test_superblocks_expand_power_cycle]
defines.BLOCK_CYCLES = [32, 33, 1]
defines.N = [10, 100, 1000]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
// remove lingering dummy?
err = lfs_stat(&lfs, "dummy", &info);
struct lfs_info info;
int err = lfs_stat(&lfs, "dummy", &info);
assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
if (!err) {
assert(strcmp(info.name, "dummy") == 0);
@ -70,6 +150,7 @@ code = '''
lfs_remove(&lfs, "dummy") => 0;
}
lfs_file_t file;
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
@ -80,26 +161,30 @@ code = '''
}
// one last check after power-cycle
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant expanding superblock
define.LFS_BLOCK_CYCLES = [2, 1]
define.N = 24
# reentrant expanding superblock
[cases.test_superblocks_reentrant_expand]
defines.BLOCK_CYCLES = [2, 1]
defines.N = 24
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
for (int i = 0; i < N; i++) {
// remove lingering dummy?
struct lfs_info info;
err = lfs_stat(&lfs, "dummy", &info);
assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
if (!err) {
@ -108,6 +193,7 @@ code = '''
lfs_remove(&lfs, "dummy") => 0;
}
lfs_file_t file;
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
@ -119,9 +205,262 @@ code = '''
lfs_unmount(&lfs) => 0;
// one last check after power-cycle
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
lfs_unmount(&lfs) => 0;
'''
# mount with unknown block_count
[cases.test_superblocks_unknown_blocks]
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// known block_size/block_count
cfg->block_size = BLOCK_SIZE;
cfg->block_count = BLOCK_COUNT;
lfs_mount(&lfs, cfg) => 0;
struct lfs_fsinfo fsinfo;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT);
lfs_unmount(&lfs) => 0;
// unknown block_count
cfg->block_size = BLOCK_SIZE;
cfg->block_count = 0;
lfs_mount(&lfs, cfg) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT);
lfs_unmount(&lfs) => 0;
// do some work
lfs_mount(&lfs, cfg) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT);
lfs_file_t file;
lfs_file_open(&lfs, &file, "test",
LFS_O_CREAT | LFS_O_EXCL | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hello!", 6) => 6;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT);
lfs_file_open(&lfs, &file, "test", LFS_O_RDONLY) => 0;
uint8_t buffer[256];
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => 6;
lfs_file_close(&lfs, &file) => 0;
assert(memcmp(buffer, "hello!", 6) == 0);
lfs_unmount(&lfs) => 0;
'''
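In application terms, the case above means block_count can be left at 0 in lfs_config and littlefs will take it from the superblock at mount time; the discovered geometry can then be read back through lfs_fs_stat. A minimal sketch, assuming cfg is otherwise filled in for the target device:

    cfg.block_count = 0;                 // unknown, read it from the superblock
    lfs_t lfs;
    int err = lfs_mount(&lfs, &cfg);
    if (err) {
        return err;
    }
    struct lfs_fsinfo fsinfo;
    err = lfs_fs_stat(&lfs, &fsinfo);
    if (err == 0) {
        // fsinfo.block_count now holds the on-disk block count
        assert(fsinfo.block_size == cfg.block_size);
        assert(fsinfo.block_count > 0);
    }
    lfs_unmount(&lfs);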
# mount with fewer blocks than the erase_count
[cases.test_superblocks_fewer_blocks]
defines.BLOCK_COUNT = ['ERASE_COUNT/2', 'ERASE_COUNT/4', '2']
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// known block_size/block_count
cfg->block_size = BLOCK_SIZE;
cfg->block_count = BLOCK_COUNT;
lfs_mount(&lfs, cfg) => 0;
struct lfs_fsinfo fsinfo;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT);
lfs_unmount(&lfs) => 0;
// incorrect block_count
cfg->block_size = BLOCK_SIZE;
cfg->block_count = ERASE_COUNT;
lfs_mount(&lfs, cfg) => LFS_ERR_INVAL;
// unknown block_count
cfg->block_size = BLOCK_SIZE;
cfg->block_count = 0;
lfs_mount(&lfs, cfg) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT);
lfs_unmount(&lfs) => 0;
// do some work
lfs_mount(&lfs, cfg) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT);
lfs_file_t file;
lfs_file_open(&lfs, &file, "test",
LFS_O_CREAT | LFS_O_EXCL | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hello!", 6) => 6;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT);
lfs_file_open(&lfs, &file, "test", LFS_O_RDONLY) => 0;
uint8_t buffer[256];
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => 6;
lfs_file_close(&lfs, &file) => 0;
assert(memcmp(buffer, "hello!", 6) == 0);
lfs_unmount(&lfs) => 0;
'''
# mount with more blocks than the erase_count
[cases.test_superblocks_more_blocks]
defines.FORMAT_BLOCK_COUNT = '2*ERASE_COUNT'
in = 'lfs.c'
code = '''
lfs_t lfs;
lfs_init(&lfs, cfg) => 0;
lfs.block_count = BLOCK_COUNT;
lfs_mdir_t root = {
.pair = {0, 0}, // make sure this goes into block 0
.rev = 0,
.off = sizeof(uint32_t),
.etag = 0xffffffff,
.count = 0,
.tail = {LFS_BLOCK_NULL, LFS_BLOCK_NULL},
.erased = false,
.split = false,
};
lfs_superblock_t superblock = {
.version = LFS_DISK_VERSION,
.block_size = BLOCK_SIZE,
.block_count = FORMAT_BLOCK_COUNT,
.name_max = LFS_NAME_MAX,
.file_max = LFS_FILE_MAX,
.attr_max = LFS_ATTR_MAX,
};
lfs_superblock_tole32(&superblock);
lfs_dir_commit(&lfs, &root, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
{LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
{LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
&superblock})) => 0;
lfs_deinit(&lfs) => 0;
// known block_size/block_count
cfg->block_size = BLOCK_SIZE;
cfg->block_count = BLOCK_COUNT;
lfs_mount(&lfs, cfg) => LFS_ERR_INVAL;
'''
# mount and grow the filesystem
[cases.test_superblocks_grow]
defines.BLOCK_COUNT = ['ERASE_COUNT/2', 'ERASE_COUNT/4', '2']
defines.BLOCK_COUNT_2 = 'ERASE_COUNT'
defines.KNOWN_BLOCK_COUNT = [true, false]
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
if (KNOWN_BLOCK_COUNT) {
cfg->block_count = BLOCK_COUNT;
} else {
cfg->block_count = 0;
}
    // mount with block_count < erase_count
lfs_mount(&lfs, cfg) => 0;
struct lfs_fsinfo fsinfo;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT);
lfs_unmount(&lfs) => 0;
// same size is a noop
lfs_mount(&lfs, cfg) => 0;
lfs_fs_grow(&lfs, BLOCK_COUNT) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT);
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT);
lfs_unmount(&lfs) => 0;
// grow to new size
lfs_mount(&lfs, cfg) => 0;
lfs_fs_grow(&lfs, BLOCK_COUNT_2) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT_2);
lfs_unmount(&lfs) => 0;
if (KNOWN_BLOCK_COUNT) {
cfg->block_count = BLOCK_COUNT_2;
} else {
cfg->block_count = 0;
}
lfs_mount(&lfs, cfg) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT_2);
lfs_unmount(&lfs) => 0;
// mounting with the previous size should fail
cfg->block_count = BLOCK_COUNT;
lfs_mount(&lfs, cfg) => LFS_ERR_INVAL;
if (KNOWN_BLOCK_COUNT) {
cfg->block_count = BLOCK_COUNT_2;
} else {
cfg->block_count = 0;
}
// same size is a noop
lfs_mount(&lfs, cfg) => 0;
lfs_fs_grow(&lfs, BLOCK_COUNT_2) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT_2);
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT_2);
lfs_unmount(&lfs) => 0;
// do some work
lfs_mount(&lfs, cfg) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT_2);
lfs_file_t file;
lfs_file_open(&lfs, &file, "test",
LFS_O_CREAT | LFS_O_EXCL | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hello!", 6) => 6;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_fs_stat(&lfs, &fsinfo) => 0;
assert(fsinfo.block_size == BLOCK_SIZE);
assert(fsinfo.block_count == BLOCK_COUNT_2);
lfs_file_open(&lfs, &file, "test", LFS_O_RDONLY) => 0;
uint8_t buffer[256];
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => 6;
lfs_file_close(&lfs, &file) => 0;
assert(memcmp(buffer, "hello!", 6) == 0);
lfs_unmount(&lfs) => 0;
'''
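For reference, the application-level flow the grow cases above boil down to; NEW_BLOCK_COUNT is a hypothetical value for the enlarged partition, and the underlying storage must already be at least that large:

    lfs_t lfs;
    int err = lfs_mount(&lfs, &cfg);      // cfg.block_count may be the old
    if (err) {                            // count or 0 (read from superblock)
        return err;
    }
    // grow the filesystem in place; this is a one-way operation
    err = lfs_fs_grow(&lfs, NEW_BLOCK_COUNT);
    if (err) {
        lfs_unmount(&lfs);
        return err;
    }
    lfs_unmount(&lfs);
    // later mounts must pass the new count (or 0) in cfg.block_count;
    // mounting with the old, smaller count fails with LFS_ERR_INVAL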


@ -1,23 +1,29 @@
[[case]] # simple truncate
define.MEDIUMSIZE = [32, 2048]
define.LARGESIZE = 8192
# simple truncate
[cases.test_truncate_simple]
defines.MEDIUMSIZE = [31, 32, 33, 511, 512, 513, 2047, 2048, 2049]
defines.LARGESIZE = [32, 33, 512, 513, 2048, 2049, 8192, 8193]
if = 'MEDIUMSIZE < LARGESIZE'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "baldynoop",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
uint8_t buffer[1024];
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_write(&lfs, &file, buffer, lfs_min(size, LARGESIZE-j))
=> lfs_min(size, LARGESIZE-j);
}
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;
@ -27,14 +33,15 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
size = strlen("hair");
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
=> lfs_min(size, MEDIUMSIZE-j);
memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
@ -42,26 +49,32 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # truncate and read
define.MEDIUMSIZE = [32, 2048]
define.LARGESIZE = 8192
# truncate and read
[cases.test_truncate_read]
defines.MEDIUMSIZE = [31, 32, 33, 511, 512, 513, 2047, 2048, 2049]
defines.LARGESIZE = [32, 33, 512, 513, 2048, 2049, 8192, 8193]
if = 'MEDIUMSIZE < LARGESIZE'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "baldyread",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
uint8_t buffer[1024];
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_write(&lfs, &file, buffer, lfs_min(size, LARGESIZE-j))
=> lfs_min(size, LARGESIZE-j);
}
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;
@ -70,22 +83,24 @@ code = '''
size = strlen("hair");
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
=> lfs_min(size, MEDIUMSIZE-j);
memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
size = strlen("hair");
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
=> lfs_min(size, MEDIUMSIZE-j);
memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
@ -93,14 +108,18 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # write, truncate, and read
# write, truncate, and read
[cases.test_truncate_write_read]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "sequence",
LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);
uint8_t buffer[1024];
size_t size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);
lfs_size_t qsize = size / 4;
uint8_t *wb = buffer;
uint8_t *rb = buffer + size;
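// wb and rb split the scratch buffer into separate write and read halves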
@ -136,7 +155,7 @@ code = '''
lfs_file_truncate(&lfs, &file, trunc) => 0;
lfs_file_tell(&lfs, &file) => qsize;
lfs_file_size(&lfs, &file) => trunc;
// read should produce second quarter
lfs_file_read(&lfs, &file, rb, size) => trunc - qsize;
memcmp(rb, wb + qsize, trunc - qsize) => 0;
@ -145,50 +164,60 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # truncate and write
define.MEDIUMSIZE = [32, 2048]
define.LARGESIZE = 8192
# truncate and write
[cases.test_truncate_write]
defines.MEDIUMSIZE = [31, 32, 33, 511, 512, 513, 2047, 2048, 2049]
defines.LARGESIZE = [32, 33, 512, 513, 2048, 2049, 8192, 8193]
if = 'MEDIUMSIZE < LARGESIZE'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "baldywrite",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
uint8_t buffer[1024];
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_write(&lfs, &file, buffer, lfs_min(size, LARGESIZE-j))
=> lfs_min(size, LARGESIZE-j);
}
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;
// truncate
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
// and write
strcpy((char*)buffer, "bald");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_write(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
=> lfs_min(size, MEDIUMSIZE-j);
}
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
size = strlen("bald");
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "bald", size) => 0;
lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
=> lfs_min(size, MEDIUMSIZE-j);
memcmp(buffer, "bald", lfs_min(size, MEDIUMSIZE-j)) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
@ -196,30 +225,35 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # truncate write under powerloss
define.SMALLSIZE = [4, 512]
define.MEDIUMSIZE = [32, 1024]
define.LARGESIZE = 2048
# truncate write under powerloss
[cases.test_truncate_reentrant_write]
defines.SMALLSIZE = [4, 512]
defines.MEDIUMSIZE = [0, 3, 4, 5, 31, 32, 33, 511, 512, 513, 1023, 1024, 1025]
defines.LARGESIZE = 2048
reentrant = true
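# reentrant: the runner re-executes this case with power losses injected, so earlier progress may persist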
code = '''
err = lfs_mount(&lfs, &cfg);
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
lfs_file_t file;
err = lfs_file_open(&lfs, &file, "baldy", LFS_O_RDONLY);
assert(!err || err == LFS_ERR_NOENT);
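// after an injected power loss the file may be missing or left at any size that was previously committed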
if (!err) {
size = lfs_file_size(&lfs, &file);
size_t size = lfs_file_size(&lfs, &file);
assert(size == 0 ||
size == LARGESIZE ||
size == MEDIUMSIZE ||
size == SMALLSIZE);
size == (size_t)LARGESIZE ||
size == (size_t)MEDIUMSIZE ||
size == (size_t)SMALLSIZE);
for (lfs_off_t j = 0; j < size; j += 4) {
lfs_file_read(&lfs, &file, buffer, 4) => 4;
assert(memcmp(buffer, "hair", 4) == 0 ||
memcmp(buffer, "bald", 4) == 0 ||
memcmp(buffer, "comb", 4) == 0);
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, lfs_min(4, size-j))
=> lfs_min(4, size-j);
assert(memcmp(buffer, "hair", lfs_min(4, size-j)) == 0 ||
memcmp(buffer, "bald", lfs_min(4, size-j)) == 0 ||
memcmp(buffer, "comb", lfs_min(4, size-j)) == 0);
}
lfs_file_close(&lfs, &file) => 0;
}
@ -227,22 +261,27 @@ code = '''
lfs_file_open(&lfs, &file, "baldy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
lfs_file_size(&lfs, &file) => 0;
uint8_t buffer[1024];
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_write(&lfs, &file, buffer, lfs_min(size, LARGESIZE-j))
=> lfs_min(size, LARGESIZE-j);
}
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "baldy", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;
// truncate
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
// and write
strcpy((char*)buffer, "bald");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_write(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
=> lfs_min(size, MEDIUMSIZE-j);
}
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
lfs_file_close(&lfs, &file) => 0;
@ -254,7 +293,8 @@ code = '''
strcpy((char*)buffer, "comb");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < SMALLSIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_write(&lfs, &file, buffer, lfs_min(size, SMALLSIZE-j))
=> lfs_min(size, SMALLSIZE-j);
}
lfs_file_size(&lfs, &file) => SMALLSIZE;
lfs_file_close(&lfs, &file) => 0;
@ -262,12 +302,14 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # more aggressive general truncation tests
define.CONFIG = 'range(6)'
define.SMALLSIZE = 32
define.MEDIUMSIZE = 2048
define.LARGESIZE = 8192
# more aggressive general truncation tests
[cases.test_truncate_aggressive]
defines.CONFIG = 'range(6)'
defines.SMALLSIZE = 32
defines.MEDIUMSIZE = 2048
defines.LARGESIZE = 8192
code = '''
lfs_t lfs;
#define COUNT 5
const struct {
lfs_off_t startsizes[COUNT];
@ -312,16 +354,19 @@ code = '''
const lfs_off_t *hotsizes = configs[CONFIG].hotsizes;
const lfs_off_t *coldsizes = configs[CONFIG].coldsizes;
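// each config lists initial file sizes, sizes truncated to while the files
// are open (hot), and sizes truncated to after a remount (cold)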
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "hairyhead%d", i);
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
uint8_t buffer[1024];
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < startsizes[i]; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
@ -340,21 +385,25 @@ code = '''
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "hairyhead%d", i);
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => hotsizes[i];
size = strlen("hair");
size_t size = strlen("hair");
lfs_off_t j = 0;
for (; j < startsizes[i] && j < hotsizes[i]; j += size) {
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
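// if truncate grew a file past its original data, the new region reads back as zeros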
for (; j < hotsizes[i]; j += size) {
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "\0\0\0\0", size) => 0;
}
@ -367,22 +416,26 @@ code = '''
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
for (unsigned i = 0; i < COUNT; i++) {
char path[1024];
sprintf(path, "hairyhead%d", i);
lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => coldsizes[i];
size = strlen("hair");
size_t size = strlen("hair");
lfs_off_t j = 0;
for (; j < startsizes[i] && j < hotsizes[i] && j < coldsizes[i];
j += size) {
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
for (; j < coldsizes[i]; j += size) {
uint8_t buffer[1024];
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "\0\0\0\0", size) => 0;
}
@ -393,21 +446,26 @@ code = '''
lfs_unmount(&lfs) => 0;
'''
[[case]] # noop truncate
define.MEDIUMSIZE = [32, 2048]
# noop truncate
[cases.test_truncate_nop]
defines.MEDIUMSIZE = [32, 33, 512, 513, 2048, 2049, 8192, 8193]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "baldynoop",
LFS_O_RDWR | LFS_O_CREAT) => 0;
uint8_t buffer[1024];
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_write(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
=> lfs_min(size, MEDIUMSIZE-j);
// this truncate should do nothing
lfs_file_truncate(&lfs, &file, j+size) => 0;
lfs_file_truncate(&lfs, &file, j+lfs_min(size, MEDIUMSIZE-j)) => 0;
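// (the file already ends at exactly this offset, so its size is unchanged)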
}
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
@ -417,8 +475,9 @@ code = '''
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
=> lfs_min(size, MEDIUMSIZE-j);
memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
@ -426,12 +485,13 @@ code = '''
lfs_unmount(&lfs) => 0;
// still there after reboot?
lfs_mount(&lfs, &cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
=> lfs_min(size, MEDIUMSIZE-j);
memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;