mirror of https://gitee.com/openharmony/third_party_littlefs
synced 2024-11-26 16:42:14 +00:00

feat: update to v2.5.0

Close: #I77XXH
Signed-off-by: liuwenxin <liuwenxin11@huawei.com>
Change-Id: Id783611964850c03b96eb0f2a24bdedd376650df

This commit is contained in:
parent 473722ba75
commit 6b96ac5db9
.gitignore (vendored): 2 changes
@@ -2,6 +2,8 @@
 *.o
 *.d
 *.a
+*.ci
+*.csv
 
 # Testing things
 blocks/
LICENSE.md: 1 change
@@ -1,3 +1,4 @@
+Copyright (c) 2022, The littlefs authors.
 Copyright (c) 2017, Arm Limited. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification,
Makefile: 93 changes
@@ -17,44 +17,63 @@ TARGET ?= $(BUILDDIR)lfs.a
 endif
 
-CC ?= gcc
-AR ?= ar
-SIZE ?= size
-CTAGS ?= ctags
-NM ?= nm
-LCOV ?= lcov
+CC      ?= gcc
+AR      ?= ar
+SIZE    ?= size
+CTAGS   ?= ctags
+NM      ?= nm
+OBJDUMP ?= objdump
+LCOV    ?= lcov
 
 SRC ?= $(wildcard *.c)
 OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
 DEP := $(SRC:%.c=$(BUILDDIR)%.d)
 ASM := $(SRC:%.c=$(BUILDDIR)%.s)
+CGI := $(SRC:%.c=$(BUILDDIR)%.ci)
 
 ifdef DEBUG
-override CFLAGS += -O0 -g3
+override CFLAGS += -O0
 else
 override CFLAGS += -Os
 endif
 ifdef TRACE
 override CFLAGS += -DLFS_YES_TRACE
 endif
+override CFLAGS += -g3
 override CFLAGS += -I.
 override CFLAGS += -std=c99 -Wall -pedantic
 override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef
 
 ifdef VERBOSE
-override TESTFLAGS += -v
-override CODEFLAGS += -v
+override TESTFLAGS     += -v
+override CALLSFLAGS    += -v
+override CODEFLAGS     += -v
+override DATAFLAGS     += -v
+override STACKFLAGS    += -v
+override STRUCTSFLAGS  += -v
 override COVERAGEFLAGS += -v
 endif
 ifdef EXEC
 override TESTFLAGS += --exec="$(EXEC)"
 endif
 ifdef COVERAGE
 override TESTFLAGS += --coverage
 endif
 ifdef BUILDDIR
-override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
-override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
+override TESTFLAGS     += --build-dir="$(BUILDDIR:/=)"
+override CALLSFLAGS    += --build-dir="$(BUILDDIR:/=)"
+override CODEFLAGS     += --build-dir="$(BUILDDIR:/=)"
+override DATAFLAGS     += --build-dir="$(BUILDDIR:/=)"
+override STACKFLAGS    += --build-dir="$(BUILDDIR:/=)"
+override STRUCTSFLAGS  += --build-dir="$(BUILDDIR:/=)"
 override COVERAGEFLAGS += --build-dir="$(BUILDDIR:/=)"
 endif
 ifneq ($(NM),nm)
 override CODEFLAGS += --nm-tool="$(NM)"
+override DATAFLAGS += --nm-tool="$(NM)"
 endif
+ifneq ($(OBJDUMP),objdump)
+override STRUCTSFLAGS += --objdump-tool="$(OBJDUMP)"
+endif
@@ -73,9 +92,9 @@ size: $(OBJ)
 tags:
 	$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)
 
-.PHONY: code
-code: $(OBJ)
-	./scripts/code.py $^ $(CODEFLAGS)
+.PHONY: calls
+calls: $(CGI)
+	./scripts/calls.py $^ $(CALLSFLAGS)
 
 .PHONY: test
 test:
@@ -84,9 +103,30 @@ test:
 test%: tests/test$$(firstword $$(subst \#, ,%)).toml
 	./scripts/test.py $@ $(TESTFLAGS)
 
+.PHONY: code
+code: $(OBJ)
+	./scripts/code.py $^ -S $(CODEFLAGS)
+
+.PHONY: data
+data: $(OBJ)
+	./scripts/data.py $^ -S $(DATAFLAGS)
+
+.PHONY: stack
+stack: $(CGI)
+	./scripts/stack.py $^ -S $(STACKFLAGS)
+
+.PHONY: structs
+structs: $(OBJ)
+	./scripts/structs.py $^ -S $(STRUCTSFLAGS)
+
 .PHONY: coverage
 coverage:
-	./scripts/coverage.py $(BUILDDIR)tests/*.toml.info $(COVERAGEFLAGS)
+	./scripts/coverage.py $(BUILDDIR)tests/*.toml.info -s $(COVERAGEFLAGS)
+
+.PHONY: summary
+summary: $(BUILDDIR)lfs.csv
+	./scripts/summary.py -Y $^ $(SUMMARYFLAGS)
 
 
 # rules
 -include $(DEP)
@@ -95,20 +135,39 @@ coverage:
 $(BUILDDIR)lfs: $(OBJ)
 	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
 
-$(BUILDDIR)%.a: $(OBJ)
+$(BUILDDIR)lfs.a: $(OBJ)
 	$(AR) rcs $@ $^
 
+$(BUILDDIR)lfs.csv: $(OBJ) $(CGI)
+	./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o $@
+	./scripts/data.py $(OBJ) -q -m $@ $(DATAFLAGS) -o $@
+	./scripts/stack.py $(CGI) -q -m $@ $(STACKFLAGS) -o $@
+	./scripts/structs.py $(OBJ) -q -m $@ $(STRUCTSFLAGS) -o $@
+	$(if $(COVERAGE),\
+		./scripts/coverage.py $(BUILDDIR)tests/*.toml.info \
+			-q -m $@ $(COVERAGEFLAGS) -o $@)
+
 $(BUILDDIR)%.o: %.c
 	$(CC) -c -MMD $(CFLAGS) $< -o $@
 
 $(BUILDDIR)%.s: %.c
 	$(CC) -S $(CFLAGS) $< -o $@
 
+# gcc depends on the output file for intermediate file names, so
+# we can't omit to .o output. We also need to serialize with the
+# normal .o rule because otherwise we can end up with multiprocess
+# problems with two instances of gcc modifying the same .o
+$(BUILDDIR)%.ci: %.c | $(BUILDDIR)%.o
+	$(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $|
+
 # clean everything
 .PHONY: clean
 clean:
-	rm -f $(TARGET)
+	rm -f $(BUILDDIR)lfs
+	rm -f $(BUILDDIR)lfs.a
+	rm -f $(BUILDDIR)lfs.csv
 	rm -f $(OBJ)
+	rm -f $(CGI)
 	rm -f $(DEP)
 	rm -f $(ASM)
+	rm -f $(BUILDDIR)tests/*.toml.*
README.OpenSource: 2 changes
@@ -3,7 +3,7 @@
     "Name" : "littlefs",
     "License" : "BSD 3-Clause License",
     "License File" : "LICENSE.md",
-    "Version Number" : "V2.4",
+    "Version Number" : "V2.5",
     "Owner" : "wangmihu@huawei.com",
     "Upstream URL" : "https://github.com/littlefs-project/littlefs",
     "Description" : "A little fail-safe filesystem designed for microcontrollers."
README.md: 2 changes
@@ -192,7 +192,7 @@ More details on how littlefs works can be found in [DESIGN.md](DESIGN.md) and
 ## Testing
 
 The littlefs comes with a test suite designed to run on a PC using the
-[emulated block device](emubd/lfs_emubd.h) found in the emubd directory.
+[emulated block device](bd/lfs_testbd.h) found in the `bd` directory.
 The tests assume a Linux environment and can be started with make:
 
 ``` bash
SPEC.md: 14 changes
@@ -233,19 +233,19 @@ Metadata tag fields:
    into a 3-bit abstract type and an 8-bit chunk field. Note that the value
    `0x000` is invalid and not assigned a type.
 
-3. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into
+1. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into
    8 categories that facilitate bitmasked lookups.
 
-4. **Chunk (8-bits)** - Chunk field used for various purposes by the different
+2. **Chunk (8-bits)** - Chunk field used for various purposes by the different
    abstract types. type1+chunk+id form a unique identifier for each tag in the
    metadata block.
 
-5. **Id (10-bits)** - File id associated with the tag. Each file in a metadata
+3. **Id (10-bits)** - File id associated with the tag. Each file in a metadata
    block gets a unique id which is used to associate tags with that file. The
    special value `0x3ff` is used for any tags that are not associated with a
    file, such as directory and global metadata.
 
-6. **Length (10-bits)** - Length of the data in bytes. The special value
+4. **Length (10-bits)** - Length of the data in bytes. The special value
    `0x3ff` indicates that this tag has been deleted.
 
 ## Metadata types
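These four fields, together with a leading valid bit, pack into a single
32-bit tag. A minimal sketch of unpacking one, assuming the MSB-first layout
valid(1)/type1(3)/chunk(8)/id(10)/length(10); the helper name and example
value are illustrative, not taken from SPEC.md:

``` python
def decode_tag(tag):
    # assumed bit layout: 1-bit valid, 3-bit type1, 8-bit chunk,
    # 10-bit id, 10-bit length, most-significant bit first
    return {
        'valid':  (tag >> 31) & 0x1,
        'type1':  (tag >> 28) & 0x7,
        'chunk':  (tag >> 20) & 0xff,
        'id':     (tag >> 10) & 0x3ff,
        'length': (tag >>  0) & 0x3ff,
    }

# per the fields above, an id of 0x3ff marks a tag not associated with
# any file, and a length of 0x3ff marks a deleted tag
print(decode_tag(0x7fffffff))
```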
bd/lfs_filebd.c: changes
@@ -1,6 +1,7 @@
 /*
  * Block device emulated in a file
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +11,10 @@
 #include <unistd.h>
 #include <errno.h>
 
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
 int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
         const struct lfs_filebd_config *bdcfg) {
     LFS_FILEBD_TRACE("lfs_filebd_createcfg(%p {.context=%p, "
@@ -27,7 +32,12 @@ int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
     bd->cfg = bdcfg;
 
     // open file
+#ifdef _WIN32
+    bd->fd = open(path, O_RDWR | O_CREAT | O_BINARY, 0666);
+#else
     bd->fd = open(path, O_RDWR | O_CREAT, 0666);
+#endif
+
     if (bd->fd < 0) {
         int err = -errno;
         LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", err);
@@ -80,7 +90,7 @@ int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
     LFS_ASSERT(size % cfg->read_size == 0);
     LFS_ASSERT(block < cfg->block_count);
 
-    // zero for reproducability (in case file is truncated)
+    // zero for reproducibility (in case file is truncated)
     if (bd->cfg->erase_value != -1) {
         memset(buffer, bd->cfg->erase_value, size);
     }
@@ -193,7 +203,11 @@ int lfs_filebd_sync(const struct lfs_config *cfg) {
     LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)cfg);
     // file sync
     lfs_filebd_t *bd = cfg->context;
+#ifdef _WIN32
+    int err = FlushFileBuffers((HANDLE) _get_osfhandle(fd)) ? 0 : -1;
+#else
     int err = fsync(bd->fd);
+#endif
     if (err) {
         err = -errno;
         LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0);
bd/lfs_filebd.h: 1 change
@@ -1,6 +1,7 @@
 /*
  * Block device emulated in a file
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
bd/lfs_rambd.c: changes
@@ -1,6 +1,7 @@
 /*
  * Block device emulated in RAM
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -32,10 +33,12 @@ int lfs_rambd_createcfg(const struct lfs_config *cfg,
         }
     }
 
-    // zero for reproducability?
+    // zero for reproducibility?
     if (bd->cfg->erase_value != -1) {
         memset(bd->buffer, bd->cfg->erase_value,
                 cfg->block_size * cfg->block_count);
+    } else {
+        memset(bd->buffer, 0, cfg->block_size * cfg->block_count);
     }
 
     LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", 0);
bd/lfs_rambd.h: 1 change
@@ -1,6 +1,7 @@
 /*
  * Block device emulated in RAM
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
bd/lfs_testbd.c: 1 change
@@ -2,6 +2,7 @@
  * Testing block device, wraps filebd and rambd while providing a bunch
  * of hooks for testing littlefs in various conditions.
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
bd/lfs_testbd.h: 1 change
@@ -2,6 +2,7 @@
  * Testing block device, wraps filebd and rambd while providing a bunch
  * of hooks for testing littlefs in various conditions.
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
lfs.h: 44 changes
@@ -1,6 +1,7 @@
 /*
  * The little filesystem
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -22,7 +23,7 @@ extern "C"
 // Software library version
 // Major (top-nibble), incremented on backwards incompatible changes
 // Minor (bottom-nibble), incremented on feature additions
-#define LFS_VERSION 0x00020004
+#define LFS_VERSION 0x00020005
 #define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
 #define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
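The version bump itself is mechanical: the major and minor halves live in the
upper and lower 16 bits of LFS_VERSION. A quick check of the macro arithmetic,
sketched in Python for brevity:

``` python
LFS_VERSION = 0x00020005

# mirrors LFS_VERSION_MAJOR / LFS_VERSION_MINOR in lfs.h
major = 0xffff & (LFS_VERSION >> 16)
minor = 0xffff & (LFS_VERSION >> 0)

assert (major, minor) == (2, 5)  # v2.5, matching README.OpenSource
```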
@@ -159,49 +160,49 @@ struct lfs_config {
     // information to the block device operations
     void *context;
 
-    // Read a region in a block. Negative error codes are propogated
+    // Read a region in a block. Negative error codes are propagated
     // to the user.
     int (*read)(const struct lfs_config *c, lfs_block_t block,
             lfs_off_t off, void *buffer, lfs_size_t size);
 
     // Program a region in a block. The block must have previously
-    // been erased. Negative error codes are propogated to the user.
+    // been erased. Negative error codes are propagated to the user.
     // May return LFS_ERR_CORRUPT if the block should be considered bad.
     int (*prog)(const struct lfs_config *c, lfs_block_t block,
             lfs_off_t off, const void *buffer, lfs_size_t size);
 
     // Erase a block. A block must be erased before being programmed.
     // The state of an erased block is undefined. Negative error codes
-    // are propogated to the user.
+    // are propagated to the user.
     // May return LFS_ERR_CORRUPT if the block should be considered bad.
     int (*erase)(const struct lfs_config *c, lfs_block_t block);
 
     // Sync the state of the underlying block device. Negative error codes
-    // are propogated to the user.
+    // are propagated to the user.
     int (*sync)(const struct lfs_config *c);
 
 #ifdef LFS_THREADSAFE
     // Lock the underlying block device. Negative error codes
-    // are propogated to the user.
+    // are propagated to the user.
     int (*lock)(const struct lfs_config *c);
 
     // Unlock the underlying block device. Negative error codes
-    // are propogated to the user.
+    // are propagated to the user.
     int (*unlock)(const struct lfs_config *c);
 #endif
 
-    // Minimum size of a block read. All read operations will be a
+    // Minimum size of a block read in bytes. All read operations will be a
     // multiple of this value.
     lfs_size_t read_size;
 
-    // Minimum size of a block program. All program operations will be a
-    // multiple of this value.
+    // Minimum size of a block program in bytes. All program operations will be
+    // a multiple of this value.
     lfs_size_t prog_size;
 
-    // Size of an erasable block. This does not impact ram consumption and
-    // may be larger than the physical erase size. However, non-inlined files
-    // take up at minimum one block. Must be a multiple of the read
-    // and program sizes.
+    // Size of an erasable block in bytes. This does not impact ram consumption
+    // and may be larger than the physical erase size. However, non-inlined
+    // files take up at minimum one block. Must be a multiple of the read and
+    // program sizes.
     lfs_size_t block_size;
 
     // Number of erasable blocks on the device.
@@ -215,11 +216,11 @@ struct lfs_config {
     // Set to -1 to disable block-level wear-leveling.
     int32_t block_cycles;
 
-    // Size of block caches. Each cache buffers a portion of a block in RAM.
-    // The littlefs needs a read cache, a program cache, and one additional
+    // Size of block caches in bytes. Each cache buffers a portion of a block in
+    // RAM. The littlefs needs a read cache, a program cache, and one additional
     // cache per file. Larger caches can improve performance by storing more
-    // data and reducing the number of disk accesses. Must be a multiple of
-    // the read and program sizes, and a factor of the block size.
+    // data and reducing the number of disk accesses. Must be a multiple of the
+    // read and program sizes, and a factor of the block size.
     lfs_size_t cache_size;
 
     // Size of the lookahead buffer in bytes. A larger lookahead buffer
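The geometry constraints these comments spell out (block_size a multiple of
the read and program sizes, cache_size a multiple of both and a factor of the
block size) are easy to get wrong. A small sketch of the checks, in Python for
brevity; the function name and example values are illustrative only:

``` python
def check_geometry(read_size, prog_size, block_size, cache_size):
    # block_size must be a multiple of the read and program sizes
    assert block_size % read_size == 0
    assert block_size % prog_size == 0
    # cache_size must be a multiple of the read and program sizes,
    # and a factor of the block size
    assert cache_size % read_size == 0
    assert cache_size % prog_size == 0
    assert block_size % cache_size == 0

# e.g. a typical NOR-flash-like configuration
check_geometry(read_size=16, prog_size=16, block_size=4096, cache_size=64)
```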
@@ -485,7 +486,7 @@ int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info);
 // Returns the size of the attribute, or a negative error code on failure.
 // Note, the returned size is the size of the attribute on disk, irrespective
 // of the size of the buffer. This can be used to dynamically allocate a buffer
-// or check for existance.
+// or check for existence.
 lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
         uint8_t type, void *buffer, lfs_size_t size);
 
@@ -513,6 +514,7 @@ int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
 
 /// File operations ///
 
+#ifndef LFS_NO_MALLOC
 // Open a file
 //
 // The mode that the file is opened in is determined by the flags, which
@@ -522,6 +524,10 @@ int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
 int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
         const char *path, int flags);
 
+// if LFS_NO_MALLOC is defined, lfs_file_open() will fail with LFS_ERR_NOMEM
+// thus use lfs_file_opencfg() with config.buffer set.
+#endif
+
 // Open a file with extra configuration
 //
 // The mode that the file is opened in is determined by the flags, which
lfs_util.c: 1 change
@@ -1,6 +1,7 @@
 /*
  * lfs util functions
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
lfs_util.h: 1 change
@@ -1,6 +1,7 @@
 /*
  * lfs utility functions
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
scripts/code.py: 158 changes
@@ -15,7 +15,7 @@ import csv
 import collections as co
 
 
-OBJ_PATHS = ['*.o', 'bd/*.o']
+OBJ_PATHS = ['*.o']
 
 def collect(paths, **args):
     results = co.defaultdict(lambda: 0)
@@ -31,7 +31,8 @@ def collect(paths, **args):
         proc = sp.Popen(cmd,
             stdout=sp.PIPE,
             stderr=sp.PIPE if not args.get('verbose') else None,
-            universal_newlines=True)
+            universal_newlines=True,
+            errors='replace')
         for line in proc.stdout:
             m = pattern.match(line)
             if m:
@@ -48,16 +49,30 @@ def collect(paths, **args):
         # map to source files
         if args.get('build_dir'):
             file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
+        # replace .o with .c, different scripts report .o/.c, we need to
+        # choose one if we want to deduplicate csv files
+        file = re.sub('\.o$', '.c', file)
         # discard internal functions
-        if func.startswith('__'):
-            continue
+        if not args.get('everything'):
+            if func.startswith('__'):
+                continue
         # discard .8449 suffixes created by optimizer
         func = re.sub('\.[0-9]+', '', func)
 
         flat_results.append((file, func, size))
 
     return flat_results
 
 def main(**args):
+    def openio(path, mode='r'):
+        if path == '-':
+            if 'r' in mode:
+                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+            else:
+                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+        else:
+            return open(path, mode)
+
     # find sizes
     if not args.get('use', None):
         # find .o files
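For orientation, collect() above scrapes nm output: each line of
`nm --size-sort` is matched for size, symbol type, and name, sizes accumulate
per (file, function), and `.8449`-style optimizer suffixes fold back into the
parent symbol. A standalone sketch of that post-processing; the sample nm
lines are made up for illustration:

``` python
import re
import collections as co

# same shape as the pattern built in collect(), here with text symbols only
pattern = re.compile(
    '^(?P<size>[0-9a-fA-F]+)'
    ' (?P<type>[tT])'
    ' (?P<func>.+?)$')

sample = [
    '00000042 T lfs_mount',
    '00000010 t lfs_mount.8449',  # optimizer-split fragment
]

results = co.defaultdict(lambda: 0)
for line in sample:
    m = pattern.match(line)
    if m:
        # fold the .8449 suffix back into the parent function
        func = re.sub(r'\.[0-9]+', '', m.group('func'))
        results[func] += int(m.group('size'), 16)

print(dict(results))  # {'lfs_mount': 82}, i.e. 0x42 + 0x10
```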
@@ -75,13 +90,14 @@ def main(**args):
 
         results = collect(paths, **args)
     else:
-        with open(args['use']) as f:
+        with openio(args['use']) as f:
             r = csv.DictReader(f)
             results = [
                 (   result['file'],
-                    result['function'],
-                    int(result['size']))
-                for result in r]
+                    result['name'],
+                    int(result['code_size']))
+                for result in r
+                if result.get('code_size') not in {None, ''}]
 
     total = 0
     for _, _, size in results:
@@ -89,13 +105,17 @@ def main(**args):
 
     # find previous results?
     if args.get('diff'):
-        with open(args['diff']) as f:
-            r = csv.DictReader(f)
-            prev_results = [
-                (   result['file'],
-                    result['function'],
-                    int(result['size']))
-                for result in r]
+        try:
+            with openio(args['diff']) as f:
+                r = csv.DictReader(f)
+                prev_results = [
+                    (   result['file'],
+                        result['name'],
+                        int(result['code_size']))
+                    for result in r
+                    if result.get('code_size') not in {None, ''}]
+        except FileNotFoundError:
+            prev_results = []
 
     prev_total = 0
     for _, _, size in prev_results:
@@ -103,14 +123,34 @@ def main(**args):
 
     # write results to CSV
     if args.get('output'):
-        with open(args['output'], 'w') as f:
-            w = csv.writer(f)
-            w.writerow(['file', 'function', 'size'])
-            for file, func, size in sorted(results):
-                w.writerow((file, func, size))
+        merged_results = co.defaultdict(lambda: {})
+        other_fields = []
+
+        # merge?
+        if args.get('merge'):
+            try:
+                with openio(args['merge']) as f:
+                    r = csv.DictReader(f)
+                    for result in r:
+                        file = result.pop('file', '')
+                        func = result.pop('name', '')
+                        result.pop('code_size', None)
+                        merged_results[(file, func)] = result
+                        other_fields = result.keys()
+            except FileNotFoundError:
+                pass
+
+        for file, func, size in results:
+            merged_results[(file, func)]['code_size'] = size
+
+        with openio(args['output'], 'w') as f:
+            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size'])
+            w.writeheader()
+            for (file, func), result in sorted(merged_results.items()):
+                w.writerow({'file': file, 'name': func, **result})
 
     # print results
-    def dedup_entries(results, by='function'):
+    def dedup_entries(results, by='name'):
         entries = co.defaultdict(lambda: 0)
         for file, func, size in results:
             entry = (file if by == 'file' else func)
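The new -m/--merge path is what lets the Makefile build up a single lfs.csv in
stages: each script rereads the CSV written by the previous one, drops only
its own column, and rewrites the union. Roughly, for a hypothetical two-stage
run:

``` python
import csv, io

# stage 1: code.py wrote file/name/code_size
stage1 = "file,name,code_size\nlfs.c,lfs_mount,420\n"

# stage 2: data.py merges, keeping code_size and adding data_size
merged = {}
for row in csv.DictReader(io.StringIO(stage1)):
    key = (row.pop('file'), row.pop('name'))
    row.pop('data_size', None)  # only our own column is replaced
    merged[key] = row
merged[('lfs.c', 'lfs_mount')]['data_size'] = 16

out = io.StringIO()
w = csv.DictWriter(out, ['file', 'name', 'code_size', 'data_size'],
    lineterminator='\n')
w.writeheader()
for (file, name), rest in sorted(merged.items()):
    w.writerow({'file': file, 'name': name, **rest})
print(out.getvalue())
# file,name,code_size,data_size
# lfs.c,lfs_mount,420,16
```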
@@ -126,45 +166,67 @@ def main(**args):
             diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
         return diff
 
+    def sorted_entries(entries):
+        if args.get('size_sort'):
+            return sorted(entries, key=lambda x: (-x[1], x))
+        elif args.get('reverse_size_sort'):
+            return sorted(entries, key=lambda x: (+x[1], x))
+        else:
+            return sorted(entries)
+
+    def sorted_diff_entries(entries):
+        if args.get('size_sort'):
+            return sorted(entries, key=lambda x: (-x[1][1], x))
+        elif args.get('reverse_size_sort'):
+            return sorted(entries, key=lambda x: (+x[1][1], x))
+        else:
+            return sorted(entries, key=lambda x: (-x[1][3], x))
+
     def print_header(by=''):
         if not args.get('diff'):
             print('%-36s %7s' % (by, 'size'))
         else:
             print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
 
-    def print_entries(by='function'):
+    def print_entry(name, size):
+        print("%-36s %7d" % (name, size))
+
+    def print_diff_entry(name, old, new, diff, ratio):
+        print("%-36s %7s %7s %+7d%s" % (name,
+            old or "-",
+            new or "-",
+            diff,
+            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    def print_entries(by='name'):
         entries = dedup_entries(results, by=by)
 
         if not args.get('diff'):
             print_header(by=by)
-            for name, size in sorted(entries.items()):
-                print("%-36s %7d" % (name, size))
+            for name, size in sorted_entries(entries.items()):
+                print_entry(name, size)
         else:
             prev_entries = dedup_entries(prev_results, by=by)
             diff = diff_entries(prev_entries, entries)
             print_header(by='%s (%d added, %d removed)' % (by,
                 sum(1 for old, _, _, _ in diff.values() if not old),
                 sum(1 for _, new, _, _ in diff.values() if not new)))
-            for name, (old, new, diff, ratio) in sorted(diff.items(),
-                    key=lambda x: (-x[1][3], x)):
+            for name, (old, new, diff, ratio) in sorted_diff_entries(
+                    diff.items()):
                 if ratio or args.get('all'):
-                    print("%-36s %7s %7s %+7d%s" % (name,
-                        old or "-",
-                        new or "-",
-                        diff,
-                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+                    print_diff_entry(name, old, new, diff, ratio)
 
     def print_totals():
         if not args.get('diff'):
-            print("%-36s %7d" % ('TOTAL', total))
+            print_entry('TOTAL', total)
         else:
-            ratio = (total-prev_total)/prev_total if prev_total else 1.0
-            print("%-36s %7s %7s %+7d%s" % (
-                'TOTAL',
-                prev_total if prev_total else '-',
-                total if total else '-',
+            ratio = (0.0 if not prev_total and not total
+                else 1.0 if not prev_total
+                else (total-prev_total)/prev_total)
+            print_diff_entry('TOTAL',
+                prev_total, total,
                 total-prev_total,
-                ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+                ratio)
 
     if args.get('quiet'):
         pass
@@ -175,7 +237,7 @@ def main(**args):
         print_entries(by='file')
         print_totals()
     else:
-        print_entries(by='function')
+        print_entries(by='name')
         print_totals()
 
 if __name__ == "__main__":
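The sort helpers added above rely on a small trick: keying on the tuple
(-size, x) sorts by descending size with the full entry as a deterministic
tiebreaker, so -s and -S produce stable output. A toy check with illustrative
entries:

``` python
entries = {'lfs_mount': 420, 'lfs_format': 420, 'lfs_file_open': 96}

# equivalent of --size-sort: biggest first, ties broken alphabetically
by_size = sorted(entries.items(), key=lambda x: (-x[1], x))
assert [name for name, _ in by_size] == \
    ['lfs_format', 'lfs_mount', 'lfs_file_open']
```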
@@ -188,22 +250,30 @@ if __name__ == "__main__":
             or a list of paths. Defaults to %r." % OBJ_PATHS)
     parser.add_argument('-v', '--verbose', action='store_true',
         help="Output commands that run behind the scenes.")
-    parser.add_argument('-q', '--quiet', action='store_true',
-        help="Don't show anything, useful with -o.")
     parser.add_argument('-o', '--output',
         help="Specify CSV file to store results.")
     parser.add_argument('-u', '--use',
         help="Don't compile and find code sizes, instead use this CSV file.")
     parser.add_argument('-d', '--diff',
         help="Specify CSV file to diff code size against.")
+    parser.add_argument('-m', '--merge',
+        help="Merge with an existing CSV file when writing to output.")
     parser.add_argument('-a', '--all', action='store_true',
         help="Show all functions, not just the ones that changed.")
-    parser.add_argument('--files', action='store_true',
+    parser.add_argument('-A', '--everything', action='store_true',
+        help="Include builtin and libc specific symbols.")
+    parser.add_argument('-s', '--size-sort', action='store_true',
+        help="Sort by size.")
+    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
+        help="Sort by size, but backwards.")
+    parser.add_argument('-F', '--files', action='store_true',
         help="Show file-level code sizes. Note this does not include padding! "
             "So sizes may differ from other tools.")
-    parser.add_argument('-s', '--summary', action='store_true',
+    parser.add_argument('-Y', '--summary', action='store_true',
         help="Only show the total code size.")
+    parser.add_argument('-q', '--quiet', action='store_true',
+        help="Don't show anything, useful with -o.")
-    parser.add_argument('--type', default='tTrRdDbB',
+    parser.add_argument('--type', default='tTrRdD',
         help="Type of symbols to report, this uses the same single-character "
             "type-names emitted by nm. Defaults to %(default)r.")
     parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
scripts/coverage.py: changes
@@ -55,8 +55,9 @@ def collect(paths, **args):
     for (file, func), (hits, count) in reduced_funcs.items():
         # discard internal/testing functions (test_* injected with
         # internal testing)
-        if func.startswith('__') or func.startswith('test_'):
-            continue
+        if not args.get('everything'):
+            if func.startswith('__') or func.startswith('test_'):
+                continue
         # discard .8449 suffixes created by optimizer
         func = re.sub('\.[0-9]+', '', func)
         results.append((file, func, hits, count))
@@ -65,6 +66,15 @@ def collect(paths, **args):
 
 
 def main(**args):
+    def openio(path, mode='r'):
+        if path == '-':
+            if 'r' in mode:
+                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+            else:
+                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+        else:
+            return open(path, mode)
+
     # find coverage
     if not args.get('use'):
         # find *.info files
@@ -82,14 +92,16 @@ def main(**args):
 
         results = collect(paths, **args)
     else:
-        with open(args['use']) as f:
+        with openio(args['use']) as f:
             r = csv.DictReader(f)
             results = [
                 (   result['file'],
-                    result['function'],
-                    int(result['hits']),
-                    int(result['count']))
-                for result in r]
+                    result['name'],
+                    int(result['coverage_hits']),
+                    int(result['coverage_count']))
+                for result in r
+                if result.get('coverage_hits') not in {None, ''}
+                if result.get('coverage_count') not in {None, ''}]
 
     total_hits, total_count = 0, 0
     for _, _, hits, count in results:
@@ -98,14 +110,19 @@ def main(**args):
 
     # find previous results?
     if args.get('diff'):
-        with open(args['diff']) as f:
-            r = csv.DictReader(f)
-            prev_results = [
-                (   result['file'],
-                    result['function'],
-                    int(result['hits']),
-                    int(result['count']))
-                for result in r]
+        try:
+            with openio(args['diff']) as f:
+                r = csv.DictReader(f)
+                prev_results = [
+                    (   result['file'],
+                        result['name'],
+                        int(result['coverage_hits']),
+                        int(result['coverage_count']))
+                    for result in r
+                    if result.get('coverage_hits') not in {None, ''}
+                    if result.get('coverage_count') not in {None, ''}]
+        except FileNotFoundError:
+            prev_results = []
 
     prev_total_hits, prev_total_count = 0, 0
     for _, _, hits, count in prev_results:
@@ -114,14 +131,36 @@ def main(**args):
 
     # write results to CSV
     if args.get('output'):
-        with open(args['output'], 'w') as f:
-            w = csv.writer(f)
-            w.writerow(['file', 'function', 'hits', 'count'])
-            for file, func, hits, count in sorted(results):
-                w.writerow((file, func, hits, count))
+        merged_results = co.defaultdict(lambda: {})
+        other_fields = []
+
+        # merge?
+        if args.get('merge'):
+            try:
+                with openio(args['merge']) as f:
+                    r = csv.DictReader(f)
+                    for result in r:
+                        file = result.pop('file', '')
+                        func = result.pop('name', '')
+                        result.pop('coverage_hits', None)
+                        result.pop('coverage_count', None)
+                        merged_results[(file, func)] = result
+                        other_fields = result.keys()
+            except FileNotFoundError:
+                pass
+
+        for file, func, hits, count in results:
+            merged_results[(file, func)]['coverage_hits'] = hits
+            merged_results[(file, func)]['coverage_count'] = count
+
+        with openio(args['output'], 'w') as f:
+            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'coverage_hits', 'coverage_count'])
+            w.writeheader()
+            for (file, func), result in sorted(merged_results.items()):
+                w.writerow({'file': file, 'name': func, **result})
 
     # print results
-    def dedup_entries(results, by='function'):
+    def dedup_entries(results, by='name'):
         entries = co.defaultdict(lambda: (0, 0))
         for file, func, hits, count in results:
             entry = (file if by == 'file' else func)
@@ -147,23 +186,59 @@ def main(**args):
             - (old_hits/old_count if old_count else 1.0)))
         return diff
 
+    def sorted_entries(entries):
+        if args.get('coverage_sort'):
+            return sorted(entries, key=lambda x: (-(x[1][0]/x[1][1] if x[1][1] else -1), x))
+        elif args.get('reverse_coverage_sort'):
+            return sorted(entries, key=lambda x: (+(x[1][0]/x[1][1] if x[1][1] else -1), x))
+        else:
+            return sorted(entries)
+
+    def sorted_diff_entries(entries):
+        if args.get('coverage_sort'):
+            return sorted(entries, key=lambda x: (-(x[1][2]/x[1][3] if x[1][3] else -1), x))
+        elif args.get('reverse_coverage_sort'):
+            return sorted(entries, key=lambda x: (+(x[1][2]/x[1][3] if x[1][3] else -1), x))
+        else:
+            return sorted(entries, key=lambda x: (-x[1][6], x))
+
     def print_header(by=''):
         if not args.get('diff'):
             print('%-36s %19s' % (by, 'hits/line'))
         else:
             print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))
 
-    def print_entries(by='function'):
+    def print_entry(name, hits, count):
+        print("%-36s %11s %7s" % (name,
+            '%d/%d' % (hits, count)
+                if count else '-',
+            '%.1f%%' % (100*hits/count)
+                if count else '-'))
+
+    def print_diff_entry(name,
+            old_hits, old_count,
+            new_hits, new_count,
+            diff_hits, diff_count,
+            ratio):
+        print("%-36s %11s %7s %11s %7s %11s%s" % (name,
+            '%d/%d' % (old_hits, old_count)
+                if old_count else '-',
+            '%.1f%%' % (100*old_hits/old_count)
+                if old_count else '-',
+            '%d/%d' % (new_hits, new_count)
+                if new_count else '-',
+            '%.1f%%' % (100*new_hits/new_count)
+                if new_count else '-',
+            '%+d/%+d' % (diff_hits, diff_count),
+            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    def print_entries(by='name'):
         entries = dedup_entries(results, by=by)
 
         if not args.get('diff'):
             print_header(by=by)
-            for name, (hits, count) in sorted(entries.items()):
-                print("%-36s %11s %7s" % (name,
-                    '%d/%d' % (hits, count)
-                        if count else '-',
-                    '%.1f%%' % (100*hits/count)
-                        if count else '-'))
+            for name, (hits, count) in sorted_entries(entries.items()):
+                print_entry(name, hits, count)
         else:
             prev_entries = dedup_entries(prev_results, by=by)
             diff = diff_entries(prev_entries, entries)
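Note the diff ratio here is a difference of coverage fractions, not of raw hit
counts: new hits/count minus old hits/count, with an empty count treated as
fully covered. A worked example with made-up numbers:

``` python
# old: 40/80 lines hit (50.0%), new: 60/90 lines hit (66.7%)
old_hits, old_count = 40, 80
new_hits, new_count = 60, 90

ratio = ((new_hits/new_count if new_count else 1.0)
    - (old_hits/old_count if old_count else 1.0))

print('%+.1f%%' % (100*ratio))  # +16.7%
```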
@@ -173,45 +248,28 @@ def main(**args):
             for name, (
                     old_hits, old_count,
                     new_hits, new_count,
-                    diff_hits, diff_count, ratio) in sorted(diff.items(),
-                    key=lambda x: (-x[1][6], x)):
+                    diff_hits, diff_count, ratio) in sorted_diff_entries(
+                    diff.items()):
                 if ratio or args.get('all'):
-                    print("%-36s %11s %7s %11s %7s %11s%s" % (name,
-                        '%d/%d' % (old_hits, old_count)
-                            if old_count else '-',
-                        '%.1f%%' % (100*old_hits/old_count)
-                            if old_count else '-',
-                        '%d/%d' % (new_hits, new_count)
-                            if new_count else '-',
-                        '%.1f%%' % (100*new_hits/new_count)
-                            if new_count else '-',
-                        '%+d/%+d' % (diff_hits, diff_count),
-                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+                    print_diff_entry(name,
+                        old_hits, old_count,
+                        new_hits, new_count,
+                        diff_hits, diff_count,
+                        ratio)
 
     def print_totals():
         if not args.get('diff'):
-            print("%-36s %11s %7s" % ('TOTAL',
-                '%d/%d' % (total_hits, total_count)
-                    if total_count else '-',
-                '%.1f%%' % (100*total_hits/total_count)
-                    if total_count else '-'))
+            print_entry('TOTAL', total_hits, total_count)
         else:
             ratio = ((total_hits/total_count
                 if total_count else 1.0)
                 - (prev_total_hits/prev_total_count
                 if prev_total_count else 1.0))
-            print("%-36s %11s %7s %11s %7s %11s%s" % ('TOTAL',
-                '%d/%d' % (prev_total_hits, prev_total_count)
-                    if prev_total_count else '-',
-                '%.1f%%' % (100*prev_total_hits/prev_total_count)
-                    if prev_total_count else '-',
-                '%d/%d' % (total_hits, total_count)
-                    if total_count else '-',
-                '%.1f%%' % (100*total_hits/total_count)
-                    if total_count else '-',
-                '%+d/%+d' % (total_hits-prev_total_hits,
-                    total_count-prev_total_count),
-                ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+            print_diff_entry('TOTAL',
+                prev_total_hits, prev_total_count,
+                total_hits, total_count,
+                total_hits-prev_total_hits, total_count-prev_total_count,
+                ratio)
 
     if args.get('quiet'):
         pass
@@ -222,7 +280,7 @@ def main(**args):
         print_entries(by='file')
         print_totals()
     else:
-        print_entries(by='function')
+        print_entries(by='name')
         print_totals()
 
 if __name__ == "__main__":
@@ -243,12 +301,23 @@ if __name__ == "__main__":
         help="Don't do any work, instead use this CSV file.")
     parser.add_argument('-d', '--diff',
         help="Specify CSV file to diff code size against.")
+    parser.add_argument('-m', '--merge',
+        help="Merge with an existing CSV file when writing to output.")
     parser.add_argument('-a', '--all', action='store_true',
         help="Show all functions, not just the ones that changed.")
-    parser.add_argument('--files', action='store_true',
+    parser.add_argument('-A', '--everything', action='store_true',
+        help="Include builtin and libc specific symbols.")
+    parser.add_argument('-s', '--coverage-sort', action='store_true',
+        help="Sort by coverage.")
+    parser.add_argument('-S', '--reverse-coverage-sort', action='store_true',
+        help="Sort by coverage, but backwards.")
+    parser.add_argument('-F', '--files', action='store_true',
         help="Show file-level coverage.")
-    parser.add_argument('-s', '--summary', action='store_true',
+    parser.add_argument('-Y', '--summary', action='store_true',
         help="Only show the total coverage.")
     parser.add_argument('-q', '--quiet', action='store_true',
         help="Don't show anything, useful with -o.")
     parser.add_argument('--build-dir',
         help="Specify the relative build directory. Used to map object files \
             to the correct source files.")
     sys.exit(main(**vars(parser.parse_args())))
scripts/data.py: 283 lines (new file)
@@ -0,0 +1,283 @@
#!/usr/bin/env python3
#
# Script to find data size at the function level. Basically just a bit wrapper
# around nm with some extra conveniences for comparing builds. Heavily inspired
# by Linux's Bloat-O-Meter.
#

import os
import glob
import itertools as it
import subprocess as sp
import shlex
import re
import csv
import collections as co


OBJ_PATHS = ['*.o']

def collect(paths, **args):
    results = co.defaultdict(lambda: 0)
    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)' +
        ' (?P<type>[%s])' % re.escape(args['type']) +
        ' (?P<func>.+?)$')
    for path in paths:
        # note nm-tool may contain extra args
        cmd = args['nm_tool'] + ['--size-sort', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            m = pattern.match(line)
            if m:
                results[(path, m.group('func'))] += int(m.group('size'), 16)
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

    flat_results = []
    for (file, func), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # replace .o with .c, different scripts report .o/.c, we need to
        # choose one if we want to deduplicate csv files
        file = re.sub('\.o$', '.c', file)
        # discard internal functions
        if not args.get('everything'):
            if func.startswith('__'):
                continue
        # discard .8449 suffixes created by optimizer
        func = re.sub('\.[0-9]+', '', func)
        flat_results.append((file, func, size))

    return flat_results

def main(**args):
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find sizes
    if not args.get('use', None):
        # find .o files
        paths = []
        for path in args['obj_paths']:
            if os.path.isdir(path):
                path = path + '/*.o'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .obj files found in %r?' % args['obj_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['name'],
                    int(result['data_size']))
                for result in r
                if result.get('data_size') not in {None, ''}]

    total = 0
    for _, _, size in results:
        total += size

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (   result['file'],
                        result['name'],
                        int(result['data_size']))
                    for result in r
                    if result.get('data_size') not in {None, ''}]
        except FileNotFoundError:
            prev_results = []

        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []

        # merge?
        if args.get('merge'):
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        func = result.pop('name', '')
                        result.pop('data_size', None)
                        merged_results[(file, func)] = result
                        other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, func, size in results:
            merged_results[(file, func)]['data_size'] = size

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
            w.writeheader()
            for (file, func), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': func, **result})

    # print results
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: 0)
        for file, func, size in results:
            entry = (file if by == 'file' else func)
            entries[entry] += size
        return entries

    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (0, 0, 0, 0))
        for name, new in news.items():
            diff[name] = (0, new, new, 1.0)
        for name, old in olds.items():
            _, new, _, _ = diff[name]
            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
        return diff

    def sorted_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1], x))
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        else:
            return sorted(entries, key=lambda x: (-x[1][3], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s' % (by, 'size'))
        else:
            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, size):
        print("%-36s %7d" % (name, size))

    def print_diff_entry(name, old, new, diff, ratio):
        print("%-36s %7s %7s %+7d%s" % (name,
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted_entries(entries.items()):
                print_entry(name, size)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            for name, (old, new, diff, ratio) in sorted_diff_entries(
                    diff.items()):
                if ratio or args.get('all'):
                    print_diff_entry(name, old, new, diff, ratio)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
        else:
            ratio = (0.0 if not prev_total and not total
                else 1.0 if not prev_total
                else (total-prev_total)/prev_total)
            print_diff_entry('TOTAL',
                prev_total, total,
                total-prev_total,
                ratio)

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()

if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find data size at the function level.")
    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory \
            or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't compile and find data sizes, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff data size against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--size-sort', action='store_true',
        help="Sort by size.")
    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
        help="Sort by size, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level data sizes. Note this does not include padding! "
            "So sizes may differ from other tools.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total data size.")
    parser.add_argument('--type', default='dDbB',
        help="Type of symbols to report, this uses the same single-character "
            "type-names emitted by nm. Defaults to %(default)r.")
    parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
        help="Path to the nm tool to use.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
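The next new script, stack.py, consumes the .ci files the Makefile now emits
via gcc's -fcallgraph-info=su. Those files are VCG-format graphs whose node
labels encode function, location, and static frame size; the f_pattern in the
listing below picks these apart. A sketch of the expected label shape, with a
made-up label for illustration:

``` python
import re

# the exact label text is illustrative; real .ci labels carry the
# function, its file:line:column, and its static frame size
label = r"lfs_mount\nlfs.c:42:1\n16 bytes (static)"

# same shape as f_pattern in stack.py: function, file, frame size, category
f_pattern = re.compile(
    r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)')

m = f_pattern.match(label)
assert m.groups() == ('lfs_mount', 'lfs.c', '16', 'static')
```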
430
scripts/stack.py
Normal file
430
scripts/stack.py
Normal file
@ -0,0 +1,430 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find stack usage at the function level. Will detect recursion and
|
||||
# report as infinite stack usage.
|
||||
#
|
||||
|
||||
import os
|
||||
import glob
|
||||
import itertools as it
|
||||
import re
|
||||
import csv
|
||||
import collections as co
|
||||
import math as m
|
||||
|
||||
|
||||
CI_PATHS = ['*.ci']
|
||||
|
||||
def collect(paths, **args):
|
||||
# parse the vcg format
|
||||
k_pattern = re.compile('([a-z]+)\s*:', re.DOTALL)
|
||||
v_pattern = re.compile('(?:"(.*?)"|([a-z]+))', re.DOTALL)
|
||||
def parse_vcg(rest):
|
||||
def parse_vcg(rest):
|
||||
node = []
|
||||
while True:
|
||||
rest = rest.lstrip()
|
||||
m = k_pattern.match(rest)
|
||||
if not m:
|
||||
return (node, rest)
|
||||
k, rest = m.group(1), rest[m.end(0):]
|
||||
|
||||
rest = rest.lstrip()
|
||||
if rest.startswith('{'):
|
||||
v, rest = parse_vcg(rest[1:])
|
||||
assert rest[0] == '}', "unexpected %r" % rest[0:1]
|
||||
rest = rest[1:]
|
||||
node.append((k, v))
|
||||
else:
|
||||
m = v_pattern.match(rest)
|
||||
assert m, "unexpected %r" % rest[0:1]
|
||||
v, rest = m.group(1) or m.group(2), rest[m.end(0):]
|
||||
node.append((k, v))
|
||||
|
||||
node, rest = parse_vcg(rest)
|
||||
assert rest == '', "unexpected %r" % rest[0:1]
|
||||
return node
|
||||
|
||||
# collect into functions
|
||||
results = co.defaultdict(lambda: (None, None, 0, set()))
|
||||
f_pattern = re.compile(
|
||||
r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)')
|
||||
for path in paths:
|
||||
with open(path) as f:
|
||||
vcg = parse_vcg(f.read())
|
||||
for k, graph in vcg:
|
||||
if k != 'graph':
|
||||
continue
|
||||
for k, info in graph:
|
||||
if k == 'node':
|
||||
info = dict(info)
|
||||
m = f_pattern.match(info['label'])
|
||||
if m:
|
||||
function, file, size, type = m.groups()
|
||||
if not args.get('quiet') and type != 'static':
|
||||
print('warning: found non-static stack for %s (%s)'
|
||||
% (function, type))
|
||||
_, _, _, targets = results[info['title']]
|
||||
results[info['title']] = (
|
||||
file, function, int(size), targets)
|
||||
elif k == 'edge':
|
||||
info = dict(info)
|
||||
_, _, _, targets = results[info['sourcename']]
|
||||
targets.add(info['targetname'])
|
||||
else:
|
||||
continue
|
||||
|
||||
if not args.get('everything'):
|
||||
for source, (s_file, s_function, _, _) in list(results.items()):
|
||||
# discard internal functions
|
||||
if s_file.startswith('<') or s_file.startswith('/usr/include'):
|
||||
del results[source]
|
||||
|
||||
# find maximum stack size recursively, this requires also detecting cycles
|
||||
# (in case of recursion)
|
||||
def find_limit(source, seen=None):
|
||||
seen = seen or set()
|
||||
if source not in results:
|
||||
return 0
|
||||
_, _, frame, targets = results[source]
|
||||
|
||||
limit = 0
|
||||
for target in targets:
|
||||
if target in seen:
|
||||
# found a cycle
|
||||
return float('inf')
|
||||
limit_ = find_limit(target, seen | {target})
|
||||
limit = max(limit, limit_)
|
||||
|
||||
return frame + limit
|
||||
|
||||
def find_deps(targets):
|
||||
deps = set()
|
||||
for target in targets:
|
||||
if target in results:
|
||||
t_file, t_function, _, _ = results[target]
|
||||
deps.add((t_file, t_function))
|
||||
return deps
|
||||
|
||||
# flatten into a list
|
||||
flat_results = []
|
||||
for source, (s_file, s_function, frame, targets) in results.items():
|
||||
limit = find_limit(source)
|
||||
deps = find_deps(targets)
|
||||
flat_results.append((s_file, s_function, frame, limit, deps))
|
||||
|
||||
return flat_results
|
||||
|
||||
def main(**args):
|
||||
def openio(path, mode='r'):
|
||||
if path == '-':
|
||||
if 'r' in mode:
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
|
||||
else:
|
||||
return open(path, mode)
|
||||
|
||||
# find sizes
|
||||
if not args.get('use', None):
|
||||
# find .ci files
|
||||
paths = []
|
||||
for path in args['ci_paths']:
|
||||
if os.path.isdir(path):
|
||||
path = path + '/*.ci'
|
||||
|
||||
for path in glob.glob(path):
|
||||
paths.append(path)
|
||||
|
||||
if not paths:
|
||||
print('no .ci files found in %r?' % args['ci_paths'])
|
||||
sys.exit(-1)
|
||||
|
||||
results = collect(paths, **args)
|
||||
else:
|
||||
with openio(args['use']) as f:
|
||||
r = csv.DictReader(f)
|
||||
results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['stack_frame']),
|
||||
float(result['stack_limit']), # note limit can be inf
|
||||
set())
|
||||
for result in r
|
||||
if result.get('stack_frame') not in {None, ''}
|
||||
if result.get('stack_limit') not in {None, ''}]
|
||||
|
||||
total_frame = 0
|
||||
total_limit = 0
|
||||
for _, _, frame, limit, _ in results:
|
||||
total_frame += frame
|
||||
total_limit = max(total_limit, limit)
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
r = csv.DictReader(f)
|
||||
prev_results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['stack_frame']),
|
||||
float(result['stack_limit']),
|
||||
set())
|
||||
for result in r
|
||||
if result.get('stack_frame') not in {None, ''}
|
||||
if result.get('stack_limit') not in {None, ''}]
|
||||
except FileNotFoundError:
|
||||
prev_results = []
|
||||
|
||||
prev_total_frame = 0
|
||||
prev_total_limit = 0
|
||||
for _, _, frame, limit, _ in prev_results:
|
||||
prev_total_frame += frame
|
||||
prev_total_limit = max(prev_total_limit, limit)
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
merged_results = co.defaultdict(lambda: {})
|
||||
other_fields = []
|
||||
|
||||
# merge?
|
||||
if args.get('merge'):
|
||||
try:
|
||||
with openio(args['merge']) as f:
|
||||
r = csv.DictReader(f)
|
||||
for result in r:
|
||||
file = result.pop('file', '')
|
||||
func = result.pop('name', '')
|
||||
result.pop('stack_frame', None)
|
||||
result.pop('stack_limit', None)
|
||||
merged_results[(file, func)] = result
|
||||
other_fields = result.keys()
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
for file, func, frame, limit, _ in results:
|
||||
merged_results[(file, func)]['stack_frame'] = frame
|
||||
merged_results[(file, func)]['stack_limit'] = limit
|
||||
|
||||
with openio(args['output'], 'w') as f:
|
||||
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'stack_frame', 'stack_limit'])
|
||||
w.writeheader()
|
||||
for (file, func), result in sorted(merged_results.items()):
|
||||
w.writerow({'file': file, 'name': func, **result})
|
||||
|
||||
# print results
|
||||
def dedup_entries(results, by='name'):
|
||||
entries = co.defaultdict(lambda: (0, 0, set()))
|
||||
for file, func, frame, limit, deps in results:
|
||||
entry = (file if by == 'file' else func)
|
||||
entry_frame, entry_limit, entry_deps = entries[entry]
|
||||
entries[entry] = (
|
||||
entry_frame + frame,
|
||||
max(entry_limit, limit),
|
||||
entry_deps | {file if by == 'file' else func
|
||||
for file, func in deps})
|
||||
return entries
|
||||
|
||||
def diff_entries(olds, news):
|
||||
diff = co.defaultdict(lambda: (None, None, None, None, 0, 0, 0, set()))
|
||||
for name, (new_frame, new_limit, deps) in news.items():
|
||||
diff[name] = (
|
||||
None, None,
|
||||
new_frame, new_limit,
|
||||
new_frame, new_limit,
|
||||
1.0,
|
||||
deps)
|
||||
for name, (old_frame, old_limit, _) in olds.items():
|
||||
_, _, new_frame, new_limit, _, _, _, deps = diff[name]
|
||||
diff[name] = (
|
||||
old_frame, old_limit,
|
||||
new_frame, new_limit,
|
||||
(new_frame or 0) - (old_frame or 0),
|
||||
0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
|
||||
else (new_limit or 0) - (old_limit or 0),
|
||||
0.0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
|
||||
else +float('inf') if m.isinf(new_limit or 0)
|
||||
else -float('inf') if m.isinf(old_limit or 0)
|
||||
else +0.0 if not old_limit and not new_limit
|
||||
else +1.0 if not old_limit
|
||||
else ((new_limit or 0) - (old_limit or 0))/(old_limit or 0),
|
||||
deps)
|
||||
return diff

    def sorted_entries(entries):
        if args.get('limit_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_limit_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        elif args.get('frame_sort'):
            return sorted(entries, key=lambda x: (-x[1][0], x))
        elif args.get('reverse_frame_sort'):
            return sorted(entries, key=lambda x: (+x[1][0], x))
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        if args.get('limit_sort'):
            return sorted(entries, key=lambda x: (-(x[1][3] or 0), x))
        elif args.get('reverse_limit_sort'):
            return sorted(entries, key=lambda x: (+(x[1][3] or 0), x))
        elif args.get('frame_sort'):
            return sorted(entries, key=lambda x: (-(x[1][2] or 0), x))
        elif args.get('reverse_frame_sort'):
            return sorted(entries, key=lambda x: (+(x[1][2] or 0), x))
        else:
            return sorted(entries, key=lambda x: (-x[1][6], x))
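
    # Without an explicit sort flag, diffed entries sort by ratio (tuple
    # index 6) so the largest relative changes are printed first.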

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s %7s' % (by, 'frame', 'limit'))
        else:
            print('%-36s %15s %15s %15s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, frame, limit):
        print("%-36s %7d %7s" % (name,
            frame, '∞' if m.isinf(limit) else int(limit)))

    def print_diff_entry(name,
            old_frame, old_limit,
            new_frame, new_limit,
            diff_frame, diff_limit,
            ratio):
        print('%-36s %7s %7s %7s %7s %+7d %7s%s' % (name,
            old_frame if old_frame is not None else "-",
            ('∞' if m.isinf(old_limit) else int(old_limit))
                if old_limit is not None else "-",
            new_frame if new_frame is not None else "-",
            ('∞' if m.isinf(new_limit) else int(new_limit))
                if new_limit is not None else "-",
            diff_frame,
            ('+∞' if diff_limit > 0 and m.isinf(diff_limit)
                else '-∞' if diff_limit < 0 and m.isinf(diff_limit)
                else '%+d' % diff_limit),
            '' if not ratio
                else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
                else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
                else ' (%+.1f%%)' % (100*ratio)))
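
    # Example of the default output format (numbers are illustrative):
    #
    #   name                                   frame   limit
    #   lfs_dir_commit                           104     464
    #   lfs_file_write                            56     320
    #   TOTAL                                   2312     464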

    def print_entries(by='name'):
        # build optional tree of dependencies
        def print_deps(entries, depth, print,
                filter=lambda _: True,
                prefixes=('', '', '', '')):
            entries = entries if isinstance(entries, list) else list(entries)
            filtered_entries = [(name, entry)
                for name, entry in entries
                if filter(name)]
            for i, (name, entry) in enumerate(filtered_entries):
                last = (i == len(filtered_entries)-1)
                print(prefixes[0+last] + name, entry)

                if depth > 0:
                    deps = entry[-1]
                    print_deps(entries, depth-1, print,
                        lambda name: name in deps,
                        (   prefixes[2+last] + "|-> ",
                            prefixes[2+last] + "'-> ",
                            prefixes[2+last] + "|   ",
                            prefixes[2+last] + "    "))

        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            print_deps(
                sorted_entries(entries.items()),
                args.get('depth') or 0,
                lambda name, entry: print_entry(name, *entry[:-1]))
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)

            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for _, old, _, _, _, _, _, _ in diff.values() if old is None),
                sum(1 for _, _, _, new, _, _, _, _ in diff.values() if new is None)))
            print_deps(
                filter(
                    lambda x: x[1][6] or args.get('all'),
                    sorted_diff_entries(diff.items())),
                args.get('depth') or 0,
                lambda name, entry: print_diff_entry(name, *entry[:-1]))
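
    # With -L/--depth, print_deps follows each entry with its callees as
    # an ASCII tree, e.g. (illustrative):
    #
    #   lfs_file_write                            56     320
    #   |-> lfs_file_flush                        48     264
    #   '-> lfs_file_outline                      32     208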

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total_frame, total_limit)
        else:
            diff_frame = total_frame - prev_total_frame
            diff_limit = (
                0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
                else (total_limit or 0) - (prev_total_limit or 0))
            ratio = (
                0.0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
                else +float('inf') if m.isinf(total_limit or 0)
                else -float('inf') if m.isinf(prev_total_limit or 0)
                else 0.0 if not prev_total_limit and not total_limit
                else 1.0 if not prev_total_limit
                else ((total_limit or 0) - (prev_total_limit or 0))/(prev_total_limit or 0))
            print_diff_entry('TOTAL',
                prev_total_frame, prev_total_limit,
                total_frame, total_limit,
                diff_frame, diff_limit,
                ratio)

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()


if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find stack usage at the function level.")
    parser.add_argument('ci_paths', nargs='*', default=CI_PATHS,
        help="Description of where to find *.ci files. May be a directory \
            or a list of paths. Defaults to %r." % CI_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't parse callgraph files, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--limit-sort', action='store_true',
        help="Sort by stack limit.")
    parser.add_argument('-S', '--reverse-limit-sort', action='store_true',
        help="Sort by stack limit, but backwards.")
    parser.add_argument('--frame-sort', action='store_true',
        help="Sort by stack frame size.")
    parser.add_argument('--reverse-frame-sort', action='store_true',
        help="Sort by stack frame size, but backwards.")
    parser.add_argument('-L', '--depth', default=0, type=lambda x: int(x, 0),
        nargs='?', const=float('inf'),
        help="Depth of dependencies to show.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level calls.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total stack size.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
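
# Example usage (illustrative; assumes *.ci callgraph files have already
# been generated, e.g. by GCC's -fcallgraph-info=su):
#
#   ./scripts/stack.py lfs.ci -s          # worst-case limits first
#   ./scripts/stack.py -o stack.csv       # save results for later diffing
#   ./scripts/stack.py -d stack.csv -L2   # diff + show 2 levels of callees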

331 scripts/structs.py Normal file

@@ -0,0 +1,331 @@
#!/usr/bin/env python3
#
# Script to find struct sizes.
#

import os
import glob
import itertools as it
import subprocess as sp
import shlex
import re
import csv
import collections as co
import sys


OBJ_PATHS = ['*.o']

def collect(paths, **args):
    decl_pattern = re.compile(
        r'^\s+(?P<no>[0-9]+)'
        r'\s+(?P<dir>[0-9]+)'
        r'\s+.*'
        r'\s+(?P<file>[^\s]+)$')
    struct_pattern = re.compile(
        r'^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
        r'|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
        r'|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
        r'|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
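
    # struct_pattern matches the three DWARF attributes we care about in
    # `objdump --dwarf=info` output, which looks roughly like this
    # (illustrative):
    #
    #    <1><2d>: Abbrev Number: 5 (DW_TAG_structure_type)
    #       DW_AT_name        : lfs_config
    #       DW_AT_decl_file   : 3
    #       DW_AT_byte_size   : 124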

    results = co.defaultdict(lambda: 0)
    for path in paths:
        # find decl, we want to filter by structs in .h files
        decls = {}
        # note objdump-tool may contain extra args
        cmd = args['objdump_tool'] + ['--dwarf=rawline', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            # find file numbers
            m = decl_pattern.match(line)
            if m:
                decls[int(m.group('no'))] = m.group('file')
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

        # collect structs as we parse dwarf info
        found = False
        name = None
        decl = None
        size = None

        # note objdump-tool may contain extra args
        cmd = args['objdump_tool'] + ['--dwarf=info', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            # state machine here to find structs
            m = struct_pattern.match(line)
            if m:
                if m.group('tag'):
                    if (name is not None
                            and decl is not None
                            and size is not None):
                        decl = decls.get(decl, '?')
                        results[(decl, name)] = size
                    found = (m.group('tag') == 'structure_type')
                    name = None
                    decl = None
                    size = None
                elif found and m.group('name'):
                    name = m.group('name')
                elif found and name and m.group('decl'):
                    decl = int(m.group('decl'))
                elif found and name and m.group('size'):
                    size = int(m.group('size'))
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)
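
        # The state machine above buffers DW_AT_* attributes and only
        # commits a (decl, name) -> size entry when the next DW_TAG_*
        # marker confirms the previous entry was a fully-described
        # structure_type.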

    flat_results = []
    for (file, struct), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # only include structs declared in header files in the current
        # directory, ignore internal-only structs (these are represented
        # in other measurements)
        if not args.get('everything'):
            if not file.endswith('.h'):
                continue
        # replace .o with .c, different scripts report .o/.c, we need to
        # choose one if we want to deduplicate csv files
        file = re.sub(r'\.o$', '.c', file)

        flat_results.append((file, struct, size))

    return flat_results


def main(**args):
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find sizes
    if not args.get('use', None):
        # find .o files
        paths = []
        for path in args['obj_paths']:
            if os.path.isdir(path):
                path = path + '/*.o'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .o files found in %r?' % args['obj_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['name'],
                    int(result['struct_size']))
                for result in r
                if result.get('struct_size') not in {None, ''}]

    total = 0
    for _, _, size in results:
        total += size

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (   result['file'],
                        result['name'],
                        int(result['struct_size']))
                    for result in r
                    if result.get('struct_size') not in {None, ''}]
        except FileNotFoundError:
            prev_results = []

        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []

        # merge?
        if args.get('merge'):
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        struct = result.pop('name', '')
                        result.pop('struct_size', None)
                        merged_results[(file, struct)] = result
                        other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, struct, size in results:
            merged_results[(file, struct)]['struct_size'] = size

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
            w.writeheader()
            for (file, struct), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': struct, **result})

    # print results
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: 0)
        for file, struct, size in results:
            entry = (file if by == 'file' else struct)
            entries[entry] += size
        return entries

    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (0, 0, 0, 0))
        for name, new in news.items():
            diff[name] = (0, new, new, 1.0)
        for name, old in olds.items():
            _, new, _, _ = diff[name]
            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
        return diff

    def sorted_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1], x))
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        else:
            return sorted(entries, key=lambda x: (-x[1][3], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s' % (by, 'size'))
        else:
            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, size):
        print("%-36s %7d" % (name, size))

    def print_diff_entry(name, old, new, diff, ratio):
        print("%-36s %7s %7s %+7d%s" % (name,
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted_entries(entries.items()):
                print_entry(name, size)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            for name, (old, new, diff, ratio) in sorted_diff_entries(
                    diff.items()):
                if ratio or args.get('all'):
                    print_diff_entry(name, old, new, diff, ratio)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
        else:
            ratio = (0.0 if not prev_total and not total
                else 1.0 if not prev_total
                else (total-prev_total)/prev_total)
            print_diff_entry('TOTAL',
                prev_total, total,
                total-prev_total,
                ratio)

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()


if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find struct sizes.")
    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory \
            or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't compile and find struct sizes, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff struct size against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all structs, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--size-sort', action='store_true',
        help="Sort by size.")
    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
        help="Sort by size, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level struct sizes.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total struct size.")
    parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(),
        help="Path to the objdump tool to use.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
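
# Example usage (illustrative):
#
#   ./scripts/structs.py lfs.o lfs_util.o -s
#   ./scripts/structs.py --objdump-tool="arm-none-eabi-objdump" *.o
#
# Note the .o files must be compiled with debug info (-g) for the DWARF
# parsing above to find anything.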

279 scripts/summary.py Normal file

@@ -0,0 +1,279 @@
#!/usr/bin/env python3
#
# Script to summarize the outputs of other scripts. Operates on CSV files.
#

import functools as ft
import collections as co
import os
import csv
import re
import math as m
import sys

# displayable fields
Field = co.namedtuple('Field', 'name,parse,acc,key,fmt,repr,null,ratio')
FIELDS = [
    # name, parse, accumulate, key, fmt, repr, null, ratio
    Field('code',
        lambda r: int(r['code_size']),
        sum,
        lambda r: r,
        '%7s',
        lambda r: r,
        '-',
        lambda old, new: (new-old)/old),
    Field('data',
        lambda r: int(r['data_size']),
        sum,
        lambda r: r,
        '%7s',
        lambda r: r,
        '-',
        lambda old, new: (new-old)/old),
    Field('stack',
        lambda r: float(r['stack_limit']),
        max,
        lambda r: r,
        '%7s',
        lambda r: '∞' if m.isinf(r) else int(r),
        '-',
        lambda old, new: (new-old)/old),
    Field('structs',
        lambda r: int(r['struct_size']),
        sum,
        lambda r: r,
        '%8s',
        lambda r: r,
        '-',
        lambda old, new: (new-old)/old),
    Field('coverage',
        lambda r: (int(r['coverage_hits']), int(r['coverage_count'])),
        lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs),
        lambda r: r[0]/r[1],
        '%19s',
        lambda r: '%11s %7s' % ('%d/%d' % (r[0], r[1]), '%.1f%%' % (100*r[0]/r[1])),
        '%11s %7s' % ('-', '-'),
        lambda old, new: ((new[0]/new[1]) - (old[0]/old[1])))
]
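
# Each Field bundles everything needed to handle one measurement column:
#   parse - extract the value from a CSV row
#   acc   - combine values across rows (sum for sizes, max for stack)
#   key   - map a value to a sortable scalar
#   fmt   - printf-style column width
#   repr  - render a value for display
#   null  - placeholder when a result is missing
#   ratio - relative change shown in diffs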


def main(**args):
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find results
    results = co.defaultdict(lambda: {})
    for path in args.get('csv_paths', '-'):
        try:
            with openio(path) as f:
                r = csv.DictReader(f)
                for result in r:
                    file = result.pop('file', '')
                    name = result.pop('name', '')
                    prev = results[(file, name)]
                    for field in FIELDS:
                        try:
                            r = field.parse(result)
                            if field.name in prev:
                                results[(file, name)][field.name] = field.acc(
                                    [prev[field.name], r])
                            else:
                                results[(file, name)][field.name] = r
                        except (KeyError, ValueError):
                            pass
        except FileNotFoundError:
            pass

    # find fields
    if args.get('all_fields'):
        fields = FIELDS
    elif args.get('fields') is not None:
        fields_dict = {field.name: field for field in FIELDS}
        fields = [fields_dict[f] for f in args['fields']]
    else:
        fields = []
        for field in FIELDS:
            if any(field.name in result for result in results.values()):
                fields.append(field)

    # find total for every field
    total = {}
    for result in results.values():
        for field in fields:
            if field.name in result and field.name in total:
                total[field.name] = field.acc(
                    [total[field.name], result[field.name]])
            elif field.name in result:
                total[field.name] = result[field.name]

    # find previous results?
    if args.get('diff'):
        prev_results = co.defaultdict(lambda: {})
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                for result in r:
                    file = result.pop('file', '')
                    name = result.pop('name', '')
                    prev = prev_results[(file, name)]
                    for field in FIELDS:
                        try:
                            r = field.parse(result)
                            if field.name in prev:
                                prev_results[(file, name)][field.name] = field.acc(
                                    [prev[field.name], r])
                            else:
                                prev_results[(file, name)][field.name] = r
                        except (KeyError, ValueError):
                            pass
        except FileNotFoundError:
            pass

        prev_total = {}
        for result in prev_results.values():
            for field in fields:
                if field.name in result and field.name in prev_total:
                    prev_total[field.name] = field.acc(
                        [prev_total[field.name], result[field.name]])
                elif field.name in result:
                    prev_total[field.name] = result[field.name]

    # print results
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: {})
        for (file, func), result in results.items():
            entry = (file if by == 'file' else func)
            prev = entries[entry]
            for field in fields:
                if field.name in result and field.name in prev:
                    entries[entry][field.name] = field.acc(
                        [prev[field.name], result[field.name]])
                elif field.name in result:
                    entries[entry][field.name] = result[field.name]
        return entries

    def sorted_entries(entries):
        if args.get('sort') is not None:
            field = {field.name: field for field in FIELDS}[args['sort']]
            return sorted(entries, key=lambda x: (
                -(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
        elif args.get('reverse_sort') is not None:
            field = {field.name: field for field in FIELDS}[args['reverse_sort']]
            return sorted(entries, key=lambda x: (
                +(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
        else:
            return sorted(entries)

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s' % by, end='')
            for field in fields:
                print((' '+field.fmt) % field.name, end='')
            print()
        else:
            print('%-36s' % by, end='')
            for field in fields:
                print((' '+field.fmt) % field.name, end='')
                print(' %-9s' % '', end='')
            print()

    def print_entry(name, result):
        print('%-36s' % name, end='')
        for field in fields:
            r = result.get(field.name)
            if r is not None:
                print((' '+field.fmt) % field.repr(r), end='')
            else:
                print((' '+field.fmt) % '-', end='')
        print()

    def print_diff_entry(name, old, new):
        print('%-36s' % name, end='')
        for field in fields:
            n = new.get(field.name)
            if n is not None:
                print((' '+field.fmt) % field.repr(n), end='')
            else:
                print((' '+field.fmt) % '-', end='')
            o = old.get(field.name)
            ratio = (
                0.0 if m.isinf(o or 0) and m.isinf(n or 0)
                else +float('inf') if m.isinf(n or 0)
                else -float('inf') if m.isinf(o or 0)
                else 0.0 if not o and not n
                else +1.0 if not o
                else -1.0 if not n
                else field.ratio(o, n))
            print(' %-9s' % (
                '' if not ratio
                else '(+∞%)' if ratio > 0 and m.isinf(ratio)
                else '(-∞%)' if ratio < 0 and m.isinf(ratio)
                else '(%+.1f%%)' % (100*ratio)), end='')
        print()

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, result in sorted_entries(entries.items()):
                print_entry(name, result)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for name in entries if name not in prev_entries),
                sum(1 for name in prev_entries if name not in entries)))
            for name, result in sorted_entries(entries.items()):
                if args.get('all') or result != prev_entries.get(name, {}):
                    print_diff_entry(name, prev_entries.get(name, {}), result)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
        else:
            print_diff_entry('TOTAL', prev_total, total)

    if args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()


if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Summarize measurements")
    parser.add_argument('csv_paths', nargs='*', default='-',
        help="Description of where to find *.csv files. May be a directory \
            or list of paths. *.csv files will be merged to show the total \
            coverage.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all objects, not just the ones that changed.")
    parser.add_argument('-e', '--all-fields', action='store_true',
        help="Show all fields, even those with no results.")
    parser.add_argument('-f', '--fields', type=lambda x: re.split(r'\s*,\s*', x),
        help="Comma separated list of fields to print, by default all fields \
            that are found in the CSV files are printed.")
    parser.add_argument('-s', '--sort',
        help="Sort by this field.")
    parser.add_argument('-S', '--reverse-sort',
        help="Sort by this field, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level results.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the totals.")
    sys.exit(main(**vars(parser.parse_args())))
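
# Example usage (illustrative), merging the CSV outputs of the other
# scripts into one table:
#
#   ./scripts/summary.py code.csv data.csv stack.csv structs.csv
#   ./scripts/summary.py results.csv -d old-results.csv -f code,stack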

scripts/test.py

@@ -292,6 +292,8 @@ class TestCase:
                     if e.errno == errno.EIO:
                         break
                     raise
+                if not line:
+                    break
                 stdout.append(line)
                 if args.get('verbose'):
                     sys.stdout.write(line)

@@ -563,7 +565,7 @@ class TestSuite:
                 path=self.path))
             mk.write('\n')

-        # add truely global defines globally
+        # add truly global defines globally
         for k, v in sorted(self.defines.items()):
             mk.write('%s.test: override CFLAGS += -D%s=%r\n'
                 % (self.path, k, v))

@@ -654,7 +656,7 @@ def main(**args):
     for path in glob.glob(testpath):
         suites.append(TestSuite(path, classes, defines, filter, **args))

-    # sort for reproducability
+    # sort for reproducibility
     suites = sorted(suites)

     # generate permutations

@@ -687,6 +689,8 @@ def main(**args):
                 if e.errno == errno.EIO:
                     break
                 raise
+            if not line:
+                break
             stdout.append(line)
             if args.get('verbose'):
                 sys.stdout.write(line)

@@ -780,10 +784,13 @@ def main(**args):
             stdout=sp.PIPE if not args.get('verbose') else None,
             stderr=sp.STDOUT if not args.get('verbose') else None,
             universal_newlines=True)
+        stdout = []
+        for line in proc.stdout:
+            stdout.append(line)
         proc.wait()
         if proc.returncode != 0:
             if not args.get('verbose'):
-                for line in proc.stdout:
+                for line in stdout:
                     sys.stdout.write(line)
             sys.exit(-1)

@@ -799,9 +806,9 @@ def main(**args):
             failure.case.test(failure=failure, **args)
         sys.exit(0)

-    print('tests passed %d/%d (%.2f%%)' % (passed, total,
+    print('tests passed %d/%d (%.1f%%)' % (passed, total,
         100*(passed/total if total else 1.0)))
-    print('tests failed %d/%d (%.2f%%)' % (failed, total,
+    print('tests failed %d/%d (%.1f%%)' % (failed, total,
         100*(failed/total if total else 1.0)))
     return 1 if failed > 0 else 0