Implement io.cache.auto to cache all reads for slow io backends

This commit is contained in:
pancake 2017-12-18 01:44:22 +01:00
parent 019792d7ec
commit 6f0de4913e
7 changed files with 48 additions and 16 deletions
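The new io.cache.auto eval variable (default false) makes the IO layer copy every successful backend read into the read cache and answer later reads of an already-cached range from that cache, which avoids re-reading the same bytes over slow backends such as remote or debugger IO (gdb://, rap://). In the r2 shell it is toggled with "e io.cache.auto=true". The sketch below drives it from C instead; the RCore/RConfig calls and the file path are assumptions of this example (the usual public API of the time), not part of the commit:

#include <r_core.h>

int main(void) {
    RCore *core = r_core_new ();
    ut8 buf[32];
    // any target works; a slow IO backend is where the auto cache pays off
    r_core_file_open (core, "/bin/ls", R_IO_READ, 0);
    // same effect as "e io.cache.auto=true": cb_io_cache_mode flips core->io->cachemode
    r_config_set (core->config, "io.cache.auto", "true");
    // first read hits the backend and is stored in the cache by r_io_desc_read
    r_core_read_at (core, 0, buf, sizeof (buf));
    // a second read of the same range is served by r_io_cache_read, no backend access
    r_core_read_at (core, 0, buf, sizeof (buf));
    r_core_free (core);
    return 0;
}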

View File

@@ -1427,6 +1427,17 @@ static int cb_iobuffer(void *user, void *data) {
return true;
}
static int cb_io_cache_mode(void *user, void *data) {
RCore *core = (RCore *)user;
RConfigNode *node = (RConfigNode *)data;
if (node->i_value) {
core->io->cachemode = true;
} else {
core->io->cachemode = false;
}
return true;
}
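The callback only mirrors the boolean eval value into the new io->cachemode flag; the if/else above is behaviorally identical to this two-line form, shown purely as an illustration and not part of the commit:

    core->io->cachemode = (node->i_value != 0);
    return true;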
static int cb_io_cache_read(void *user, void *data) {
RCore *core = (RCore *)user;
RConfigNode *node = (RConfigNode *)data;
@@ -2739,6 +2750,7 @@ R_API int r_core_config_init(RCore *core) {
SETI ("io.buffer.from", 0, "Lower address of buffered cache");
SETI ("io.buffer.to", 0, "Higher address of buffered cache");
SETCB ("io.cache", "false", &cb_io_cache, "Change both of io.cache.{read,write}");
SETCB ("io.cache.auto", "false", &cb_io_cache_mode, "Automatic cache all reads in the IO backend");
SETCB ("io.cache.read", "false", &cb_io_cache_read, "Enable read cache for vaddr (or paddr when io.va=0)");
SETCB ("io.cache.write", "false", &cb_io_cache_write, "Enable write cache for vaddr (or paddr when io.va=0)");
SETCB ("io.pcache", "false", &cb_iopcache, "io.cache for p-level");

View File

@@ -1805,6 +1805,8 @@ R_API bool r_core_init(RCore *core) {
core->offset = 0LL;
r_core_cmd_init (core);
core->dbg = r_debug_new (true);
r_io_bind (core->io, &(core->dbg->iob));
r_io_bind (core->io, &(core->dbg->bp->iob));
r_core_bind (core, &core->dbg->corebind);
core->dbg->anal = core->anal; // XXX: dupped instance.. can cause lost pointerz
//r_debug_use (core->dbg, "native");
@@ -1814,7 +1816,6 @@
core->io->cb_printf = r_cons_printf;
core->dbg->cb_printf = r_cons_printf;
core->dbg->bp->cb_printf = r_cons_printf;
r_debug_io_bind (core->dbg, core->io);
r_core_config_init (core);

View File

@@ -856,11 +856,6 @@ R_API int r_debug_step(RDebug *dbg, int steps) {
return steps_taken;
}
R_API void r_debug_io_bind(RDebug *dbg, RIO *io) {
r_io_bind (io, &dbg->bp->iob);
r_io_bind (io, &dbg->iob);
}
R_API int r_debug_step_over(RDebug *dbg, int steps) {
RAnalOp op;
ut64 buf_pc, pc, ins_size;

View File

@@ -506,7 +506,6 @@ R_API int r_debug_reg_set(RDebug *dbg, const char *name, ut64 num);
R_API ut64 r_debug_reg_get(RDebug *dbg, const char *name);
R_API ut64 r_debug_reg_get_err(RDebug *dbg, const char *name, int *err, utX *value);
R_API void r_debug_io_bind(RDebug *dbg, RIO *io);
R_API ut64 r_debug_execute(RDebug *dbg, const ut8 *buf, int len, int restore);
R_API int r_debug_map_sync(RDebug *dbg);

View File

@@ -67,6 +67,7 @@ typedef struct r_io_t {
int aslr;
int autofd;
int cached;
bool cachemode; // store all read operations in the cache (EXPERIMENTAL)
int p_cache;
int buffer_enabled;
int debug;
@@ -409,6 +410,7 @@ R_API bool r_io_desc_fini (RIO *io);
/* io/cache.c */
R_API int r_io_cache_invalidate(RIO *io, ut64 from, ut64 to);
R_API bool r_io_cache_at(RIO *io, ut64 addr);
R_API void r_io_cache_commit(RIO *io, ut64 from, ut64 to);
R_API void r_io_cache_init(RIO *io);
R_API int r_io_cache_list(RIO *io, int rad);

View File

@@ -6,13 +6,25 @@
#include "r_io.h"
static void cache_item_free(RIOCache *cache) {
if (!cache)
if (!cache) {
return;
}
free (cache->data);
free (cache->odata);
free (cache);
}
R_API bool r_io_cache_at(RIO *io, ut64 addr) {
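// linear scan of the cache list; each entry covers the half-open range [from, to)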
RListIter *iter;
RIOCache *c;
r_list_foreach (io->cache, iter, c) {
if (addr >= c->from && addr < c->to) {
return true;
}
}
return false;
}
R_API void r_io_cache_init(RIO *io) {
io->cache = r_list_newf ((RListFree)cache_item_free);
io->cached = 0;
@@ -134,7 +146,12 @@ R_API bool r_io_cache_write(RIO *io, ut64 addr, const ut8 *buf, int len) {
return false;
}
ch->written = false;
r_io_read_at (io, addr, ch->odata, len);
{
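// save and clear cachemode so this snapshot read of the original bytes bypasses the auto-cache path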
bool cm = io->cachemode;
io->cachemode = false;
r_io_read_at (io, addr, ch->odata, len);
io->cachemode = cm;
}
memcpy (ch->data, buf, len);
r_list_append (io->cache, ch);
return true;

View File

@@ -158,19 +158,26 @@ R_API int r_io_desc_write(RIODesc *desc, const ut8* buf, int len) {
return 0;
}
//returns length of read bytes
// returns length of read bytes
R_API int r_io_desc_read(RIODesc *desc, ut8 *buf, int len) {
ut64 seek;
int ret = -1;
//check pointers and permissions
// check pointers and permissions
if (!buf || !desc || !desc->plugin || len < 1 || !(desc->flags & R_IO_READ)) {
return 0;
}
seek = r_io_desc_seek (desc, 0LL, R_IO_SEEK_CUR);
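// io.cache.auto: if this offset is already cached, serve the read from the cache instead of the backend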
if (desc->io->cachemode) {
if (seek != UT64_MAX && r_io_cache_at (desc->io, seek)) {
return r_io_cache_read (desc->io, seek, buf, len);
}
}
if (desc->plugin->read) {
ret = desc->plugin->read (desc->io, desc, buf, len);
}
if ((ret > 0) && desc->io && (desc->io->p_cache & 1)) {
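// io.cache.auto: store every successful backend read so later reads of this range hit the cache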
if (ret > 0 && desc->io->cachemode) {
r_io_cache_write (desc->io, seek, buf, len);
} else if ((ret > 0) && desc->io && (desc->io->p_cache & 1)) {
ret = r_io_desc_cache_read (desc, seek, buf, ret);
}
return ret;
@@ -184,13 +191,12 @@ R_API ut64 r_io_desc_seek(RIODesc* desc, ut64 offset, int whence) {
}
R_API ut64 r_io_desc_size(RIODesc* desc) {
ut64 off, ret;
if (!desc || !desc->plugin || !desc->plugin->lseek) {
return 0LL;
}
off = r_io_desc_seek (desc, 0LL, R_IO_SEEK_CUR);
ret = r_io_desc_seek (desc, 0LL, R_IO_SEEK_END);
//what to do if that seek fails?
ut64 off = r_io_desc_seek (desc, 0LL, R_IO_SEEK_CUR);
ut64 ret = r_io_desc_seek (desc, 0LL, R_IO_SEEK_END);
// what to do if that seek fails?
r_io_desc_seek (desc, off, R_IO_SEEK_SET);
return ret;
}