/* radare - LGPL - Copyright 2007-2018 - pancake */

#include <r_io.h>

#if 0
* TODO:
* - make path of indirections shorter (io->undo.foo is slow)
* - Plugin changes in write and seeks
* - Per-fd history log
#endif

R_API int r_io_undo_init(RIO *io) {
	/* seek undo */
	r_io_sundo_reset (io);

	/* write undo */
	io->undo.w_init = 0;
	io->undo.w_enable = 0;
	io->undo.w_list = r_list_new ();

	return true;
}

R_API void r_io_undo_enable(RIO *io, int s, int w) {
	io->undo.s_enable = s;
	io->undo.w_enable = w;
}
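
/* Usage sketch (illustrative, not compiled): typical setup of the undo
 * subsystem using only the helpers in this file. `io` is assumed to be a
 * valid RIO handle obtained elsewhere. */
#if 0
	r_io_undo_init (io);           /* reset seek history, create the write-undo list */
	r_io_undo_enable (io, 1, 1);   /* s: seek undo on, w: write undo on */
#endif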

/* undo seeks */

R_API RIOUndos *r_io_sundo(RIO *io, ut64 offset) {
	RIOUndos *undo;
	RIOSection *sec;
	if (!io->undo.s_enable || !io->undo.undos) {
		return NULL;
	}

	/* No redos yet, store the current seek so we can redo to it. */
	if (!io->undo.redos) {
		undo = &io->undo.seek[io->undo.idx];
		undo->off = offset;
		undo->cursor = 0;
	}

	io->undo.idx = (io->undo.idx - 1 + R_IO_UNDOS) % R_IO_UNDOS;
	io->undo.undos--;
	io->undo.redos++;

	undo = &io->undo.seek[io->undo.idx];
	sec = r_io_section_vget (io, undo->off);
	if (!sec || (sec->paddr == sec->vaddr)) {
		io->off = undo->off;
	} else {
		io->off = undo->off - sec->vaddr + sec->paddr;
	}
	return undo;
}
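
/* Usage sketch (illustrative, not compiled) of the seek-undo flow around
 * r_io_sundo (): the caller passes its current offset so that position can be
 * reached again via r_io_sundo_redo (). Only APIs from this file are used and
 * `io` is assumed to be undo-enabled; the direct io->off assignment stands in
 * for a real seek. */
#if 0
	r_io_sundo_push (io, io->off, 0);         /* remember the current position */
	io->off = 0x2000;                         /* seek somewhere else (simplified) */
	RIOUndos *u = r_io_sundo (io, io->off);   /* back to the pushed position */
	if (u) {
		/* io->off now points at u->off, mapped through sections if needed */
	}
	r_io_sundo_redo (io);                     /* and forward to 0x2000 again */
#endif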

R_API RIOUndos *r_io_sundo_redo(RIO *io) {
	RIOUndos *undo;
	RIOSection *sec;
	if (!io->undo.s_enable || !io->undo.redos) {
		return NULL;
	}

	io->undo.idx = (io->undo.idx + 1) % R_IO_UNDOS;
	io->undo.undos++;
	io->undo.redos--;

	undo = &io->undo.seek[io->undo.idx];
	sec = r_io_section_vget (io, undo->off);
	if (!sec || (sec->paddr == sec->vaddr)) {
		io->off = undo->off;
	} else {
		io->off = undo->off - sec->vaddr + sec->paddr;
	}
	return undo;
}

R_API void r_io_sundo_push(RIO *io, ut64 off, int cursor) {
	if (!io->undo.s_enable) {
		return;
	}
	// the first insert
	if (io->undo.idx > 0) {
		RIOUndos *undo = &io->undo.seek[io->undo.idx - 1];
		if (undo->off == off && undo->cursor == cursor) {
			return;
		}
	}

	RIOUndos *undo = &io->undo.seek[io->undo.idx];
	undo->off = off;
	undo->cursor = cursor;
	io->undo.idx = (io->undo.idx + 1) % R_IO_UNDOS;
	/* Only R_IO_UNDOS - 1 undos can be used because r_io_sundo () must
	 * push the current position for redo as well, which takes one entry in
	 * the table. */
	if (io->undo.undos < R_IO_UNDOS - 1) {
		io->undo.undos++;
	}
	/* We only have linear undo/redo, no tree. So after this new possible
	 * undo, all redos are lost. */
	io->undo.redos = 0;
}
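
/* Worked example (illustrative, not compiled) of the seek ring buffer that
 * r_io_sundo_push () maintains. R_IO_UNDOS comes from r_io.h; 64 is assumed
 * here purely to make the arithmetic concrete. `undos` saturates at
 * R_IO_UNDOS - 1 so one slot always stays free for the redo entry that
 * r_io_sundo () writes. */
#if 0
	io->undo.idx = 63;                  /* last slot of a 64-entry table */
	r_io_sundo_push (io, 0xcafe, 0);    /* seek[63] = 0xcafe, idx wraps to (63 + 1) % 64 == 0 */
#endif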

R_API void r_io_sundo_reset(RIO *io) {
	io->undo.idx = 0;
	io->undo.undos = 0;
	io->undo.redos = 0;
}

R_API RList *r_io_sundo_list(RIO *io, int mode) {
	int idx, undos, redos, i, j, start, end;
	RList* list = NULL;

	if (mode == '!') {
		mode = 0;
	}
	if (!io->undo.s_enable) {
		return NULL;
	}
	undos = io->undo.undos;
	redos = io->undo.redos;

	idx = io->undo.idx;
	start = (idx - undos + R_IO_UNDOS) % R_IO_UNDOS;
	end = (idx + redos + 1) % R_IO_UNDOS;

	j = 0;
	switch (mode) {
	case 'j':
		io->cb_printf ("[");
		break;
	case 0:
		list = r_list_newf (free);
		break;
	}
	const char *comma = "";
	for (i = start; i < end || j == 0; i = (i + 1) % R_IO_UNDOS) {
		int idx = (j < undos)? undos - j - 1: j - undos - 1;
		RIOUndos *undo = &io->undo.seek[i];
		ut64 addr = undo->off;
		ut64 notLast = (j + 1 < undos) && (i != end - 1);
		switch (mode) {
		case '=':
			if (j < undos) {
				io->cb_printf ("0x%"PFMT64x"%s", addr, notLast? " > ": "");
			}
			break;
		case 'j':
			if (j < undos) {
				io->cb_printf ("%"PFMT64d"%s", addr, notLast? ",": "");
				comma = ",";
			}
			break;
		case '*':
			if (j < undos) {
				io->cb_printf ("f undo_%d @ 0x%"PFMT64x"\n", idx, addr);
			} else if (j == undos && j != 0 && redos != 0) {
				io->cb_printf ("# Current undo/redo position.\n");
			} else if (j != undos) {
				io->cb_printf ("f redo_%d @ 0x%"PFMT64x"\n", idx, addr);
			}
			break;
		case 0:
			if (list) {
				RIOUndos *u = R_NEW0 (RIOUndos);
				if (u) {
					memcpy (u, undo, sizeof (RIOUndos));
					r_list_append (list, u);
				}
			}
			break;
		}
		j++;
	}
	switch (mode) {
	case '=':
		io->cb_printf ("\n");
		break;
	case 'j':
		io->cb_printf ("%s%"PFMT64d"]\n", comma, io->off);
		break;
	}
	return list;
}
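
/* Usage sketch (illustrative, not compiled): consuming the list returned by
 * r_io_sundo_list () in mode 0 (or '!'). Each element is a heap copy of an
 * RIOUndos entry and the list owns it through the `free` destructor passed to
 * r_list_newf (), so a single r_list_free () releases everything. */
#if 0
	RListIter *iter;
	RIOUndos *u;
	RList *hist = r_io_sundo_list (io, 0);
	r_list_foreach (hist, iter, u) {
		eprintf ("0x%08"PFMT64x"\n", u->off);
	}
	r_list_free (hist);
#endif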

/* undo writes */

R_API void r_io_wundo_new(RIO *io, ut64 off, const ut8 *data, int len) {
	RIOUndoWrite *uw;
	if (!io->undo.w_enable) {
		return;
	}
	/* undo write changes */
	uw = R_NEW0 (RIOUndoWrite);
	if (!uw) {
		return;
	}
	uw->set = true;
	uw->off = off;
	uw->len = len;
	uw->n = (ut8*) malloc (len);
	if (!uw->n) {
		free (uw);
		return;
	}
	memcpy (uw->n, data, len);
	uw->o = (ut8*) malloc (len);
	if (!uw->o) {
		free (uw->n);
		R_FREE (uw);
		return;
	}
	memset (uw->o, 0xff, len);
	r_io_read_at (io, off, uw->o, len);
	r_list_append (io->undo.w_list, uw);
}
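
/* Usage sketch (illustrative, not compiled): recording a write by hand with
 * r_io_wundo_new (). The record stores both the new bytes (`n`, copied from
 * `data`) and the old bytes (`o`, read back from `off`), which is what makes
 * undo and redo of the write possible later. Offsets and bytes are made up. */
#if 0
	const ut8 patch[4] = { 0x90, 0x90, 0x90, 0x90 };
	io->undo.w_enable = 1;                                /* or via r_io_undo_enable () */
	r_io_wundo_new (io, 0x1000, patch, sizeof (patch));   /* snapshot old bytes, keep new ones */
#endif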

R_API void r_io_wundo_clear(RIO *io) {
	// XXX memory leak
	io->undo.w_list = r_list_new ();
}

// rename to r_io_undo_length ?
R_API int r_io_wundo_size(RIO *io) {
	return r_list_length (io->undo.w_list);
}

// TODO: Deprecate or so? iterators must be language-wide, but helpers are useful
R_API void r_io_wundo_list(RIO *io) {
#define BW 8 /* byte wrap */
	RListIter *iter;
	RIOUndoWrite *u;
	int i = 0, j, len;

	if (!io->undo.w_init) {
		return;
	}
	r_list_foreach (io->undo.w_list, iter, u) {
		io->cb_printf ("%02d %c %d %08"PFMT64x": ", i, u->set? '+': '-', u->len, u->off);
		len = (u->len > BW)? BW: u->len;
		for (j = 0; j < len; j++) {
			io->cb_printf ("%02x ", u->o[j]);
		}
		if (len == BW) {
			io->cb_printf (".. ");
		}
		io->cb_printf ("=> ");
		for (j = 0; j < len; j++) {
			io->cb_printf ("%02x ", u->n[j]);
		}
		if (len == BW) {
			io->cb_printf (".. ");
		}
		io->cb_printf ("\n");
		i++;
	}
}
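
/* Usage sketch (illustrative, not compiled). r_io_wundo_list () prints one
 * line per recorded write as "<idx> <+/-> <len> <offset>: <old> => <new>";
 * it only prints when io->undo.w_init is set, which this file itself never
 * does, so whether anything appears depends on the caller. Sample line for a
 * 4-byte patch at 0x1000 (made-up bytes):
 *   00 + 4 00001000: ff ff ff ff => 90 90 90 90
 */
#if 0
	r_io_wundo_list (io);
#endif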

R_API int r_io_wundo_apply(RIO *io, RIOUndoWrite *u, int set) {
	int orig = io->undo.w_enable;
	io->undo.w_enable = 0;
	if (set) {
		r_io_write_at (io, u->off, u->n, u->len);
		u->set = true;
	} else {
		r_io_write_at (io, u->off, u->o, u->len);
		u->set = false;
	}
	io->undo.w_enable = orig;
	return 0;
}

R_API void r_io_wundo_apply_all(RIO *io, int set) {
	RListIter *iter;
	RIOUndoWrite *u;

	r_list_foreach_prev (io->undo.w_list, iter, u) {
		r_io_wundo_apply (io, u, set);
		eprintf ("%s 0x%08"PFMT64x"\n", set? "redo": "undo", u->off);
	}
}
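
/* Usage sketch (illustrative, not compiled): reverting or re-applying every
 * recorded write in one go, using only functions from this file. set == 0
 * restores the old bytes of each record (newest first, since
 * r_list_foreach_prev walks the list from tail to head); set == 1 re-applies
 * the new bytes. */
#if 0
	r_io_wundo_apply_all (io, 0);   /* undo all recorded writes */
	r_io_wundo_apply_all (io, 1);   /* redo them */
#endif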

/* sets or unsets a recorded write; if set == 0, the write at index n is reverted */
R_API int r_io_wundo_set(RIO *io, int n, int set) {
	RListIter *iter;
	RIOUndoWrite *u = NULL;
	int i = 0;
	if (io->undo.w_init) {
		r_list_foreach_prev (io->undo.w_list, iter, u) {
			if (i++ == n) {
				break;
			}
		}
		if (u && i > n) {
			r_io_wundo_apply (io, u, set);
			return true;
		}
		eprintf ("invalid undo-write index\n");
	} else {
		eprintf ("no writes done\n");
	}
	return false;
}