/* RetroArch - A frontend for libretro.
 * Copyright (C) 2010-2014 - Hans-Kristian Arntzen
 * Copyright (C) 2011-2015 - Daniel De Matteis
 * Copyright (C) 2014-2015 - Alfred Agrell
 *
 * RetroArch is free software: you can redistribute it and/or modify it under the terms
 * of the GNU General Public License as published by the Free Software Found-
 * ation, either version 3 of the License, or (at your option) any later version.
 *
 * RetroArch is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with RetroArch.
 * If not, see <http://www.gnu.org/licenses/>.
 */

#define __STDC_LIMIT_MACROS
#include "rewind.h"
#include "performance.h"
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <retro_inline.h>

#ifndef UINT16_MAX
#define UINT16_MAX 0xffff
#endif
#ifndef UINT32_MAX
#define UINT32_MAX 0xffffffffu
#endif

#undef CPU_X86
#if defined(__x86_64__) || defined(__i386__) || defined(__i486__) || defined(__i686__)
#define CPU_X86
#endif

/* Other arches SIGBUS (usually) on unaligned accesses. */
#ifndef CPU_X86
#define NO_UNALIGNED_MEM
#endif

/* Format per frame (pseudocode): */
#if 0
size nextstart;
repeat {
   uint16 numchanged;      /* everything is counted in units of uint16 */
   if (numchanged)
   {
      uint16 numunchanged; /* skip these before handling numchanged */
      uint16[numchanged] changeddata;
   }
   else
   {
      uint32 numunchanged;
      if (!numunchanged)
         break;
   }
}
size thisstart;
#endif

/* The start offsets point to 'nextstart' of any given compressed frame.
 * Each uint16 is stored native endian; anything that claims any other
 * endianness refers to the endianness of this specific item.
 * The uint32 is stored little endian.
 *
 * Each size value is stored native endian if alignment is not enforced;
 * if it is, they're little endian.
 *
 * The start of the buffer contains a size pointing to the end of the
 * buffer; the end points to its start.
 *
 * Wrapping is handled by returning to the start of the buffer if the
 * compressed data could potentially hit the edge; if the compressed
 * data could potentially overwrite the tail pointer, the tail retreats
 * until it can no longer collide.
 *
 * This means that on average, ~2 * maxcompsize is unused at any
 * given moment. */
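
/* Worked example (illustrative; the values are made up, not from the
 * original source): suppose the first 5 words of the state are
 * unchanged, the next 2 words changed from F00D CAFE to 1234 5678,
 * and the rest of the block is unchanged. The frame is then stored as:
 *
 *    nextstart        (size; offset of the next frame)
 *    0x0002           (numchanged = 2)
 *    0x0005           (numunchanged = 5; skipped first)
 *    0xF00D 0xCAFE    (the *old* words, so popping restores them)
 *    0x0000           (numchanged = 0, so read a uint32)
 *    0x0000 0x0000    (numunchanged = 0 -> end of frame)
 *    thisstart        (size; offset of this frame's start)
 *
 * The trailing unchanged run is not encoded at all; the decompressor
 * simply stops, since the output buffer already holds the last state. */
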
/* These are called a constant, small number of times per frame;
 * keep them as simple as possible. */
static INLINE void write_size_t(void *ptr, size_t val)
{
   memcpy(ptr, &val, sizeof(val));
}

static INLINE size_t read_size_t(const void *ptr)
{
   size_t ret;

   memcpy(&ret, ptr, sizeof(ret));
   return ret;
}

struct state_manager
{
   uint8_t *data;
   size_t capacity;
   /* Reading and writing is done here. */
   uint8_t *head;
   /* If head comes close to this, discard a frame. */
   uint8_t *tail;

   uint8_t *thisblock;
   uint8_t *nextblock;

   /* This one is rounded up from the state_size passed to
    * state_manager_new. */
   size_t blocksize;

   /* size_t + blocksize + (blocksize + 131069) / 131070 * (u16 + u16)
    * + u16 + u32 + size_t
    * (yes, the math is a bit ugly). */
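   /* Worked example (illustrative): for a 512 KiB state on a 64-bit
    * build, blocksize = 524288 and maxcblkcover = 65535 * 2 = 131070,
    * so maxcblks = 5 and
    * maxcompsize = 524288 + 5*4 + 2 + 4 + 16 = 524330 bytes. */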
   size_t maxcompsize;

   unsigned entries;
   bool thisblock_valid;
};

state_manager_t *state_manager_new(size_t state_size, size_t buffer_size)
{
   size_t newblocksize;
   int maxcblks;
   const int maxcblkcover = UINT16_MAX * sizeof(uint16_t);
   state_manager_t *state = (state_manager_t*)calloc(1, sizeof(*state));

   if (!state)
      return NULL;

   newblocksize = ((state_size - 1) | (sizeof(uint16_t) - 1)) + 1;
   state->blocksize = newblocksize;

   maxcblks = (state->blocksize + maxcblkcover - 1) / maxcblkcover;
   state->maxcompsize = state->blocksize + maxcblks * sizeof(uint16_t) * 2 +
      sizeof(uint16_t) + sizeof(uint32_t) + sizeof(size_t) * 2;

   state->data = (uint8_t*)malloc(buffer_size);

   state->thisblock = (uint8_t*)
      calloc(state->blocksize + sizeof(uint16_t) * 4 + 16, 1);
   state->nextblock = (uint8_t*)
      calloc(state->blocksize + sizeof(uint16_t) * 4 + 16, 1);
   if (!state->data || !state->thisblock || !state->nextblock)
      goto error;

   /* Force in a different byte at the end, so we don't need to check
    * bounds in the innermost loop (it's expensive).
    *
    * There is also a run of data that's identical in both blocks,
    * to stop the other scan.
    *
    * There is also some padding at the end, so we don't read outside
    * the buffer end if we're scanning in large blocks.
    *
    * It doesn't make any difference to us, but sacrificing 16 bytes
    * to keep Valgrind happy is worth it. */
   *(uint16_t*)(state->thisblock + state->blocksize + sizeof(uint16_t) * 3) =
      0xFFFF;
   *(uint16_t*)(state->nextblock + state->blocksize + sizeof(uint16_t) * 3) =
      0x0000;

   state->capacity = buffer_size;

   state->head = state->data + sizeof(size_t);
   state->tail = state->data + sizeof(size_t);

   return state;

error:
   state_manager_free(state);
   return NULL;
}

void state_manager_free(state_manager_t *state)
{
   if (!state)
      return;

   free(state->data);
   free(state->thisblock);
   free(state->nextblock);
   free(state);
}

bool state_manager_pop(state_manager_t *state, const void **data)
{
   size_t start;
   uint8_t *out;
   uint16_t *out16;
   const uint8_t *compressed = NULL;
   const uint16_t *compressed16 = NULL;

   *data = NULL;

   if (state->thisblock_valid)
   {
      state->thisblock_valid = false;
      state->entries--;
      *data = state->thisblock;
      return true;
   }

   if (state->head == state->tail)
      return false;

   start = read_size_t(state->head - sizeof(size_t));
   state->head = state->data + start;

   compressed = state->data + start + sizeof(size_t);
   out = state->thisblock;

   /* Begin decompression code;
    * out is the last pushed (or returned) state. */
   compressed16 = (const uint16_t*)compressed;
   out16 = (uint16_t*)out;

   for (;;)
   {
      uint16_t i;
      uint16_t numchanged = *(compressed16++);

      if (numchanged)
      {
         out16 += *compressed16++;

         /* We could do memcpy, but it seems that memcpy has a
          * constant-per-call overhead that actually shows up.
          *
          * Our average size in here seems to be 8 or so;
          * therefore, we do something with lower overhead. */
         for (i = 0; i < numchanged; i++)
            out16[i] = compressed16[i];

         compressed16 += numchanged;
         out16 += numchanged;
      }
      else
      {
         /* Cast before shifting, so the shift can't overflow an int. */
         uint32_t numunchanged = compressed16[0] |
               ((uint32_t)compressed16[1] << 16);

         if (!numunchanged)
            break;
         compressed16 += 2;
         out16 += numunchanged;
      }
   }
   /* End decompression code. */

   state->entries--;
   *data = state->thisblock;
   return true;
}

void state_manager_push_where(state_manager_t *state, void **data)
{
   /* We need to ensure we have an uncompressed copy of the last
    * pushed state, or we could end up applying a 'patch' to the
    * wrong savestate, and that'd blow up rather quickly. */

   if (!state->thisblock_valid)
   {
      const void *ignored;
      if (state_manager_pop(state, &ignored))
      {
         state->thisblock_valid = true;
         state->entries++;
      }
   }

   *data = state->nextblock;
}

#if __SSE2__
#if defined(__GNUC__)
static INLINE int compat_ctz(unsigned x)
{
   return __builtin_ctz(x);
}
#else

/* Only checks at nibble granularity,
 * because that's what we need. */
static INLINE int compat_ctz(unsigned x)
{
   if (x & 0x000f)
      return 0;
   if (x & 0x00f0)
      return 4;
   if (x & 0x0f00)
      return 8;
   if (x & 0xf000)
      return 12;
   return 16;
}
#endif

#include <emmintrin.h>

/* There's no equivalent in libc, although you'd think there would be;
 * std::mismatch exists, but it's not optimized at all. */
static INLINE size_t find_change(const uint16_t *a, const uint16_t *b)
{
   const __m128i *a128 = (const __m128i*)a;
   const __m128i *b128 = (const __m128i*)b;

   for (;;)
   {
      __m128i v0 = _mm_loadu_si128(a128);
      __m128i v1 = _mm_loadu_si128(b128);
      __m128i c = _mm_cmpeq_epi32(v0, v1);
      uint32_t mask = _mm_movemask_epi8(c);

      if (mask != 0xffff) /* Something has changed, figure out where. */
      {
         size_t ret = (((uint8_t*)a128 - (uint8_t*)a) |
               (compat_ctz(~mask))) >> 1;

         /* The compare has 32-bit granularity, so ret is even; if the
          * first word of the pair is actually equal, step past it. */
         return ret | (a[ret] == b[ret]);
      }

      a128++;
      b128++;
   }
}
#else
static INLINE size_t find_change(const uint16_t *a, const uint16_t *b)
{
   const uint16_t *a_org = a;
#ifdef NO_UNALIGNED_MEM
   while (((uintptr_t)a & (sizeof(size_t) - 1)) && *a == *b)
   {
      a++;
      b++;
   }
   if (*a == *b)
#endif
   {
      const size_t *a_big = (const size_t*)a;
      const size_t *b_big = (const size_t*)b;

      while (*a_big == *b_big)
      {
         a_big++;
         b_big++;
      }
      a = (const uint16_t*)a_big;
      b = (const uint16_t*)b_big;

      while (*a == *b)
      {
         a++;
         b++;
      }
   }
   return a - a_org;
}
#endif

static INLINE size_t find_same(const uint16_t *a, const uint16_t *b)
{
   const uint16_t *a_org = a;
#ifdef NO_UNALIGNED_MEM
   if (((uintptr_t)a & (sizeof(uint32_t) - 1)) && *a != *b)
   {
      a++;
      b++;
   }
   if (*a != *b)
#endif
   {
      /* Because we compare a full uint32 at a time, whether a run of
       * exactly two identical words is caught depends on alignment.
       *
       * Luckily, the compression rate is the same in both cases, and
       * a run of three is always caught.
       *
       * (We prefer to miss two-word runs, anyway; it means fewer
       * iterations of the outer loop, as well as in the decompressor.) */
      const uint32_t *a_big = (const uint32_t*)a;
      const uint32_t *b_big = (const uint32_t*)b;

      while (*a_big != *b_big)
      {
         a_big++;
         b_big++;
      }
      a = (const uint16_t*)a_big;
      b = (const uint16_t*)b_big;

      if (a != a_org && a[-1] == b[-1])
      {
         a--;
         b--;
      }
   }
   return a - a_org;
}

void state_manager_push_do(state_manager_t *state)
{
   if (state->thisblock_valid)
   {
      if (state->capacity < sizeof(size_t) + state->maxcompsize)
         return;

recheckcapacity:;

      size_t headpos = state->head - state->data;
      size_t tailpos = state->tail - state->data;
      size_t remaining = (tailpos + state->capacity -
            sizeof(size_t) - headpos - 1) % state->capacity + 1;

      if (remaining <= state->maxcompsize)
      {
         state->tail = state->data + read_size_t(state->tail);
         state->entries--;
         goto recheckcapacity;
      }

      RARCH_PERFORMANCE_INIT(gen_deltas);
      RARCH_PERFORMANCE_START(gen_deltas);

      const uint8_t *oldb = state->thisblock;
      const uint8_t *newb = state->nextblock;
      uint8_t *compressed = state->head + sizeof(size_t);

      /* Begin compression code; 'compressed' will point to
       * the end of the compressed data (excluding the prev pointer). */
      const uint16_t *old16 = (const uint16_t*)oldb;
      const uint16_t *new16 = (const uint16_t*)newb;
      uint16_t *compressed16 = (uint16_t*)compressed;
      size_t num16s = state->blocksize / sizeof(uint16_t);

      while (num16s)
      {
         size_t i;
         size_t skip = find_change(old16, new16);

         if (skip >= num16s)
            break;

         old16 += skip;
         new16 += skip;
         num16s -= skip;

         if (skip > UINT16_MAX)
         {
            if (skip > UINT32_MAX)
            {
               /* This will make it scan the entire thing again,
                * but it only hits on 8GB of unchanged data anyway,
                * and if you're doing that, you've got bigger problems. */
               skip = UINT32_MAX;
            }
            *compressed16++ = 0;
            *compressed16++ = skip;
            *compressed16++ = skip >> 16;
            skip = 0;
            continue;
         }

         size_t changed = find_same(old16, new16);
         if (changed > UINT16_MAX)
            changed = UINT16_MAX;

         *compressed16++ = changed;
         *compressed16++ = skip;

         /* Store the old data, so popping this entry restores it. */
         for (i = 0; i < changed; i++)
            compressed16[i] = old16[i];

         old16 += changed;
         new16 += changed;
         num16s -= changed;
         compressed16 += changed;
      }

      compressed16[0] = 0;
      compressed16[1] = 0;
      compressed16[2] = 0;
      compressed = (uint8_t*)(compressed16 + 3);
      /* End compression code. */

      if (compressed - state->data + state->maxcompsize > state->capacity)
      {
         compressed = state->data;
         if (state->tail == state->data + sizeof(size_t))
            state->tail = state->data + read_size_t(state->tail);
      }
      write_size_t(compressed, state->head - state->data);
      compressed += sizeof(size_t);
      write_size_t(state->head, compressed - state->data);
      state->head = compressed;

      RARCH_PERFORMANCE_STOP(gen_deltas);
   }
   else
      state->thisblock_valid = true;

   uint8_t *swap = state->thisblock;
   state->thisblock = state->nextblock;
   state->nextblock = swap;

   state->entries++;
}

void state_manager_capacity(state_manager_t *state,
      unsigned *entries, size_t *bytes, bool *full)
{
   size_t headpos = state->head - state->data;
   size_t tailpos = state->tail - state->data;
   size_t remaining = (tailpos + state->capacity -
         sizeof(size_t) - headpos - 1) % state->capacity + 1;

   if (entries)
      *entries = state->entries;
   if (bytes)
      *bytes = state->capacity - remaining;
   if (full)
      *full = remaining <= state->maxcompsize * 2;
}
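
/* A minimal usage sketch (illustrative; core_serialize and
 * core_unserialize are assumed stand-ins for the caller's own
 * serialization, not functions defined in RetroArch):
 *
 *    state_manager_t *sm = state_manager_new(state_size, 8 << 20);
 *
 *    // Once per frame:
 *    void *buf;
 *    state_manager_push_where(sm, &buf);
 *    core_serialize(buf, state_size);
 *    state_manager_push_do(sm);
 *
 *    // To rewind one frame:
 *    const void *data;
 *    if (state_manager_pop(sm, &data))
 *       core_unserialize(data, state_size);
 *
 *    state_manager_free(sm);
 */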