/* RetroArch - A frontend for libretro.
 * Copyright (C) 2010-2014 - Hans-Kristian Arntzen
 * Copyright (C) 2011-2016 - Daniel De Matteis
 * Copyright (C) 2016 - Gregor Richards
 *
 * RetroArch is free software: you can redistribute it and/or modify it under the terms
 * of the GNU General Public License as published by the Free Software Found-
 * ation, either version 3 of the License, or (at your option) any later version.
 *
 * RetroArch is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with RetroArch.
 * If not, see <http://www.gnu.org/licenses/>.
 */

#include <compat/strl.h>
#include <stdio.h>

#include <net/net_compat.h>
#include <net/net_socket.h>
#include <net/net_natt.h>

#include "netplay_private.h"

#include "retro_assert.h"

#include "../../autosave.h"

#if 0
#define DEBUG_NONDETERMINISTIC_CORES
#endif
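
/**
 * netplay_handle_frame_hash:
 * @netplay : pointer to netplay object
 * @delta   : delta frame whose CRC is checked or sent
 *
 * Compare or broadcast the CRC of a frame's savestate. On the server, the CRC
 * is computed and sent every check_frames frames (and on frame 1). Otherwise,
 * a remote CRC is compared against the locally computed one and a fresh
 * savestate is requested on mismatch; if the CRCs already disagree at frame 1,
 * they are assumed to be meaningless and checking is disabled.
 **/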

static void netplay_handle_frame_hash(netplay_t *netplay, struct delta_frame *delta)
{
   static bool crcs_valid = true;
   if (netplay_is_server(netplay))
   {
      if (netplay->check_frames &&
          (delta->frame % netplay->check_frames == 0 || delta->frame == 1))
      {
         delta->crc = netplay_delta_frame_crc(netplay, delta);
         netplay_cmd_crc(netplay, delta);
      }
   }
   else if (delta->crc && crcs_valid)
   {
      /* We have a remote CRC, so check it */
      uint32_t local_crc = netplay_delta_frame_crc(netplay, delta);
      if (local_crc != delta->crc)
      {
         if (delta->frame == 1)
         {
            /* We check frame 1 just to make sure the CRCs make sense at all.
             * If we've diverged at frame 1, we assume CRCs are not useful. */
            crcs_valid = false;
         }
         else if (crcs_valid)
         {
            /* Fix this! */
            netplay_cmd_request_savestate(netplay);
         }
      }
   }
}
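
/* Note on usage: netplay_sync_pre_frame() and netplay_sync_post_frame() are
 * intended to bracket one emulated frame (pre-frame, run the core, then
 * post-frame). This is only a sketch of the expected call order; the actual
 * call sites live in the frontend's netplay pre/post-frame hooks, outside
 * this file. */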

/**
 * netplay_sync_pre_frame:
 * @netplay : pointer to netplay object
 *
 * Pre-frame for Netplay synchronization.
 *
 * Returns: false if we must stall waiting for a client connection before the
 * frame can run, true otherwise.
 **/
bool netplay_sync_pre_frame(netplay_t *netplay)
{
   retro_ctx_serialize_info_t serial_info;

   if (netplay_delta_frame_ready(netplay, &netplay->buffer[netplay->self_ptr], netplay->self_frame_count))
   {
      serial_info.data_const = NULL;
      serial_info.data = netplay->buffer[netplay->self_ptr].state;
      serial_info.size = netplay->state_size;

      memset(serial_info.data, 0, serial_info.size);
      if ((netplay->quirks & NETPLAY_QUIRK_INITIALIZATION) || netplay->self_frame_count == 0)
      {
         /* Don't serialize until it's safe */
      }
      else if (!(netplay->quirks & NETPLAY_QUIRK_NO_SAVESTATES) && core_serialize(&serial_info))
      {
         if (netplay->force_send_savestate && !netplay->stall)
         {
            /* Send this along to the other side */
            serial_info.data_const = netplay->buffer[netplay->self_ptr].state;
            netplay_load_savestate(netplay, &serial_info, false);
            netplay->force_send_savestate = false;
         }
      }
      else
      {
         /* If the core can't serialize properly, we must stall for the
          * remote input on EVERY frame, because we can't recover */
         netplay->quirks |= NETPLAY_QUIRK_NO_SAVESTATES;
         netplay->delay_frames = 0;
      }

      /* If we can't transmit savestates, we must stall until the client is ready */
      if (netplay->self_frame_count > 0 &&
          (netplay->quirks & (NETPLAY_QUIRK_NO_SAVESTATES|NETPLAY_QUIRK_NO_TRANSMISSION)) &&
          (netplay->connections_size == 0 || !netplay->connections[0].active ||
           netplay->connections[0].mode < NETPLAY_CONNECTION_CONNECTED))
         netplay->stall = NETPLAY_STALL_NO_CONNECTION;
   }

   if (netplay->is_server)
   {
      fd_set fds;
      struct timeval tmp_tv = {0};
      int new_fd;
      struct sockaddr_storage their_addr;
      socklen_t addr_size;
      struct netplay_connection *connection;
      size_t connection_num;

      /* Check for a connection */
      FD_ZERO(&fds);
      FD_SET(netplay->listen_fd, &fds);
      if (socket_select(netplay->listen_fd + 1, &fds, NULL, NULL, &tmp_tv) > 0 &&
          FD_ISSET(netplay->listen_fd, &fds))
      {
         addr_size = sizeof(their_addr);
         new_fd = accept(netplay->listen_fd, (struct sockaddr*)&their_addr, &addr_size);
         if (new_fd < 0)
         {
            RARCH_ERR("%s\n", msg_hash_to_str(MSG_NETPLAY_FAILED));
            goto process;
         }

         /* Set the socket nonblocking */
         if (!socket_nonblock(new_fd))
         {
            /* Catastrophe! */
            socket_close(new_fd);
            goto process;
         }

#if defined(IPPROTO_TCP) && defined(TCP_NODELAY)
         {
            int flag = 1;
            if (setsockopt(new_fd, IPPROTO_TCP, TCP_NODELAY,
#ifdef _WIN32
               (const char*)
#else
               (const void*)
#endif
               &flag,
               sizeof(int)) < 0)
               RARCH_WARN("Could not set netplay TCP socket to nodelay. Expect jitter.\n");
         }
#endif

#if defined(F_SETFD) && defined(FD_CLOEXEC)
         /* Don't let any inherited processes keep open our port */
         if (fcntl(new_fd, F_SETFD, FD_CLOEXEC) < 0)
            RARCH_WARN("Cannot set Netplay port to close-on-exec. It may fail to reopen if the client disconnects.\n");
#endif

         /* Allocate a connection */
         for (connection_num = 0; connection_num < netplay->connections_size; connection_num++)
            if (!netplay->connections[connection_num].active) break;
         if (connection_num == netplay->connections_size)
         {
            if (connection_num == 0)
            {
               netplay->connections = malloc(sizeof(struct netplay_connection));
               if (netplay->connections == NULL)
               {
                  socket_close(new_fd);
                  goto process;
               }
               netplay->connections_size = 1;
            }
            else
            {
               size_t new_connections_size = netplay->connections_size * 2;
               struct netplay_connection *new_connections =
                  realloc(netplay->connections,
                     new_connections_size*sizeof(struct netplay_connection));
               if (new_connections == NULL)
               {
                  socket_close(new_fd);
                  goto process;
               }

               memset(new_connections + netplay->connections_size, 0,
                     netplay->connections_size * sizeof(struct netplay_connection));
               netplay->connections = new_connections;
               netplay->connections_size = new_connections_size;
            }
         }
         connection = &netplay->connections[connection_num];

         /* Set it up */
         memset(connection, 0, sizeof(*connection));
         connection->active = true;
         connection->fd = new_fd;
         connection->mode = NETPLAY_CONNECTION_INIT;

         if (!netplay_init_socket_buffer(&connection->send_packet_buffer,
               netplay->packet_buffer_size) ||
             !netplay_init_socket_buffer(&connection->recv_packet_buffer,
               netplay->packet_buffer_size))
         {
            if (connection->send_packet_buffer.data)
               netplay_deinit_socket_buffer(&connection->send_packet_buffer);
            connection->active = false;
            socket_close(new_fd);
            goto process;
         }

         netplay_handshake_init_send(netplay, connection);
      }
   }

process:
   netplay->can_poll = true;
   input_poll_net();

   return (netplay->stall != NETPLAY_STALL_NO_CONNECTION);
}

/**
 * netplay_sync_post_frame:
 * @netplay : pointer to netplay object
 *
 * Post-frame for Netplay synchronization.
 * We check if we have new input and replay from recorded input.
 **/
void netplay_sync_post_frame(netplay_t *netplay)
{
   netplay->self_ptr = NEXT_PTR(netplay->self_ptr);
   netplay->self_frame_count++;

   /* Only relevant if we're connected */
   if (!netplay->connected_players)
   {
      netplay->other_frame_count = netplay->self_frame_count;
      netplay->other_ptr = netplay->self_ptr;
      return;
   }
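
   /* Bookkeeping, roughly: self_frame_count/self_ptr track the frame we have
    * just executed locally, unread_frame_count/unread_ptr track the earliest
    * frame for which remote input is still outstanding, and
    * other_frame_count/other_ptr trail behind as the last frame known to be
    * fully confirmed with real input. */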

#ifndef DEBUG_NONDETERMINISTIC_CORES
   if (!netplay->force_rewind)
   {
      /* Skip ahead if we predicted correctly.
       * Skip until our simulation failed. */
      while (netplay->other_frame_count < netplay->unread_frame_count &&
             netplay->other_frame_count < netplay->self_frame_count)
      {
         struct delta_frame *ptr = &netplay->buffer[netplay->other_ptr];
         size_t i;

         for (i = 0; i < MAX_USERS; i++)
         {
            if (memcmp(ptr->simulated_input_state[i], ptr->real_input_state[i],
                   sizeof(ptr->real_input_state[i])) != 0
                && !ptr->used_real[i])
               break;
         }
         if (i != MAX_USERS) break;
         netplay_handle_frame_hash(netplay, ptr);
         netplay->other_ptr = NEXT_PTR(netplay->other_ptr);
         netplay->other_frame_count++;
      }
   }
#endif
|
2015-12-23 20:25:28 +00:00
|
|
|
|
Multitudinous fixes and updates to Netplay. Had to be one commit since
they're mostly related:
(1) Renamed frame_count to self_frame_count to be consistent with all
other names.
(2) Previously, it was possible to overwrite data in the ring buffer
that hadn't yet been used. Now that's not possible, but that just
changes one breakage for another: It's now possible to miss the NEW
data. The final resolution for this will probably be requesting stalls.
This is accomplished simply by storing frame numbers in the ring buffer
and checking them against the 'other' head.
(3) In TCP packets, separated cmd_size from cmd. It was beyond pointless
for these to be combined, and restricted cmd_size to 16 bits, which
will probably fail when/if state loading is supported.
(4) Readahead is now allowed. In the past, if the peer got ahead of us,
we would simply ignore their data. Thus, if they got too far ahead of
us, we'd stop reading their data altogether. Fabulous. Now, we're happy
to read future input.
(5) If the peer gets too far ahead of us (currently an unconfigurable 10
frames), fast forward to catch up. This should prevent desync due to
clock drift or stutter.
(6) Used frame_count in a few places where ptr was used. Doing a
comparison of pointers on a ring buffer is a far more dangerous way to
assure we're done with a task than simply using the count, since the
ring buffer is... well, a ring.
(7) Renamed tmp_{ptr,frame_count} to replay_{ptr,frame_count} for
clarity.
(8) Slightly changed the protocol version hash, just to assure that
other clients wouldn't think they were compatible with this one.
(9) There was an off-by-one error which, under some circumstances, could
allow the replay engine to run a complete round through the ring buffer,
replaying stale data. Fixed.
2016-09-12 02:01:47 +00:00

   /* Now replay the real input if we've gotten ahead of it */
   if (netplay->force_rewind ||
         (netplay->other_frame_count < netplay->unread_frame_count &&
          netplay->other_frame_count < netplay->self_frame_count))
   {
      retro_ctx_serialize_info_t serial_info;

      /* Replay frames. */
      netplay->is_replay = true;
      netplay->replay_ptr = netplay->other_ptr;
      netplay->replay_frame_count = netplay->other_frame_count;

      if (netplay->quirks & NETPLAY_QUIRK_INITIALIZATION)
         /* Make sure we're initialized before we start loading things */
         netplay_wait_and_init_serialization(netplay);

      serial_info.data = NULL;
      serial_info.data_const = netplay->buffer[netplay->replay_ptr].state;
      serial_info.size = netplay->state_size;

      if (!core_unserialize(&serial_info))
      {
         RARCH_ERR("Netplay savestate loading failed: Prepare for desync!\n");
      }

      while (netplay->replay_frame_count < netplay->self_frame_count)
      {
         struct delta_frame *ptr = &netplay->buffer[netplay->replay_ptr];
         serial_info.data = ptr->state;
         serial_info.size = netplay->state_size;
         serial_info.data_const = NULL;

         /* Remember the current state */
         memset(serial_info.data, 0, serial_info.size);
         core_serialize(&serial_info);
         if (netplay->replay_frame_count < netplay->unread_frame_count)
            netplay_handle_frame_hash(netplay, ptr);

         /* Re-simulate this frame's input */
         netplay_simulate_input(netplay, netplay->replay_ptr, true);

         autosave_lock();
         core_run();
         autosave_unlock();
         netplay->replay_ptr = NEXT_PTR(netplay->replay_ptr);
         netplay->replay_frame_count++;

#ifdef DEBUG_NONDETERMINISTIC_CORES
         if (ptr->have_remote && netplay_delta_frame_ready(netplay, &netplay->buffer[netplay->replay_ptr], netplay->replay_frame_count))
         {
            RARCH_LOG("PRE %u: %X\n", netplay->replay_frame_count-1, netplay_delta_frame_crc(netplay, ptr));
            if (netplay->is_server)
               RARCH_LOG("INP %X %X\n", ptr->real_input_state[0], ptr->self_state[0]);
            else
               RARCH_LOG("INP %X %X\n", ptr->self_state[0], ptr->real_input_state[0]);
            ptr = &netplay->buffer[netplay->replay_ptr];
            serial_info.data = ptr->state;
            memset(serial_info.data, 0, serial_info.size);
            core_serialize(&serial_info);
            RARCH_LOG("POST %u: %X\n", netplay->replay_frame_count-1, netplay_delta_frame_crc(netplay, ptr));
         }
#endif
      }

      if (netplay->unread_frame_count < netplay->self_frame_count)
      {
         netplay->other_ptr = netplay->unread_ptr;
         netplay->other_frame_count = netplay->unread_frame_count;
      }
      else
      {
         netplay->other_ptr = netplay->self_ptr;
         netplay->other_frame_count = netplay->self_frame_count;
      }
      netplay->is_replay = false;
      netplay->force_rewind = false;
   }
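
   /* A note on the rewind below: it steps the self counters back by one frame
    * and reloads the savestate taken at the start of that frame, so the same
    * frame will be run again once the stall clears. */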

   /* If we're supposed to stall, rewind (we shouldn't get this far if we're
    * stalled, so this is a last resort) */
   if (netplay->stall)
   {
      retro_ctx_serialize_info_t serial_info;

      netplay->self_ptr = PREV_PTR(netplay->self_ptr);
      netplay->self_frame_count--;

      serial_info.data = NULL;
      serial_info.data_const = netplay->buffer[netplay->self_ptr].state;
      serial_info.size = netplay->state_size;

      core_unserialize(&serial_info);
   }
}