RetroArch/network/netplay/netplay.c

1416 lines
39 KiB
C
Raw Normal View History

2012-04-21 21:13:50 +00:00
/* RetroArch - A frontend for libretro.
2014-01-01 00:50:59 +00:00
* Copyright (C) 2010-2014 - Hans-Kristian Arntzen
2016-01-10 03:06:50 +00:00
* Copyright (C) 2011-2016 - Daniel De Matteis
* Copyright (C) 2016 - Gregor Richards
2011-02-13 15:40:24 +00:00
*
2012-04-21 21:13:50 +00:00
* RetroArch is free software: you can redistribute it and/or modify it under the terms
2011-02-13 15:40:24 +00:00
* of the GNU General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
2012-04-21 21:13:50 +00:00
* RetroArch is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
2011-02-13 15:40:24 +00:00
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
2012-04-21 21:31:57 +00:00
* You should have received a copy of the GNU General Public License along with RetroArch.
2011-02-13 15:40:24 +00:00
* If not, see <http://www.gnu.org/licenses/>.
*/
2011-12-04 17:03:08 +00:00
2012-11-15 13:32:06 +00:00
#if defined(_MSC_VER) && !defined(_XBOX)
2012-11-15 08:40:31 +00:00
#pragma comment(lib, "ws2_32")
#endif
#include <stdlib.h>
#include <string.h>
2015-09-04 19:11:00 +00:00
2016-09-05 22:56:00 +00:00
#include <compat/strl.h>
2016-09-08 09:59:44 +00:00
#include <retro_assert.h>
#include <net/net_compat.h>
2016-05-01 19:18:45 +00:00
#include <net/net_socket.h>
#include <features/features_cpu.h>
#include <retro_endianness.h>
2015-09-04 19:11:00 +00:00
2015-12-23 20:25:28 +00:00
#include "netplay_private.h"
2015-12-05 15:41:00 +00:00
2016-09-05 16:31:32 +00:00
#include "../../configuration.h"
2016-09-03 05:48:25 +00:00
#include "../../command.h"
2016-09-03 05:45:51 +00:00
#include "../../movie.h"
2016-09-30 04:22:56 +00:00
#include "../../paths.h"
2016-09-03 05:51:11 +00:00
#include "../../runloop.h"
2016-09-03 05:45:51 +00:00
2016-09-29 18:11:46 +00:00
#define MAX_STALL_TIME_USEC (10*1000*1000)
#define MAX_RETRIES 16
#define RETRY_MS 500
2015-12-05 15:41:00 +00:00
/* Option flags describing where/when a netplay command is legal.
 * NOTE(review): no visible consumer in this chunk — presumably checked by the
 * command dispatch elsewhere in the file; confirm before changing values. */
enum
{
   CMD_OPT_ALLOWED_IN_SPECTATE_MODE = 0x1,  /* command is valid while spectating */
   CMD_OPT_REQUIRE_ACK = 0x2,               /* sender expects an ACK reply */
   CMD_OPT_HOST_ONLY = 0x4,                 /* only the host may send this */
   CMD_OPT_CLIENT_ONLY = 0x8,               /* only a client may send this */
   CMD_OPT_REQUIRE_SYNC = 0x10              /* command requires synced state */
};
/* File-global singleton netplay instance; all the libretro callback shims
 * below (input_poll_net, video_frame_net, ...) operate on this pointer. */
static netplay_t *netplay_data = NULL;
/**
 * init_tcp_connection:
 * @res       : one addrinfo candidate to try
 * @server    : true = connect out to a host; false = bind and listen locally
 * @spectate  : spectator mode (widens the listen backlog)
 * @other_addr: unused in this path — TODO confirm whether the accept()
 *              side elsewhere fills it; candidates for removal otherwise
 * @addr_size : unused (see other_addr)
 *
 * Creates one TCP socket for the given address. Returns the fd on success,
 * -1 on failure (the socket is closed on any error path).
 **/
static int init_tcp_connection(const struct addrinfo *res,
      bool server, bool spectate,
      struct sockaddr *other_addr, socklen_t addr_size)
{
   bool ret = true;
   int fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
   if (fd < 0)
   {
      ret = false;
      goto end;
   }
#if defined(IPPROTO_TCP) && defined(TCP_NODELAY)
   {
      /* Disable Nagle's algorithm: netplay sends small per-frame packets
       * and coalescing them would add latency. Failure is non-fatal. */
      int flag = 1;
      if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (void*)&flag, sizeof(int)) < 0)
         RARCH_WARN("Could not set netplay TCP socket to nodelay. Expect jitter.\n");
   }
#endif
#if defined(F_SETFD) && defined(FD_CLOEXEC)
   /* Don't let any inherited processes keep open our port */
   if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0)
      RARCH_WARN("Cannot set Netplay port to close-on-exec. It may fail to reopen if the client disconnects.\n");
#endif
   if (server)
   {
      /* Client role: connect to the remote host (blocking connect). */
      if (socket_connect(fd, (void*)res, false) < 0)
      {
         ret = false;
         goto end;
      }
   }
   else
   {
      /* Host role: bind locally and listen; spectate mode allows a
       * backlog of MAX_SPECTATORS instead of a single peer. */
      if (  !socket_bind(fd, (void*)res) ||
            listen(fd, spectate ? MAX_SPECTATORS : 1) < 0)
      {
         ret = false;
         goto end;
      }
   }
end:
   /* Single cleanup point: on any failure, release the fd. */
   if (!ret && fd >= 0)
   {
      socket_close(fd);
      fd = -1;
   }
   return fd;
}
/**
 * init_tcp_socket:
 * @netplay : netplay object; on success netplay->fd receives the socket
 * @server  : hostname to connect to, or NULL to listen (host role)
 * @port    : TCP port number
 * @spectate: spectator mode flag (note: the call below actually passes
 *            netplay->spectate.enabled, not this parameter)
 *
 * Resolves the address and tries each candidate until one socket succeeds.
 * Returns true on success.
 **/
static bool init_tcp_socket(netplay_t *netplay, const char *server,
      uint16_t port, bool spectate)
{
   char port_buf[16] = {0};
   bool ret = false;
   const struct addrinfo *tmp_info = NULL;
   struct addrinfo *res = NULL;
   struct addrinfo hints = {0};
   hints.ai_socktype = SOCK_STREAM;
   /* No server hostname means we are the host: ask for a wildcard
    * address suitable for bind(). */
   if (!server)
      hints.ai_flags = AI_PASSIVE;
   snprintf(port_buf, sizeof(port_buf), "%hu", (unsigned short)port);
   if (getaddrinfo_retro(server, port_buf, &hints, &res) < 0)
      return false;
   if (!res)
      return false;
   /* If "localhost" is used, it is important to check every possible
    * address for IPv4/IPv6. */
   tmp_info = res;
   while (tmp_info)
   {
      int fd = init_tcp_connection(
            tmp_info,
            server,
            netplay->spectate.enabled,
            (struct sockaddr*)&netplay->other_addr,
            sizeof(netplay->other_addr));
      if (fd >= 0)
      {
         /* First candidate that works wins. */
         ret = true;
         netplay->fd = fd;
         break;
      }
      tmp_info = tmp_info->ai_next;
   }
   if (res)
      freeaddrinfo_retro(res);
   if (!ret)
      RARCH_ERR("Failed to set up netplay sockets.\n");
   return ret;
}
/* Bring up the network stack, then open the netplay TCP socket.
 * Returns true only if both steps succeed. */
static bool init_socket(netplay_t *netplay, const char *server, uint16_t port)
{
   if (!network_init())
      return false;
   return init_tcp_socket(netplay, server, port, netplay->spectate.enabled);
}
2015-01-09 17:34:00 +00:00
/**
 * hangup:
 * @netplay : pointer to netplay object (NULL tolerated)
 *
 * Disconnects an active Netplay connection due to an error.
 * Closes the socket, and — when acting as a non-spectate host —
 * immediately re-opens a listening socket so a new client can join.
 * Also clears per-connection state so a future connection starts clean.
 **/
static void hangup(netplay_t *netplay)
{
   if (!netplay)
      return;
   /* Already disconnected: nothing to tear down. */
   if (!netplay->has_connection)
      return;

   RARCH_WARN("Netplay has disconnected. Will continue without connection ...\n");
   runloop_msg_queue_push("Netplay has disconnected. Will continue without connection.", 0, 480, false);

   socket_close(netplay->fd);
   netplay->fd = -1;

   if (netplay->is_server && !netplay->spectate.enabled)
   {
      /* In server mode, make the socket listen for a new connection */
      if (!init_socket(netplay, NULL, netplay->tcp_port))
      {
         RARCH_WARN("Failed to reinitialize Netplay.\n");
         runloop_msg_queue_push("Failed to reinitialize Netplay.", 0, 480, false);
      }
   }

   netplay->has_connection = false;

   /* Reset things that will behave oddly if we get a new connection */
   netplay->remote_paused = false;
   netplay->flip = false;
   netplay->flip_frame = 0;
   netplay->stall = 0;
}
2016-09-17 16:21:29 +00:00
/* Forward the info handshake to the mode-specific backend
 * (net vs. spectate implementations install different net_cbs). */
static bool netplay_info_cb(netplay_t* netplay, unsigned frames)
{
   return netplay->net_cbs->info_cb(netplay, frames);
}
2015-01-09 17:34:00 +00:00
/**
 * netplay_should_skip:
 * @netplay : pointer to netplay object (NULL tolerated)
 *
 * If we're fast-forward replaying to resync, check if we
 * should actually show frame.
 *
 * Returns: bool (1) if we should skip this frame, otherwise
 * false (0).
 **/
static bool netplay_should_skip(netplay_t *netplay)
{
   return netplay
      && netplay->is_replay
      && netplay->has_connection;
}
/* True when the object exists and its poll flag is armed
 * (netplay_poll clears it so we poll at most once per frame). */
static bool netplay_can_poll(netplay_t *netplay)
{
   return netplay ? netplay->can_poll : false;
}
2015-01-09 17:34:00 +00:00
/**
 * get_self_input_state:
 * @netplay : pointer to netplay object
 *
 * Grab our own input state and send this over the network.
 *
 * Returns: true (1) if successful, otherwise false (0).
 **/
static bool get_self_input_state(netplay_t *netplay)
{
   /* Fix: was {0, 0, 0}, which silently assumed WORDS_PER_FRAME == 4;
    * {0} zero-initializes the whole array regardless of its size. */
   uint32_t state[WORDS_PER_FRAME - 1] = {0};
   struct delta_frame *ptr = &netplay->buffer[netplay->self_ptr];

   if (!netplay_delta_frame_ready(netplay, ptr, netplay->self_frame_count))
      return false;

   if (ptr->have_local)
   {
      /* We've already read this frame! */
      return true;
   }

   if (!input_driver_is_libretro_input_blocked() && netplay->self_frame_count > 0)
   {
      unsigned i;
      settings_t *settings = config_get_ptr();

      /* First frame we always give zero input since relying on
       * input from first frame screws up when we use -F 0. */
      retro_input_state_t cb = netplay->cbs.state_cb;

      /* Pack the digital joypad buttons into a bitmask. */
      for (i = 0; i < RARCH_FIRST_CUSTOM_BIND; i++)
      {
         int16_t tmp = cb(settings->input.netplay_client_swap_input ?
               0 : !netplay->port,
               RETRO_DEVICE_JOYPAD, 0, i);
         state[0] |= tmp ? 1 << i : 0;
      }

      /* Pack each analog stick as X in the low 16 bits, Y in the high 16.
       * Fix: the old ((uint16_t)tmp_y) << 16 promoted to signed int, so a
       * Y value with bit 15 set shifted into the sign bit — undefined
       * behavior. Go through uint32_t instead. */
      for (i = 0; i < 2; i++)
      {
         int16_t tmp_x = cb(settings->input.netplay_client_swap_input ?
               0 : !netplay->port,
               RETRO_DEVICE_ANALOG, i, 0);
         int16_t tmp_y = cb(settings->input.netplay_client_swap_input ?
               0 : !netplay->port,
               RETRO_DEVICE_ANALOG, i, 1);
         state[1 + i] = (uint32_t)(uint16_t)tmp_x |
               (((uint32_t)(uint16_t)tmp_y) << 16);
      }
   }

   /* Here we construct the payload format:
    * frame {
    *    uint32_t frame_number
    *    uint32_t RETRO_DEVICE_JOYPAD state (top 16 bits zero)
    *    uint32_t ANALOG state[0]
    *    uint32_t ANALOG state[1]
    * }
    *
    * payload {
    *    cmd (CMD_INPUT)
    *    cmd_size (4 words)
    *    frame
    * }
    */
   netplay->packet_buffer[0] = htonl(NETPLAY_CMD_INPUT);
   netplay->packet_buffer[1] = htonl(WORDS_PER_FRAME * sizeof(uint32_t));
   netplay->packet_buffer[2] = htonl(netplay->self_frame_count);
   netplay->packet_buffer[3] = htonl(state[0]);
   netplay->packet_buffer[4] = htonl(state[1]);
   netplay->packet_buffer[5] = htonl(state[2]);

   if (!netplay->spectate.enabled) /* Spectate sends in its own way */
   {
      if (!socket_send_all_blocking(netplay->fd,
            netplay->packet_buffer, sizeof(netplay->packet_buffer), false))
      {
         hangup(netplay);
         return false;
      }
   }

   /* Record our own input locally so the frame can be (re)played. */
   memcpy(ptr->self_state, state, sizeof(state));
   ptr->have_local = true;
   return true;
}
/* Send one protocol command: an 8-byte header (command word, payload size,
 * both network byte order) followed by the optional payload. Returns false
 * on any send failure. */
static bool netplay_send_raw_cmd(netplay_t *netplay, uint32_t cmd,
      const void *data, size_t size)
{
   uint32_t header[2];
   header[0] = htonl(cmd);
   header[1] = htonl(size);

   if (!socket_send_all_blocking(netplay->fd, header, sizeof(header), false))
      return false;

   if (size > 0 &&
       !socket_send_all_blocking(netplay->fd, data, size, false))
      return false;

   return true;
}
/* Reject the peer's last command with a payload-less NAK.
 * Callers return this value directly: false (send failure) disconnects. */
static bool netplay_cmd_nak(netplay_t *netplay)
{
   return netplay_send_raw_cmd(netplay, NETPLAY_CMD_NAK, NULL, 0);
}
/* Report the CRC of a completed frame to the peer so both sides can
 * detect desync. Payload is { frame number, crc }, network byte order. */
bool netplay_cmd_crc(netplay_t *netplay, struct delta_frame *delta)
{
   uint32_t payload[2];

   payload[0] = htonl(delta->frame);
   payload[1] = htonl(delta->crc);

   return netplay_send_raw_cmd(netplay, NETPLAY_CMD_CRC,
         payload, sizeof(payload));
}
/* Ask the peer for a full savestate (used after a CRC mismatch).
 * Deduplicates: while a request is outstanding, further calls are no-ops
 * that report success. */
bool netplay_cmd_request_savestate(netplay_t *netplay)
{
   if (netplay->savestate_request_outstanding)
      return true;
   netplay->savestate_request_outstanding = true;
   return netplay_send_raw_cmd(netplay, NETPLAY_CMD_REQUEST_SAVESTATE, NULL, 0);
}
/**
 * netplay_get_cmd:
 * @netplay : pointer to netplay object
 *
 * Receive and dispatch one command from the peer. The wire format is an
 * 8-byte header (command word, payload size; network byte order) followed
 * by the command-specific payload.
 *
 * Returns: true to keep the connection alive, false to disconnect.
 * Malformed commands are answered with a NAK (whose send result is
 * propagated), so a NAK that fails to send also disconnects.
 **/
static bool netplay_get_cmd(netplay_t *netplay)
{
   uint32_t cmd;
   uint32_t flip_frame;
   uint32_t cmd_size;

   /* FIXME: This depends on delta_frame_ready */

   netplay->timeout_cnt = 0;

   if (!socket_receive_all_blocking(netplay->fd, &cmd, sizeof(cmd)))
      return false;

   cmd = ntohl(cmd);

   /* Fix: previously read sizeof(cmd) bytes into cmd_size — correct only
    * by coincidence of both being uint32_t. */
   if (!socket_receive_all_blocking(netplay->fd, &cmd_size, sizeof(cmd_size)))
      return false;

   cmd_size = ntohl(cmd_size);

   switch (cmd)
   {
      case NETPLAY_CMD_ACK:
         /* Why are we even bothering? */
         return true;

      case NETPLAY_CMD_NAK:
         /* Disconnect now! */
         return false;

      case NETPLAY_CMD_INPUT:
      {
         uint32_t buffer[WORDS_PER_FRAME];
         unsigned i;

         if (cmd_size != WORDS_PER_FRAME * sizeof(uint32_t))
         {
            RARCH_ERR("NETPLAY_CMD_INPUT received an unexpected payload size.\n");
            return netplay_cmd_nak(netplay);
         }

         if (!socket_receive_all_blocking(netplay->fd, buffer, sizeof(buffer)))
         {
            RARCH_ERR("Failed to receive NETPLAY_CMD_INPUT input.\n");
            return netplay_cmd_nak(netplay);
         }

         for (i = 0; i < WORDS_PER_FRAME; i++)
            buffer[i] = ntohl(buffer[i]);

         /* buffer[0] is the frame number this input belongs to; we only
          * accept exactly the next frame we expect. */
         if (buffer[0] < netplay->read_frame_count)
         {
            /* We already had this, so ignore the new transmission */
            return true;
         }
         else if (buffer[0] > netplay->read_frame_count)
         {
            /* Out of order = out of luck */
            return netplay_cmd_nak(netplay);
         }

         /* The data's good! */
         netplay->buffer[netplay->read_ptr].have_remote = true;
         memcpy(netplay->buffer[netplay->read_ptr].real_input_state,
               buffer + 1, sizeof(buffer) - sizeof(uint32_t));
         netplay->read_ptr = NEXT_PTR(netplay->read_ptr);
         netplay->read_frame_count++;
         return true;
      }

      case NETPLAY_CMD_FLIP_PLAYERS:
         if (cmd_size != sizeof(uint32_t))
         {
            RARCH_ERR("CMD_FLIP_PLAYERS received an unexpected command size.\n");
            return netplay_cmd_nak(netplay);
         }

         if (!socket_receive_all_blocking(
                  netplay->fd, &flip_frame, sizeof(flip_frame)))
         {
            RARCH_ERR("Failed to receive CMD_FLIP_PLAYERS argument.\n");
            return netplay_cmd_nak(netplay);
         }

         flip_frame = ntohl(flip_frame);

         if (flip_frame < netplay->read_frame_count)
         {
            RARCH_ERR("Host asked us to flip users in the past. Not possible ...\n");
            return netplay_cmd_nak(netplay);
         }

         netplay->flip ^= true;
         netplay->flip_frame = flip_frame;

         /* Force a rewind to assure the flip happens: This just prevents us
          * from skipping other past the flip because our prediction was
          * correct */
         if (flip_frame < netplay->self_frame_count)
            netplay->force_rewind = true;

         RARCH_LOG("Netplay users are flipped.\n");
         runloop_msg_queue_push("Netplay users are flipped.", 1, 180, false);

         return true;

      case NETPLAY_CMD_SPECTATE:
         RARCH_ERR("NETPLAY_CMD_SPECTATE unimplemented.\n");
         return netplay_cmd_nak(netplay);

      case NETPLAY_CMD_DISCONNECT:
         hangup(netplay);
         return true;

      case NETPLAY_CMD_CRC:
      {
         /* Peer reports { frame, crc }; find that frame in our ring buffer
          * and either verify it now or remember the CRC for later. */
         uint32_t buffer[2];
         size_t tmp_ptr = netplay->self_ptr;
         bool found = false;

         if (cmd_size != sizeof(buffer))
         {
            RARCH_ERR("NETPLAY_CMD_CRC received unexpected payload size.\n");
            return netplay_cmd_nak(netplay);
         }

         if (!socket_receive_all_blocking(netplay->fd, buffer, sizeof(buffer)))
         {
            RARCH_ERR("NETPLAY_CMD_CRC failed to receive payload.\n");
            return netplay_cmd_nak(netplay);
         }

         buffer[0] = ntohl(buffer[0]);
         buffer[1] = ntohl(buffer[1]);

         /* Received a CRC for some frame. If we still have it, check if it
          * matched. This approach could be improved with some quick modular
          * arithmetic. */
         do
         {
            if (  netplay->buffer[tmp_ptr].used
               && netplay->buffer[tmp_ptr].frame == buffer[0])
            {
               found = true;
               break;
            }
            tmp_ptr = PREV_PTR(tmp_ptr);
         } while (tmp_ptr != netplay->self_ptr);

         if (!found)
         {
            /* Oh well, we got rid of it! */
            return true;
         }

         if (buffer[0] <= netplay->other_frame_count)
         {
            /* We've already replayed up to this frame, so we can check it
             * directly */
            uint32_t local_crc = netplay_delta_frame_crc(
                  netplay, &netplay->buffer[tmp_ptr]);

            if (buffer[1] != local_crc)
            {
               /* Problem! */
               netplay_cmd_request_savestate(netplay);
            }
         }
         else
         {
            /* We'll have to check it when we catch up */
            netplay->buffer[tmp_ptr].crc = buffer[1];
         }

         return true;
      }

      case NETPLAY_CMD_REQUEST_SAVESTATE:
         /* Delay until next frame so we don't send the savestate after the
          * input */
         netplay->force_send_savestate = true;
         return true;

      case NETPLAY_CMD_LOAD_SAVESTATE:
      {
         uint32_t frame;

         /* There is a subtlty in whether the load comes before or after the
          * current frame:
          *
          * If it comes before the current frame, then we need to force a
          * rewind to that point.
          *
          * If it comes after the current frame, we need to jump ahead, then
          * (strangely) force a rewind to the frame we're already on, so it
          * gets loaded. This is just to avoid having reloading implemented in
          * too many places. */
         if (cmd_size > netplay->state_size + sizeof(uint32_t))
         {
            RARCH_ERR("CMD_LOAD_SAVESTATE received an unexpected save state size.\n");
            return netplay_cmd_nak(netplay);
         }

         if (!socket_receive_all_blocking(netplay->fd, &frame, sizeof(frame)))
         {
            RARCH_ERR("CMD_LOAD_SAVESTATE failed to receive savestate frame.\n");
            return netplay_cmd_nak(netplay);
         }
         frame = ntohl(frame);

         if (frame != netplay->read_frame_count)
         {
            RARCH_ERR("CMD_LOAD_SAVESTATE loading a state out of order!\n");
            return netplay_cmd_nak(netplay);
         }

         if (!socket_receive_all_blocking(netplay->fd,
                  netplay->buffer[netplay->read_ptr].state,
                  cmd_size - sizeof(uint32_t)))
         {
            RARCH_ERR("CMD_LOAD_SAVESTATE failed to receive savestate.\n");
            return netplay_cmd_nak(netplay);
         }

         /* Skip ahead if it's past where we are */
         if (frame > netplay->self_frame_count)
         {
            /* This is squirrely: We need to assure that when we advance the
             * frame in post_frame, THEN we're referring to the frame to
             * load into. If we refer directly to read_ptr, then we'll end
             * up never reading the input for read_frame_count itself, which
             * will make the other side unhappy. */
            netplay->self_ptr = PREV_PTR(netplay->read_ptr);
            netplay->self_frame_count = frame - 1;
         }

         /* And force rewind to it */
         netplay->force_rewind = true;
         netplay->savestate_request_outstanding = false;
         netplay->other_ptr = netplay->read_ptr;
         netplay->other_frame_count = frame;
         return true;
      }

      case NETPLAY_CMD_PAUSE:
         netplay->remote_paused = true;
         return true;

      case NETPLAY_CMD_RESUME:
         netplay->remote_paused = false;
         return true;

      default:
         break;
   }

   RARCH_ERR("Unknown netplay command received.\n");
   return netplay_cmd_nak(netplay);
}
/* Pump the network socket for incoming commands.
 * @block: when true, loop until we have input for the current frame (or
 *         time out after MAX_RETRIES waits of RETRY_MS each).
 * Returns 0 on success, -1 on socket error / timeout (caller hangs up). */
static int poll_input(netplay_t *netplay, bool block)
{
   bool had_input = false;
   int max_fd = netplay->fd + 1;
   struct timeval tv = {0};
   tv.tv_sec = 0;
   /* Blocking mode waits up to RETRY_MS per iteration; non-blocking polls. */
   tv.tv_usec = block ? (RETRY_MS * 1000) : 0;
   do
   {
      fd_set fds;
      /* select() does not take pointer to const struct timeval.
       * Technically possible for select() to modify tmp_tv, so
       * we go paranoia mode. */
      struct timeval tmp_tv = tv;
      had_input = false;
      netplay->timeout_cnt++;
      FD_ZERO(&fds);
      FD_SET(netplay->fd, &fds);
      if (socket_select(max_fd, &fds, NULL, NULL, &tmp_tv) < 0)
         return -1;
      if (FD_ISSET(netplay->fd, &fds))
      {
         /* If we're not ready for input, wait until we are.
          * Could fill the TCP buffer, stalling the other side. */
         if (netplay_delta_frame_ready(netplay,
               &netplay->buffer[netplay->read_ptr],
               netplay->read_frame_count))
         {
            had_input = true;
            if (!netplay_get_cmd(netplay))
               return -1;
         }
      }
      /* If we were blocked for input, pass if we have this frame's input */
      if (block && netplay->read_frame_count > netplay->self_frame_count)
         break;
      /* If we had input, we might have more */
      if (had_input || !block)
         continue;
      RARCH_LOG("Network is stalling at frame %u, count %u of %d ...\n",
            netplay->self_frame_count, netplay->timeout_cnt, MAX_RETRIES);
      /* Give up after MAX_RETRIES empty waits — unless the peer told us
       * it is paused, in which case stalling is expected. */
      if (netplay->timeout_cnt >= MAX_RETRIES && !netplay->remote_paused)
         return -1;
   } while (had_input || block);
   return 0;
}
/**
 * netplay_simulate_input:
 * @netplay : pointer to netplay object
 * @sim_ptr : frame index for which to simulate input
 *
 * "Simulate" input by assuming it hasn't changed since the last read input.
 */
void netplay_simulate_input(netplay_t *netplay, uint32_t sim_ptr)
{
   /* The most recent real input we received lives one slot behind
    * read_ptr; copy it in as this frame's guess. */
   size_t last_read          = PREV_PTR(netplay->read_ptr);
   struct delta_frame *src   = &netplay->buffer[last_read];
   struct delta_frame *dst   = &netplay->buffer[sim_ptr];

   memcpy(dst->simulated_input_state, src->real_input_state,
         sizeof(src->real_input_state));
}
2015-01-09 17:34:00 +00:00
/**
 * netplay_poll:
 *
 * Polls network to see if we have anything new. If our
 * network buffer is full, we simply have to block
 * for new input data.
 *
 * Operates on the netplay_data singleton; callers (input_poll_net) have
 * already verified it is non-NULL via netplay_can_poll.
 *
 * Returns: true (1) if successful, otherwise false (0).
 **/
static bool netplay_poll(void)
{
   int res;

   if (!netplay_data->has_connection)
      return false;

   /* One poll per frame: re-armed elsewhere each frame. */
   netplay_data->can_poll = false;

   get_self_input_state(netplay_data);

   /* No network side in spectate mode */
   if (netplay_is_server(netplay_data) && netplay_data->spectate.enabled)
      return true;

   /* WORKAROUND: The only reason poll_input is ignored in the first frame is
    * that some cores can't report state size until after the first frame. */
   if (netplay_data->self_frame_count > 0 || netplay_data->stall || netplay_data->spectate.enabled)
   {
      /* Read Netplay input, block if we're configured to stall for input every
       * frame */
      res = poll_input(netplay_data,
            (netplay_data->stall_frames == 0)
            && (netplay_data->read_frame_count <= netplay_data->self_frame_count));
      if (res == -1)
      {
         hangup(netplay_data);
         return false;
      }
   }

   /* Simulate the input if we don't have real input */
   if (!netplay_data->buffer[netplay_data->self_ptr].have_remote)
      netplay_simulate_input(netplay_data, netplay_data->self_ptr);

   /* Consider stalling */
   switch (netplay_data->stall)
   {
      case RARCH_NETPLAY_STALL_RUNNING_FAST:
         /* Peer caught up: stop stalling. */
         if (netplay_data->read_frame_count >= netplay_data->self_frame_count)
            netplay_data->stall = RARCH_NETPLAY_STALL_NONE;
         break;
      default: /* not stalling */
         /* We are stall_frames ahead of the peer's input: start stalling
          * and timestamp the start for the disconnect timer below. */
         if (netplay_data->read_frame_count + netplay_data->stall_frames
               <= netplay_data->self_frame_count)
         {
            netplay_data->stall = RARCH_NETPLAY_STALL_RUNNING_FAST;
            netplay_data->stall_time = cpu_features_get_time_usec();
         }
   }

   /* If we're stalling, consider disconnection */
   if (netplay_data->stall)
   {
      retro_time_t now = cpu_features_get_time_usec();

      /* Don't stall out while they're paused */
      if (netplay_data->remote_paused)
         netplay_data->stall_time = now;
      else if (now - netplay_data->stall_time >= MAX_STALL_TIME_USEC)
      {
         /* Stalled out! */
         hangup(netplay_data);
         return false;
      }
   }

   return true;
}
void input_poll_net(void)
{
2016-09-29 18:11:46 +00:00
if (!netplay_should_skip(netplay_data) && netplay_can_poll(netplay_data))
netplay_poll();
}
/* libretro video shim: drop frames rendered during fast-forward replay. */
void video_frame_net(const void *data, unsigned width,
      unsigned height, size_t pitch)
{
   if (netplay_should_skip(netplay_data))
      return;
   netplay_data->cbs.frame_cb(data, width, height, pitch);
}
void audio_sample_net(int16_t left, int16_t right)
{
2016-09-29 18:11:46 +00:00
if (!netplay_should_skip(netplay_data) && !netplay_data->stall)
netplay_data->cbs.sample_cb(left, right);
}
size_t audio_sample_batch_net(const int16_t *data, size_t frames)
{
2016-09-29 18:11:46 +00:00
if (!netplay_should_skip(netplay_data) && !netplay_data->stall)
return netplay_data->cbs.sample_batch_cb(data, frames);
return frames;
}
2015-01-09 17:34:00 +00:00
/**
* netplay_is_alive:
* @netplay : pointer to netplay object
*
* Checks if input port/index is controlled by netplay or not.
*
* Returns: true (1) if alive, otherwise false (0).
**/
2016-09-29 18:11:46 +00:00
static bool netplay_is_alive(void)
{
2016-09-29 18:11:46 +00:00
if (!netplay_data)
return false;
2016-09-29 18:11:46 +00:00
return netplay_data->has_connection;
}
/* Map a physical input port to the logical player, taking a pending
 * player flip into account: before flip_frame is reached the mapping
 * is inverted relative to the post-flip mapping. A flip_frame of 0
 * means no flip has ever been requested. */
static bool netplay_flip_port(netplay_t *netplay, bool port)
{
   size_t frame;

   if (netplay->flip_frame == 0)
      return port;

   frame = netplay->is_replay
      ? netplay->replay_frame_count
      : netplay->self_frame_count;

   return port ^ netplay->flip ^ (frame < netplay->flip_frame);
}
2016-02-03 16:00:43 +00:00
/**
 * netplay_input_state:
 * @netplay : pointer to netplay object
 * @port    : physical port being queried (flipped internally)
 * @device  : libretro device type
 * @idx     : input index
 * @id      : input id
 *
 * Fetch one input value from the current delta frame: our own recorded
 * state for the local port, and for the peer's port either their real
 * input or, if it hasn't arrived yet, the simulated (predicted) input.
 **/
static int16_t netplay_input_state(netplay_t *netplay,
      bool port, unsigned device,
      unsigned idx, unsigned id)
{
   /* During replay, read from the replay cursor instead of the
    * current (self) cursor. */
   size_t ptr = netplay->is_replay ?
      netplay->replay_ptr : netplay->self_ptr;

   const uint32_t *curr_input_state = netplay->buffer[ptr].self_state;

   /* Peer-controlled port: prefer real remote input when present. */
   if (netplay->port == (netplay_flip_port(netplay, port) ? 1 : 0))
   {
      if (netplay->buffer[ptr].have_remote)
      {
         /* Record that the real data was consumed for this frame. */
         netplay->buffer[ptr].used_real = true;
         curr_input_state = netplay->buffer[ptr].real_input_state;
      }
      else
      {
         curr_input_state = netplay->buffer[ptr].simulated_input_state;
      }
   }

   switch (device)
   {
      case RETRO_DEVICE_JOYPAD:
         /* Word 0 holds the joypad buttons as a bitmap indexed by id. */
         return ((1 << id) & curr_input_state[0]) ? 1 : 0;
      case RETRO_DEVICE_ANALOG:
      {
         /* Words 1+idx pack two 16-bit axes; id selects the half. */
         uint32_t state = curr_input_state[1 + idx];
         return (int16_t)(uint16_t)(state >> (id * 16));
      }
      default:
         return 0;
   }
}
int16_t input_state_net(unsigned port, unsigned device,
unsigned idx, unsigned id)
{
2016-09-29 18:11:46 +00:00
if (netplay_is_alive())
{
/* Only two players for now. */
if (port > 1)
return 0;
2016-09-29 18:11:46 +00:00
return netplay_input_state(netplay_data, port, device, idx, id);
}
2016-09-29 18:11:46 +00:00
return netplay_data->cbs.state_cb(port, device, idx, id);
}
#ifndef HAVE_SOCKET_LEGACY
/* Custom inet_ntop. Win32 doesn't seem to support this ... */
2016-05-12 10:03:43 +00:00
void netplay_log_connection(const struct sockaddr_storage *their_addr,
unsigned slot, const char *nick)
{
union
{
const struct sockaddr_storage *storage;
const struct sockaddr_in *v4;
const struct sockaddr_in6 *v6;
} u;
2015-06-12 15:00:37 +00:00
const char *str = NULL;
char buf_v4[INET_ADDRSTRLEN] = {0};
char buf_v6[INET6_ADDRSTRLEN] = {0};
2015-01-23 08:00:53 +00:00
u.storage = their_addr;
2016-05-09 04:46:08 +00:00
switch (their_addr->ss_family)
{
2016-05-09 04:46:08 +00:00
case AF_INET:
{
struct sockaddr_in in;
2015-01-23 08:00:53 +00:00
2016-05-09 04:46:08 +00:00
memset(&in, 0, sizeof(in));
2015-01-23 08:00:53 +00:00
2016-05-09 04:46:08 +00:00
str = buf_v4;
in.sin_family = AF_INET;
memcpy(&in.sin_addr, &u.v4->sin_addr, sizeof(struct in_addr));
2016-05-09 04:46:08 +00:00
getnameinfo((struct sockaddr*)&in, sizeof(struct sockaddr_in),
buf_v4, sizeof(buf_v4),
NULL, 0, NI_NUMERICHOST);
}
break;
case AF_INET6:
{
struct sockaddr_in6 in;
memset(&in, 0, sizeof(in));
2015-01-23 08:00:53 +00:00
2016-05-09 04:46:08 +00:00
str = buf_v6;
in.sin6_family = AF_INET6;
memcpy(&in.sin6_addr, &u.v6->sin6_addr, sizeof(struct in6_addr));
2016-05-09 04:46:08 +00:00
getnameinfo((struct sockaddr*)&in, sizeof(struct sockaddr_in6),
buf_v6, sizeof(buf_v6), NULL, 0, NI_NUMERICHOST);
}
break;
default:
break;
}
if (str)
{
2015-06-12 15:00:37 +00:00
char msg[512] = {0};
2015-06-13 01:06:11 +00:00
snprintf(msg, sizeof(msg), "Got connection from: \"%s (%s)\" (#%u)",
nick, str, slot);
2015-12-07 14:32:14 +00:00
runloop_msg_queue_push(msg, 1, 180, false);
RARCH_LOG("%s\n", msg);
}
}
#endif
/* Allocate the zero-initialized delta-frame ring buffer, sized so the
 * peers can drift up to 'frames' frames in either direction around the
 * current (self) frame. */
static bool netplay_init_buffers(netplay_t *netplay, unsigned frames)
{
   if (!netplay)
      return false;

   /* * 2 + 1 because:
    * Self sits in the middle,
    * Other is allowed to drift as much as 'frames' frames behind
    * Read is allowed to drift as much as 'frames' frames ahead */
   netplay->buffer_size = frames * 2 + 1;
   netplay->buffer      = (struct delta_frame*)calloc(
         netplay->buffer_size, sizeof(*netplay->buffer));

   /* WORKAROUND: The code to initialize state buffers really should be
    * here. It's been moved to work around cores that can't
    * core_serialize_size early. */
   return netplay->buffer != NULL;
}
2015-01-09 17:34:00 +00:00
/**
* netplay_new:
* @server : IP address of server.
* @port : Port of server.
* @frames : Amount of lag frames.
* @check_frames : Frequency with which to check CRCs.
2015-01-09 17:34:00 +00:00
* @cb : Libretro callbacks.
* @spectate : If true, enable spectator mode.
* @nick : Nickname of user.
*
* Creates a new netplay handle. A NULL host means we're
* hosting (user 1).
*
* Returns: new netplay handle.
**/
netplay_t *netplay_new(const char *server, uint16_t port,
unsigned frames, unsigned check_frames, const struct retro_callbacks *cb,
bool spectate, const char *nick)
{
2016-09-15 19:26:10 +00:00
netplay_t *netplay = (netplay_t*)calloc(1, sizeof(*netplay));
if (!netplay)
return NULL;
netplay->fd = -1;
netplay->tcp_port = port;
netplay->cbs = *cb;
netplay->port = server ? 0 : 1;
netplay->spectate.enabled = spectate;
2015-12-23 20:25:28 +00:00
netplay->is_server = server == NULL;
netplay->savestates_work = true;
strlcpy(netplay->nick, nick, sizeof(netplay->nick));
netplay->stall_frames = frames;
netplay->check_frames = check_frames;
if (!netplay_init_buffers(netplay, frames))
{
free(netplay);
return NULL;
}
2015-12-23 20:25:28 +00:00
if(spectate)
netplay->net_cbs = netplay_get_cbs_spectate();
else
netplay->net_cbs = netplay_get_cbs_net();
if (!init_socket(netplay, server, port))
{
free(netplay);
return NULL;
}
2015-12-24 18:23:46 +00:00
if(!netplay_info_cb(netplay, frames))
2015-12-23 20:25:28 +00:00
goto error;
2012-01-21 13:00:11 +00:00
return netplay;
2012-01-21 13:00:11 +00:00
error:
if (netplay->fd >= 0)
socket_close(netplay->fd);
2012-01-21 13:00:11 +00:00
2015-04-11 00:49:30 +00:00
free(netplay);
return NULL;
2012-01-21 13:00:11 +00:00
}
2015-01-09 17:34:00 +00:00
/**
 * netplay_command:
 * @netplay     : pointer to netplay object
 * @cmd         : command to send
 * @data        : data to send as argument
 * @sz          : size of data
 * @flags       : flags of CMD_OPT_*
 * @command_str : name of action
 * @success_msg : message to display upon success
 *
 * Sends a single netplay command and waits for response.
 *
 * Returns: true on success, false if the command is not permitted in
 * the current mode or transmission failed.
 */
bool netplay_command(netplay_t* netplay, enum netplay_cmd cmd,
                     void* data, size_t sz,
                     uint32_t flags,
                     const char* command_str,
                     const char* success_msg)
{
   char m[256];
   const char* msg       = NULL;
   bool allowed_spectate = !!(flags & CMD_OPT_ALLOWED_IN_SPECTATE_MODE);
   bool host_only        = !!(flags & CMD_OPT_HOST_ONLY);

   retro_assert(netplay);

   if (netplay->spectate.enabled && !allowed_spectate)
   {
      msg = "Cannot %s in spectate mode.";
      goto error;
   }

   if (host_only && netplay->port == 0)
   {
      msg = "Cannot %s as a client.";
      goto error;
   }

   if (!netplay_send_raw_cmd(netplay, cmd, data, sz))
      goto error;

   runloop_msg_queue_push(success_msg, 1, 180, false);
   return true;

error:
   /* BUGFIX: only format and report when a message was set. The
    * original logged and queued the uninitialized buffer 'm' when
    * netplay_send_raw_cmd() failed (msg == NULL) — undefined
    * behavior. */
   if (msg)
   {
      snprintf(m, sizeof(m), msg, command_str);
      RARCH_WARN("%s\n", m);
      runloop_msg_queue_push(m, 1, 180, false);
   }
   return false;
}
2012-01-21 13:24:38 +00:00
2015-11-17 23:18:04 +00:00
/**
* netplay_flip_users:
* @netplay : pointer to netplay object
*
* On regular netplay, flip who controls user 1 and 2.
**/
2016-04-09 01:16:11 +00:00
static void netplay_flip_users(netplay_t *netplay)
2015-11-17 23:18:04 +00:00
{
/* Must be in the future because we may have already sent this frame's data */
2016-09-17 16:21:29 +00:00
uint32_t flip_frame = netplay->self_frame_count + 1;
2015-11-17 23:35:53 +00:00
uint32_t flip_frame_net = htonl(flip_frame);
2016-09-17 16:21:29 +00:00
bool command = netplay_command(
2015-11-17 23:18:04 +00:00
netplay, NETPLAY_CMD_FLIP_PLAYERS,
&flip_frame_net, sizeof flip_frame_net,
CMD_OPT_HOST_ONLY | CMD_OPT_REQUIRE_SYNC,
"flip users", "Successfully flipped users.\n");
2015-11-17 23:18:04 +00:00
if(command)
{
2015-11-17 23:32:45 +00:00
netplay->flip ^= true;
2015-11-17 23:37:43 +00:00
netplay->flip_frame = flip_frame;
2012-01-21 13:00:11 +00:00
}
}
2015-01-09 17:34:00 +00:00
/**
 * netplay_free:
 * @netplay : pointer to netplay object
 *
 * Frees netplay handle and every resource it owns (socket, spectator
 * sockets/input, delta-frame buffers, resolved addresses). Safe to
 * call with NULL.
 **/
void netplay_free(netplay_t *netplay)
{
   unsigned i;

   /* Robustness: tolerate a NULL handle. */
   if (!netplay)
      return;

   if (netplay->fd >= 0)
      socket_close(netplay->fd);

   if (netplay->spectate.enabled)
   {
      for (i = 0; i < MAX_SPECTATORS; i++)
         if (netplay->spectate.fds[i] >= 0)
            socket_close(netplay->spectate.fds[i]);

      free(netplay->spectate.input);
   }
   else
   {
      /* free(NULL) is a no-op, so the per-entry guard the original
       * carried was redundant. */
      for (i = 0; i < netplay->buffer_size; i++)
         free(netplay->buffer[i].state);

      free(netplay->buffer);
   }

   if (netplay->addr)
      freeaddrinfo_retro(netplay->addr);

   free(netplay);
}
2015-01-09 17:34:00 +00:00
/**
 * netplay_pre_frame:
 * @netplay : pointer to netplay object
 *
 * Pre-frame for Netplay.
 * Call this before running retro_run().
 *
 * Returns: true (1) if the frontend is cleared to emulate the frame,
 * false (0) if we're stalled or paused.
 **/
bool netplay_pre_frame(netplay_t *netplay)
{
   retro_assert(netplay && netplay->net_cbs->pre_frame);

   /* FIXME: This is an ugly way to learn we're not paused anymore */
   if (netplay->local_paused)
      netplay_frontend_paused(netplay, false);

   if (!netplay->net_cbs->pre_frame(netplay))
      return false;

   /* Without a connection there is nothing to wait on. */
   if (!netplay->has_connection)
      return true;

   /* Connected: only emulate when neither stalled nor remote-paused. */
   return !netplay->stall && !netplay->remote_paused;
}
2015-01-09 17:34:00 +00:00
/**
 * netplay_post_frame:
 * @netplay : pointer to netplay object
 *
 * Post-frame for Netplay.
 * We check if we have new input and replay from recorded input.
 * Call this after running retro_run().
 **/
void netplay_post_frame(netplay_t *netplay)
{
   /* Delegate to the mode-specific (net vs. spectate) hook selected in
    * netplay_new(). */
   retro_assert(netplay && netplay->net_cbs->post_frame);
   netplay->net_cbs->post_frame(netplay);
}
/**
 * netplay_frontend_paused
 * @netplay : pointer to netplay object
 * @paused  : true if frontend is paused
 *
 * Inform Netplay of the frontend's pause state (paused or otherwise)
 **/
void netplay_frontend_paused(netplay_t *netplay, bool paused)
{
   /* Nothing to do if we already knew this */
   if (netplay->local_paused == paused)
      return;

   netplay->local_paused = paused;

   /* Only an active, non-spectate session has a peer to notify. */
   if (!netplay->has_connection || netplay->spectate.enabled)
      return;

   netplay_send_raw_cmd(netplay,
         paused ? NETPLAY_CMD_PAUSE : NETPLAY_CMD_RESUME, NULL, 0);
}
/**
 * netplay_load_savestate
 * @netplay     : pointer to netplay object
 * @serial_info : the savestate being loaded, NULL means "load it yourself"
 * @save        : whether to save the provided serial_info into the frame
 *                buffer
 *
 * Inform Netplay of a savestate load and send it to the other side
 **/
void netplay_load_savestate(netplay_t *netplay,
      retro_ctx_serialize_info_t *serial_info, bool save)
{
   uint32_t header[3];
   retro_ctx_serialize_info_t tmp_serial_info;

   if (!netplay->has_connection)
      return;

   /* Record it in our own buffer */
   if (save || !serial_info)
   {
      if (netplay_delta_frame_ready(netplay,
            &netplay->buffer[netplay->self_ptr], netplay->self_frame_count))
      {
         if (!serial_info)
         {
            /* Serialize the current core state into this delta frame. */
            tmp_serial_info.size = netplay->state_size;
            tmp_serial_info.data = netplay->buffer[netplay->self_ptr].state;
            if (!core_serialize(&tmp_serial_info))
               return;
            tmp_serial_info.data_const = tmp_serial_info.data;
            serial_info = &tmp_serial_info;
         }
         else
         {
            /* Copy the caller-provided state in, if it fits. */
            if (serial_info->size <= netplay->state_size)
            {
               memcpy(netplay->buffer[netplay->self_ptr].state,
                     serial_info->data_const, serial_info->size);
            }
         }
      }
   }

   /* BUGFIX: if no state was provided and the delta frame wasn't ready,
    * serial_info is still NULL here; the original dereferenced it when
    * building the header below. */
   if (!serial_info)
      return;

   /* We need to ignore any intervening data from the other side, and
    * never rewind past this */
   if (netplay->read_frame_count < netplay->self_frame_count)
   {
      netplay->read_ptr         = netplay->self_ptr;
      netplay->read_frame_count = netplay->self_frame_count;
   }
   if (netplay->other_frame_count < netplay->self_frame_count)
   {
      netplay->other_ptr         = netplay->self_ptr;
      netplay->other_frame_count = netplay->self_frame_count;
   }

   /* And send it to the peer (FIXME: this is an ugly way to do this) */
   header[0] = htonl(NETPLAY_CMD_LOAD_SAVESTATE);
   header[1] = htonl(serial_info->size + sizeof(uint32_t));
   header[2] = htonl(netplay->self_frame_count);

   if (!socket_send_all_blocking(netplay->fd, header, sizeof(header), false))
   {
      hangup(netplay);
      return;
   }

   if (!socket_send_all_blocking(netplay->fd,
         serial_info->data_const, serial_info->size, false))
   {
      hangup(netplay);
      return;
   }
}
/**
 * netplay_disconnect
 * @netplay : pointer to netplay object
 *
 * Disconnect netplay.
 *
 * Returns: true (1) if successful. At present, cannot fail.
 **/
bool netplay_disconnect(netplay_t *netplay)
{
   if (netplay && netplay->has_connection)
      hangup(netplay);
   return true;
}
2015-04-11 11:31:33 +00:00
void deinit_netplay(void)
{
2016-09-29 18:11:46 +00:00
if (netplay_data)
netplay_free(netplay_data);
2015-12-05 15:41:00 +00:00
netplay_data = NULL;
2015-04-11 11:31:33 +00:00
}
2015-04-11 11:29:40 +00:00
/**
 * init_netplay:
 *
 * Initializes netplay.
 *
 * If netplay is already initialized, will return false (0).
 *
 * Returns: true (1) if successful, otherwise false (0).
 **/
bool init_netplay(void)
{
   struct retro_callbacks cbs = {0};
   settings_t *settings = config_get_ptr();
   global_t *global     = global_get_ptr();

   if (!global->netplay.enable)
      return false;

   /* Refuse to start when BSV movie playback is active (the message
    * string indicates the two are incompatible). */
   if (bsv_movie_ctl(BSV_MOVIE_CTL_START_PLAYBACK, NULL))
   {
      RARCH_WARN("%s\n",
            msg_hash_to_str(MSG_NETPLAY_FAILED_MOVIE_PLAYBACK_HAS_STARTED));
      return false;
   }

   /* Capture the core's own callbacks so netplay can wrap/forward them. */
   core_set_default_callbacks(&cbs);

   /* A configured server path means we're the connecting client;
    * otherwise we host and wait. */
   if (!path_is_empty(RARCH_PATH_SERVER))
   {
      RARCH_LOG("Connecting to netplay host...\n");
      global->netplay.is_client = true;
   }
   else
   {
      RARCH_LOG("Waiting for client...\n");
      runloop_msg_queue_push(
            "Waiting for client...",
            0, 180, false);
   }

   /* Server string NULL means "host"; port 0 falls back to the default. */
   netplay_data = (netplay_t*)netplay_new(
         global->netplay.is_client ? path_get(RARCH_PATH_SERVER) : NULL,
         global->netplay.port ? global->netplay.port : RARCH_DEFAULT_PORT,
         global->netplay.sync_frames,
         global->netplay.check_frames,
         &cbs,
         global->netplay.is_spectate,
         settings->username);

   if (netplay_data)
      return true;

   /* Initialization failed: report it and revert the client flag. */
   global->netplay.is_client = false;
   RARCH_WARN("%s\n", msg_hash_to_str(MSG_NETPLAY_FAILED));

   runloop_msg_queue_push(
         msg_hash_to_str(MSG_NETPLAY_FAILED),
         0, 180, false);
   return false;
}
2015-12-05 15:24:31 +00:00
/**
 * netplay_driver_ctl
 * @state : control operation to perform
 * @data  : operation-specific argument (bool* for FLIP_PLAYERS and
 *          FULLSCREEN_TOGGLE, retro_ctx_serialize_info_t* for
 *          LOAD_SAVESTATE; otherwise unused)
 *
 * Frontend-facing dispatcher for netplay control operations.
 *
 * Returns: operation-specific result. When netplay data is not
 * initialized, IS_DATA_INITED returns false and every other state
 * trivially returns true.
 **/
bool netplay_driver_ctl(enum rarch_netplay_ctl_state state, void *data)
{
   /* No active instance: only the "is inited" query is meaningful. */
   if (!netplay_data)
   {
      if (state == RARCH_NETPLAY_CTL_IS_DATA_INITED)
         return false;
      else
         return true;
   }

   switch (state)
   {
      case RARCH_NETPLAY_CTL_IS_DATA_INITED:
         return true;
      case RARCH_NETPLAY_CTL_POST_FRAME:
         netplay_post_frame(netplay_data);
         break;
      case RARCH_NETPLAY_CTL_PRE_FRAME:
         /* Propagates the "cleared to emulate this frame" result. */
         return netplay_pre_frame(netplay_data);
      case RARCH_NETPLAY_CTL_FLIP_PLAYERS:
         {
            bool *state = (bool*)data;
            if (*state)
               netplay_flip_users(netplay_data);
         }
         break;
      case RARCH_NETPLAY_CTL_FULLSCREEN_TOGGLE:
         {
            bool *state = (bool*)data;
            if (*state)
               command_event(CMD_EVENT_FULLSCREEN_TOGGLE, NULL);
         }
         break;
      case RARCH_NETPLAY_CTL_PAUSE:
         netplay_frontend_paused(netplay_data, true);
         break;
      case RARCH_NETPLAY_CTL_UNPAUSE:
         netplay_frontend_paused(netplay_data, false);
         break;
      case RARCH_NETPLAY_CTL_LOAD_SAVESTATE:
         netplay_load_savestate(netplay_data, (retro_ctx_serialize_info_t*)data, true);
         break;
      case RARCH_NETPLAY_CTL_DISCONNECT:
         return netplay_disconnect(netplay_data);
      default:
      case RARCH_NETPLAY_CTL_NONE:
         break;
   }

   /* Unhandled/void operations report false. */
   return false;
}