RetroArch/network/netplay/netplay.c

/* RetroArch - A frontend for libretro.
* Copyright (C) 2010-2014 - Hans-Kristian Arntzen
* Copyright (C) 2011-2016 - Daniel De Matteis
* Copyright (C) 2016 - Gregor Richards
*
* RetroArch is free software: you can redistribute it and/or modify it under the terms
* of the GNU General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* RetroArch is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with RetroArch.
* If not, see <http://www.gnu.org/licenses/>.
*/
#if defined(_MSC_VER) && !defined(_XBOX)
#pragma comment(lib, "ws2_32")
#endif

#include <stdlib.h>
#include <string.h>

#include <compat/strl.h>
#include <retro_assert.h>
#include <net/net_compat.h>
#include <net/net_socket.h>
#include <features/features_cpu.h>
#include <retro_endianness.h>

#include "netplay_private.h"
#include "netplay_discovery.h"

#include "../../autosave.h"
#include "../../configuration.h"
#include "../../command.h"
#include "../../movie.h"
#include "../../runloop.h"

#define MAX_STALL_TIME_USEC (10*1000*1000)
#define MAX_RETRIES 16
#define RETRY_MS 500

#if defined(AF_INET6) && !defined(HAVE_SOCKET_LEGACY)
#define HAVE_INET6 1
#endif

/* Only used before init_netplay */
static bool netplay_enabled = false;
static bool netplay_is_client = false;

/* Used while Netplay is running */
static netplay_t *netplay_data = NULL;

/* Used to avoid recursive netplay calls */
static bool in_netplay = false;

#ifndef HAVE_SOCKET_LEGACY
static void announce_nat_traversal(netplay_t *netplay);
#endif
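/**
* init_tcp_connection:
* @res       : address to try
* @server    : true if we are the connecting (client) side
* @spectate  : whether spectator connections should be accepted
*
* Creates a TCP socket for one resolved address. The connecting side
* connects to the remote host; the hosting side binds the address and
* listens for an incoming connection.
*
* Returns: the new socket descriptor, or -1 on failure.
**/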
static int init_tcp_connection(const struct addrinfo *res,
bool server, bool spectate,
struct sockaddr *other_addr, socklen_t addr_size)
{
bool ret = true;
int fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
if (fd < 0)
{
ret = false;
goto end;
}
#if defined(IPPROTO_TCP) && defined(TCP_NODELAY)
{
int flag = 1;
if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,
#ifdef _WIN32
(const char*)
#else
(const void*)
#endif
&flag,
sizeof(int)) < 0)
RARCH_WARN("Could not set netplay TCP socket to nodelay. Expect jitter.\n");
}
#endif
#if defined(F_SETFD) && defined(FD_CLOEXEC)
/* Don't let any inherited processes keep open our port */
if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0)
RARCH_WARN("Cannot set Netplay port to close-on-exec. It may fail to reopen if the client disconnects.\n");
#endif
if (server)
{
if (socket_connect(fd, (void*)res, false) < 0)
{
ret = false;
goto end;
}
}
else
{
#if defined(HAVE_INET6) && defined(IPPROTO_IPV6) && defined(IPV6_V6ONLY)
/* Make sure we accept connections on both IPv6 and IPv4 */
int on = 0;
if (res->ai_family == AF_INET6)
{
if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&on, sizeof(on)) < 0)
RARCH_WARN("Failed to listen on both IPv6 and IPv4\n");
}
#endif
if ( !socket_bind(fd, (void*)res) ||
listen(fd, spectate ? MAX_SPECTATORS : 1) < 0)
{
ret = false;
goto end;
}
}
end:
if (!ret && fd >= 0)
{
socket_close(fd);
fd = -1;
}
return fd;
}
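/**
* init_tcp_socket:
* @netplay     : pointer to netplay object
* @direct_host : host discovered by LAN scanning, or NULL
* @server      : server address to connect to, or NULL if hosting
* @port        : TCP port to connect to or listen on
* @spectate    : whether spectator mode is enabled
*
* Resolves the requested address (or uses the discovered host's address
* directly) and tries each result until a connection or listening socket
* is established, storing the descriptor in netplay->fd.
*
* Returns: true (1) on success, otherwise false (0).
**/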
static bool init_tcp_socket(netplay_t *netplay, void *direct_host,
const char *server, uint16_t port, bool spectate)
{
char port_buf[16];
bool ret = false;
const struct addrinfo *tmp_info = NULL;
struct addrinfo *res = NULL;
struct addrinfo hints = {0};
port_buf[0] = '\0';
if (!direct_host)
{
#ifdef HAVE_INET6
/* Default to hosting on IPv6 and IPv4 */
if (!server)
hints.ai_family = AF_INET6;
#endif
hints.ai_socktype = SOCK_STREAM;
if (!server)
hints.ai_flags = AI_PASSIVE;
snprintf(port_buf, sizeof(port_buf), "%hu", (unsigned short)port);
if (getaddrinfo_retro(server, port_buf, &hints, &res) < 0)
{
#ifdef HAVE_INET6
if (!server)
{
/* Didn't work with IPv6, try wildcard */
hints.ai_family = 0;
if (getaddrinfo_retro(server, port_buf, &hints, &res) < 0)
return false;
}
else
#endif
return false;
}
if (!res)
return false;
}
else
{
/* I'll build my own addrinfo! With blackjack and hookers! */
struct netplay_host *host = (struct netplay_host *) direct_host;
hints.ai_family = host->addr.sa_family;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = 0;
hints.ai_addrlen = host->addrlen;
hints.ai_addr = &host->addr;
res = &hints;
}
/* If we're serving on IPv6, make sure we accept all connections, including
* IPv4 */
#ifdef HAVE_INET6
if (!direct_host && !server && res->ai_family == AF_INET6)
{
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) res->ai_addr;
sin6->sin6_addr = in6addr_any;
}
#endif
/* If "localhost" is used, it is important to check every possible
* address for IPv4/IPv6. */
tmp_info = res;
while (tmp_info)
{
int fd = init_tcp_connection(
tmp_info,
direct_host || server,
netplay->spectate.enabled,
(struct sockaddr*)&netplay->other_addr,
sizeof(netplay->other_addr));
if (fd >= 0)
{
ret = true;
netplay->fd = fd;
break;
}
tmp_info = tmp_info->ai_next;
}
if (res && !direct_host)
freeaddrinfo_retro(res);
if (!ret)
RARCH_ERR("Failed to set up netplay sockets.\n");
return ret;
}
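/**
* init_nat_traversal:
* @netplay : pointer to netplay object
*
* Tries to forward the Netplay TCP port on the local router. If the public
* address is known immediately it is announced; otherwise the reply is
* picked up later in netplay_pre_frame(). On failure, NAT traversal is
* simply disabled for this session.
**/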
static void init_nat_traversal(netplay_t *netplay)
{
natt_init();
if (!natt_new(&netplay->nat_traversal_state))
{
netplay->nat_traversal = false;
return;
}
natt_open_port_any(&netplay->nat_traversal_state, netplay->tcp_port, SOCKET_PROTOCOL_TCP);
#ifndef HAVE_SOCKET_LEGACY
if (!netplay->nat_traversal_state.request_outstanding)
announce_nat_traversal(netplay);
#endif
}
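/**
* init_socket:
* @netplay     : pointer to netplay object
* @direct_host : host discovered by LAN scanning, or NULL
* @server      : server address to connect to, or NULL if hosting
* @port        : TCP port to connect to or listen on
*
* Initializes the network stack, sets up the TCP socket, clears the packet
* buffers and, when hosting with NAT traversal enabled, tries to forward
* the port.
*
* Returns: true (1) on success, otherwise false (0).
**/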
static bool init_socket(netplay_t *netplay, void *direct_host, const char *server, uint16_t port)
{
if (!network_init())
return false;
if (!init_tcp_socket(netplay, direct_host, server, port, netplay->spectate.enabled))
return false;
netplay_clear_socket_buffer(&netplay->send_packet_buffer);
netplay_clear_socket_buffer(&netplay->recv_packet_buffer);
if (netplay->is_server && netplay->nat_traversal)
init_nat_traversal(netplay);
return true;
}
/**
* hangup:
*
* Disconnects an active Netplay connection due to an error
**/
static void hangup(netplay_t *netplay)
{
if (!netplay)
return;
if (netplay->mode == NETPLAY_CONNECTION_NONE)
return;
RARCH_WARN("Netplay has disconnected. Will continue without connection ...\n");
runloop_msg_queue_push("Netplay has disconnected. Will continue without connection.", 0, 480, false);
socket_close(netplay->fd);
netplay->fd = -1;
if (netplay->is_server && !netplay->spectate.enabled)
{
/* In server mode, make the socket listen for a new connection */
if (!init_socket(netplay, NULL, NULL, netplay->tcp_port))
{
RARCH_WARN("Failed to reinitialize Netplay.\n");
runloop_msg_queue_push("Failed to reinitialize Netplay.", 0, 480, false);
}
}
netplay->mode = NETPLAY_CONNECTION_NONE;
/* Reset things that will behave oddly if we get a new connection */
netplay->remote_paused = false;
netplay->flip = false;
netplay->flip_frame = 0;
netplay->stall = 0;
}
static bool netplay_info_cb(netplay_t* netplay, unsigned delay_frames)
{
return netplay->net_cbs->info_cb(netplay, delay_frames);
}
/**
* netplay_should_skip:
* @netplay : pointer to netplay object
*
* If we're fast-forward replaying to resync, check if we
* should actually show frame.
*
* Returns: true (1) if we should skip this frame, otherwise
* false (0).
**/
static bool netplay_should_skip(netplay_t *netplay)
{
if (!netplay)
return false;
return netplay->is_replay && (netplay->mode == NETPLAY_CONNECTION_PLAYING);
}
static bool netplay_can_poll(netplay_t *netplay)
{
if (!netplay)
return false;
return netplay->can_poll;
}
/* Send the current input state, either immediately after receiving it or after
* finishing the initial handshake */
static void send_input(netplay_t *netplay)
{
if (!netplay->spectate.enabled && /* Spectate sends in its own way */
netplay->mode == NETPLAY_CONNECTION_PLAYING)
{
netplay->input_packet_buffer[2] = htonl(netplay->self_frame_count);
if (!netplay_send(&netplay->send_packet_buffer, netplay->fd,
netplay->input_packet_buffer,
sizeof(netplay->input_packet_buffer)) ||
!netplay_send_flush(&netplay->send_packet_buffer, netplay->fd,
false))
{
hangup(netplay);
}
}
}
/**
* get_self_input_state:
* @netplay : pointer to netplay object
*
* Grab our own input state and send this over the network.
*
* Returns: true (1) if successful, otherwise false (0).
**/
static bool get_self_input_state(netplay_t *netplay)
{
uint32_t state[WORDS_PER_FRAME - 1] = {0, 0, 0};
struct delta_frame *ptr = &netplay->buffer[netplay->self_ptr];
if (!netplay_delta_frame_ready(netplay, ptr, netplay->self_frame_count))
return false;
if (ptr->have_local)
{
/* We've already read this frame! */
return true;
}
if (!input_driver_is_libretro_input_blocked() && netplay->self_frame_count > 0)
{
unsigned i;
settings_t *settings = config_get_ptr();
/* First frame we always give zero input since relying on
* input from first frame screws up when we use -F 0. */
retro_input_state_t cb = netplay->cbs.state_cb;
for (i = 0; i < RARCH_FIRST_CUSTOM_BIND; i++)
{
int16_t tmp = cb(settings->netplay.swap_input ?
0 : !netplay->port,
RETRO_DEVICE_JOYPAD, 0, i);
state[0] |= tmp ? 1 << i : 0;
}
for (i = 0; i < 2; i++)
{
int16_t tmp_x = cb(settings->netplay.swap_input ?
0 : !netplay->port,
RETRO_DEVICE_ANALOG, i, 0);
int16_t tmp_y = cb(settings->netplay.swap_input ?
0 : !netplay->port,
RETRO_DEVICE_ANALOG, i, 1);
state[1 + i] = (uint16_t)tmp_x | (((uint16_t)tmp_y) << 16);
}
}
/* Here we construct the payload format:
* frame {
* uint32_t frame_number
* uint32_t RETRO_DEVICE_JOYPAD state (top 16 bits zero)
* uint32_t ANALOG state[0]
* uint32_t ANALOG state[1]
* }
*
* payload {
* cmd (CMD_INPUT)
* cmd_size (4 words)
* frame
* }
*/
netplay->input_packet_buffer[0] = htonl(NETPLAY_CMD_INPUT);
netplay->input_packet_buffer[1] = htonl(WORDS_PER_FRAME * sizeof(uint32_t));
netplay->input_packet_buffer[2] = htonl(netplay->self_frame_count);
netplay->input_packet_buffer[3] = htonl(state[0]);
netplay->input_packet_buffer[4] = htonl(state[1]);
netplay->input_packet_buffer[5] = htonl(state[2]);
send_input(netplay);
memcpy(ptr->self_state, state, sizeof(state));
ptr->have_local = true;
return true;
}
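/**
* netplay_send_raw_cmd:
* @netplay : pointer to netplay object
* @cmd     : command to send
* @data    : payload to send with the command, or NULL
* @size    : size of the payload in bytes
*
* Queues a command header (command and payload size in network byte order)
* followed by the payload into the send buffer. The caller is responsible
* for flushing.
*
* Returns: true (1) on success, otherwise false (0).
**/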
static bool netplay_send_raw_cmd(netplay_t *netplay, uint32_t cmd,
const void *data, size_t size)
{
uint32_t cmdbuf[2];
cmdbuf[0] = htonl(cmd);
cmdbuf[1] = htonl(size);
if (!netplay_send(&netplay->send_packet_buffer, netplay->fd, cmdbuf,
sizeof(cmdbuf)))
return false;
if (size > 0)
if (!netplay_send(&netplay->send_packet_buffer, netplay->fd, data, size))
return false;
return true;
}
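/**
* netplay_cmd_nak:
* @netplay : pointer to netplay object
*
* Sends a NAK to the peer. Always returns false so callers can use it
* directly as their failure return value.
**/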
static bool netplay_cmd_nak(netplay_t *netplay)
{
netplay_send_raw_cmd(netplay, NETPLAY_CMD_NAK, NULL, 0);
return false;
}
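/**
* netplay_cmd_crc:
* @netplay : pointer to netplay object
* @delta   : delta frame whose frame number and CRC are sent
*
* Sends the CRC of a frame to the peer so both sides can detect
* desynchronization.
*
* Returns: true (1) on success, otherwise false (0).
**/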
bool netplay_cmd_crc(netplay_t *netplay, struct delta_frame *delta)
{
uint32_t payload[2];
payload[0] = htonl(delta->frame);
payload[1] = htonl(delta->crc);
return netplay_send_raw_cmd(netplay, NETPLAY_CMD_CRC, payload, sizeof(payload));
}
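/**
* netplay_cmd_request_savestate:
* @netplay : pointer to netplay object
*
* Asks the peer to send a savestate, unless a request is already
* outstanding.
*
* Returns: true (1) on success, otherwise false (0).
**/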
bool netplay_cmd_request_savestate(netplay_t *netplay)
{
if (netplay->savestate_request_outstanding)
return true;
netplay->savestate_request_outstanding = true;
return netplay_send_raw_cmd(netplay, NETPLAY_CMD_REQUEST_SAVESTATE, NULL, 0);
}
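/* Receive a block of data through the receive packet buffer, without
* blocking */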
static ssize_t netplay_recva(netplay_t *netplay, void *buf, size_t len)
{
return netplay_recv(&netplay->recv_packet_buffer, netplay->fd, buf, len, false);
}
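/**
* netplay_get_cmd:
* @netplay   : pointer to netplay object
* @had_input : set to true when a complete command has been processed
*
* Reads and handles one command from the peer, if a complete one is
* available; while the handshake is still in progress it dispatches to the
* handshake handlers instead.
*
* Returns: true (1) on success or when more data is simply needed,
* false (0) if the connection should be dropped.
**/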
static bool netplay_get_cmd(netplay_t *netplay, bool *had_input)
{
uint32_t cmd;
uint32_t flip_frame;
uint32_t cmd_size;
ssize_t recvd;
/* We don't handle the initial handshake here */
switch (netplay->mode)
{
case NETPLAY_CONNECTION_NONE:
/* Huh?! */
return false;
case NETPLAY_CONNECTION_INIT:
return netplay_handshake_init(netplay, had_input);
case NETPLAY_CONNECTION_PRE_NICK:
{
bool ret = netplay_handshake_pre_nick(netplay, had_input);
send_input(netplay);
return ret;
}
case NETPLAY_CONNECTION_PRE_SYNC:
{
bool ret = netplay_handshake_pre_sync(netplay, had_input);
send_input(netplay);
return ret;
}
default:
break;
}
/* FIXME: This depends on delta_frame_ready */
#define RECV(buf, sz) \
recvd = netplay_recva(netplay, (buf), (sz)); \
if (recvd >= 0 && recvd < (sz)) goto shrt; \
else if (recvd < 0)
RECV(&cmd, sizeof(cmd))
return false;
cmd = ntohl(cmd);
RECV(&cmd_size, sizeof(cmd_size))
return false;
cmd_size = ntohl(cmd_size);
netplay->timeout_cnt = 0;
switch (cmd)
{
case NETPLAY_CMD_ACK:
/* Why are we even bothering? */
break;
case NETPLAY_CMD_NAK:
/* Disconnect now! */
return false;
case NETPLAY_CMD_INPUT:
{
uint32_t buffer[WORDS_PER_FRAME];
unsigned i;
if (cmd_size != WORDS_PER_FRAME * sizeof(uint32_t))
{
RARCH_ERR("NETPLAY_CMD_INPUT received an unexpected payload size.\n");
return netplay_cmd_nak(netplay);
}
RECV(buffer, sizeof(buffer))
{
RARCH_ERR("Failed to receive NETPLAY_CMD_INPUT input.\n");
return netplay_cmd_nak(netplay);
}
for (i = 0; i < WORDS_PER_FRAME; i++)
buffer[i] = ntohl(buffer[i]);
if (buffer[0] < netplay->read_frame_count)
{
/* We already had this, so ignore the new transmission */
break;
}
else if (buffer[0] > netplay->read_frame_count)
{
/* Out of order = out of luck */
return netplay_cmd_nak(netplay);
}
/* The data's good! */
netplay->buffer[netplay->read_ptr].have_remote = true;
memcpy(netplay->buffer[netplay->read_ptr].real_input_state,
buffer + 1, sizeof(buffer) - sizeof(uint32_t));
netplay->read_ptr = NEXT_PTR(netplay->read_ptr);
netplay->read_frame_count++;
break;
}
case NETPLAY_CMD_FLIP_PLAYERS:
if (cmd_size != sizeof(uint32_t))
{
RARCH_ERR("CMD_FLIP_PLAYERS received an unexpected command size.\n");
return netplay_cmd_nak(netplay);
}
RECV(&flip_frame, sizeof(flip_frame))
{
RARCH_ERR("Failed to receive CMD_FLIP_PLAYERS argument.\n");
return netplay_cmd_nak(netplay);
}
flip_frame = ntohl(flip_frame);
if (flip_frame < netplay->read_frame_count)
{
RARCH_ERR("Host asked us to flip users in the past. Not possible ...\n");
return netplay_cmd_nak(netplay);
}
netplay->flip ^= true;
netplay->flip_frame = flip_frame;
/* Force a rewind to assure the flip happens: This just prevents us
* from advancing the 'other' head past the flip because our prediction
* was correct */
if (flip_frame < netplay->self_frame_count)
netplay->force_rewind = true;
RARCH_LOG("%s.\n", msg_hash_to_str(MSG_NETPLAY_USERS_HAS_FLIPPED));
runloop_msg_queue_push(
msg_hash_to_str(MSG_NETPLAY_USERS_HAS_FLIPPED), 1, 180, false);
break;
case NETPLAY_CMD_SPECTATE:
RARCH_ERR("NETPLAY_CMD_SPECTATE unimplemented.\n");
return netplay_cmd_nak(netplay);
case NETPLAY_CMD_DISCONNECT:
hangup(netplay);
return true;
case NETPLAY_CMD_CRC:
{
uint32_t buffer[2];
size_t tmp_ptr = netplay->self_ptr;
bool found = false;
if (cmd_size != sizeof(buffer))
{
RARCH_ERR("NETPLAY_CMD_CRC received unexpected payload size.\n");
return netplay_cmd_nak(netplay);
}
RECV(buffer, sizeof(buffer))
{
RARCH_ERR("NETPLAY_CMD_CRC failed to receive payload.\n");
return netplay_cmd_nak(netplay);
}
buffer[0] = ntohl(buffer[0]);
buffer[1] = ntohl(buffer[1]);
/* Received a CRC for some frame. If we still have it, check if it
* matched. This approach could be improved with some quick modular
* arithmetic. */
do
{
if ( netplay->buffer[tmp_ptr].used
&& netplay->buffer[tmp_ptr].frame == buffer[0])
{
found = true;
break;
}
tmp_ptr = PREV_PTR(tmp_ptr);
} while (tmp_ptr != netplay->self_ptr);
if (!found)
{
/* Oh well, we got rid of it! */
break;
}
if (buffer[0] <= netplay->other_frame_count)
{
/* We've already replayed up to this frame, so we can check it
* directly */
uint32_t local_crc = netplay_delta_frame_crc(
netplay, &netplay->buffer[tmp_ptr]);
if (buffer[1] != local_crc)
{
/* Problem! */
netplay_cmd_request_savestate(netplay);
}
}
else
{
/* We'll have to check it when we catch up */
netplay->buffer[tmp_ptr].crc = buffer[1];
}
break;
}
case NETPLAY_CMD_REQUEST_SAVESTATE:
/* Delay until next frame so we don't send the savestate after the
* input */
netplay->force_send_savestate = true;
break;
case NETPLAY_CMD_LOAD_SAVESTATE:
{
uint32_t frame;
uint32_t isize;
uint32_t rd, wn;
/* Make sure we're ready for it */
if (netplay->quirks & NETPLAY_QUIRK_INITIALIZATION)
{
if (!netplay->is_replay)
{
netplay->is_replay = true;
netplay->replay_ptr = netplay->self_ptr;
netplay->replay_frame_count = netplay->self_frame_count;
netplay_wait_and_init_serialization(netplay);
netplay->is_replay = false;
}
else
{
netplay_wait_and_init_serialization(netplay);
}
}
/* There is a subtlety in whether the load comes before or after the
* current frame:
*
* If it comes before the current frame, then we need to force a
* rewind to that point.
*
* If it comes after the current frame, we need to jump ahead, then
* (strangely) force a rewind to the frame we're already on, so it
* gets loaded. This is just to avoid having reloading implemented in
* too many places. */
if (cmd_size > netplay->zbuffer_size + 2*sizeof(uint32_t))
{
RARCH_ERR("CMD_LOAD_SAVESTATE received an unexpected payload size.\n");
return netplay_cmd_nak(netplay);
}
RECV(&frame, sizeof(frame))
{
RARCH_ERR("CMD_LOAD_SAVESTATE failed to receive savestate frame.\n");
return netplay_cmd_nak(netplay);
}
frame = ntohl(frame);
if (frame != netplay->read_frame_count)
{
RARCH_ERR("CMD_LOAD_SAVESTATE loading a state out of order!\n");
return netplay_cmd_nak(netplay);
}
RECV(&isize, sizeof(isize))
{
RARCH_ERR("CMD_LOAD_SAVESTATE failed to receive inflated size.\n");
return netplay_cmd_nak(netplay);
}
isize = ntohl(isize);
if (isize != netplay->state_size)
{
RARCH_ERR("CMD_LOAD_SAVESTATE received an unexpected save state size.\n");
return netplay_cmd_nak(netplay);
}
RECV(netplay->zbuffer, cmd_size - 2*sizeof(uint32_t))
{
RARCH_ERR("CMD_LOAD_SAVESTATE failed to receive savestate.\n");
return netplay_cmd_nak(netplay);
}
/* And decompress it */
netplay->decompression_backend->set_in(netplay->decompression_stream,
netplay->zbuffer, cmd_size - 2*sizeof(uint32_t));
netplay->decompression_backend->set_out(netplay->decompression_stream,
(uint8_t*)netplay->buffer[netplay->read_ptr].state, netplay->state_size);
netplay->decompression_backend->trans(netplay->decompression_stream,
true, &rd, &wn, NULL);
/* Skip ahead if it's past where we are */
if (frame > netplay->self_frame_count)
{
/* This is squirrely: We need to assure that when we advance the
* frame in post_frame, THEN we're referring to the frame to
* load into. If we refer directly to read_ptr, then we'll end
* up never reading the input for read_frame_count itself, which
* will make the other side unhappy. */
netplay->self_ptr = PREV_PTR(netplay->read_ptr);
netplay->self_frame_count = frame - 1;
}
/* And force rewind to it */
netplay->force_rewind = true;
netplay->savestate_request_outstanding = false;
netplay->other_ptr = netplay->read_ptr;
netplay->other_frame_count = frame;
break;
}
case NETPLAY_CMD_PAUSE:
netplay->remote_paused = true;
break;
case NETPLAY_CMD_RESUME:
netplay->remote_paused = false;
break;
default:
RARCH_ERR("%s.\n", msg_hash_to_str(MSG_UNKNOWN_NETPLAY_COMMAND_RECEIVED));
return netplay_cmd_nak(netplay);
}
netplay_recv_flush(&netplay->recv_packet_buffer);
if (had_input)
*had_input = true;
return true;
shrt:
/* No more data, reset and try again */
netplay_recv_reset(&netplay->recv_packet_buffer);
return true;
#undef RECV
}
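/**
* poll_input:
* @netplay : pointer to netplay object
* @block   : true to wait until this frame's remote input has arrived
*
* Reads any pending commands from the peer. When @block is set, keeps
* waiting in RETRY_MS intervals (up to MAX_RETRIES) until input for the
* current frame has been received.
*
* Returns: 0 on success, -1 if the connection should be dropped.
**/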
static int poll_input(netplay_t *netplay, bool block)
{
bool had_input = false;
int max_fd = netplay->fd + 1;
do
{
had_input = false;
netplay->timeout_cnt++;
/* If we're not ready for input, wait until we are.
* Could fill the TCP buffer, stalling the other side. */
if (netplay_delta_frame_ready(netplay,
&netplay->buffer[netplay->read_ptr],
netplay->read_frame_count))
{
if (!netplay_get_cmd(netplay, &had_input))
return -1;
}
if (block)
{
/* If we were blocked for input, pass if we have this frame's input */
if (netplay->read_frame_count > netplay->self_frame_count)
break;
/* If we're supposed to block but we didn't have enough input, wait for it */
if (!had_input)
{
fd_set fds;
struct timeval tv = {0};
tv.tv_usec = RETRY_MS * 1000;
FD_ZERO(&fds);
FD_SET(netplay->fd, &fds);
if (socket_select(max_fd, &fds, NULL, NULL, &tv) < 0)
return -1;
RARCH_LOG("Network is stalling at frame %u, count %u of %d ...\n",
netplay->self_frame_count, netplay->timeout_cnt, MAX_RETRIES);
if (netplay->timeout_cnt >= MAX_RETRIES && !netplay->remote_paused)
return -1;
}
}
} while (had_input || block);
return 0;
}
/**
* netplay_simulate_input:
* @netplay : pointer to netplay object
* @sim_ptr : frame index for which to simulate input
* @resim : are we resimulating, or simulating this frame for the
* first time?
*
* "Simulate" input by assuming it hasn't changed since the last read input.
*/
void netplay_simulate_input(netplay_t *netplay, uint32_t sim_ptr, bool resim)
{
size_t prev = PREV_PTR(netplay->read_ptr);
struct delta_frame *pframe = &netplay->buffer[prev],
*simframe = &netplay->buffer[sim_ptr];
if (resim)
{
/* In resimulation mode, we only copy the buttons. The reason for this
* is nonobvious:
*
* If we resimulated nothing, then the /duration/ with which any input
* was pressed would be approximately correct, since the original
* simulation came in as the input came in, but the /number of times/
* the input was pressed would be wrong, as there would be an
* advancing wavefront of real data overtaking the simulated data
* (which is really just real data offset by some frames).
*
* That's acceptable for arrows in most situations, since the amount
* you move is tied to the duration, but unacceptable for buttons,
* which will seem to jerkily be pressed numerous times with those
* wavefronts.
*/
const uint32_t keep = (1U<<RETRO_DEVICE_ID_JOYPAD_UP) |
(1U<<RETRO_DEVICE_ID_JOYPAD_DOWN) |
(1U<<RETRO_DEVICE_ID_JOYPAD_LEFT) |
(1U<<RETRO_DEVICE_ID_JOYPAD_RIGHT);
uint32_t sim_state = simframe->simulated_input_state[0] & keep;
sim_state |= pframe->real_input_state[0] & ~keep;
simframe->simulated_input_state[0] = sim_state;
}
else
{
memcpy(simframe->simulated_input_state,
pframe->real_input_state,
sizeof(pframe->real_input_state));
}
}
/**
* netplay_poll:
* @netplay : pointer to netplay object
*
* Polls network to see if we have anything new. If our
* network buffer is full, we simply have to block
* for new input data.
*
* Returns: true (1) if successful, otherwise false (0).
**/
static bool netplay_poll(void)
{
int res;
if (netplay_data->mode == NETPLAY_CONNECTION_NONE)
return false;
netplay_data->can_poll = false;
get_self_input_state(netplay_data);
/* No network side in spectate mode */
if (netplay_is_server(netplay_data) && netplay_data->spectate.enabled)
return true;
/* Read Netplay input, block if we're configured to stall for input every
* frame */
if (netplay_data->delay_frames == 0 &&
netplay_data->read_frame_count <= netplay_data->self_frame_count)
res = poll_input(netplay_data, true);
else
res = poll_input(netplay_data, false);
if (res == -1)
{
hangup(netplay_data);
return false;
}
/* Simulate the input if we don't have real input */
if (!netplay_data->buffer[netplay_data->self_ptr].have_remote)
netplay_simulate_input(netplay_data, netplay_data->self_ptr, false);
/* Consider stalling */
switch (netplay_data->stall)
{
case RARCH_NETPLAY_STALL_RUNNING_FAST:
if (netplay_data->read_frame_count >= netplay_data->self_frame_count)
netplay_data->stall = RARCH_NETPLAY_STALL_NONE;
break;
default: /* not stalling */
if (netplay_data->read_frame_count + netplay_data->delay_frames
<= netplay_data->self_frame_count)
{
netplay_data->stall = RARCH_NETPLAY_STALL_RUNNING_FAST;
netplay_data->stall_time = cpu_features_get_time_usec();
}
}
/* If we're stalling, consider disconnection */
if (netplay_data->stall)
{
retro_time_t now = cpu_features_get_time_usec();
/* Don't stall out while they're paused */
if (netplay_data->remote_paused)
netplay_data->stall_time = now;
else if (now - netplay_data->stall_time >= MAX_STALL_TIME_USEC)
{
/* Stalled out! */
hangup(netplay_data);
return false;
}
}
return true;
}
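/* libretro callback wrappers installed while Netplay is active: they drive
* the network poll and forward audio/video to the real callbacks, dropping
* output while we fast-forward through a replay and muting audio while
* stalled */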
void input_poll_net(void)
{
if (!netplay_should_skip(netplay_data) && netplay_can_poll(netplay_data))
netplay_poll();
}
void video_frame_net(const void *data, unsigned width,
unsigned height, size_t pitch)
{
if (!netplay_should_skip(netplay_data))
netplay_data->cbs.frame_cb(data, width, height, pitch);
}
void audio_sample_net(int16_t left, int16_t right)
{
if (!netplay_should_skip(netplay_data) && !netplay_data->stall)
netplay_data->cbs.sample_cb(left, right);
}
size_t audio_sample_batch_net(const int16_t *data, size_t frames)
{
if (!netplay_should_skip(netplay_data) && !netplay_data->stall)
return netplay_data->cbs.sample_batch_cb(data, frames);
return frames;
}
/**
* netplay_is_alive:
*
* Checks if Netplay is currently connected and in playing mode.
*
* Returns: true (1) if alive, otherwise false (0).
**/
static bool netplay_is_alive(void)
{
if (!netplay_data)
return false;
return (netplay_data->mode == NETPLAY_CONNECTION_PLAYING);
}
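/**
* netplay_flip_port:
* @netplay : pointer to netplay object
* @port    : port as requested by the core
*
* Maps a core-side port through any requested player flip, using the
* replay frame count while replaying.
*
* Returns: the effective port.
**/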
static bool netplay_flip_port(netplay_t *netplay, bool port)
{
size_t frame = netplay->self_frame_count;
if (netplay->flip_frame == 0)
return port;
if (netplay->is_replay)
frame = netplay->replay_frame_count;
return port ^ netplay->flip ^ (frame < netplay->flip_frame);
}
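/**
* netplay_input_state:
* @netplay : pointer to netplay object
* @port    : port as requested by the core
* @device  : device type (joypad or analog)
* @idx     : analog index
* @id      : button or axis id
*
* Fetches input from the current delta frame: our own recorded input for
* our port, and the remote player's real input (or simulated input if it
* has not arrived yet) for the remote port.
*
* Returns: input state for the requested device.
**/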
static int16_t netplay_input_state(netplay_t *netplay,
bool port, unsigned device,
unsigned idx, unsigned id)
{
size_t ptr = netplay->is_replay ?
netplay->replay_ptr : netplay->self_ptr;
const uint32_t *curr_input_state = netplay->buffer[ptr].self_state;
if (netplay->port == (netplay_flip_port(netplay, port) ? 1 : 0))
{
if (netplay->buffer[ptr].have_remote)
{
netplay->buffer[ptr].used_real = true;
curr_input_state = netplay->buffer[ptr].real_input_state;
}
else
{
curr_input_state = netplay->buffer[ptr].simulated_input_state;
}
}
switch (device)
{
case RETRO_DEVICE_JOYPAD:
return ((1 << id) & curr_input_state[0]) ? 1 : 0;
case RETRO_DEVICE_ANALOG:
{
uint32_t state = curr_input_state[1 + idx];
return (int16_t)(uint16_t)(state >> (id * 16));
}
default:
return 0;
}
}
int16_t input_state_net(unsigned port, unsigned device,
unsigned idx, unsigned id)
{
if (netplay_is_alive())
{
/* Only two players for now. */
if (port > 1)
return 0;
return netplay_input_state(netplay_data, port, device, idx, id);
}
return netplay_data->cbs.state_cb(port, device, idx, id);
}
#ifndef HAVE_SOCKET_LEGACY
/* Custom inet_ntop. Win32 doesn't seem to support this ... */
void netplay_log_connection(const struct sockaddr_storage *their_addr,
unsigned slot, const char *nick)
{
union
{
const struct sockaddr_storage *storage;
const struct sockaddr_in *v4;
const struct sockaddr_in6 *v6;
} u;
const char *str = NULL;
char buf_v4[INET_ADDRSTRLEN] = {0};
char buf_v6[INET6_ADDRSTRLEN] = {0};
char msg[512];
msg[0] = '\0';
u.storage = their_addr;
switch (their_addr->ss_family)
{
case AF_INET:
{
struct sockaddr_in in;
memset(&in, 0, sizeof(in));
str = buf_v4;
in.sin_family = AF_INET;
memcpy(&in.sin_addr, &u.v4->sin_addr, sizeof(struct in_addr));
getnameinfo((struct sockaddr*)&in, sizeof(struct sockaddr_in),
buf_v4, sizeof(buf_v4),
NULL, 0, NI_NUMERICHOST);
}
break;
case AF_INET6:
{
struct sockaddr_in6 in;
memset(&in, 0, sizeof(in));
str = buf_v6;
in.sin6_family = AF_INET6;
memcpy(&in.sin6_addr, &u.v6->sin6_addr, sizeof(struct in6_addr));
getnameinfo((struct sockaddr*)&in, sizeof(struct sockaddr_in6),
buf_v6, sizeof(buf_v6), NULL, 0, NI_NUMERICHOST);
}
break;
default:
break;
}
if (str)
{
snprintf(msg, sizeof(msg), msg_hash_to_str(MSG_GOT_CONNECTION_FROM_NAME),
nick, str);
runloop_msg_queue_push(msg, 1, 180, false);
RARCH_LOG("%s\n", msg);
}
else
{
snprintf(msg, sizeof(msg), msg_hash_to_str(MSG_GOT_CONNECTION_FROM),
nick);
runloop_msg_queue_push(msg, 1, 180, false);
RARCH_LOG("%s\n", msg);
}
RARCH_LOG("%s %u\n", msg_hash_to_str(MSG_CONNECTION_SLOT),
slot);
}
#else
void netplay_log_connection(const struct sockaddr_storage *their_addr,
unsigned slot, const char *nick)
{
char msg[512];
msg[0] = '\0';
snprintf(msg, sizeof(msg), msg_hash_to_str(MSG_GOT_CONNECTION_FROM),
nick);
runloop_msg_queue_push(msg, 1, 180, false);
RARCH_LOG("%s\n", msg);
RARCH_LOG("%s %u\n",
msg_hash_to_str(MSG_CONNECTION_SLOT), slot);
}
#endif
#ifndef HAVE_SOCKET_LEGACY
static void announce_nat_traversal(netplay_t *netplay)
{
char msg[512], host[PATH_MAX_LENGTH], port[6];
if (netplay->nat_traversal_state.have_inet4)
{
if (getnameinfo((const struct sockaddr *) &netplay->nat_traversal_state.ext_inet4_addr,
sizeof(struct sockaddr_in),
host, PATH_MAX_LENGTH, port, 6, NI_NUMERICHOST|NI_NUMERICSERV) != 0)
return;
}
#ifdef HAVE_INET6
else if (netplay->nat_traversal_state.have_inet6)
{
if (getnameinfo((const struct sockaddr *) &netplay->nat_traversal_state.ext_inet6_addr,
sizeof(struct sockaddr_in6),
host, PATH_MAX_LENGTH, port, 6, NI_NUMERICHOST|NI_NUMERICSERV) != 0)
return;
}
#endif
else
return;
snprintf(msg, sizeof(msg), "%s: %s:%s\n",
msg_hash_to_str(MSG_PUBLIC_ADDRESS),
host, port);
runloop_msg_queue_push(msg, 1, 180, false);
RARCH_LOG("%s\n", msg);
}
#endif
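/**
* netplay_try_init_serialization:
* @netplay : pointer to netplay object
*
* Sets up savestate serialization and verifies that the core can actually
* produce a savestate right now, clearing the initialization quirk once it
* succeeds.
*
* Returns: true (1) on success, otherwise false (0).
**/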
bool netplay_try_init_serialization(netplay_t *netplay)
{
retro_ctx_serialize_info_t serial_info;
if (netplay->state_size)
return true;
if (!netplay_init_serialization(netplay))
return false;
/* Check if we can actually save */
serial_info.data_const = NULL;
serial_info.data = netplay->buffer[netplay->self_ptr].state;
serial_info.size = netplay->state_size;
if (!core_serialize(&serial_info))
return false;
/* Once initialized, we no longer exhibit this quirk */
netplay->quirks &= ~((uint64_t) NETPLAY_QUIRK_INITIALIZATION);
return true;
}
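/**
* netplay_wait_and_init_serialization:
* @netplay : pointer to netplay object
*
* Runs the core for up to 60 frames, retrying serialization setup each
* frame, for cores that cannot serialize until they have run for a while.
*
* Returns: true (1) once serialization is ready, otherwise false (0).
**/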
bool netplay_wait_and_init_serialization(netplay_t *netplay)
{
int frame;
if (netplay->state_size)
return true;
/* Wait a maximum of 60 frames */
for (frame = 0; frame < 60; frame++)
{
if (netplay_try_init_serialization(netplay))
return true;
#if defined(HAVE_THREADS)
autosave_lock();
#endif
core_run();
#if defined(HAVE_THREADS)
autosave_unlock();
#endif
}
return false;
}
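/**
* netplay_init_socket_buffers:
* @netplay : pointer to netplay object
*
* (Re)allocates the send and receive packet buffers, sized to hold a
* compressed savestate plus a full window of input frames and their
* headers.
*
* Returns: true (1) on success, otherwise false (0).
**/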
static bool netplay_init_socket_buffers(netplay_t *netplay)
{
/* Make our packet buffer big enough for a save state and frames-many frames
* of input data, plus the headers for each of them */
size_t packet_buffer_size = netplay->zbuffer_size +
netplay->delay_frames * WORDS_PER_FRAME + (netplay->delay_frames+1)*3;
if (netplay->send_packet_buffer.data)
{
netplay_deinit_socket_buffer(&netplay->send_packet_buffer);
netplay_deinit_socket_buffer(&netplay->recv_packet_buffer);
netplay->send_packet_buffer.data = netplay->recv_packet_buffer.data = NULL;
}
if (!netplay_init_socket_buffer(&netplay->send_packet_buffer, packet_buffer_size))
return false;
if (!netplay_init_socket_buffer(&netplay->recv_packet_buffer, packet_buffer_size))
return false;
return true;
}
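/**
* netplay_init_serialization:
* @netplay : pointer to netplay object
*
* Allocates a savestate buffer for every delta frame and a compression
* buffer based on the core's reported serialization size, then resizes the
* packet buffers to match.
*
* Returns: true (1) on success, otherwise false (0).
**/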
bool netplay_init_serialization(netplay_t *netplay)
{
unsigned i;
retro_ctx_size_info_t info;
if (netplay->state_size)
return true;
core_serialize_size(&info);
if (!info.size)
return false;
netplay->state_size = info.size;
for (i = 0; i < netplay->buffer_size; i++)
{
netplay->buffer[i].state = calloc(netplay->state_size, 1);
if (!netplay->buffer[i].state)
{
netplay->quirks |= NETPLAY_QUIRK_NO_SAVESTATES;
return false;
}
}
netplay->zbuffer_size = netplay->state_size * 2;
netplay->zbuffer = (uint8_t *) calloc(netplay->zbuffer_size, 1);
if (!netplay->zbuffer)
{
netplay->quirks |= NETPLAY_QUIRK_NO_TRANSMISSION;
netplay->zbuffer_size = 0;
return false;
}
return netplay_init_socket_buffers(netplay);
}
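/**
* netplay_init_buffers:
* @netplay : pointer to netplay object
* @frames  : number of delay frames
*
* Allocates the delta frame ring buffer (2 * @frames + 1 entries, so the
* self, other and read heads can drift apart) and, when the core allows
* it, the serialization and packet buffers.
*
* Returns: true (1) on success, otherwise false (0).
**/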
static bool netplay_init_buffers(netplay_t *netplay, unsigned frames)
{
if (!netplay)
return false;
/* * 2 + 1 because:
* Self sits in the middle,
* Other is allowed to drift as much as 'frames' frames behind
* Read is allowed to drift as much as 'frames' frames ahead */
netplay->buffer_size = frames * 2 + 1;
netplay->buffer = (struct delta_frame*)calloc(netplay->buffer_size,
sizeof(*netplay->buffer));
if (!netplay->buffer)
return false;
if (!(netplay->quirks & NETPLAY_QUIRK_INITIALIZATION))
netplay_init_serialization(netplay);
if (!netplay->send_packet_buffer.data)
netplay_init_socket_buffers(netplay);
return true;
}
/**
* netplay_new:
* @direct_host : Netplay host discovered from scanning.
* @server : IP address of server.
* @port : Port of server.
* @delay_frames : Amount of delay frames.
* @check_frames : Frequency with which to check CRCs.
* @cb : Libretro callbacks.
* @spectate : If true, enable spectator mode.
* @nat_traversal : If true, attempt NAT traversal.
* @nick : Nickname of user.
* @quirks : Netplay quirks required for this session.
*
* Creates a new netplay handle. A NULL host means we're
* hosting (user 1).
*
* Returns: new netplay handle.
**/
netplay_t *netplay_new(void *direct_host, const char *server, uint16_t port,
unsigned delay_frames, unsigned check_frames,
const struct retro_callbacks *cb, bool spectate, bool nat_traversal,
const char *nick, uint64_t quirks)
{
netplay_t *netplay = (netplay_t*)calloc(1, sizeof(*netplay));
if (!netplay)
return NULL;
netplay->fd = -1;
netplay->tcp_port = port;
netplay->cbs = *cb;
netplay->port = server ? 0 : 1;
netplay->spectate.enabled = spectate;
netplay->is_server = server == NULL;
netplay->nat_traversal = netplay->is_server ? nat_traversal : false;
netplay->delay_frames = delay_frames;
netplay->check_frames = check_frames;
netplay->quirks = quirks;
strlcpy(netplay->nick, nick[0] ? nick : RARCH_DEFAULT_NICK, sizeof(netplay->nick));
if (!netplay_init_buffers(netplay, delay_frames))
{
free(netplay);
return NULL;
}
if (spectate)
netplay->net_cbs = netplay_get_cbs_spectate();
else
netplay->net_cbs = netplay_get_cbs_net();
if (!init_socket(netplay, direct_host, server, port))
{
free(netplay);
return NULL;
}
if (!netplay_info_cb(netplay, delay_frames))
goto error;
/* FIXME: Our initial connection should also be nonblocking */
if (!socket_nonblock(netplay->fd))
goto error;
return netplay;
error:
if (netplay->fd >= 0)
socket_close(netplay->fd);
free(netplay);
return NULL;
}
/**
* netplay_command:
* @netplay : pointer to netplay object
* @cmd : command to send
* @data : data to send as argument
* @sz : size of data
* @command_str : name of action
* @success_msg : message to display upon success
*
* Sends a single netplay command and waits for response.
*/
bool netplay_command(netplay_t* netplay, enum netplay_cmd cmd,
void* data, size_t sz,
const char* command_str,
const char* success_msg)
{
char m[256];
retro_assert(netplay);
if (!netplay_send_raw_cmd(netplay, cmd, data, sz))
goto error;
runloop_msg_queue_push(success_msg, 1, 180, false);
return true;
error:
/* Report a real message naming the failed action instead of leaving the
* buffer uninitialized */
snprintf(m, sizeof(m), "Failed to send netplay command \"%s\"", command_str);
RARCH_WARN("%s\n", m);
runloop_msg_queue_push(m, 1, 180, false);
return false;
}
/**
* netplay_flip_users:
* @netplay : pointer to netplay object
*
* On regular netplay, flip who controls user 1 and 2.
**/
static void netplay_flip_users(netplay_t *netplay)
{
/* Must be in the future because we may have
* already sent this frame's data */
uint32_t flip_frame = netplay->self_frame_count + 1;
uint32_t flip_frame_net = htonl(flip_frame);
bool command = netplay_command(
netplay, NETPLAY_CMD_FLIP_PLAYERS,
&flip_frame_net, sizeof flip_frame_net,
"flip users", "Successfully flipped users.\n");
if (command)
{
netplay->flip ^= true;
netplay->flip_frame = flip_frame;
}
}
/**
* netplay_free:
* @netplay : pointer to netplay object
*
* Frees netplay handle.
**/
void netplay_free(netplay_t *netplay)
{
unsigned i;
if (netplay->fd >= 0)
socket_close(netplay->fd);
if (netplay->spectate.enabled)
{
for (i = 0; i < MAX_SPECTATORS; i++)
if (netplay->spectate.fds[i] >= 0)
socket_close(netplay->spectate.fds[i]);
free(netplay->spectate.input);
}
if (netplay->nat_traversal)
natt_free(&netplay->nat_traversal_state);
if (netplay->buffer)
{
for (i = 0; i < netplay->buffer_size; i++)
if (netplay->buffer[i].state)
free(netplay->buffer[i].state);
free(netplay->buffer);
}
netplay_deinit_socket_buffer(&netplay->send_packet_buffer);
netplay_deinit_socket_buffer(&netplay->recv_packet_buffer);
if (netplay->zbuffer)
free(netplay->zbuffer);
if (netplay->compression_stream)
netplay->compression_backend->stream_free(netplay->compression_stream);
if (netplay->addr)
freeaddrinfo_retro(netplay->addr);
free(netplay);
}
/**
* netplay_pre_frame:
* @netplay : pointer to netplay object
*
* Pre-frame for Netplay.
* Call this before running retro_run().
*
* Returns: true (1) if the frontend is cleared to emulate the frame, false (0)
* if we're stalled or paused
**/
bool netplay_pre_frame(netplay_t *netplay)
{
retro_assert(netplay && netplay->net_cbs->pre_frame);
/* FIXME: This is an ugly way to learn we're not paused anymore */
if (netplay->local_paused)
netplay_frontend_paused(netplay, false);
if (netplay->quirks & NETPLAY_QUIRK_INITIALIZATION)
{
/* Are we ready now? */
netplay_try_init_serialization(netplay);
}
if (netplay->is_server)
{
/* Advertise our server */
netplay_lan_ad_server(netplay);
/* NAT traversal if applicable */
if (netplay->nat_traversal &&
netplay->nat_traversal_state.request_outstanding &&
!netplay->nat_traversal_state.have_inet4)
{
struct timeval tmptv = {0};
fd_set fds = netplay->nat_traversal_state.fds;
if (socket_select(netplay->nat_traversal_state.nfds, &fds, NULL, NULL, &tmptv) > 0)
natt_read(&netplay->nat_traversal_state);
#ifndef HAVE_SOCKET_LEGACY
if (!netplay->nat_traversal_state.request_outstanding ||
netplay->nat_traversal_state.have_inet4)
announce_nat_traversal(netplay);
#endif
}
}
if (!netplay->net_cbs->pre_frame(netplay))
return false;
return ((netplay->mode != NETPLAY_CONNECTION_PLAYING) ||
(!netplay->stall && !netplay->remote_paused));
}
/**
* netplay_post_frame:
* @netplay : pointer to netplay object
*
* Post-frame for Netplay.
* We check if we have new input and replay from recorded input.
* Call this after running retro_run().
**/
void netplay_post_frame(netplay_t *netplay)
{
retro_assert(netplay && netplay->net_cbs->post_frame);
netplay->net_cbs->post_frame(netplay);
if (!netplay_send_flush(&netplay->send_packet_buffer, netplay->fd, false))
hangup(netplay);
}
/**
* netplay_frontend_paused
* @netplay : pointer to netplay object
* @paused : true if frontend is paused
*
* Inform Netplay of the frontend's pause state (paused or otherwise)
**/
void netplay_frontend_paused(netplay_t *netplay, bool paused)
{
/* Nothing to do if we already knew this */
if (netplay->local_paused == paused)
return;
netplay->local_paused = paused;
if (netplay->mode != NETPLAY_CONNECTION_NONE &&
!netplay->spectate.enabled)
{
netplay_send_raw_cmd(netplay, paused
? NETPLAY_CMD_PAUSE : NETPLAY_CMD_RESUME, NULL, 0);
/* We're not going to be polled, so we need to flush this command now */
netplay_send_flush(&netplay->send_packet_buffer, netplay->fd, true);
}
}
/**
* netplay_load_savestate
* @netplay : pointer to netplay object
* @serial_info : the savestate being loaded, NULL means
* "load it yourself"
* @save : Whether to save the provided serial_info
* into the frame buffer
*
* Inform Netplay of a savestate load and send it to the other side
**/
void netplay_load_savestate(netplay_t *netplay,
retro_ctx_serialize_info_t *serial_info, bool save)
{
uint32_t header[4];
retro_ctx_serialize_info_t tmp_serial_info;
uint32_t rd, wn;
if (netplay->mode != NETPLAY_CONNECTION_PLAYING)
return;
/* Record it in our own buffer */
if (save || !serial_info)
{
if (netplay_delta_frame_ready(netplay,
&netplay->buffer[netplay->self_ptr], netplay->self_frame_count))
{
if (!serial_info)
{
tmp_serial_info.size = netplay->state_size;
tmp_serial_info.data = netplay->buffer[netplay->self_ptr].state;
if (!core_serialize(&tmp_serial_info))
return;
tmp_serial_info.data_const = tmp_serial_info.data;
serial_info = &tmp_serial_info;
}
else
{
if (serial_info->size <= netplay->state_size)
{
memcpy(netplay->buffer[netplay->self_ptr].state,
serial_info->data_const, serial_info->size);
}
}
}
else
{
/* FIXME: This is a critical failure! */
return;
}
}
/* We need to ignore any intervening data from the other side,
* and never rewind past this */
if (netplay->read_frame_count < netplay->self_frame_count)
{
netplay->read_ptr = netplay->self_ptr;
netplay->read_frame_count = netplay->self_frame_count;
}
if (netplay->other_frame_count < netplay->self_frame_count)
{
netplay->other_ptr = netplay->self_ptr;
netplay->other_frame_count = netplay->self_frame_count;
}
/* If we can't send it to the peer, loading a state was a bad idea */
if (netplay->quirks & (
NETPLAY_QUIRK_NO_SAVESTATES
| NETPLAY_QUIRK_NO_TRANSMISSION))
return;
/* Compress it */
netplay->compression_backend->set_in(netplay->compression_stream,
(const uint8_t*)serial_info->data_const, serial_info->size);
netplay->compression_backend->set_out(netplay->compression_stream,
netplay->zbuffer, netplay->zbuffer_size);
if (!netplay->compression_backend->trans(netplay->compression_stream,
true, &rd, &wn, NULL))
{
hangup(netplay);
return;
}
/* And send it to the peer */
header[0] = htonl(NETPLAY_CMD_LOAD_SAVESTATE);
header[1] = htonl(wn + 2*sizeof(uint32_t));
header[2] = htonl(netplay->self_frame_count);
header[3] = htonl(serial_info->size);
if (!netplay_send(&netplay->send_packet_buffer, netplay->fd, header,
sizeof(header)))
{
hangup(netplay);
return;
}
if (!netplay_send(&netplay->send_packet_buffer, netplay->fd,
netplay->zbuffer, wn))
{
hangup(netplay);
return;
}
}
/**
* netplay_disconnect
* @netplay : pointer to netplay object
*
* Disconnect netplay.
*
* Returns: true (1) if successful. At present, cannot fail.
**/
bool netplay_disconnect(netplay_t *netplay)
{
if (!netplay || (netplay->mode == NETPLAY_CONNECTION_NONE))
return true;
hangup(netplay);
return true;
}
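/**
* deinit_netplay:
*
* Frees the global netplay handle, if any, and restores the regular
* libretro callbacks.
**/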
void deinit_netplay(void)
{
if (netplay_data)
netplay_free(netplay_data);
netplay_data = NULL;
core_unset_netplay_callbacks();
}
/**
* init_netplay:
*
* Initializes netplay.
*
* If netplay is already initialized, will return false (0).
*
* Returns: true (1) if successful, otherwise false (0).
**/
bool init_netplay(bool is_spectate, void *direct_host, const char *server, unsigned port)
{
struct retro_callbacks cbs = {0};
settings_t *settings = config_get_ptr();
uint64_t serialization_quirks = 0;
uint64_t quirks = 0;
if (!netplay_enabled)
return false;
if (bsv_movie_ctl(BSV_MOVIE_CTL_START_PLAYBACK, NULL))
{
RARCH_WARN("%s\n",
msg_hash_to_str(MSG_NETPLAY_FAILED_MOVIE_PLAYBACK_HAS_STARTED));
return false;
}
core_set_default_callbacks(&cbs);
if (!core_set_netplay_callbacks())
return false;
/* Map the core's quirks to our quirks */
serialization_quirks = core_serialization_quirks();
if (serialization_quirks & ~((uint64_t) NETPLAY_QUIRK_MAP_UNDERSTOOD))
{
/* Quirks we don't support! Just disable everything. */
quirks |= NETPLAY_QUIRK_NO_SAVESTATES;
}
if (serialization_quirks & NETPLAY_QUIRK_MAP_NO_SAVESTATES)
quirks |= NETPLAY_QUIRK_NO_SAVESTATES;
if (serialization_quirks & NETPLAY_QUIRK_MAP_NO_TRANSMISSION)
quirks |= NETPLAY_QUIRK_NO_TRANSMISSION;
if (serialization_quirks & NETPLAY_QUIRK_MAP_INITIALIZATION)
quirks |= NETPLAY_QUIRK_INITIALIZATION;
if (serialization_quirks & NETPLAY_QUIRK_MAP_ENDIAN_DEPENDENT)
quirks |= NETPLAY_QUIRK_ENDIAN_DEPENDENT;
if (serialization_quirks & NETPLAY_QUIRK_MAP_PLATFORM_DEPENDENT)
quirks |= NETPLAY_QUIRK_PLATFORM_DEPENDENT;
if (netplay_is_client)
{
RARCH_LOG("%s\n", msg_hash_to_str(MSG_CONNECTING_TO_NETPLAY_HOST));
}
else
{
RARCH_LOG("%s\n", msg_hash_to_str(MSG_WAITING_FOR_CLIENT));
runloop_msg_queue_push(
msg_hash_to_str(MSG_WAITING_FOR_CLIENT),
0, 180, false);
}
netplay_data = (netplay_t*)netplay_new(
netplay_is_client ? direct_host : NULL,
netplay_is_client ? server : NULL,
port ? port : RARCH_DEFAULT_PORT,
settings->netplay.delay_frames, settings->netplay.check_frames, &cbs,
is_spectate, settings->netplay.nat_traversal, settings->username,
quirks);
if (netplay_data)
return true;
RARCH_WARN("%s\n", msg_hash_to_str(MSG_NETPLAY_FAILED));
runloop_msg_queue_push(
msg_hash_to_str(MSG_NETPLAY_FAILED),
0, 180, false);
return false;
}
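/**
* netplay_driver_ctl:
* @state : control operation to perform
* @data  : operation-specific argument
*
* Single entry point used by the rest of RetroArch to drive Netplay:
* enabling and disabling, per-frame hooks, pausing, savestate loads and
* disconnection. Guards against recursive calls.
*
* Returns: the result of the requested operation; for queries, the
* queried value.
**/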
bool netplay_driver_ctl(enum rarch_netplay_ctl_state state, void *data)
{
bool ret = true;
if (in_netplay)
return true;
in_netplay = true;
if (!netplay_data)
{
switch (state)
{
case RARCH_NETPLAY_CTL_ENABLE_SERVER:
netplay_enabled = true;
netplay_is_client = false;
goto done;
case RARCH_NETPLAY_CTL_ENABLE_CLIENT:
netplay_enabled = true;
netplay_is_client = true;
break;
case RARCH_NETPLAY_CTL_DISABLE:
netplay_enabled = false;
goto done;
case RARCH_NETPLAY_CTL_IS_ENABLED:
ret = netplay_enabled;
goto done;
case RARCH_NETPLAY_CTL_IS_DATA_INITED:
ret = false;
goto done;
default:
goto done;
}
}
switch (state)
{
case RARCH_NETPLAY_CTL_ENABLE_SERVER:
case RARCH_NETPLAY_CTL_ENABLE_CLIENT:
case RARCH_NETPLAY_CTL_IS_DATA_INITED:
goto done;
case RARCH_NETPLAY_CTL_DISABLE:
netplay_enabled = false;
deinit_netplay();
goto done;
case RARCH_NETPLAY_CTL_IS_ENABLED:
goto done;
case RARCH_NETPLAY_CTL_POST_FRAME:
netplay_post_frame(netplay_data);
break;
case RARCH_NETPLAY_CTL_PRE_FRAME:
ret = netplay_pre_frame(netplay_data);
goto done;
case RARCH_NETPLAY_CTL_FLIP_PLAYERS:
{
bool *state = (bool*)data;
if (*state)
netplay_flip_users(netplay_data);
}
break;
case RARCH_NETPLAY_CTL_PAUSE:
netplay_frontend_paused(netplay_data, true);
break;
case RARCH_NETPLAY_CTL_UNPAUSE:
netplay_frontend_paused(netplay_data, false);
break;
case RARCH_NETPLAY_CTL_LOAD_SAVESTATE:
netplay_load_savestate(netplay_data, (retro_ctx_serialize_info_t*)data, true);
break;
case RARCH_NETPLAY_CTL_DISCONNECT:
ret = netplay_disconnect(netplay_data);
goto done;
default:
case RARCH_NETPLAY_CTL_NONE:
ret = false;
}
done:
in_netplay = false;
return ret;
}