mirror of https://github.com/FEX-Emu/linux.git
synced 2024-12-13 20:33:15 +00:00

Merge git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml

Pull UML updates from Richard Weinberger:

 - a new and faster epoll based IRQ controller and NIC driver

 - misc fixes and janitorial updates

* git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml:
  Fix vector raw inintialization logic
  Migrate vector timers to new timer API
  um: Compile with modern headers
  um: vector: Fix an error handling path in 'vector_parse()'
  um: vector: Fix a memory allocation check
  um: vector: fix missing unlock on error in vector_net_open()
  um: Add missing EXPORT for free_irq_by_fd()
  High Performance UML Vector Network Driver
  Epoll based IRQ controller
  um: Use POSIX ucontext_t instead of struct ucontext
  um: time: Use timespec64 for persistent clock
  um: Restore symbol versions for __memcpy and memcpy

This commit is contained in commit 375479c386
@@ -109,6 +109,17 @@ config UML_NET_DAEMON
	  more than one without conflict. If you don't need UML networking,
	  say N.

config UML_NET_VECTOR
	bool "Vector I/O high performance network devices"
	depends on UML_NET
	help
	  This User-Mode Linux network driver uses multi-message send
	  and receive functions. The host running the UML guest must have
	  a Linux kernel version above 3.0 and a libc version > 2.13.
	  This driver provides tap, raw, gre and l2tpv3 network transports
	  with up to 4 times higher network throughput than the UML network
	  drivers.

config UML_NET_VDE
	bool "VDE transport"
	depends on UML_NET
@@ -9,6 +9,7 @@
slip-objs := slip_kern.o slip_user.o
slirp-objs := slirp_kern.o slirp_user.o
daemon-objs := daemon_kern.o daemon_user.o
vector-objs := vector_kern.o vector_user.o vector_transports.o
umcast-objs := umcast_kern.o umcast_user.o
net-objs := net_kern.o net_user.o
mconsole-objs := mconsole_kern.o mconsole_user.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_STDERR_CONSOLE) += stderr_console.o
obj-$(CONFIG_UML_NET_SLIP) += slip.o slip_common.o
obj-$(CONFIG_UML_NET_SLIRP) += slirp.o slip_common.o
obj-$(CONFIG_UML_NET_DAEMON) += daemon.o
obj-$(CONFIG_UML_NET_VECTOR) += vector.o
obj-$(CONFIG_UML_NET_VDE) += vde.o
obj-$(CONFIG_UML_NET_MCAST) += umcast.o
obj-$(CONFIG_UML_NET_PCAP) += pcap.o
@@ -61,7 +63,7 @@ obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o
obj-$(CONFIG_UML_RANDOM) += random.o

# pcap_user.o must be added explicitly.
USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o pcap_user.o vde_user.o
USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o pcap_user.o vde_user.o vector_user.o
CFLAGS_null.o = -DDEV_NULL=$(DEV_NULL_PATH)

include arch/um/scripts/Makefile.rules
@@ -171,56 +171,19 @@ int enable_chan(struct line *line)
	return err;
}

/* Items are added in IRQ context, when free_irq can't be called, and
 * removed in process context, when it can.
 * This handles interrupt sources which disappear, and which need to
 * be permanently disabled.  This is discovered in IRQ context, but
 * the freeing of the IRQ must be done later.
 */
static DEFINE_SPINLOCK(irqs_to_free_lock);
static LIST_HEAD(irqs_to_free);

void free_irqs(void)
{
	struct chan *chan;
	LIST_HEAD(list);
	struct list_head *ele;
	unsigned long flags;

	spin_lock_irqsave(&irqs_to_free_lock, flags);
	list_splice_init(&irqs_to_free, &list);
	spin_unlock_irqrestore(&irqs_to_free_lock, flags);

	list_for_each(ele, &list) {
		chan = list_entry(ele, struct chan, free_list);

		if (chan->input && chan->enabled)
			um_free_irq(chan->line->driver->read_irq, chan);
		if (chan->output && chan->enabled)
			um_free_irq(chan->line->driver->write_irq, chan);
		chan->enabled = 0;
	}
}

static void close_one_chan(struct chan *chan, int delay_free_irq)
{
	unsigned long flags;

	if (!chan->opened)
		return;

	if (delay_free_irq) {
		spin_lock_irqsave(&irqs_to_free_lock, flags);
		list_add(&chan->free_list, &irqs_to_free);
		spin_unlock_irqrestore(&irqs_to_free_lock, flags);
	}
	else {
		if (chan->input && chan->enabled)
			um_free_irq(chan->line->driver->read_irq, chan);
		if (chan->output && chan->enabled)
			um_free_irq(chan->line->driver->write_irq, chan);
		chan->enabled = 0;
	}
	/* we can safely call free now - it will be marked
	 * as free and freed once the IRQ stopped processing
	 */
	if (chan->input && chan->enabled)
		um_free_irq(chan->line->driver->read_irq, chan);
	if (chan->output && chan->enabled)
		um_free_irq(chan->line->driver->write_irq, chan);
	chan->enabled = 0;
	if (chan->ops->close != NULL)
		(*chan->ops->close)(chan->fd, chan->data);
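The hunk above removes chan_kern.c's deferred-free machinery (the irqs_to_free list), which the new epoll controller replaces with the purge flag seen later in arch/um/kernel/irq.c. For readers unfamiliar with the pattern being retired, here is a minimal userspace analogue of the idea, producers on a "cannot free here" path queue items under a lock, and a later, safe context drains and frees them; the names and pthread locking are illustrative, not the kernel's:

/* Standalone sketch (not kernel code) of the deferred-free pattern. */
#include <pthread.h>
#include <stdlib.h>

struct deferred {
	struct deferred *next;
	void *resource;			/* whatever must be released later */
};

static pthread_mutex_t defer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct deferred *defer_list;

/* Called from the context where freeing is unsafe (IRQ context in UML). */
void defer_free(struct deferred *d)
{
	pthread_mutex_lock(&defer_lock);
	d->next = defer_list;
	defer_list = d;
	pthread_mutex_unlock(&defer_lock);
}

/* Called from the safe context, mirroring free_irqs(): detach the whole
 * list under the lock, then release entries outside it.
 */
void drain_deferred(void)
{
	struct deferred *list, *next;

	pthread_mutex_lock(&defer_lock);
	list = defer_list;
	defer_list = NULL;
	pthread_mutex_unlock(&defer_lock);

	for (; list != NULL; list = next) {
		next = list->next;
		free(list->resource);
		free(list);
	}
}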
@@ -284,7 +284,7 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
	if (err)
		return err;
	if (output)
		err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
		err = um_request_irq(driver->write_irq, fd, IRQ_NONE,
				     line_write_interrupt, IRQF_SHARED,
				     driver->write_irq_name, data);
	return err;
@@ -288,7 +288,7 @@ static void uml_net_user_timer_expire(struct timer_list *t)
#endif
}

static void setup_etheraddr(struct net_device *dev, char *str)
void uml_net_setup_etheraddr(struct net_device *dev, char *str)
{
	unsigned char *addr = dev->dev_addr;
	char *end;
@@ -412,7 +412,7 @@ static void eth_configure(int n, void *init, char *mac,
	 */
	snprintf(dev->name, sizeof(dev->name), "eth%d", n);

	setup_etheraddr(dev, mac);
	uml_net_setup_etheraddr(dev, mac);

	printk(KERN_INFO "Netdevice %d (%pM) : ", n, dev->dev_addr);
@@ -13,6 +13,7 @@
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <init.h>
#include <irq_kern.h>
#include <os.h>

@@ -154,7 +155,14 @@ err_out_cleanup_hw:
/*
 * rng_cleanup - shutdown RNG module
 */
static void __exit rng_cleanup (void)

static void cleanup(void)
{
	free_irq_by_fd(random_fd);
	os_close_file(random_fd);
}

static void __exit rng_cleanup(void)
{
	os_close_file(random_fd);
	misc_deregister (&rng_miscdev);
@@ -162,6 +170,7 @@ static void __exit rng_cleanup (void)

module_init (rng_init);
module_exit (rng_cleanup);
__uml_exitcall(cleanup);

MODULE_DESCRIPTION("UML Host Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
@@ -1587,11 +1587,11 @@ int io_thread(void *arg)

	do {
		res = os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n);
		if (res > 0) {
		if (res >= 0) {
			written += res;
		} else {
			if (res != -EAGAIN) {
				printk("io_thread - read failed, fd = %d, "
				printk("io_thread - write failed, fd = %d, "
				       "err = %d\n", kernel_fd, -n);
			}
		}
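The io_thread fix above treats a zero-byte write as progress rather than an error (res >= 0 instead of res > 0) and corrects the failure message from "read" to "write". A minimal userspace sketch of the same write-all idiom; write_all is a hypothetical helper, not the driver's code:

#include <errno.h>
#include <unistd.h>

ssize_t write_all(int fd, const char *buf, size_t len)
{
	size_t written = 0;

	while (written < len) {
		ssize_t res = write(fd, buf + written, len - written);

		if (res >= 0) {
			written += res;		/* res == 0 is not an error */
		} else if (errno != EAGAIN && errno != EINTR) {
			return -1;		/* genuine write failure */
		}
	}
	return written;
}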
 1633	arch/um/drivers/vector_kern.c (new file; diff suppressed because it is too large)
 130	arch/um/drivers/vector_kern.h (new file)
@@ -0,0 +1,130 @@
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#ifndef __UM_VECTOR_KERN_H
#define __UM_VECTOR_KERN_H

#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include "vector_user.h"

/* Queue structure specially adapted for multiple enqueue/dequeue
 * in a mmsgrecv/mmsgsend context
 */

/* Dequeue method */

#define QUEUE_SENDMSG 0
#define QUEUE_SENDMMSG 1

#define VECTOR_RX 1
#define VECTOR_TX (1 << 1)
#define VECTOR_BPF (1 << 2)
#define VECTOR_QDISC_BYPASS (1 << 3)

#define ETH_MAX_PACKET 1500
#define ETH_HEADER_OTHER 32 /* just in case someone decides to go mad on QnQ */

struct vector_queue {
	struct mmsghdr *mmsg_vector;
	void **skbuff_vector;
	/* backlink to device which owns us */
	struct net_device *dev;
	spinlock_t head_lock;
	spinlock_t tail_lock;
	int queue_depth, head, tail, max_depth, max_iov_frags;
	short options;
};

struct vector_estats {
	uint64_t rx_queue_max;
	uint64_t rx_queue_running_average;
	uint64_t tx_queue_max;
	uint64_t tx_queue_running_average;
	uint64_t rx_encaps_errors;
	uint64_t tx_timeout_count;
	uint64_t tx_restart_queue;
	uint64_t tx_kicks;
	uint64_t tx_flow_control_xon;
	uint64_t tx_flow_control_xoff;
	uint64_t rx_csum_offload_good;
	uint64_t rx_csum_offload_errors;
	uint64_t sg_ok;
	uint64_t sg_linearized;
};

#define VERIFY_HEADER_NOK -1
#define VERIFY_HEADER_OK 0
#define VERIFY_CSUM_OK 1

struct vector_private {
	struct list_head list;
	spinlock_t lock;
	struct net_device *dev;

	int unit;

	/* Timeout timer in TX */

	struct timer_list tl;

	/* Scheduled "remove device" work */
	struct work_struct reset_tx;
	struct vector_fds *fds;

	struct vector_queue *rx_queue;
	struct vector_queue *tx_queue;

	int rx_irq;
	int tx_irq;

	struct arglist *parsed;

	void *transport_data; /* transport specific params if needed */

	int max_packet;
	int req_size; /* different from max packet - used for TSO */
	int headroom;

	int options;

	/* remote address if any - some transports will leave this as null */

	int header_size;
	int rx_header_size;
	int coalesce;

	void *header_rxbuffer;
	void *header_txbuffer;

	int (*form_header)(uint8_t *header,
		struct sk_buff *skb, struct vector_private *vp);
	int (*verify_header)(uint8_t *header,
		struct sk_buff *skb, struct vector_private *vp);

	spinlock_t stats_lock;

	struct tasklet_struct tx_poll;
	bool rexmit_scheduled;
	bool opened;
	bool in_write_poll;

	/* ethtool stats */

	struct vector_estats estats;
	void *bpf;

	char user[0];
};

extern int build_transport_data(struct vector_private *vp);

#endif
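The vector_queue above pairs an array of struct mmsghdr with an array of packet buffers (sk_buffs in the driver) so one recvmmsg()/sendmmsg() call moves a whole batch. A userspace sketch of the same shape, with illustrative sizes and error handling of the inner allocations elided:

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/socket.h>

#define QUEUE_DEPTH 64
#define FRAME_SIZE  1536

struct toy_queue {
	struct mmsghdr *mmsg_vector;	/* QUEUE_DEPTH message headers */
	struct iovec *iov;		/* one iovec per slot */
	char (*frames)[FRAME_SIZE];	/* one frame buffer per slot */
};

static struct toy_queue *toy_queue_new(void)
{
	struct toy_queue *q = calloc(1, sizeof(*q));
	int i;

	if (q == NULL)
		return NULL;
	q->mmsg_vector = calloc(QUEUE_DEPTH, sizeof(struct mmsghdr));
	q->iov = calloc(QUEUE_DEPTH, sizeof(struct iovec));
	q->frames = calloc(QUEUE_DEPTH, FRAME_SIZE);
	for (i = 0; i < QUEUE_DEPTH; i++) {
		/* each slot's header points at that slot's buffer */
		q->iov[i].iov_base = q->frames[i];
		q->iov[i].iov_len = FRAME_SIZE;
		q->mmsg_vector[i].msg_hdr.msg_iov = &q->iov[i];
		q->mmsg_vector[i].msg_hdr.msg_iovlen = 1;
	}
	return q;
}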
 458	arch/um/drivers/vector_transports.c (new file)
@@ -0,0 +1,458 @@
/*
 * Copyright (C) 2017 - Cambridge Greys Limited
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Licensed under the GPL.
 */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/virtio_net.h>
#include <linux/virtio_net.h>
#include <linux/virtio_byteorder.h>
#include <linux/netdev_features.h>
#include "vector_user.h"
#include "vector_kern.h"

#define GOOD_LINEAR 512
#define GSO_ERROR "Incoming GSO frames and GRO disabled on the interface"

struct gre_minimal_header {
	uint16_t header;
	uint16_t arptype;
};


struct uml_gre_data {
	uint32_t rx_key;
	uint32_t tx_key;
	uint32_t sequence;

	bool ipv6;
	bool has_sequence;
	bool pin_sequence;
	bool checksum;
	bool key;
	struct gre_minimal_header expected_header;

	uint32_t checksum_offset;
	uint32_t key_offset;
	uint32_t sequence_offset;

};

struct uml_l2tpv3_data {
	uint64_t rx_cookie;
	uint64_t tx_cookie;
	uint64_t rx_session;
	uint64_t tx_session;
	uint32_t counter;

	bool udp;
	bool ipv6;
	bool has_counter;
	bool pin_counter;
	bool cookie;
	bool cookie_is_64;

	uint32_t cookie_offset;
	uint32_t session_offset;
	uint32_t counter_offset;
};

static int l2tpv3_form_header(uint8_t *header,
	struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_l2tpv3_data *td = vp->transport_data;
	uint32_t *counter;

	if (td->udp)
		*(uint32_t *) header = cpu_to_be32(L2TPV3_DATA_PACKET);
	(*(uint32_t *) (header + td->session_offset)) = td->tx_session;

	if (td->cookie) {
		if (td->cookie_is_64)
			(*(uint64_t *)(header + td->cookie_offset)) =
				td->tx_cookie;
		else
			(*(uint32_t *)(header + td->cookie_offset)) =
				td->tx_cookie;
	}
	if (td->has_counter) {
		counter = (uint32_t *)(header + td->counter_offset);
		if (td->pin_counter) {
			*counter = 0;
		} else {
			td->counter++;
			*counter = cpu_to_be32(td->counter);
		}
	}
	return 0;
}

static int gre_form_header(uint8_t *header,
	struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_gre_data *td = vp->transport_data;
	uint32_t *sequence;
	*((uint32_t *) header) = *((uint32_t *) &td->expected_header);
	if (td->key)
		(*(uint32_t *) (header + td->key_offset)) = td->tx_key;
	if (td->has_sequence) {
		sequence = (uint32_t *)(header + td->sequence_offset);
		if (td->pin_sequence)
			*sequence = 0;
		else
			*sequence = cpu_to_be32(++td->sequence);
	}
	return 0;
}

static int raw_form_header(uint8_t *header,
	struct sk_buff *skb, struct vector_private *vp)
{
	struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;

	virtio_net_hdr_from_skb(
		skb,
		vheader,
		virtio_legacy_is_little_endian(),
		false
	);

	return 0;
}

static int l2tpv3_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_l2tpv3_data *td = vp->transport_data;
	uint32_t *session;
	uint64_t cookie;

	if ((!td->udp) && (!td->ipv6))
		header += sizeof(struct iphdr) /* fix for ipv4 raw */;

	/* we do not do a strict check for "data" packets as per
	 * the RFC spec because the pure IP spec does not have
	 * that anyway.
	 */

	if (td->cookie) {
		if (td->cookie_is_64)
			cookie = *(uint64_t *)(header + td->cookie_offset);
		else
			cookie = *(uint32_t *)(header + td->cookie_offset);
		if (cookie != td->rx_cookie) {
			if (net_ratelimit())
				netdev_err(vp->dev, "uml_l2tpv3: unknown cookie id");
			return -1;
		}
	}
	session = (uint32_t *) (header + td->session_offset);
	if (*session != td->rx_session) {
		if (net_ratelimit())
			netdev_err(vp->dev, "uml_l2tpv3: session mismatch");
		return -1;
	}
	return 0;
}

static int gre_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{

	uint32_t key;
	struct uml_gre_data *td = vp->transport_data;

	if (!td->ipv6)
		header += sizeof(struct iphdr) /* fix for ipv4 raw */;

	if (*((uint32_t *) header) != *((uint32_t *) &td->expected_header)) {
		if (net_ratelimit())
			netdev_err(vp->dev, "header type disagreement, expecting %0x, got %0x",
				*((uint32_t *) &td->expected_header),
				*((uint32_t *) header)
			);
		return -1;
	}

	if (td->key) {
		key = (*(uint32_t *)(header + td->key_offset));
		if (key != td->rx_key) {
			if (net_ratelimit())
				netdev_err(vp->dev, "unknown key id %0x, expecting %0x",
						key, td->rx_key);
			return -1;
		}
	}
	return 0;
}

static int raw_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;

	if ((vheader->gso_type != VIRTIO_NET_HDR_GSO_NONE) &&
	    (vp->req_size != 65536)) {
		if (net_ratelimit())
			netdev_err(
				vp->dev,
				GSO_ERROR
			);
	}
	if ((vheader->flags & VIRTIO_NET_HDR_F_DATA_VALID) > 0)
		return 1;

	virtio_net_hdr_to_skb(skb, vheader, virtio_legacy_is_little_endian());
	return 0;
}

static bool get_uint_param(
	struct arglist *def, char *param, unsigned int *result)
{
	char *arg = uml_vector_fetch_arg(def, param);

	if (arg != NULL) {
		if (kstrtoint(arg, 0, result) == 0)
			return true;
	}
	return false;
}

static bool get_ulong_param(
	struct arglist *def, char *param, unsigned long *result)
{
	char *arg = uml_vector_fetch_arg(def, param);

	if (arg != NULL) {
		if (kstrtoul(arg, 0, result) == 0)
			return true;
		return true;
	}
	return false;
}

static int build_gre_transport_data(struct vector_private *vp)
{
	struct uml_gre_data *td;
	int temp_int;
	int temp_rx;
	int temp_tx;

	vp->transport_data = kmalloc(sizeof(struct uml_gre_data), GFP_KERNEL);
	if (vp->transport_data == NULL)
		return -ENOMEM;
	td = vp->transport_data;
	td->sequence = 0;

	td->expected_header.arptype = GRE_IRB;
	td->expected_header.header = 0;

	vp->form_header = &gre_form_header;
	vp->verify_header = &gre_verify_header;
	vp->header_size = 4;
	td->key_offset = 4;
	td->sequence_offset = 4;
	td->checksum_offset = 4;

	td->ipv6 = false;
	if (get_uint_param(vp->parsed, "v6", &temp_int)) {
		if (temp_int > 0)
			td->ipv6 = true;
	}
	td->key = false;
	if (get_uint_param(vp->parsed, "rx_key", &temp_rx)) {
		if (get_uint_param(vp->parsed, "tx_key", &temp_tx)) {
			td->key = true;
			td->expected_header.header |= GRE_MODE_KEY;
			td->rx_key = cpu_to_be32(temp_rx);
			td->tx_key = cpu_to_be32(temp_tx);
			vp->header_size += 4;
			td->sequence_offset += 4;
		} else {
			return -EINVAL;
		}
	}

	td->sequence = false;
	if (get_uint_param(vp->parsed, "sequence", &temp_int)) {
		if (temp_int > 0) {
			vp->header_size += 4;
			td->has_sequence = true;
			td->expected_header.header |= GRE_MODE_SEQUENCE;
			if (get_uint_param(
				vp->parsed, "pin_sequence", &temp_int)) {
				if (temp_int > 0)
					td->pin_sequence = true;
			}
		}
	}
	vp->rx_header_size = vp->header_size;
	if (!td->ipv6)
		vp->rx_header_size += sizeof(struct iphdr);
	return 0;
}
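To make the offset arithmetic in build_gre_transport_data() concrete: with both key and sequence enabled, the 4-byte base header is followed by the 4-byte key, which pushes the sequence word to offset 8 and the total header to 12 bytes (plus sizeof(struct iphdr) on the RX side for raw IPv4). A worked example, with the values traced by hand from the code above:

#include <assert.h>

int main(void)
{
	int header_size = 4;		/* minimal GRE header */
	int key_offset = 4;
	int sequence_offset = 4;

	/* rx_key/tx_key given: key word follows the base header */
	header_size += 4;		/* now 8 */
	sequence_offset += 4;		/* sequence goes after the key */

	/* sequence enabled: one more 32-bit word */
	header_size += 4;		/* now 12 */

	assert(key_offset == 4);
	assert(sequence_offset == 8);
	assert(header_size == 12);
	return 0;
}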

static int build_l2tpv3_transport_data(struct vector_private *vp)
{

	struct uml_l2tpv3_data *td;
	int temp_int, temp_rxs, temp_txs;
	unsigned long temp_rx;
	unsigned long temp_tx;

	vp->transport_data = kmalloc(
		sizeof(struct uml_l2tpv3_data), GFP_KERNEL);

	if (vp->transport_data == NULL)
		return -ENOMEM;

	td = vp->transport_data;

	vp->form_header = &l2tpv3_form_header;
	vp->verify_header = &l2tpv3_verify_header;
	td->counter = 0;

	vp->header_size = 4;
	td->session_offset = 0;
	td->cookie_offset = 4;
	td->counter_offset = 4;


	td->ipv6 = false;
	if (get_uint_param(vp->parsed, "v6", &temp_int)) {
		if (temp_int > 0)
			td->ipv6 = true;
	}

	if (get_uint_param(vp->parsed, "rx_session", &temp_rxs)) {
		if (get_uint_param(vp->parsed, "tx_session", &temp_txs)) {
			td->tx_session = cpu_to_be32(temp_txs);
			td->rx_session = cpu_to_be32(temp_rxs);
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	td->cookie_is_64 = false;
	if (get_uint_param(vp->parsed, "cookie64", &temp_int)) {
		if (temp_int > 0)
			td->cookie_is_64 = true;
	}
	td->cookie = false;
	if (get_ulong_param(vp->parsed, "rx_cookie", &temp_rx)) {
		if (get_ulong_param(vp->parsed, "tx_cookie", &temp_tx)) {
			td->cookie = true;
			if (td->cookie_is_64) {
				td->rx_cookie = cpu_to_be64(temp_rx);
				td->tx_cookie = cpu_to_be64(temp_tx);
				vp->header_size += 8;
				td->counter_offset += 8;
			} else {
				td->rx_cookie = cpu_to_be32(temp_rx);
				td->tx_cookie = cpu_to_be32(temp_tx);
				vp->header_size += 4;
				td->counter_offset += 4;
			}
		} else {
			return -EINVAL;
		}
	}

	td->has_counter = false;
	if (get_uint_param(vp->parsed, "counter", &temp_int)) {
		if (temp_int > 0) {
			td->has_counter = true;
			vp->header_size += 4;
			if (get_uint_param(
				vp->parsed, "pin_counter", &temp_int)) {
				if (temp_int > 0)
					td->pin_counter = true;
			}
		}
	}

	if (get_uint_param(vp->parsed, "udp", &temp_int)) {
		if (temp_int > 0) {
			td->udp = true;
			vp->header_size += 4;
			td->counter_offset += 4;
			td->session_offset += 4;
			td->cookie_offset += 4;
		}
	}

	vp->rx_header_size = vp->header_size;
	if ((!td->ipv6) && (!td->udp))
		vp->rx_header_size += sizeof(struct iphdr);

	return 0;
}

static int build_raw_transport_data(struct vector_private *vp)
{
	if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
		if (!uml_raw_enable_vnet_headers(vp->fds->tx_fd))
			return -1;
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |= (NETIF_F_TSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
			NETIF_F_TSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"raw: using vnet headers for tso and tx/rx checksum"
		);
	}
	return 0;
}

static int build_tap_transport_data(struct vector_private *vp)
{
	if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |=
			(NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
			NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"tap/raw: using vnet headers for tso and tx/rx checksum"
		);
	} else {
		return 0; /* do not try to enable tap too if raw failed */
	}
	if (uml_tap_enable_vnet_headers(vp->fds->tx_fd))
		return 0;
	return -1;
}

int build_transport_data(struct vector_private *vp)
{
	char *transport = uml_vector_fetch_arg(vp->parsed, "transport");

	if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0)
		return build_gre_transport_data(vp);
	if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0)
		return build_l2tpv3_transport_data(vp);
	if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
		return build_raw_transport_data(vp);
	if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
		return build_tap_transport_data(vp);
	return 0;
}
 590	arch/um/drivers/vector_user.c (new file)
@@ -0,0 +1,590 @@
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <stdio.h>
#include <unistd.h>
#include <stdarg.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <net/ethernet.h>
#include <netinet/ip.h>
#include <netinet/ether.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <linux/virtio_net.h>
#include <netdb.h>
#include <stdlib.h>
#include <os.h>
#include <um_malloc.h>
#include "vector_user.h"

#define ID_GRE 0
#define ID_L2TPV3 1
#define ID_MAX 1

#define TOKEN_IFNAME "ifname"

#define TRANS_RAW "raw"
#define TRANS_RAW_LEN strlen(TRANS_RAW)

#define VNET_HDR_FAIL "could not enable vnet headers on fd %d"
#define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s"
#define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i"
#define BPF_ATTACH_FAIL "Failed to attach filter size %d to %d, err %d\n"

/* This is very ugly and brute force lookup, but it is done
 * only once at initialization so not worth doing hashes or
 * anything more intelligent
 */

char *uml_vector_fetch_arg(struct arglist *ifspec, char *token)
{
	int i;

	for (i = 0; i < ifspec->numargs; i++) {
		if (strcmp(ifspec->tokens[i], token) == 0)
			return ifspec->values[i];
	}
	return NULL;

}

struct arglist *uml_parse_vector_ifspec(char *arg)
{
	struct arglist *result;
	int pos, len;
	bool parsing_token = true, next_starts = true;

	if (arg == NULL)
		return NULL;
	result = uml_kmalloc(sizeof(struct arglist), UM_GFP_KERNEL);
	if (result == NULL)
		return NULL;
	result->numargs = 0;
	len = strlen(arg);
	for (pos = 0; pos < len; pos++) {
		if (next_starts) {
			if (parsing_token) {
				result->tokens[result->numargs] = arg + pos;
			} else {
				result->values[result->numargs] = arg + pos;
				result->numargs++;
			}
			next_starts = false;
		}
		if (*(arg + pos) == '=') {
			if (parsing_token)
				parsing_token = false;
			else
				goto cleanup;
			next_starts = true;
			(*(arg + pos)) = '\0';
		}
		if (*(arg + pos) == ',') {
			parsing_token = true;
			next_starts = true;
			(*(arg + pos)) = '\0';
		}
	}
	return result;
cleanup:
	printk(UM_KERN_ERR "vector_setup - Couldn't parse '%s'\n", arg);
	kfree(result);
	return NULL;
}

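uml_parse_vector_ifspec() above tokenizes a "token=value,token=value" string in place, overwriting each '=' and ',' with a NUL. A standalone illustration of consuming the same ifspec shape, using strsep() rather than the driver's hand-rolled scanner (the spec string is an example, not a required configuration):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char spec[] = "transport=tap,ifname=tap0,v6=0";
	char *cursor = spec, *pair;

	while ((pair = strsep(&cursor, ",")) != NULL) {
		char *value = pair;
		char *token = strsep(&value, "=");

		printf("token '%s' value '%s'\n", token, value ? value : "");
	}
	return 0;
}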
/*
 * Socket/FD configuration functions. These return a structure
 * of rx and tx descriptors to cover cases where these are not
 * the same (f.e. read via raw socket and write via tap).
 */

#define PATH_NET_TUN "/dev/net/tun"

static struct vector_fds *user_init_tap_fds(struct arglist *ifspec)
{
	struct ifreq ifr;
	int fd = -1;
	struct sockaddr_ll sock;
	int err = -ENOMEM, offload;
	char *iface;
	struct vector_fds *result = NULL;

	iface = uml_vector_fetch_arg(ifspec, TOKEN_IFNAME);
	if (iface == NULL) {
		printk(UM_KERN_ERR "uml_tap: failed to parse interface spec\n");
		goto tap_cleanup;
	}

	result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
	if (result == NULL) {
		printk(UM_KERN_ERR "uml_tap: failed to allocate file descriptors\n");
		goto tap_cleanup;
	}
	result->rx_fd = -1;
	result->tx_fd = -1;
	result->remote_addr = NULL;
	result->remote_addr_size = 0;

	/* TAP */

	fd = open(PATH_NET_TUN, O_RDWR);
	if (fd < 0) {
		printk(UM_KERN_ERR "uml_tap: failed to open tun device\n");
		goto tap_cleanup;
	}
	result->tx_fd = fd;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	strncpy((char *)&ifr.ifr_name, iface, sizeof(ifr.ifr_name) - 1);

	err = ioctl(fd, TUNSETIFF, (void *) &ifr);
	if (err != 0) {
		printk(UM_KERN_ERR "uml_tap: failed to select tap interface\n");
		goto tap_cleanup;
	}

	offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
	ioctl(fd, TUNSETOFFLOAD, offload);

	/* RAW */

	fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd == -1) {
		printk(UM_KERN_ERR
			"uml_tap: failed to create socket: %i\n", -errno);
		goto tap_cleanup;
	}
	result->rx_fd = fd;
	memset(&ifr, 0, sizeof(ifr));
	strncpy((char *)&ifr.ifr_name, iface, sizeof(ifr.ifr_name) - 1);
	if (ioctl(fd, SIOCGIFINDEX, (void *) &ifr) < 0) {
		printk(UM_KERN_ERR
			"uml_tap: failed to set interface: %i\n", -errno);
		goto tap_cleanup;
	}

	sock.sll_family = AF_PACKET;
	sock.sll_protocol = htons(ETH_P_ALL);
	sock.sll_ifindex = ifr.ifr_ifindex;

	if (bind(fd,
		(struct sockaddr *) &sock, sizeof(struct sockaddr_ll)) < 0) {
		printk(UM_KERN_ERR
			"user_init_tap: failed to bind raw pair, err %d\n",
			-errno);
		goto tap_cleanup;
	}
	return result;
tap_cleanup:
	printk(UM_KERN_ERR "user_init_tap: init failed, error %d", err);
	if (result != NULL) {
		if (result->rx_fd >= 0)
			os_close_file(result->rx_fd);
		if (result->tx_fd >= 0)
			os_close_file(result->tx_fd);
		kfree(result);
	}
	return NULL;
}


static struct vector_fds *user_init_raw_fds(struct arglist *ifspec)
{
	struct ifreq ifr;
	int rxfd = -1, txfd = -1;
	struct sockaddr_ll sock;
	int err = -ENOMEM;
	char *iface;
	struct vector_fds *result = NULL;

	iface = uml_vector_fetch_arg(ifspec, TOKEN_IFNAME);
	if (iface == NULL)
		goto cleanup;

	rxfd = socket(AF_PACKET, SOCK_RAW, ETH_P_ALL);
	if (rxfd == -1) {
		err = -errno;
		goto cleanup;
	}
	txfd = socket(AF_PACKET, SOCK_RAW, 0); /* Turn off RX on this fd */
	if (txfd == -1) {
		err = -errno;
		goto cleanup;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy((char *)&ifr.ifr_name, iface, sizeof(ifr.ifr_name) - 1);
	if (ioctl(rxfd, SIOCGIFINDEX, (void *) &ifr) < 0) {
		err = -errno;
		goto cleanup;
	}

	sock.sll_family = AF_PACKET;
	sock.sll_protocol = htons(ETH_P_ALL);
	sock.sll_ifindex = ifr.ifr_ifindex;

	if (bind(rxfd,
		(struct sockaddr *) &sock, sizeof(struct sockaddr_ll)) < 0) {
		err = -errno;
		goto cleanup;
	}

	sock.sll_family = AF_PACKET;
	sock.sll_protocol = htons(ETH_P_IP);
	sock.sll_ifindex = ifr.ifr_ifindex;

	if (bind(txfd,
		(struct sockaddr *) &sock, sizeof(struct sockaddr_ll)) < 0) {
		err = -errno;
		goto cleanup;
	}

	result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
	if (result != NULL) {
		result->rx_fd = rxfd;
		result->tx_fd = txfd;
		result->remote_addr = NULL;
		result->remote_addr_size = 0;
	}
	return result;
cleanup:
	printk(UM_KERN_ERR "user_init_raw: init failed, error %d", err);
	if (rxfd >= 0)
		os_close_file(rxfd);
	if (txfd >= 0)
		os_close_file(txfd);
	if (result != NULL)
		kfree(result);
	return NULL;
}


bool uml_raw_enable_qdisc_bypass(int fd)
{
	int optval = 1;

	if (setsockopt(fd,
		SOL_PACKET, PACKET_QDISC_BYPASS,
		&optval, sizeof(optval)) != 0) {
		return false;
	}
	return true;
}

bool uml_raw_enable_vnet_headers(int fd)
{
	int optval = 1;

	if (setsockopt(fd,
		SOL_PACKET, PACKET_VNET_HDR,
		&optval, sizeof(optval)) != 0) {
		printk(UM_KERN_INFO VNET_HDR_FAIL, fd);
		return false;
	}
	return true;
}
bool uml_tap_enable_vnet_headers(int fd)
{
	unsigned int features;
	int len = sizeof(struct virtio_net_hdr);

	if (ioctl(fd, TUNGETFEATURES, &features) == -1) {
		printk(UM_KERN_INFO TUN_GET_F_FAIL, strerror(errno));
		return false;
	}
	if ((features & IFF_VNET_HDR) == 0) {
		printk(UM_KERN_INFO "tapraw: No VNET HEADER support");
		return false;
	}
	ioctl(fd, TUNSETVNETHDRSZ, &len);
	return true;
}

static struct vector_fds *user_init_socket_fds(struct arglist *ifspec, int id)
{
	int err = -ENOMEM;
	int fd = -1, gairet;
	struct addrinfo srchints;
	struct addrinfo dsthints;
	bool v6, udp;
	char *value;
	char *src, *dst, *srcport, *dstport;
	struct addrinfo *gairesult = NULL;
	struct vector_fds *result = NULL;


	value = uml_vector_fetch_arg(ifspec, "v6");
	v6 = false;
	udp = false;
	if (value != NULL) {
		if (strtol((const char *) value, NULL, 10) > 0)
			v6 = true;
	}

	value = uml_vector_fetch_arg(ifspec, "udp");
	if (value != NULL) {
		if (strtol((const char *) value, NULL, 10) > 0)
			udp = true;
	}
	src = uml_vector_fetch_arg(ifspec, "src");
	dst = uml_vector_fetch_arg(ifspec, "dst");
	srcport = uml_vector_fetch_arg(ifspec, "srcport");
	dstport = uml_vector_fetch_arg(ifspec, "dstport");

	memset(&dsthints, 0, sizeof(dsthints));

	if (v6)
		dsthints.ai_family = AF_INET6;
	else
		dsthints.ai_family = AF_INET;

	switch (id) {
	case ID_GRE:
		dsthints.ai_socktype = SOCK_RAW;
		dsthints.ai_protocol = IPPROTO_GRE;
		break;
	case ID_L2TPV3:
		if (udp) {
			dsthints.ai_socktype = SOCK_DGRAM;
			dsthints.ai_protocol = 0;
		} else {
			dsthints.ai_socktype = SOCK_RAW;
			dsthints.ai_protocol = IPPROTO_L2TP;
		}
		break;
	default:
		printk(KERN_ERR "Unsupported socket type\n");
		return NULL;
	}
	memcpy(&srchints, &dsthints, sizeof(struct addrinfo));

	gairet = getaddrinfo(src, srcport, &dsthints, &gairesult);
	if ((gairet != 0) || (gairesult == NULL)) {
		printk(UM_KERN_ERR
			"socket_open : could not resolve src, error = %s",
			gai_strerror(gairet)
		);
		return NULL;
	}
	fd = socket(gairesult->ai_family,
		gairesult->ai_socktype, gairesult->ai_protocol);
	if (fd == -1) {
		printk(UM_KERN_ERR
			"socket_open : could not open socket, error = %d",
			-errno
		);
		goto cleanup;
	}
	if (bind(fd,
		(struct sockaddr *) gairesult->ai_addr,
		gairesult->ai_addrlen)) {
		printk(UM_KERN_ERR L2TPV3_BIND_FAIL, errno);
		goto cleanup;
	}

	if (gairesult != NULL)
		freeaddrinfo(gairesult);

	gairesult = NULL;

	gairet = getaddrinfo(dst, dstport, &dsthints, &gairesult);
	if ((gairet != 0) || (gairesult == NULL)) {
		printk(UM_KERN_ERR
			"socket_open : could not resolve dst, error = %s",
			gai_strerror(gairet)
		);
		return NULL;
	}

	result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
	if (result != NULL) {
		result->rx_fd = fd;
		result->tx_fd = fd;
		result->remote_addr = uml_kmalloc(
			gairesult->ai_addrlen, UM_GFP_KERNEL);
		if (result->remote_addr == NULL)
			goto cleanup;
		result->remote_addr_size = gairesult->ai_addrlen;
		memcpy(
			result->remote_addr,
			gairesult->ai_addr,
			gairesult->ai_addrlen
		);
	}
	freeaddrinfo(gairesult);
	return result;
cleanup:
	if (gairesult != NULL)
		freeaddrinfo(gairesult);
	printk(UM_KERN_ERR "user_init_socket: init failed, error %d", err);
	if (fd >= 0)
		os_close_file(fd);
	if (result != NULL) {
		if (result->remote_addr != NULL)
			kfree(result->remote_addr);
		kfree(result);
	}
	return NULL;
}

struct vector_fds *uml_vector_user_open(
	int unit,
	struct arglist *parsed
	)
{
	char *transport;

	if (parsed == NULL) {
		printk(UM_KERN_ERR "no parsed config for unit %d\n", unit);
		return NULL;
	}
	transport = uml_vector_fetch_arg(parsed, "transport");
	if (transport == NULL) {
		printk(UM_KERN_ERR "missing transport for unit %d\n", unit);
		return NULL;
	}
	if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
		return user_init_raw_fds(parsed);
	if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
		return user_init_tap_fds(parsed);
	if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0)
		return user_init_socket_fds(parsed, ID_GRE);
	if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0)
		return user_init_socket_fds(parsed, ID_L2TPV3);
	return NULL;
}


int uml_vector_sendmsg(int fd, void *hdr, int flags)
{
	int n;

	CATCH_EINTR(n = sendmsg(fd, (struct msghdr *) hdr, flags));
	if ((n < 0) && (errno == EAGAIN))
		return 0;
	if (n >= 0)
		return n;
	else
		return -errno;
}

int uml_vector_recvmsg(int fd, void *hdr, int flags)
{
	int n;

	CATCH_EINTR(n = recvmsg(fd, (struct msghdr *) hdr, flags));
	if ((n < 0) && (errno == EAGAIN))
		return 0;
	if (n >= 0)
		return n;
	else
		return -errno;
}

int uml_vector_writev(int fd, void *hdr, int iovcount)
{
	int n;

	CATCH_EINTR(n = writev(fd, (struct iovec *) hdr, iovcount));
	if ((n < 0) && (errno == EAGAIN))
		return 0;
	if (n >= 0)
		return n;
	else
		return -errno;
}

int uml_vector_sendmmsg(
	int fd,
	void *msgvec,
	unsigned int vlen,
	unsigned int flags)
{
	int n;

	CATCH_EINTR(n = sendmmsg(fd, (struct mmsghdr *) msgvec, vlen, flags));
	if ((n < 0) && (errno == EAGAIN))
		return 0;
	if (n >= 0)
		return n;
	else
		return -errno;
}

int uml_vector_recvmmsg(
	int fd,
	void *msgvec,
	unsigned int vlen,
	unsigned int flags)
{
	int n;

	CATCH_EINTR(
		n = recvmmsg(fd, (struct mmsghdr *) msgvec, vlen, flags, 0));
	if ((n < 0) && (errno == EAGAIN))
		return 0;
	if (n >= 0)
		return n;
	else
		return -errno;
}
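The wrappers above exist so the driver can move a whole batch of frames per syscall. A self-contained userspace demonstration of the batching on a UDP socket (the port and batch size are illustrative, not driver configuration):

#define _GNU_SOURCE
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#define BATCH 8

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(9000),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	struct mmsghdr msgs[BATCH];
	struct iovec iovs[BATCH];
	char bufs[BATCH][2048];
	int fd, i, n;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < BATCH; i++) {
		iovs[i].iov_base = bufs[i];
		iovs[i].iov_len = sizeof(bufs[i]);
		msgs[i].msg_hdr.msg_iov = &iovs[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	/* One syscall can return up to BATCH datagrams. */
	n = recvmmsg(fd, msgs, BATCH, 0, NULL);
	for (i = 0; i < n; i++)
		printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);
	return 0;
}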
int uml_vector_attach_bpf(int fd, void *bpf, int bpf_len)
{
	int err = setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, bpf, bpf_len);

	if (err < 0)
		printk(KERN_ERR BPF_ATTACH_FAIL, bpf_len, fd, -errno);
	return err;
}

#define DEFAULT_BPF_LEN 6

void *uml_vector_default_bpf(int fd, void *mac)
{
	struct sock_filter *bpf;
	uint32_t *mac1 = (uint32_t *)(mac + 2);
	uint16_t *mac2 = (uint16_t *) mac;
	struct sock_fprog bpf_prog = {
		.len = 6,
		.filter = NULL,
	};

	bpf = uml_kmalloc(
		sizeof(struct sock_filter) * DEFAULT_BPF_LEN, UM_GFP_KERNEL);
	if (bpf != NULL) {
		bpf_prog.filter = bpf;
		/* ld	[8] */
		bpf[0] = (struct sock_filter){ 0x20, 0, 0, 0x00000008 };
		/* jeq	#0xMAC[2-6] jt 2 jf 5*/
		bpf[1] = (struct sock_filter){ 0x15, 0, 3, ntohl(*mac1)};
		/* ldh	[6] */
		bpf[2] = (struct sock_filter){ 0x28, 0, 0, 0x00000006 };
		/* jeq	#0xMAC[0-1] jt 4 jf 5 */
		bpf[3] = (struct sock_filter){ 0x15, 0, 1, ntohs(*mac2)};
		/* ret	#0 */
		bpf[4] = (struct sock_filter){ 0x6, 0, 0, 0x00000000 };
		/* ret	#0x40000 */
		bpf[5] = (struct sock_filter){ 0x6, 0, 0, 0x00040000 };
		if (uml_vector_attach_bpf(
			fd, &bpf_prog, sizeof(struct sock_fprog)) < 0) {
			kfree(bpf);
			bpf = NULL;
		}
	}
	return bpf;
}
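As the instruction comments suggest, the six classic-BPF instructions above compare the frame's source MAC (Ethernet bytes 6..11) against the interface's own MAC, returning 0 (drop) on a match and 0x40000 (accept) otherwise, so the guest's own transmissions echoed back by the AF_PACKET socket are filtered out. The same program spelled out with the symbolic opcode macros, as a userspace sketch with a hypothetical helper name:

#include <arpa/inet.h>
#include <linux/filter.h>
#include <stdint.h>

struct sock_filter *build_own_mac_drop_filter(const uint8_t mac[6],
					      struct sock_filter prog[6])
{
	/* unaligned casts mirror the driver's mac1/mac2 handling */
	uint32_t mac_low4 = ntohl(*(const uint32_t *)(mac + 2));
	uint16_t mac_high2 = ntohs(*(const uint16_t *)mac);

	/* A = frame bytes 8..11 (source MAC bytes 2..5) */
	prog[0] = (struct sock_filter)BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 8);
	/* if not our MAC tail, skip to accept */
	prog[1] = (struct sock_filter)BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K,
					       mac_low4, 0, 3);
	/* A = frame bytes 6..7 (source MAC bytes 0..1) */
	prog[2] = (struct sock_filter)BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 6);
	prog[3] = (struct sock_filter)BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K,
					       mac_high2, 0, 1);
	prog[4] = (struct sock_filter)BPF_STMT(BPF_RET | BPF_K, 0);       /* drop own frame */
	prog[5] = (struct sock_filter)BPF_STMT(BPF_RET | BPF_K, 0x40000); /* accept */
	return prog;
}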
 100	arch/um/drivers/vector_user.h (new file)
@@ -0,0 +1,100 @@
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#ifndef __UM_VECTOR_USER_H
#define __UM_VECTOR_USER_H

#define MAXVARGS 20

#define TOKEN_IFNAME "ifname"

#define TRANS_RAW "raw"
#define TRANS_RAW_LEN strlen(TRANS_RAW)

#define TRANS_TAP "tap"
#define TRANS_TAP_LEN strlen(TRANS_TAP)


#define TRANS_GRE "gre"
#define TRANS_GRE_LEN strlen(TRANS_RAW)

#define TRANS_L2TPV3 "l2tpv3"
#define TRANS_L2TPV3_LEN strlen(TRANS_L2TPV3)

#ifndef IPPROTO_GRE
#define IPPROTO_GRE 0x2F
#endif

#define GRE_MODE_CHECKSUM	cpu_to_be16(8 << 12)	/* checksum */
#define GRE_MODE_RESERVED	cpu_to_be16(4 << 12)	/* unused */
#define GRE_MODE_KEY		cpu_to_be16(2 << 12)	/* KEY present */
#define GRE_MODE_SEQUENCE	cpu_to_be16(1 << 12)	/* sequence */

#define GRE_IRB cpu_to_be16(0x6558)

#define L2TPV3_DATA_PACKET 0x30000

/* IANA-assigned IP protocol ID for L2TPv3 */

#ifndef IPPROTO_L2TP
#define IPPROTO_L2TP 0x73
#endif

struct arglist {
	int	numargs;
	char	*tokens[MAXVARGS];
	char	*values[MAXVARGS];
};

/* Separating read and write FDs allows us to have different
 * rx and tx method. Example - read tap via raw socket using
 * recvmmsg, write using legacy tap write calls
 */

struct vector_fds {
	int rx_fd;
	int tx_fd;
	void *remote_addr;
	int remote_addr_size;
};

#define VECTOR_READ	1
#define VECTOR_WRITE	(1 < 1)
#define VECTOR_HEADERS	(1 < 2)

extern struct arglist *uml_parse_vector_ifspec(char *arg);

extern struct vector_fds *uml_vector_user_open(
	int unit,
	struct arglist *parsed
	);

extern char *uml_vector_fetch_arg(
	struct arglist *ifspec,
	char *token
	);

extern int uml_vector_recvmsg(int fd, void *hdr, int flags);
extern int uml_vector_sendmsg(int fd, void *hdr, int flags);
extern int uml_vector_writev(int fd, void *hdr, int iovcount);
extern int uml_vector_sendmmsg(
	int fd, void *msgvec,
	unsigned int vlen,
	unsigned int flags
	);
extern int uml_vector_recvmmsg(
	int fd,
	void *msgvec,
	unsigned int vlen,
	unsigned int flags
	);
extern void *uml_vector_default_bpf(int fd, void *mac);
extern int uml_vector_attach_bpf(int fd, void *bpf, int bpf_len);
extern bool uml_raw_enable_qdisc_bypass(int fd);
extern bool uml_raw_enable_vnet_headers(int fd);
extern bool uml_tap_enable_vnet_headers(int fd);


#endif
 1	arch/um/include/asm/asm-prototypes.h (new file)
@@ -0,0 +1 @@
#include <asm-generic/asm-prototypes.h>
@@ -18,7 +18,19 @@
#define XTERM_IRQ		13
#define RANDOM_IRQ		14

#ifdef CONFIG_UML_NET_VECTOR

#define VECTOR_BASE_IRQ		15
#define VECTOR_IRQ_SPACE	8

#define LAST_IRQ (VECTOR_IRQ_SPACE + VECTOR_BASE_IRQ)

#else

#define LAST_IRQ RANDOM_IRQ

#endif

#define NR_IRQS (LAST_IRQ + 1)

#endif
@@ -7,6 +7,7 @@
#define __IRQ_USER_H__

#include <sysdep/ptrace.h>
#include <stdbool.h>

struct irq_fd {
	struct irq_fd *next;
@@ -15,10 +16,17 @@ struct irq_fd {
	int type;
	int irq;
	int events;
	int current_events;
	bool active;
	bool pending;
	bool purge;
};

enum { IRQ_READ, IRQ_WRITE };
#define IRQ_READ  0
#define IRQ_WRITE 1
#define IRQ_NONE  2
#define MAX_IRQ_TYPE (IRQ_NONE + 1)



struct siginfo;
extern void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
@@ -65,5 +65,7 @@ extern int tap_setup_common(char *str, char *type, char **dev_name,
			    char **mac_out, char **gate_addr);
extern void register_transport(struct transport *new);
extern unsigned short eth_protocol(struct sk_buff *skb);
extern void uml_net_setup_etheraddr(struct net_device *dev, char *str);


#endif
@@ -290,15 +290,16 @@ extern void halt_skas(void);
extern void reboot_skas(void);

/* irq.c */
extern int os_waiting_for_events(struct irq_fd *active_fds);
extern int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds);
extern void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg,
		struct irq_fd *active_fds, struct irq_fd ***last_irq_ptr2);
extern void os_free_irq_later(struct irq_fd *active_fds,
		int irq, void *dev_id);
extern int os_get_pollfd(int i);
extern void os_set_pollfd(int i, int fd);
extern int os_waiting_for_events_epoll(void);
extern void *os_epoll_get_data_pointer(int index);
extern int os_epoll_triggered(int index, int events);
extern int os_event_mask(int irq_type);
extern int os_setup_epoll(void);
extern int os_add_epoll_fd(int events, int fd, void *data);
extern int os_mod_epoll_fd(int events, int fd, void *data);
extern int os_del_epoll_fd(int fd);
extern void os_set_ioignore(void);
extern void os_close_epoll_fd(void);

/* sigio.c */
extern int add_sigio_fd(int fd);
@@ -1,4 +1,6 @@
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
@@ -16,243 +18,362 @@
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler.  That will process a separate
 * list of irqs to free, with its own locking, coming back here to
 * remove list elements, taking the irq_lock to do so.
/* When epoll triggers we do not know why it did so
 * we can also have different IRQs for read and write.
 * This is why we keep a small irq_fd array for each fd -
 * one entry per IRQ type
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);
struct irq_entry {
	struct irq_entry *next;
	int fd;
	struct irq_fd *irq_array[MAX_IRQ_TYPE + 1];
};

void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}
static struct irq_entry *active_fds;

static DEFINE_SPINLOCK(irq_lock);

static void irq_io_loop(struct irq_fd *irq, struct uml_pt_regs *regs)
{
	/*
	 * irq->active guards against reentry
	 * irq->pending accumulates pending requests
	 * if pending is raised the irq_handler is re-run
	 * until pending is cleared
	 */
	if (irq->active) {
		irq->active = false;
		do {
			irq->pending = false;
			do_IRQ(irq->irq, regs);
		} while (irq->pending && (!irq->purge));
		if (!irq->purge)
			irq->active = true;
	} else {
		irq->pending = true;
	}
}
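The active/pending pair in irq_io_loop() is a common idiom for coalescing re-entrant invocations: a trigger that arrives while the handler is running only raises pending, and the outermost invocation loops until nothing is left. A minimal single-threaded illustration with assumed names:

#include <stdbool.h>
#include <stdio.h>

static bool active = true;	/* true = no handler currently running */
static bool pending;
static int depth;

static void handler(void);

void trigger(void)
{
	if (active) {
		active = false;
		do {
			pending = false;
			handler();	/* may call trigger() again */
		} while (pending);
		active = true;
	} else {
		pending = true;		/* coalesced; outer loop re-runs */
	}
}

static void handler(void)
{
	printf("handler run\n");
	if (depth++ == 0)
		trigger();		/* simulate a nested trigger */
}

int main(void)
{
	trigger();			/* prints "handler run" twice */
	return 0;
}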
||||
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
|
||||
{
|
||||
struct irq_entry *irq_entry;
|
||||
struct irq_fd *irq;
|
||||
|
||||
int n, i, j;
|
||||
|
||||
while (1) {
|
||||
/* This is now lockless - epoll keeps back-referencesto the irqs
|
||||
* which have trigger it so there is no need to walk the irq
|
||||
* list and lock it every time. We avoid locking by turning off
|
||||
* IO for a specific fd by executing os_del_epoll_fd(fd) before
|
||||
* we do any changes to the actual data structures
|
||||
*/
|
||||
n = os_waiting_for_events_epoll();
|
||||
|
||||
if (n <= 0) {
|
||||
if (n == -EINTR)
|
||||
continue;
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
for (i = 0; i < n ; i++) {
|
||||
/* Epoll back reference is the entry with 3 irq_fd
|
||||
* leaves - one for each irq type.
|
||||
*/
|
||||
irq_entry = (struct irq_entry *)
|
||||
os_epoll_get_data_pointer(i);
|
||||
for (j = 0; j < MAX_IRQ_TYPE ; j++) {
|
||||
irq = irq_entry->irq_array[j];
|
||||
if (irq == NULL)
|
||||
continue;
|
||||
if (os_epoll_triggered(i, irq->events) > 0)
|
||||
irq_io_loop(irq, regs);
|
||||
if (irq->purge) {
|
||||
irq_entry->irq_array[j] = NULL;
|
||||
kfree(irq);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
|
||||
{
|
||||
int i;
|
||||
int events = 0;
|
||||
struct irq_fd *irq;
|
||||
|
||||
for (i = 0; i < MAX_IRQ_TYPE ; i++) {
|
||||
irq = irq_entry->irq_array[i];
|
||||
if (irq != NULL)
|
||||
events = irq->events | events;
|
||||
}
|
||||
if (events > 0) {
|
||||
/* os_add_epoll will call os_mod_epoll if this already exists */
|
||||
return os_add_epoll_fd(events, irq_entry->fd, irq_entry);
|
||||
}
|
||||
/* No events - delete */
|
||||
return os_del_epoll_fd(irq_entry->fd);
|
||||
}
|
||||
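assign_epoll_events_to_irq() registers the irq_entry itself as the epoll payload, which is what os_epoll_get_data_pointer() later hands back to sigio_handler(). The underlying mechanism is epoll_event.data.ptr; a self-contained userspace sketch of the back-reference pattern:

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

struct entry {			/* stand-in for struct irq_entry */
	int fd;
};

int main(void)
{
	int epfd = epoll_create1(0);
	struct entry e = { .fd = STDIN_FILENO };
	struct epoll_event ev = {
		.events = EPOLLIN,
		.data.ptr = &e,	/* the back reference */
	};
	struct epoll_event out[8];
	int i, n;

	epoll_ctl(epfd, EPOLL_CTL_ADD, e.fd, &ev);
	n = epoll_wait(epfd, out, 8, -1);
	for (i = 0; i < n; i++) {
		/* epoll returns exactly the pointer we registered */
		struct entry *hit = out[i].data.ptr;

		printf("event on fd %d\n", hit->fd);
	}
	return 0;
}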
|
||||
|
||||
|
||||
static int activate_fd(int irq, int fd, int type, void *dev_id)
|
||||
{
|
||||
struct pollfd *tmp_pfd;
|
||||
struct irq_fd *new_fd, *irq_fd;
|
||||
struct irq_fd *new_fd;
|
||||
struct irq_entry *irq_entry;
|
||||
int i, err, events;
|
||||
unsigned long flags;
|
||||
int events, err, n;
|
||||
|
||||
err = os_set_fd_async(fd);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
err = -ENOMEM;
|
||||
new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
|
||||
if (new_fd == NULL)
|
||||
goto out;
|
||||
spin_lock_irqsave(&irq_lock, flags);
|
||||
|
||||
if (type == IRQ_READ)
|
||||
events = UM_POLLIN | UM_POLLPRI;
|
||||
else events = UM_POLLOUT;
|
||||
*new_fd = ((struct irq_fd) { .next = NULL,
|
||||
.id = dev_id,
|
||||
.fd = fd,
|
||||
.type = type,
|
||||
.irq = irq,
|
||||
.events = events,
|
||||
.current_events = 0 } );
|
||||
/* Check if we have an entry for this fd */
|
||||
|
||||
err = -EBUSY;
|
||||
spin_lock_irqsave(&irq_lock, flags);
|
||||
-	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
-		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
-			printk(KERN_ERR "Registering fd %d twice\n", fd);
-			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
-			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
-			       dev_id);
+	for (irq_entry = active_fds;
+	     irq_entry != NULL; irq_entry = irq_entry->next) {
+		if (irq_entry->fd == fd)
+			break;
+	}
+
+	if (irq_entry == NULL) {
+		/* This needs to be atomic as it may be called from an
+		 * IRQ context.
+		 */
+		irq_entry = kmalloc(sizeof(struct irq_entry), GFP_ATOMIC);
+		if (irq_entry == NULL) {
+			printk(KERN_ERR
+				"Failed to allocate new IRQ entry\n");
 			goto out_unlock;
 		}
+		irq_entry->fd = fd;
+		for (i = 0; i < MAX_IRQ_TYPE; i++)
+			irq_entry->irq_array[i] = NULL;
+		irq_entry->next = active_fds;
+		active_fds = irq_entry;
 	}
-
-	if (type == IRQ_WRITE)
-		fd = -1;
-
-	tmp_pfd = NULL;
-	n = 0;
-
-	while (1) {
-		n = os_create_pollfd(fd, events, tmp_pfd, n);
-		if (n == 0)
-			break;
-
-		/*
-		 * n > 0
-		 * It means we couldn't put new pollfd to current pollfds
-		 * and tmp_fds is NULL or too small for new pollfds array.
-		 * Needed size is equal to n as minimum.
-		 *
-		 * Here we have to drop the lock in order to call
-		 * kmalloc, which might sleep.
-		 * If something else came in and changed the pollfds array
-		 * so we will not be able to put new pollfd struct to pollfds
-		 * then we free the buffer tmp_fds and try again.
-		 */
-		spin_unlock_irqrestore(&irq_lock, flags);
-		kfree(tmp_pfd);
-
-		tmp_pfd = kmalloc(n, GFP_KERNEL);
-		if (tmp_pfd == NULL)
-			goto out_kfree;
-
-		spin_lock_irqsave(&irq_lock, flags);
-	}
-
-	*last_irq_ptr = new_fd;
-	last_irq_ptr = &new_fd->next;
-
-	spin_unlock_irqrestore(&irq_lock, flags);
-
-	/*
-	 * This calls activate_fd, so it has to be outside the critical
-	 * section.
-	 */
-	maybe_sigio_broken(fd, (type == IRQ_READ));
+	/* Check if we are trying to re-register an interrupt for a
+	 * particular fd
+	 */
+
+	if (irq_entry->irq_array[type] != NULL) {
+		printk(KERN_ERR
+			"Trying to reregister IRQ %d FD %d TYPE %d ID %p\n",
+			irq, fd, type, dev_id
+		);
+		goto out_unlock;
+	} else {
+		/* New entry for this fd */
+
+		err = -ENOMEM;
+		new_fd = kmalloc(sizeof(struct irq_fd), GFP_ATOMIC);
+		if (new_fd == NULL)
+			goto out_unlock;
+
+		events = os_event_mask(type);
+
+		*new_fd = ((struct irq_fd) {
+			.id		= dev_id,
+			.irq		= irq,
+			.type		= type,
+			.events		= events,
+			.active		= true,
+			.pending	= false,
+			.purge		= false
+		});
+		/* Turn off any IO on this fd - allows us to
+		 * avoid locking the IRQ loop
+		 */
+		os_del_epoll_fd(irq_entry->fd);
+		irq_entry->irq_array[type] = new_fd;
+	}
+
+	/* Turn back IO on with the correct (new) IO event mask */
+	assign_epoll_events_to_irq(irq_entry);
+	spin_unlock_irqrestore(&irq_lock, flags);
+	maybe_sigio_broken(fd, (type != IRQ_NONE));

 	return 0;

 out_unlock:
 	spin_unlock_irqrestore(&irq_lock, flags);
-out_kfree:
-	kfree(new_fd);
 out:
 	return err;
 }
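The shape of the new registration is easier to see outside the kernel: each fd gets exactly one epoll registration, and its event mask is recomputed from whichever per-type slots are occupied. Below is a minimal userspace sketch of that idea; all names (entry, entry_mask, rearm) are invented for illustration, and this is not code from the commit.

/* One epoll registration per fd, mask rebuilt from per-type slots. */
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

#define SLOT_READ  0
#define SLOT_WRITE 1

struct entry {
	int fd;
	int slot_used[2];	/* which IRQ "types" are registered */
};

static int entry_mask(const struct entry *e)
{
	int mask = 0;

	if (e->slot_used[SLOT_READ])
		mask |= EPOLLIN | EPOLLPRI;
	if (e->slot_used[SLOT_WRITE])
		mask |= EPOLLOUT;
	return mask;
}

/* Re-arm the fd with the combined mask: delete, then add fresh. */
static int rearm(int epfd, struct entry *e)
{
	struct epoll_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.data.ptr = e;
	ev.events = entry_mask(e);
	epoll_ctl(epfd, EPOLL_CTL_DEL, e->fd, NULL);	/* may fail if not yet added */
	return epoll_ctl(epfd, EPOLL_CTL_ADD, e->fd, &ev);
}

int main(void)
{
	int epfd = epoll_create1(0);
	struct entry e = { .fd = STDIN_FILENO };

	e.slot_used[SLOT_READ] = 1;	/* like registering an IRQ_READ source */
	if (rearm(epfd, &e))
		perror("rearm");
	close(epfd);
	return 0;
}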
-static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&irq_lock, flags);
-	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
-	spin_unlock_irqrestore(&irq_lock, flags);
-}
+/*
+ * Walk the IRQ list and dispose of any unused entries.
+ * Should be done under irq_lock.
+ */
+
+static void garbage_collect_irq_entries(void)
+{
+	int i;
+	bool reap;
+	struct irq_entry *walk;
+	struct irq_entry *previous = NULL;
+	struct irq_entry *to_free;
+
+	if (active_fds == NULL)
+		return;
+	walk = active_fds;
+	while (walk != NULL) {
+		reap = true;
+		for (i = 0; i < MAX_IRQ_TYPE ; i++) {
+			if (walk->irq_array[i] != NULL) {
+				reap = false;
+				break;
+			}
+		}
+		if (reap) {
+			if (previous == NULL)
+				active_fds = walk->next;
+			else
+				previous->next = walk->next;
+			to_free = walk;
+		} else {
+			to_free = NULL;
+		}
+		walk = walk->next;
+		if (to_free != NULL)
+			kfree(to_free);
+	}
+}
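The reaper above frees any entry whose irq_array is completely empty. The same effect, written with a pointer-to-pointer cursor instead of an explicit previous pointer, looks like this in a standalone userspace sketch (invented names, not commit code):

#include <stdbool.h>
#include <stdlib.h>

struct node {
	struct node *next;
	void *slots[2];		/* stand-in for irq_array */
};

/* Unlink and free every node with no occupied slots. */
static void reap_empty(struct node **head)
{
	struct node **prev = head;
	struct node *walk;

	while ((walk = *prev) != NULL) {
		bool empty = walk->slots[0] == NULL && walk->slots[1] == NULL;

		if (empty) {
			*prev = walk->next;	/* unlink, then free */
			free(walk);
		} else {
			prev = &walk->next;	/* keep and advance */
		}
	}
}

int main(void)
{
	struct node *head = calloc(1, sizeof(*head));

	reap_empty(&head);	/* the empty node is unlinked and freed */
	return head == NULL ? 0 : 1;
}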
-struct irq_and_dev {
-	int irq;
-	void *dev;
-};
+/*
+ * Walk the IRQ list and get the descriptor for our FD
+ */

-static int same_irq_and_dev(struct irq_fd *irq, void *d)
+static struct irq_entry *get_irq_entry_by_fd(int fd)
 {
-	struct irq_and_dev *data = d;
-
-	return ((irq->irq == data->irq) && (irq->id == data->dev));
+	struct irq_entry *walk = active_fds;
+
+	while (walk != NULL) {
+		if (walk->fd == fd)
+			return walk;
+		walk = walk->next;
+	}
+	return NULL;
 }
-static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
-{
-	struct irq_and_dev data = ((struct irq_and_dev) { .irq  = irq,
-							  .dev  = dev });
-
-	free_irq_by_cb(same_irq_and_dev, &data);
-}
+/*
+ * Walk the IRQ list and dispose of an entry for a specific
+ * device, fd and number. Note - if sharing an IRQ for read
+ * and write for the same FD it will be disposed in either case.
+ * If this behaviour is undesirable use different IRQ ids.
+ */

-static int same_fd(struct irq_fd *irq, void *fd)
+#define IGNORE_IRQ 1
+#define IGNORE_DEV (1<<1)
+
+static void do_free_by_irq_and_dev(
+	struct irq_entry *irq_entry,
+	unsigned int irq,
+	void *dev,
+	int flags
+)
 {
-	return (irq->fd == *((int *)fd));
+	int i;
+	struct irq_fd *to_free;
+
+	for (i = 0; i < MAX_IRQ_TYPE ; i++) {
+		if (irq_entry->irq_array[i] != NULL) {
+			if (
+			((flags & IGNORE_IRQ) ||
+				(irq_entry->irq_array[i]->irq == irq)) &&
+			((flags & IGNORE_DEV) ||
+				(irq_entry->irq_array[i]->id == dev))
+			) {
+				/* Turn off any IO on this fd - allows us to
+				 * avoid locking the IRQ loop
+				 */
+				os_del_epoll_fd(irq_entry->fd);
+				to_free = irq_entry->irq_array[i];
+				irq_entry->irq_array[i] = NULL;
+				assign_epoll_events_to_irq(irq_entry);
+				if (to_free->active)
+					to_free->purge = true;
+				else
+					kfree(to_free);
+			}
+		}
+	}
 }
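The IGNORE_IRQ/IGNORE_DEV flags let one helper serve three callers: free_irq_by_fd() ignores both criteria, deactivate_fd() ignores only the device id, and free_irq_by_irq_and_dev() ignores neither. A standalone sketch of just the matching rule (invented names, not commit code):

#include <stdbool.h>
#include <stdio.h>

#define IGNORE_IRQ 1
#define IGNORE_DEV (1<<1)

/* A slot matches when each criterion is either ignored or equal. */
static bool matches(unsigned int slot_irq, void *slot_dev,
		    unsigned int irq, void *dev, int flags)
{
	return ((flags & IGNORE_IRQ) || (slot_irq == irq)) &&
	       ((flags & IGNORE_DEV) || (slot_dev == dev));
}

int main(void)
{
	int token;

	/* flags == 0: both irq number and device id must match */
	printf("%d\n", matches(5, &token, 5, &token, 0));			/* 1 */
	/* IGNORE_DEV: match by irq number alone, as deactivate_fd() does */
	printf("%d\n", matches(5, &token, 5, NULL, IGNORE_DEV));		/* 1 */
	/* IGNORE_IRQ | IGNORE_DEV: match everything, as free_irq_by_fd() does */
	printf("%d\n", matches(9, NULL, 5, &token, IGNORE_IRQ | IGNORE_DEV));	/* 1 */
	return 0;
}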
 void free_irq_by_fd(int fd)
 {
-	free_irq_by_cb(same_fd, &fd);
+	struct irq_entry *to_free;
+	unsigned long flags;
+
+	spin_lock_irqsave(&irq_lock, flags);
+	to_free = get_irq_entry_by_fd(fd);
+	if (to_free != NULL) {
+		do_free_by_irq_and_dev(
+			to_free,
+			-1,
+			NULL,
+			IGNORE_IRQ | IGNORE_DEV
+		);
+	}
+	garbage_collect_irq_entries();
+	spin_unlock_irqrestore(&irq_lock, flags);
 }
+EXPORT_SYMBOL(free_irq_by_fd);

-/* Must be called with irq_lock held */
-static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
-{
-	struct irq_fd *irq;
-	int i = 0;
-	int fdi;
-
-	for (irq = active_fds; irq != NULL; irq = irq->next) {
-		if ((irq->fd == fd) && (irq->irq == irqnum))
-			break;
-		i++;
-	}
-
-	if (irq == NULL) {
-		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
-		       fd);
-		goto out;
-	}
-	fdi = os_get_pollfd(i);
-	if ((fdi != -1) && (fdi != fd)) {
-		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
-		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
-		       fdi, fd);
-		irq = NULL;
-		goto out;
-	}
-	*index_out = i;
- out:
-	return irq;
-}
+static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
+{
+	struct irq_entry *to_free;
+	unsigned long flags;
+
+	spin_lock_irqsave(&irq_lock, flags);
+	to_free = active_fds;
+	while (to_free != NULL) {
+		do_free_by_irq_and_dev(
+			to_free,
+			irq,
+			dev,
+			0
+		);
+		to_free = to_free->next;
+	}
+	garbage_collect_irq_entries();
+	spin_unlock_irqrestore(&irq_lock, flags);
+}

 void reactivate_fd(int fd, int irqnum)
 {
-	struct irq_fd *irq;
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&irq_lock, flags);
-	irq = find_irq_by_fd(fd, irqnum, &i);
-	if (irq == NULL) {
-		spin_unlock_irqrestore(&irq_lock, flags);
-		return;
-	}
-	os_set_pollfd(i, irq->fd);
-	spin_unlock_irqrestore(&irq_lock, flags);
-
-	add_sigio_fd(fd);
+	/** NOP - we do auto-EOI now **/
 }
 void deactivate_fd(int fd, int irqnum)
 {
-	struct irq_fd *irq;
+	struct irq_entry *to_free;
 	unsigned long flags;
-	int i;

+	os_del_epoll_fd(fd);
 	spin_lock_irqsave(&irq_lock, flags);
-	irq = find_irq_by_fd(fd, irqnum, &i);
-	if (irq == NULL) {
-		spin_unlock_irqrestore(&irq_lock, flags);
-		return;
+	to_free = get_irq_entry_by_fd(fd);
+	if (to_free != NULL) {
+		do_free_by_irq_and_dev(
+			to_free,
+			irqnum,
+			NULL,
+			IGNORE_DEV
+		);
 	}
-
-	os_set_pollfd(i, -1);
+	garbage_collect_irq_entries();
 	spin_unlock_irqrestore(&irq_lock, flags);

 	ignore_sigio_fd(fd);
 }
 EXPORT_SYMBOL(deactivate_fd);
@ -265,17 +386,28 @@ EXPORT_SYMBOL(deactivate_fd);
  */
 int deactivate_all_fds(void)
 {
-	struct irq_fd *irq;
-	int err;
+	unsigned long flags;
+	struct irq_entry *to_free;

-	for (irq = active_fds; irq != NULL; irq = irq->next) {
-		err = os_clear_fd_async(irq->fd);
-		if (err)
-			return err;
-	}
-	/* If there is a signal already queued, after unblocking ignore it */
+	spin_lock_irqsave(&irq_lock, flags);
+	/* Stop IO. The IRQ loop has no lock so this is our
+	 * only way of making sure we are safe to dispose
+	 * of all IRQ handlers
+	 */
 	os_set_ioignore();
+	to_free = active_fds;
+	while (to_free != NULL) {
+		do_free_by_irq_and_dev(
+			to_free,
+			-1,
+			NULL,
+			IGNORE_IRQ | IGNORE_DEV
+		);
+		to_free = to_free->next;
+	}
+	garbage_collect_irq_entries();
+	spin_unlock_irqrestore(&irq_lock, flags);
+	os_close_epoll_fd();
 	return 0;
 }
@ -353,8 +485,11 @@ void __init init_IRQ(void)

 	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

 	for (i = 1; i < NR_IRQS; i++)
 		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
+	/* Initialize EPOLL Loop */
+	os_setup_epoll();
 }

 /*
@ -121,12 +121,12 @@ static void __init um_timer_setup(void)
 	clockevents_register_device(&timer_clockevent);
 }

-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
 {
 	long long nsecs = os_persistent_clock_emulation();

-	set_normalized_timespec(ts, nsecs / NSEC_PER_SEC,
-				nsecs % NSEC_PER_SEC);
+	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
+				  nsecs % NSEC_PER_SEC);
 }

 void __init time_init(void)
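The substantive change in this hunk is the move to the 64-bit timespec64 interface, which keeps the persistent clock representable past 2038; the arithmetic splitting the nanosecond count is unchanged. A userspace sketch of that split, assuming a non-negative count such as os_persistent_clock_emulation() returns (invented names, not commit code):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct ts64 {
	int64_t tv_sec;		/* 64-bit seconds: safe past year 2038 */
	long tv_nsec;
};

static struct ts64 from_nsecs(long long nsecs)
{
	struct ts64 ts = {
		.tv_sec = nsecs / NSEC_PER_SEC,		/* whole seconds */
		.tv_nsec = nsecs % NSEC_PER_SEC		/* remainder, already < 1s */
	};
	return ts;
}

int main(void)
{
	struct ts64 ts = from_nsecs(1500000000LL);	/* 1.5 seconds */

	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);	/* 1.500000000 */
	return 0;
}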
@ -12,6 +12,7 @@
 #include <sys/mount.h>
 #include <sys/socket.h>
 #include <sys/stat.h>
+#include <sys/sysmacros.h>
 #include <sys/un.h>
 #include <sys/types.h>
 #include <os.h>
@ -1,135 +1,147 @@
 /*
+ * Copyright (C) 2017 - Cambridge Greys Ltd
+ * Copyright (C) 2011 - 2014 Cisco Systems Inc
  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Licensed under the GPL
  */

 #include <stdlib.h>
 #include <errno.h>
-#include <poll.h>
+#include <sys/epoll.h>
 #include <signal.h>
 #include <string.h>
 #include <irq_user.h>
 #include <os.h>
 #include <um_malloc.h>

-/*
- * Locked by irq_lock in arch/um/kernel/irq.c. Changed by os_create_pollfd
- * and os_free_irq_by_cb, which are called under irq_lock.
- */
-static struct pollfd *pollfds = NULL;
-static int pollfds_num = 0;
-static int pollfds_size = 0;
+/* Epoll support */
+
+static int epollfd = -1;
+
+#define MAX_EPOLL_EVENTS 64
+
+static struct epoll_event epoll_events[MAX_EPOLL_EVENTS];
+
+/* Helper to return an Epoll data pointer from an epoll event structure.
+ * We need to keep this one on the userspace side to keep includes separate
+ */
-int os_waiting_for_events(struct irq_fd *active_fds)
+void *os_epoll_get_data_pointer(int index)
 {
-	struct irq_fd *irq_fd;
-	int i, n, err;
-
-	n = poll(pollfds, pollfds_num, 0);
+	return epoll_events[index].data.ptr;
+}

+/* Helper to compare events versus the events in the epoll structure.
+ * Same as above - needs to be on the userspace side
+ */
+
+int os_epoll_triggered(int index, int events)
+{
+	return epoll_events[index].events & events;
+}
+
+/* Helper to set the event mask.
+ * The event mask is opaque to the kernel side, because it does not have
+ * access to the right includes/defines for EPOLL constants.
+ */
+
+int os_event_mask(int irq_type)
+{
+	if (irq_type == IRQ_READ)
+		return EPOLLIN | EPOLLPRI;
+	if (irq_type == IRQ_WRITE)
+		return EPOLLOUT;
+	return 0;
+}
+
+/*
+ * Initial Epoll Setup
+ */
+int os_setup_epoll(void)
+{
+	epollfd = epoll_create(MAX_EPOLL_EVENTS);
+	return epollfd;
+}
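A note on os_setup_epoll(): epoll_create() has ignored its size argument since Linux 2.6.8 (it must merely be positive), so passing MAX_EPOLL_EVENTS is a harmless hint. In new userspace code the usual equivalent is epoll_create1(), as in this short sketch (not commit code):

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	/* epoll_create1(0) replaces epoll_create(n); pass EPOLL_CLOEXEC
	 * instead of 0 if the fd must not leak across exec().
	 */
	int epfd = epoll_create1(0);

	if (epfd < 0) {
		perror("epoll_create1");
		return 1;
	}
	close(epfd);
	return 0;
}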
+/*
+ * Helper to run the actual epoll_wait
+ */
+int os_waiting_for_events_epoll(void)
+{
+	int n, err;
+
+	n = epoll_wait(epollfd,
+		(struct epoll_event *) &epoll_events, MAX_EPOLL_EVENTS, 0);
 	if (n < 0) {
 		err = -errno;
-		if (errno != EINTR)
-			printk(UM_KERN_ERR "os_waiting_for_events:"
-			       " poll returned %d, errno = %d\n", n, errno);
+		printk(
+			UM_KERN_ERR "os_waiting_for_events:"
+			" epoll returned %d, error = %s\n", n,
+			strerror(errno)
+		);
 		return err;
 	}
-
-	if (n == 0)
-		return 0;
-
-	irq_fd = active_fds;
-
-	for (i = 0; i < pollfds_num; i++) {
-		if (pollfds[i].revents != 0) {
-			irq_fd->current_events = pollfds[i].revents;
-			pollfds[i].fd = -1;
-		}
-		irq_fd = irq_fd->next;
-	}
 	return n;
 }
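The data.ptr field registered with each fd is what lets the IRQ loop go straight from an epoll event to its per-source descriptor without any search, replacing the pollfds index bookkeeping deleted here. A self-contained userspace sketch of the same dispatch pattern (invented names, not commit code):

#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

struct source {
	int fd;
	const char *name;
};

int main(void)
{
	struct epoll_event ev, events[64];
	struct source stdin_src = { STDIN_FILENO, "stdin" };
	int epfd = epoll_create1(0);
	int i, n;

	memset(&ev, 0, sizeof(ev));
	ev.events = EPOLLIN;
	ev.data.ptr = &stdin_src;	/* backreference stored by the kernel */
	epoll_ctl(epfd, EPOLL_CTL_ADD, stdin_src.fd, &ev);

	n = epoll_wait(epfd, events, 64, 1000);	/* wait up to 1s */
	for (i = 0; i < n; i++) {
		/* No lookup: the event hands back our pointer directly. */
		struct source *src = events[i].data.ptr;

		printf("event 0x%x on %s\n", events[i].events, src->name);
	}
	close(epfd);
	return 0;
}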
-int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds)
+/*
+ * Helper to add a fd to epoll
+ */
+int os_add_epoll_fd(int events, int fd, void *data)
 {
-	if (pollfds_num == pollfds_size) {
-		if (size_tmpfds <= pollfds_size * sizeof(pollfds[0])) {
-			/* return min size needed for new pollfds area */
-			return (pollfds_size + 1) * sizeof(pollfds[0]);
-		}
+	struct epoll_event event;
+	int result;

-		if (pollfds != NULL) {
-			memcpy(tmp_pfd, pollfds,
-			       sizeof(pollfds[0]) * pollfds_size);
-			/* remove old pollfds */
-			kfree(pollfds);
-		}
-		pollfds = tmp_pfd;
-		pollfds_size++;
-	} else
-		kfree(tmp_pfd);	/* remove not used tmp_pfd */
-
-	pollfds[pollfds_num] = ((struct pollfd) { .fd		= fd,
-						  .events	= events,
-						  .revents	= 0 });
-	pollfds_num++;
-
-	return 0;
+	event.data.ptr = data;
+	event.events = events | EPOLLET;
+	result = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &event);
+	if ((result) && (errno == EEXIST))
+		result = os_mod_epoll_fd(events, fd, data);
+	if (result)
+		printk("epollctl add err fd %d, %s\n", fd, strerror(errno));
+	return result;
 }
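os_add_epoll_fd() registers sources edge-triggered (EPOLLET), so a readiness event is delivered once per transition rather than continuously; consumers are then expected to drain the fd until EAGAIN before waiting again. A userspace sketch of that draining discipline, assuming the fd is non-blocking (invented names, not commit code):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Read until EAGAIN: required with EPOLLET, since the next notification
 * only arrives on a new readiness transition.
 */
static void drain(int fd)
{
	char buf[4096];
	ssize_t n;

	for (;;) {
		n = read(fd, buf, sizeof(buf));
		if (n > 0)
			continue;		/* consume everything available */
		if (n < 0 && errno == EAGAIN)
			break;			/* fully drained; wait for next edge */
		break;				/* EOF or a real error */
	}
}

int main(void)
{
	fcntl(STDIN_FILENO, F_SETFL, O_NONBLOCK);
	drain(STDIN_FILENO);
	return 0;
}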
-void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg,
-		struct irq_fd *active_fds, struct irq_fd ***last_irq_ptr2)
+/*
+ * Helper to mod the fd event mask and/or data backreference
+ */
+int os_mod_epoll_fd(int events, int fd, void *data)
 {
-	struct irq_fd **prev;
-	int i = 0;
+	struct epoll_event event;
+	int result;

-	prev = &active_fds;
-	while (*prev != NULL) {
-		if ((*test)(*prev, arg)) {
-			struct irq_fd *old_fd = *prev;
-			if ((pollfds[i].fd != -1) &&
-			    (pollfds[i].fd != (*prev)->fd)) {
-				printk(UM_KERN_ERR "os_free_irq_by_cb - "
-				       "mismatch between active_fds and "
-				       "pollfds, fd %d vs %d\n",
-				       (*prev)->fd, pollfds[i].fd);
-				goto out;
-			}
-
-			pollfds_num--;
-
-			/*
-			 * This moves the *whole* array after pollfds[i]
-			 * (though it doesn't spot as such)!
-			 */
-			memmove(&pollfds[i], &pollfds[i + 1],
-				(pollfds_num - i) * sizeof(pollfds[0]));
-			if (*last_irq_ptr2 == &old_fd->next)
-				*last_irq_ptr2 = prev;
-
-			*prev = (*prev)->next;
-			if (old_fd->type == IRQ_WRITE)
-				ignore_sigio_fd(old_fd->fd);
-			kfree(old_fd);
-			continue;
-		}
-		prev = &(*prev)->next;
-		i++;
-	}
- out:
-	return;
+	event.data.ptr = data;
+	event.events = events;
+	result = epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &event);
+	if (result)
+		printk(UM_KERN_ERR
+			"epollctl mod err fd %d, %s\n", fd, strerror(errno));
+	return result;
 }
-int os_get_pollfd(int i)
+/*
+ * Helper to delete the epoll fd
+ */
+int os_del_epoll_fd(int fd)
 {
-	return pollfds[i].fd;
-}
-
-void os_set_pollfd(int i, int fd)
-{
-	pollfds[i].fd = fd;
+	struct epoll_event event;
+	int result;
+
+	/* This is quiet as we use this as IO ON/OFF - so it is often
+	 * invoked on a non-existent fd
+	 */
+	result = epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &event);
+	return result;
 }

 void os_set_ioignore(void)
 {
 	signal(SIGIO, SIG_IGN);
 }
+
+void os_close_epoll_fd(void)
+{
+	/* Needed so we do not leak an fd when rebooting */
+	os_close_file(epollfd);
+}
@ -16,6 +16,7 @@
 #include <os.h>
 #include <sysdep/mcontext.h>
 #include <um_malloc.h>
+#include <sys/ucontext.h>

 void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
 	[SIGTRAP] = relay_signal,
@ -159,7 +160,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {

 static void hard_handler(int sig, siginfo_t *si, void *p)
 {
-	struct ucontext *uc = p;
+	ucontext_t *uc = p;
 	mcontext_t *mc = &uc->uc_mcontext;
 	unsigned long pending = 1UL << sig;
@ -6,11 +6,12 @@
 #include <sysdep/stub.h>
 #include <sysdep/faultinfo.h>
 #include <sysdep/mcontext.h>
+#include <sys/ucontext.h>

 void __attribute__ ((__section__ (".__syscall_stub")))
 stub_segv_handler(int sig, siginfo_t *info, void *p)
 {
-	struct ucontext *uc = p;
+	ucontext_t *uc = p;

 	GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
 			      &uc->uc_mcontext);
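These last two hunks are the "POSIX ucontext_t" change from the merge summary: glibc 2.26 dropped the struct ucontext tag from its headers, so portable code must use the ucontext_t typedef. A minimal userspace sketch of a handler written that way (not commit code; the uc_mcontext layout is arch-specific and is not inspected here):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

static void handler(int sig, siginfo_t *si, void *p)
{
	ucontext_t *uc = p;	/* POSIX typedef, not "struct ucontext" */

	(void)uc;		/* uc->uc_mcontext would be inspected here */
	(void)si;
	(void)sig;
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;	/* deliver siginfo and ucontext */
	sigaction(SIGUSR2, &sa, NULL);
	raise(SIGUSR2);
	puts("handled");
	return 0;
}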