Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus: (44 commits)
  Add MAINTAINERS entry for virtio_console
  virtio: console: Fill ports' entire in_vq with buffers
  virtio: console: Error out if we can't allocate buffers for control queue
  virtio: console: Add ability to remove module
  virtio: console: Ensure no memleaks in case of unused buffers
  virtio: console: show error message if hvc_alloc fails for console ports
  virtio: console: Add debugfs files for each port to expose debug info
  virtio: console: Add ability to hot-unplug ports
  virtio: console: Handle port hot-plug
  virtio: console: Remove cached data on port close
  virtio: console: Register with sysfs and create a 'name' attribute for ports
  virtio: console: Ensure only one process can have a port open at a time
  virtio: console: Add file operations to ports for open/read/write/poll
  virtio: console: Associate each port with a char device
  virtio: console: Prepare for writing to userspace buffers
  virtio: console: Add a new MULTIPORT feature, support for generic ports
  virtio: console: Introduce a send_buf function for a common path for sending data to host
  virtio: console: Introduce function to hand off data from host to readers
  virtio: console: Separate out find_vqs operation into a different function
  virtio: console: Separate out console init into a new function
  ...
commit 10df38cafc
@@ -34,7 +34,6 @@
#include <sys/uio.h>
#include <termios.h>
#include <getopt.h>
#include <zlib.h>
#include <assert.h>
#include <sched.h>
#include <limits.h>
@@ -2393,6 +2393,12 @@ L: linuxppc-dev@ozlabs.org
S: Odd Fixes
F: drivers/char/hvc_*

+VIRTIO CONSOLE DRIVER
+M: Amit Shah <amit.shah@redhat.com>
+L: virtualization@lists.linux-foundation.org
+S: Maintained
+F: drivers/char/virtio_console.c
+
GSPCA FINEPIX SUBDRIVER
M: Frank Zago <frank@zago.net>
L: linux-media@vger.kernel.org
@@ -243,10 +243,12 @@ static int index_to_minor(int index)
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
+	struct request_queue *q;
	int err;
	u64 cap;
-	u32 v;
-	u32 blk_size, sg_elems;
+	u32 v, blk_size, sg_elems, opt_io_size;
+	u16 min_io_size;
+	u8 physical_block_exp, alignment_offset;

	if (index_to_minor(index) >= 1 << MINORBITS)
		return -ENOSPC;

@@ -293,13 +295,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
		goto out_mempool;
	}

-	vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
-	if (!vblk->disk->queue) {
+	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
+	if (!q) {
		err = -ENOMEM;
		goto out_put_disk;
	}

-	vblk->disk->queue->queuedata = vblk;
+	q->queuedata = vblk;

	if (index < 26) {
		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);

@@ -323,10 +325,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)

	/* If barriers are supported, tell block layer that queue is ordered */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
-		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH,
+		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				  virtblk_prepare_flush);
	else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
-		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);
+		blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))

@@ -345,14 +347,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
-	blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
-	blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
+	blk_queue_max_phys_segments(q, vblk->sg_elems-2);
+	blk_queue_max_hw_segments(q, vblk->sg_elems-2);

	/* No need to bounce any requests */
-	blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);
+	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
-	blk_queue_max_sectors(vblk->disk->queue, -1U);
+	blk_queue_max_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */

@@ -360,16 +362,45 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
			  offsetof(struct virtio_blk_config, size_max),
			  &v);
	if (!err)
-		blk_queue_max_segment_size(vblk->disk->queue, v);
+		blk_queue_max_segment_size(q, v);
	else
-		blk_queue_max_segment_size(vblk->disk->queue, -1U);
+		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
			  offsetof(struct virtio_blk_config, blk_size),
			  &blk_size);
	if (!err)
-		blk_queue_logical_block_size(vblk->disk->queue, blk_size);
+		blk_queue_logical_block_size(q, blk_size);
+	else
+		blk_size = queue_logical_block_size(q);
+
+	/* Use topology information if available */
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			  offsetof(struct virtio_blk_config, physical_block_exp),
+			  &physical_block_exp);
+	if (!err && physical_block_exp)
+		blk_queue_physical_block_size(q,
+				blk_size * (1 << physical_block_exp));
+
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			  offsetof(struct virtio_blk_config, alignment_offset),
+			  &alignment_offset);
+	if (!err && alignment_offset)
+		blk_queue_alignment_offset(q, blk_size * alignment_offset);
+
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			  offsetof(struct virtio_blk_config, min_io_size),
+			  &min_io_size);
+	if (!err && min_io_size)
+		blk_queue_io_min(q, blk_size * min_io_size);
+
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			  offsetof(struct virtio_blk_config, opt_io_size),
+			  &opt_io_size);
+	if (!err && opt_io_size)
+		blk_queue_io_opt(q, blk_size * opt_io_size);


	add_disk(vblk->disk);
	return 0;

@@ -412,7 +443,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
-	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH
+	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
};

/*
@@ -666,6 +666,14 @@ config VIRTIO_CONSOLE
	help
	  Virtio console for use with lguest and other hypervisors.

+	  Also serves as a general-purpose serial device for data
+	  transfer between the guest and host. Character devices at
+	  /dev/vportNpn will be created when corresponding ports are
+	  found, where N is the device number and n is the port number
+	  within that device. If specified by the host, a sysfs
+	  attribute called 'name' will be populated with a name for
+	  the port which can be used by udev scripts to create a
+	  symlink to the device.

config HVCS
	tristate "IBM Hypervisor Virtual Console Server support"
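Note: the help text above says the sysfs 'name' attribute can be used to locate a port. As a rough userspace sketch (the /sys/class/virtio-ports layout and the example lookup are assumptions based on how the rewritten driver registers ports, not something this hunk spells out), a program could resolve a port by its advertised name and then open the matching /dev node:

/* Sketch: find a virtio-serial port by its sysfs 'name' and open its /dev node.
 * Assumes ports appear as /sys/class/virtio-ports/vportNpn/name. */
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

static int open_port_by_name(const char *wanted)
{
	char path[256], buf[128] = "";
	struct dirent *d;
	DIR *dir = opendir("/sys/class/virtio-ports");

	if (!dir)
		return -1;
	while ((d = readdir(dir)) != NULL) {
		FILE *f;

		if (d->d_name[0] == '.')
			continue;
		snprintf(path, sizeof(path),
			 "/sys/class/virtio-ports/%s/name", d->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		/* the attribute typically ends with a newline; strip it */
		if (fgets(buf, sizeof(buf), f) && buf[strlen(buf) - 1] == '\n')
			buf[strlen(buf) - 1] = '\0';
		fclose(f);
		if (strcmp(buf, wanted) == 0) {
			snprintf(path, sizeof(path), "/dev/%s", d->d_name);
			closedir(dir);
			return open(path, O_RDWR);
		}
	}
	closedir(dir);
	return -1;
}

A udev rule would do the same lookup declaratively to create a stable symlink, which is exactly the use case the help text mentions.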
@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
	return cnt;
}

-static struct hv_ops hvc_beat_get_put_ops = {
+static const struct hv_ops hvc_beat_get_put_ops = {
	.get_chars = hvc_beat_get_chars,
	.put_chars = hvc_beat_put_chars,
};
@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
 * console interfaces but can still be used as a tty device. This has to be
 * static because kmalloc will not work during early console init.
 */
-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
	{[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};

@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kref *kref)
 * vty adapters do NOT get an hvc_instantiate() callback since they
 * appear after early console init.
 */
-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
{
	struct hvc_struct *hp;

@@ -749,7 +749,8 @@ static const struct tty_operations hvc_ops = {
};

struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
-				       struct hv_ops *ops, int outbuf_size)
+				       const struct hv_ops *ops,
+				       int outbuf_size)
{
	struct hvc_struct *hp;
	int i;
@@ -55,7 +55,7 @@ struct hvc_struct {
	int outbuf_size;
	int n_outbuf;
	uint32_t vtermno;
-	struct hv_ops *ops;
+	const struct hv_ops *ops;
	int irq_requested;
	int data;
	struct winsize ws;

@@ -76,11 +76,12 @@ struct hv_ops {
};

/* Register a vterm and a slot index for use as a console (console_init) */
-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
+extern int hvc_instantiate(uint32_t vtermno, int index,
+			   const struct hv_ops *ops);

/* register a vterm for hvc tty operation (module_init or hotplug add) */
extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
-				struct hv_ops *ops, int outbuf_size);
+				const struct hv_ops *ops, int outbuf_size);
/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
extern int hvc_remove(struct hvc_struct *hp);
@@ -197,7 +197,7 @@ done:
	return sent;
}

-static struct hv_ops hvc_get_put_ops = {
+static const struct hv_ops hvc_get_put_ops = {
	.get_chars = get_chars,
	.put_chars = put_chars,
	.notifier_add = notifier_add_irq,
@@ -922,7 +922,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)


/* HVC operations */
-static struct hv_ops hvc_iucv_ops = {
+static const struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
	return i;
}

-static struct hv_ops hvc_rtas_get_put_ops = {
+static const struct hv_ops hvc_rtas_get_put_ops = {
	.get_chars = hvc_rtas_read_console,
	.put_chars = hvc_rtas_write_console,
};
@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
	return i;
}

-static struct hv_ops hvc_udbg_ops = {
+static const struct hv_ops hvc_udbg_ops = {
	.get_chars = hvc_udbg_get,
	.put_chars = hvc_udbg_put,
};
@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
	return got;
}

-static struct hv_ops hvc_get_put_ops = {
+static const struct hv_ops hvc_get_put_ops = {
	.get_chars = filtered_get_chars,
	.put_chars = hvc_put_chars,
	.notifier_add = notifier_add_irq,
@@ -122,7 +122,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
	return recv;
}

-static struct hv_ops hvc_ops = {
+static const struct hv_ops hvc_ops = {
	.get_chars = read_console,
	.put_chars = write_console,
	.notifier_add = notifier_add_irq,
File diff suppressed because it is too large
@@ -28,7 +28,7 @@
struct virtio_balloon
{
	struct virtio_device *vdev;
-	struct virtqueue *inflate_vq, *deflate_vq;
+	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;

	/* Where the ballooning thread waits for config to change. */
	wait_queue_head_t config_change;

@@ -49,6 +49,10 @@ struct virtio_balloon
	/* The array of pfns we tell the Host about. */
	unsigned int num_pfns;
	u32 pfns[256];
+
+	/* Memory statistics */
+	int need_stats_update;
+	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
};

static struct virtio_device_id id_table[] = {

@@ -154,6 +158,72 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
	}
}

+static inline void update_stat(struct virtio_balloon *vb, int idx,
+			       u16 tag, u64 val)
+{
+	BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
+	vb->stats[idx].tag = tag;
+	vb->stats[idx].val = val;
+}
+
+#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
+
+static void update_balloon_stats(struct virtio_balloon *vb)
+{
+	unsigned long events[NR_VM_EVENT_ITEMS];
+	struct sysinfo i;
+	int idx = 0;
+
+	all_vm_events(events);
+	si_meminfo(&i);
+
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
+		    pages_to_bytes(events[PSWPIN]));
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
+		    pages_to_bytes(events[PSWPOUT]));
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
+		    pages_to_bytes(i.freeram));
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
+		    pages_to_bytes(i.totalram));
+}
+
+/*
+ * While most virtqueues communicate guest-initiated requests to the hypervisor,
+ * the stats queue operates in reverse. The driver initializes the virtqueue
+ * with a single buffer. From that point forward, all conversations consist of
+ * a hypervisor request (a call to this function) which directs us to refill
+ * the virtqueue with a fresh stats buffer. Since stats collection can sleep,
+ * we notify our kthread which does the actual work via stats_handle_request().
+ */
+static void stats_request(struct virtqueue *vq)
+{
+	struct virtio_balloon *vb;
+	unsigned int len;
+
+	vb = vq->vq_ops->get_buf(vq, &len);
+	if (!vb)
+		return;
+	vb->need_stats_update = 1;
+	wake_up(&vb->config_change);
+}
+
+static void stats_handle_request(struct virtio_balloon *vb)
+{
+	struct virtqueue *vq;
+	struct scatterlist sg;
+
+	vb->need_stats_update = 0;
+	update_balloon_stats(vb);
+
+	vq = vb->stats_vq;
+	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
+	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+		BUG();
+	vq->vq_ops->kick(vq);
+}
+
static void virtballoon_changed(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

@@ -190,8 +260,11 @@ static int balloon(void *_vballoon)
		try_to_freeze();
		wait_event_interruptible(vb->config_change,
					 (diff = towards_target(vb)) != 0
+					 || vb->need_stats_update
					 || kthread_should_stop()
					 || freezing(current));
+		if (vb->need_stats_update)
+			stats_handle_request(vb);
		if (diff > 0)
			fill_balloon(vb, diff);
		else if (diff < 0)

@@ -204,10 +277,10 @@ static int balloon(void *_vballoon)
static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
-	struct virtqueue *vqs[2];
-	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack };
-	const char *names[] = { "inflate", "deflate" };
-	int err;
+	struct virtqueue *vqs[3];
+	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
+	const char *names[] = { "inflate", "deflate", "stats" };
+	int err, nvqs;

	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {

@@ -219,14 +292,31 @@ static int virtballoon_probe(struct virtio_device *vdev)
	vb->num_pages = 0;
	init_waitqueue_head(&vb->config_change);
	vb->vdev = vdev;
+	vb->need_stats_update = 0;

-	/* We expect two virtqueues. */
-	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
+	/* We expect two virtqueues: inflate and deflate,
+	 * and optionally stat. */
+	nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
+	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
	if (err)
		goto out_free_vb;

	vb->inflate_vq = vqs[0];
	vb->deflate_vq = vqs[1];
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
+		struct scatterlist sg;
+		vb->stats_vq = vqs[2];
+
+		/*
+		 * Prime this virtqueue with one buffer so the hypervisor can
+		 * use it to signal us later.
+		 */
+		sg_init_one(&sg, vb->stats, sizeof vb->stats);
+		if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
+						  &sg, 1, 0, vb) < 0)
+			BUG();
+		vb->stats_vq->vq_ops->kick(vb->stats_vq);
+	}

	vb->thread = kthread_run(balloon, vb, "vballoon");
	if (IS_ERR(vb->thread)) {

@@ -264,7 +354,10 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
	kfree(vb);
}

-static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST };
+static unsigned int features[] = {
+	VIRTIO_BALLOON_F_MUST_TELL_HOST,
+	VIRTIO_BALLOON_F_STATS_VQ,
+};

static struct virtio_driver virtio_balloon_driver = {
	.feature_table = features,
@@ -702,7 +702,7 @@ static struct pci_driver virtio_pci_driver = {
	.name = "virtio-pci",
	.id_table = virtio_pci_id_table,
	.probe = virtio_pci_probe,
-	.remove = virtio_pci_remove,
+	.remove = __devexit_p(virtio_pci_remove),
#ifdef CONFIG_PM
	.suspend = virtio_pci_suspend,
	.resume = virtio_pci_resume,
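The one-line change above matters when the kernel is built without hotplug support: functions marked __devexit are then discarded, and __devexit_p() evaluates to NULL instead of the function's address, so the driver structure never points at discarded code. A minimal sketch of the same pairing in a made-up PCI driver (all mydrv_* names are illustrative, not from this commit):

/* Sketch: pair __devexit with __devexit_p() so .remove becomes NULL when
 * hotplug-removal code is compiled out. */
#include <linux/pci.h>

static int __devinit mydrv_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
{
	return 0;	/* claim the device, set up resources */
}

static void __devexit mydrv_remove(struct pci_dev *pdev)
{
	/* release resources here */
}

static struct pci_driver mydrv_driver = {
	.name	= "mydrv",
	.probe	= mydrv_probe,
	.remove	= __devexit_p(mydrv_remove),	/* NULL if __devexit code is discarded */
};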
@@ -21,6 +21,24 @@
#include <linux/virtio_config.h>
#include <linux/device.h>

+/* virtio guest is communicating with a virtual "device" that actually runs on
+ * a host processor. Memory barriers are used to control SMP effects. */
+#ifdef CONFIG_SMP
+/* Where possible, use SMP barriers which are more lightweight than mandatory
+ * barriers, because mandatory barriers control MMIO effects on accesses
+ * through relaxed memory I/O windows (which virtio does not use). */
+#define virtio_mb() smp_mb()
+#define virtio_rmb() smp_rmb()
+#define virtio_wmb() smp_wmb()
+#else
+/* We must force memory ordering even if guest is UP since host could be
+ * running on another CPU, but SMP barriers are defined to barrier() in that
+ * configuration. So fall back to mandatory barriers instead. */
+#define virtio_mb() mb()
+#define virtio_rmb() rmb()
+#define virtio_wmb() wmb()
+#endif
+
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...) \

@@ -36,10 +54,9 @@
		panic("%s:in_use = %i\n", \
		      (_vq)->vq.name, (_vq)->in_use); \
	(_vq)->in_use = __LINE__; \
-	mb(); \
	} while (0)
#define END_USE(_vq) \
-	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
+	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...) \
	do { \

@@ -221,13 +238,13 @@ static void vring_kick(struct virtqueue *_vq)
	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
-	wmb();
+	virtio_wmb();

	vq->vring.avail->idx += vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
-	mb();
+	virtio_mb();

	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */

@@ -286,7 +303,7 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
	}

	/* Only get used array entries after they have been exposed by host. */
-	rmb();
+	virtio_rmb();

	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

@@ -324,7 +341,7 @@ static bool vring_enable_cb(struct virtqueue *_vq)
	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	mb();
+	virtio_mb();
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;

@@ -334,6 +351,30 @@ static bool vring_enable_cb(struct virtqueue *_vq)
	return true;
}

+static void *vring_detach_unused_buf(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	unsigned int i;
+	void *buf;
+
+	START_USE(vq);
+
+	for (i = 0; i < vq->vring.num; i++) {
+		if (!vq->data[i])
+			continue;
+		/* detach_buf clears data, so grab it now. */
+		buf = vq->data[i];
+		detach_buf(vq, i);
+		END_USE(vq);
+		return buf;
+	}
+	/* That should have freed everything. */
+	BUG_ON(vq->num_free != vq->vring.num);
+
+	END_USE(vq);
+	return NULL;
+}
+
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

@@ -360,6 +401,7 @@ static struct virtqueue_ops vring_vq_ops = {
	.kick = vring_kick,
	.disable_cb = vring_disable_cb,
	.enable_cb = vring_enable_cb,
+	.detach_unused_buf = vring_detach_unused_buf,
};

struct virtqueue *vring_new_virtqueue(unsigned int num,

@@ -406,8 +448,11 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
-	for (i = 0; i < num-1; i++)
+	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
+		vq->data[i] = NULL;
+	}
+	vq->data[i] = NULL;

	return &vq->vq;
}
@@ -51,6 +51,9 @@ struct virtqueue {
 *	This re-enables callbacks; it returns "false" if there are pending
 *	buffers in the queue, to detect a possible race between the driver
 *	checking for more work, and enabling callbacks.
+ * @detach_unused_buf: detach first unused buffer
+ *	vq: the struct virtqueue we're talking about.
+ *	Returns NULL or the "data" token handed to add_buf
 *
 * Locking rules are straightforward: the driver is responsible for
 * locking. No two operations may be invoked simultaneously, with the exception

@@ -71,6 +74,7 @@ struct virtqueue_ops {

	void (*disable_cb)(struct virtqueue *vq);
	bool (*enable_cb)(struct virtqueue *vq);
+	void *(*detach_unused_buf)(struct virtqueue *vq);
};

/**
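The @detach_unused_buf documentation above describes the cleanup primitive this series leans on: on remove, a driver can pull back every buffer it queued that the host never consumed (this is what "Ensure no memleaks in case of unused buffers" in the commit list is about). A minimal sketch of the intended call pattern, where struct my_buf and free_buf() are hypothetical placeholders and only the vq_ops indirection comes from this diff:

/* Sketch: drain buffers the host never used during driver teardown. */
static void drain_unused_bufs(struct virtqueue *vq)
{
	struct my_buf *buf;

	/* Each call detaches one still-queued buffer and returns the "data"
	 * token that was originally handed to add_buf(); NULL means empty. */
	while ((buf = vq->vq_ops->detach_unused_buf(vq)) != NULL)
		free_buf(buf);
}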
@@ -7,6 +7,7 @@

/* The feature bitmap for virtio balloon */
#define VIRTIO_BALLOON_F_MUST_TELL_HOST	0 /* Tell before reclaiming pages */
+#define VIRTIO_BALLOON_F_STATS_VQ	1 /* Memory Stats virtqueue */

/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12

@@ -18,4 +19,18 @@ struct virtio_balloon_config
	/* Number of pages we've actually got in balloon. */
	__le32 actual;
};

+#define VIRTIO_BALLOON_S_SWAP_IN  0   /* Amount of memory swapped in */
+#define VIRTIO_BALLOON_S_SWAP_OUT 1   /* Amount of memory swapped out */
+#define VIRTIO_BALLOON_S_MAJFLT   2   /* Number of major faults */
+#define VIRTIO_BALLOON_S_MINFLT   3   /* Number of minor faults */
+#define VIRTIO_BALLOON_S_MEMFREE  4   /* Total amount of free memory */
+#define VIRTIO_BALLOON_S_MEMTOT   5   /* Total amount of memory */
+#define VIRTIO_BALLOON_S_NR       6
+
+struct virtio_balloon_stat {
+	u16 tag;
+	u64 val;
+} __attribute__((packed));
+
#endif /* _LINUX_VIRTIO_BALLOON_H */
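Because struct virtio_balloon_stat is packed, each entry occupies exactly 10 bytes (a 16-bit tag followed by a 64-bit value), and the buffer the guest queues on the stats virtqueue holds VIRTIO_BALLOON_S_NR such entries back to back. A hedged host-side sketch of walking that buffer (the decoder below is an assumption for illustration; byte-order handling is deliberately omitted since this hunk does not define it):

/* Sketch: walk a returned balloon stats buffer on the host side. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct balloon_stat {			/* mirrors struct virtio_balloon_stat */
	uint16_t tag;
	uint64_t val;
} __attribute__((packed));

static void dump_stats(const uint8_t *buf, size_t len)
{
	struct balloon_stat st;
	size_t off;

	for (off = 0; off + sizeof(st) <= len; off += sizeof(st)) {
		memcpy(&st, buf + off, sizeof(st));	/* avoid unaligned access */
		printf("tag %u = %llu\n", st.tag, (unsigned long long)st.val);
	}
}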
@@ -15,6 +15,7 @@
#define VIRTIO_BLK_F_BLK_SIZE	6	/* Block size of disk is available*/
#define VIRTIO_BLK_F_SCSI	7	/* Supports scsi command passthru */
#define VIRTIO_BLK_F_FLUSH	9	/* Cache flush command support */
+#define VIRTIO_BLK_F_TOPOLOGY	10	/* Topology information is available */

struct virtio_blk_config {
	/* The capacity (in 512-byte sectors). */

@@ -29,8 +30,20 @@ struct virtio_blk_config {
		__u8 heads;
		__u8 sectors;
	} geometry;
+
	/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
	__u32 blk_size;
+
+	/* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY */
+	/* exponent for physical block per logical block. */
+	__u8 physical_block_exp;
+	/* alignment offset in logical blocks. */
+	__u8 alignment_offset;
+	/* minimum I/O size without performance penalty in logical blocks. */
+	__u16 min_io_size;
+	/* optimal sustained I/O size in logical blocks. */
+	__u32 opt_io_size;
+
} __attribute__((packed));

/*
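All four topology fields are expressed in logical blocks, which is why virtblk_probe() in the earlier hunk multiplies them by the logical block size (and shifts by physical_block_exp) before handing them to the block layer. A small worked example with illustrative values (not taken from the diff):

/* Worked example of the topology math used in virtblk_probe().
 * Sample values (illustrative only): blk_size = 512, physical_block_exp = 3,
 * alignment_offset = 0, min_io_size = 8, opt_io_size = 128 (logical blocks). */
#include <stdio.h>

int main(void)
{
	unsigned int blk_size = 512;
	unsigned int physical_block_exp = 3, alignment_offset = 0;
	unsigned int min_io_size = 8, opt_io_size = 128;

	printf("physical block size: %u\n", blk_size * (1u << physical_block_exp)); /* 4096 */
	printf("alignment offset:    %u\n", blk_size * alignment_offset);           /* 0 */
	printf("minimum I/O size:    %u\n", blk_size * min_io_size);                /* 4096 */
	printf("optimal I/O size:    %u\n", blk_size * opt_io_size);                /* 65536 */
	return 0;
}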
@@ -3,19 +3,45 @@
#include <linux/types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
-/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
- * anyone can use the definitions to implement compatible drivers/servers. */
+/*
+ * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
+ * anyone can use the definitions to implement compatible drivers/servers.
+ *
+ * Copyright (C) Red Hat, Inc., 2009, 2010
+ */

/* Feature bits */
#define VIRTIO_CONSOLE_F_SIZE	0	/* Does host provide console size? */
+#define VIRTIO_CONSOLE_F_MULTIPORT 1	/* Does host provide multiple ports? */

struct virtio_console_config {
	/* colums of the screens */
	__u16 cols;
	/* rows of the screens */
	__u16 rows;
+	/* max. number of ports this device can hold */
+	__u32 max_nr_ports;
+	/* number of ports added so far */
+	__u32 nr_ports;
} __attribute__((packed));

+/*
+ * A message that's passed between the Host and the Guest for a
+ * particular port.
+ */
+struct virtio_console_control {
+	__u32 id;	/* Port number */
+	__u16 event;	/* The kind of control event (see below) */
+	__u16 value;	/* Extra information for the key */
+};
+
+/* Some events for control messages */
+#define VIRTIO_CONSOLE_PORT_READY	0
+#define VIRTIO_CONSOLE_CONSOLE_PORT	1
+#define VIRTIO_CONSOLE_RESIZE		2
+#define VIRTIO_CONSOLE_PORT_OPEN	3
+#define VIRTIO_CONSOLE_PORT_NAME	4
+#define VIRTIO_CONSOLE_PORT_REMOVE	5
+
#ifdef __KERNEL__
int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
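Control messages are just struct virtio_console_control packets sent over the device's control virtqueue, tagged with one of the event codes above; the guest, for instance, announces a newly set-up port with VIRTIO_CONSOLE_PORT_READY. A sketch of building such a packet, where send_control() is a hypothetical helper standing in for the add_buf-plus-kick done on the control queue (only the struct layout and event codes come from this header):

/* Sketch: tell the host that a guest port is ready for use. */
static void announce_port_ready(u32 port_id)
{
	struct virtio_console_control cpkt;

	cpkt.id = port_id;			/* which port this refers to */
	cpkt.event = VIRTIO_CONSOLE_PORT_READY;	/* guest -> host: port is usable */
	cpkt.value = 1;

	send_control(&cpkt, sizeof(cpkt));	/* hypothetical: queue on control vq and kick */
}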