virtio, vhost: fixes

ARM DMA fixes
vhost vsock bugfix

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----
iQEcBAABAgAGBQJYh9ZlAAoJECgfDbjSjVRptqsIAJ03KvIYEUH2+kWr21UWWoFk
wYGGtf2Voexnj6SVcY6JMTG4J0PF5y1K9XL1/Z0kXFQWy+PCqp4bdE7PUpxs8PlB
ShapIoMTU7dfSL2cFfrYb3Nx4hFiube3HXSYz5LhGUBwWT7v5j9fRodoNZjyi3rs
faNKR9psNHFpkSgRTJKVnj53/dU+HcSP1S+x9qpkS2bYHvLA2vQo/FaKg/M8i9jt
JzCcX/RbOij9DlHgzT64cFTnIZVekauUsQAJcs8e/SHhwm7CLAlrVIrjcG4H4mvg
XIKcL1YJKVrJzjnDcezSkfQpc+oPn2t4Qk+VOcnqbHPsg23Hr5Rj8krixCl6XmQ=
=Yeqz
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio/vhost fixes from Michael Tsirkin:

 - ARM DMA fixes

 - vhost vsock bugfix

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vring: Force use of DMA API for ARM-based systems with legacy devices
  virtio_mmio: Set DMA masks appropriately
  vhost/vsock: handle vhost_vq_init_access() error
commit 49e555a932
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
 static int vhost_vsock_start(struct vhost_vsock *vsock)
 {
+	struct vhost_virtqueue *vq;
 	size_t i;
 	int ret;
 
@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
 		goto err;
 
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-		struct vhost_virtqueue *vq = &vsock->vqs[i];
+		vq = &vsock->vqs[i];
 
 		mutex_lock(&vq->mutex);
 
 		if (!vhost_vq_access_ok(vq)) {
 			ret = -EFAULT;
-			mutex_unlock(&vq->mutex);
 			goto err_vq;
 		}
 
 		if (!vq->private_data) {
 			vq->private_data = vsock;
-			vhost_vq_init_access(vq);
+			ret = vhost_vq_init_access(vq);
+			if (ret)
+				goto err_vq;
 		}
 
 		mutex_unlock(&vq->mutex);
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
 	return 0;
 
 err_vq:
+	vq->private_data = NULL;
+	mutex_unlock(&vq->mutex);
+
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-		struct vhost_virtqueue *vq = &vsock->vqs[i];
+		vq = &vsock->vqs[i];
 
 		mutex_lock(&vq->mutex);
 		vq->private_data = NULL;
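The fix above does two things: vhost_vsock_start() now checks the value returned by vhost_vq_init_access() instead of discarding it, and the err_vq path is extended to release the virtqueue that was mid-setup (clearing private_data and dropping its mutex) before walking the array to reset the rest. The following is a minimal, self-contained userspace sketch of that unwind pattern only; the names (struct vq, vq_init, start, NVQS) are hypothetical and it is not kernel code.

/* Sketch of the "bail out with the current lock held, then unwind" pattern. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define NVQS 2

struct vq {
	pthread_mutex_t mutex;
	void *private_data;
};

static struct vq vqs[NVQS] = {
	{ PTHREAD_MUTEX_INITIALIZER, NULL },
	{ PTHREAD_MUTEX_INITIALIZER, NULL },
};

/* Stand-in for vhost_vq_init_access(); pretend the second vq fails. */
static int vq_init(struct vq *vq)
{
	return (vq == &vqs[1]) ? -1 : 0;
}

static int start(void *owner)
{
	struct vq *vq;
	size_t i;
	int ret = 0;

	for (i = 0; i < NVQS; i++) {
		vq = &vqs[i];
		pthread_mutex_lock(&vq->mutex);

		if (!vq->private_data) {
			vq->private_data = owner;
			ret = vq_init(vq);      /* return value is checked ...      */
			if (ret)
				goto err_vq;    /* ... and we bail with mutex held */
		}

		pthread_mutex_unlock(&vq->mutex);
	}
	return 0;

err_vq:
	/* Undo the vq that was being set up before dropping its lock. */
	vq->private_data = NULL;
	pthread_mutex_unlock(&vq->mutex);

	/* Then reset every vq, taking each lock in turn. */
	for (i = 0; i < NVQS; i++) {
		vq = &vqs[i];
		pthread_mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		pthread_mutex_unlock(&vq->mutex);
	}
	return ret;
}

int main(void)
{
	int dummy;

	printf("start() returned %d\n", start(&dummy));
	return 0;
}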
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -59,6 +59,7 @@
 #define pr_fmt(fmt) "virtio-mmio: " fmt
 
 #include <linux/acpi.h>
+#include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	struct virtio_mmio_device *vm_dev;
 	struct resource *mem;
 	unsigned long magic;
+	int rc;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	}
 	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
-	if (vm_dev->version == 1)
+	if (vm_dev->version == 1) {
 		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+		/*
+		 * In the legacy case, ensure our coherently-allocated virtio
+		 * ring will be at an address expressable as a 32-bit PFN.
+		 */
+		if (!rc)
+			dma_set_coherent_mask(&pdev->dev,
+					      DMA_BIT_MASK(32 + PAGE_SHIFT));
+	} else {
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	}
+	if (rc)
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc)
+		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
 	platform_set_drvdata(pdev, vm_dev);
 
 	return register_virtio_device(&vm_dev->vdev);
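In the probe path above, the driver first asks for a 64-bit streaming DMA mask. For legacy (version 1) devices it then restricts the coherent mask to 32 + PAGE_SHIFT bits, since the legacy MMIO interface programs the ring address as a 32-bit page frame number; if 64-bit DMA is unavailable it falls back to a 32-bit mask, and only warns rather than failing the probe if even that cannot be set. Below is a condensed, illustrative helper showing the same fallback logic; the name example_set_dma_masks is hypothetical, and the real driver open-codes this inside virtio_mmio_probe().

/*
 * Hypothetical helper mirroring the mask-fallback logic in the hunk above;
 * virtio_mmio itself open-codes this in virtio_mmio_probe().
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>		/* PAGE_SHIFT */

static int example_set_dma_masks(struct device *dev, bool legacy)
{
	int rc;

	if (legacy) {
		rc = dma_set_mask(dev, DMA_BIT_MASK(64));
		/*
		 * Legacy devices take the ring address as a 32-bit PFN, so
		 * coherent allocations must land where the PFN fits in 32 bits.
		 */
		if (!rc)
			dma_set_coherent_mask(dev, DMA_BIT_MASK(32 + PAGE_SHIFT));
	} else {
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	}

	if (rc)	/* no 64-bit support: fall back to 32 bits for both masks */
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (rc)	/* even 32-bit failed: warn, but let the probe continue */
		dev_warn(dev, "no usable DMA mask, continuing anyway\n");

	return 0;
}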
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -159,6 +159,13 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
 	if (xen_domain())
 		return true;
 
+	/*
+	 * On ARM-based machines, the DMA ops will do the right thing,
+	 * so always use them with legacy devices.
+	 */
+	if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
+		return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+
 	return false;
 }
 
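vring_use_dma_api() decides whether the virtio ring maps buffers through the DMA API or hands the device raw guest-physical addresses. The hunk above makes ARM and arm64 always take the DMA-API path for legacy devices (those without VIRTIO_F_VERSION_1), where the DMA ops are relied on to do the right thing. As an illustration of how such a predicate is consumed, here is a hedged sketch of a mapping helper; example_use_dma_api and example_map_buf are made-up names, and the real consumers are virtio_ring's internal mapping helpers.

/*
 * Illustrative sketch only: how a "use the DMA API?" predicate steers the
 * mapping of a buffer. Names prefixed example_ are hypothetical.
 */
#include <linux/dma-mapping.h>
#include <linux/io.h>		/* virt_to_phys() */
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static bool example_use_dma_api(struct virtio_device *vdev)
{
	/* Same test as the hunk above: legacy devices on ARM/arm64 use DMA ops. */
	if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
		return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
	return false;
}

static dma_addr_t example_map_buf(struct virtio_device *vdev,
				  void *buf, size_t len)
{
	if (!example_use_dma_api(vdev))
		/* Bypass: the device sees guest-physical addresses directly. */
		return (dma_addr_t)virt_to_phys(buf);

	/* DMA API path: handles offsets, IOMMUs and bouncing as needed. */
	return dma_map_single(vdev->dev.parent, buf, len, DMA_TO_DEVICE);
}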