29 files changed, 659 insertions, 768 deletions
diff --git a/Documentation/driver-api/vfio-mediated-device.rst b/Documentation/driver-api/vfio-mediated-device.rst
index 66bd00d7aecf..f47dca6645aa 100644
--- a/Documentation/driver-api/vfio-mediated-device.rst
+++ b/Documentation/driver-api/vfio-mediated-device.rst
@@ -112,11 +112,11 @@ to register and unregister itself with the core driver:
 
 * Register::
 
-	extern int mdev_register_driver(struct mdev_driver *drv);
+	int mdev_register_driver(struct mdev_driver *drv);
 
 * Unregister::
 
-	extern void mdev_unregister_driver(struct mdev_driver *drv);
+	void mdev_unregister_driver(struct mdev_driver *drv);
 
 The mediated bus driver's probe function should create a vfio_device on top
 of the mdev_device and connect it to an appropriate implementation of
@@ -125,8 +125,8 @@ vfio_device_ops.
 When a driver wants to add the GUID creation sysfs to an existing device it has
 probe'd to then it should call::
 
-	extern int mdev_register_device(struct device *dev,
-					struct mdev_driver *mdev_driver);
+	int mdev_register_device(struct device *dev,
+				 struct mdev_driver *mdev_driver);
 
 This will provide the 'mdev_supported_types/XX/create' files which can then be
 used to trigger the creation of a mdev_device. The created mdev_device will be
@@ -134,7 +134,7 @@ attached to the specified driver.
 
 When the driver needs to remove itself it calls::
 
-	extern void mdev_unregister_device(struct device *dev);
+	void mdev_unregister_device(struct device *dev);
 
 Which will unbind and destroy all the created mdevs and remove the sysfs files.
 
@@ -260,10 +260,10 @@ Translation APIs for Mediated Devices
 The following APIs are provided for translating user pfn to host pfn in a VFIO
 driver::
 
-	int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
-			   int npage, int prot, unsigned long *phys_pfn);
+	int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
+			   int npage, int prot, struct page **pages);
 
-	int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
+	void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova,
 			     int npage);
 
 These functions call back into the back-end IOMMU module by using the pin_pages
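A minimal usage sketch of the reworked interface (illustrative only, not part
of this series; the vfio_device handle and IOVA are assumed to come from the
caller's own mdev state):

	/* Pin one page at @iova for DMA, use it, then release it. */
	static int example_pin_one_page(struct vfio_device *vdev, dma_addr_t iova)
	{
		struct page *page;
		int ret;

		ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
		if (ret != 1)
			return ret < 0 ? ret : -EFAULT;

		/* ... program the device with page_to_phys(page) ... */

		vfio_unpin_pages(vdev, iova, 1);
		return 0;
	}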
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index b515cfa62bd9..f508f5025e38 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -227,13 +227,13 @@ struct ap_qirq_ctrl {
  * ap_aqic(): Control interruption for a specific AP.
  * @qid: The AP queue number
  * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
- * @ind: The notification indicator byte
+ * @pa_ind: Physical address of the notification indicator byte
  *
  * Returns AP queue status.
  */
 static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
 					     struct ap_qirq_ctrl qirqctrl,
-					     void *ind)
+					     phys_addr_t pa_ind)
 {
 	unsigned long reg0 = qid | (3UL << 24);  /* fc 3UL is AQIC */
 	union {
@@ -241,7 +241,7 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
 		struct ap_qirq_ctrl qirqctrl;
 		struct ap_queue_status status;
 	} reg1;
-	unsigned long reg2 = virt_to_phys(ind);
+	unsigned long reg2 = pa_ind;
 
 	reg1.qirqctrl = qirqctrl;
 
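With this conversion the virtual-to-physical translation of the indicator
byte moves out of ap_aqic() and into its callers; a sketch of a call site
(hypothetical helper, illustrative only):

	static struct ap_queue_status example_aqic(ap_qid_t qid,
						   struct ap_qirq_ctrl qirqctrl,
						   void *ind)
	{
		/* ap_aqic() no longer calls virt_to_phys() itself. */
		return ap_aqic(qid, qirqctrl, virt_to_phys(ind));
	}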
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index aee1a45da74b..705689e64011 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -226,7 +226,6 @@ struct intel_vgpu {
 	unsigned long nr_cache_entries;
 	struct mutex cache_lock;
 
-	struct notifier_block iommu_notifier;
 	atomic_t released;
 
 	struct kvm_page_track_notifier_node track_node;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index e2f6c56ab342..e3cd58946477 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -231,57 +231,38 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		unsigned long size)
 {
-	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
-	int total_pages;
-	int npage;
-	int ret;
-
-	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
-
-	for (npage = 0; npage < total_pages; npage++) {
-		unsigned long cur_gfn = gfn + npage;
-
-		ret = vfio_unpin_pages(&vgpu->vfio_device, &cur_gfn, 1);
-		drm_WARN_ON(&i915->drm, ret != 1);
-	}
+	vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT,
+			 DIV_ROUND_UP(size, PAGE_SIZE));
 }
 
 /* Pin a normal or compound guest page for dma. */
 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		unsigned long size, struct page **page)
 {
-	unsigned long base_pfn = 0;
-	int total_pages;
+	int total_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+	struct page *base_page = NULL;
 	int npage;
 	int ret;
 
-	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
 	/*
	 * We pin the pages one-by-one to avoid allocating a big arrary
	 * on stack to hold pfns.
	 */
 	for (npage = 0; npage < total_pages; npage++) {
-		unsigned long cur_gfn = gfn + npage;
-		unsigned long pfn;
+		dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT;
+		struct page *cur_page;
 
-		ret = vfio_pin_pages(&vgpu->vfio_device, &cur_gfn, 1,
-				     IOMMU_READ | IOMMU_WRITE, &pfn);
+		ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
+				     IOMMU_READ | IOMMU_WRITE, &cur_page);
 		if (ret != 1) {
-			gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
-				     cur_gfn, ret);
-			goto err;
-		}
-
-		if (!pfn_valid(pfn)) {
-			gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
-			npage++;
-			ret = -EFAULT;
+			gvt_vgpu_err("vfio_pin_pages failed for iova %pad, ret %d\n",
+				     &cur_iova, ret);
 			goto err;
 		}
 
 		if (npage == 0)
-			base_pfn = pfn;
-		else if (base_pfn + npage != pfn) {
+			base_page = cur_page;
+		else if (base_page + npage != cur_page) {
 			gvt_vgpu_err("The pages are not continuous\n");
 			ret = -EINVAL;
 			npage++;
@@ -289,7 +270,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		}
 	}
 
-	*page = pfn_to_page(base_pfn);
+	*page = base_page;
 	return 0;
 err:
 	gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
@@ -729,34 +710,25 @@ int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num)
 	return ret;
 }
 
-static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
-				     unsigned long action, void *data)
+static void intel_vgpu_dma_unmap(struct vfio_device *vfio_dev, u64 iova,
+				 u64 length)
 {
-	struct intel_vgpu *vgpu =
-		container_of(nb, struct intel_vgpu, iommu_notifier);
-
-	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
-		struct vfio_iommu_type1_dma_unmap *unmap = data;
-		struct gvt_dma *entry;
-		unsigned long iov_pfn, end_iov_pfn;
+	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+	struct gvt_dma *entry;
+	u64 iov_pfn = iova >> PAGE_SHIFT;
+	u64 end_iov_pfn = iov_pfn + length / PAGE_SIZE;
 
-		iov_pfn = unmap->iova >> PAGE_SHIFT;
-		end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
+	mutex_lock(&vgpu->cache_lock);
+	for (; iov_pfn < end_iov_pfn; iov_pfn++) {
+		entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
+		if (!entry)
+			continue;
 
-		mutex_lock(&vgpu->cache_lock);
-		for (; iov_pfn < end_iov_pfn; iov_pfn++) {
-			entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
-			if (!entry)
-				continue;
-
-			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
-					   entry->size);
-			__gvt_cache_remove_entry(vgpu, entry);
-		}
-		mutex_unlock(&vgpu->cache_lock);
+		gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
+				   entry->size);
+		__gvt_cache_remove_entry(vgpu, entry);
 	}
-
-	return NOTIFY_OK;
+	mutex_unlock(&vgpu->cache_lock);
 }
 
 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
@@ -783,36 +755,20 @@ out:
 static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
 {
 	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
-	unsigned long events;
-	int ret;
-
-	vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
-
-	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
-	ret = vfio_register_notifier(vfio_dev, VFIO_IOMMU_NOTIFY, &events,
-				     &vgpu->iommu_notifier);
-	if (ret != 0) {
-		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
-			     ret);
-		goto out;
-	}
 
-	ret = -EEXIST;
 	if (vgpu->attached)
-		goto undo_iommu;
+		return -EEXIST;
 
-	ret = -ESRCH;
 	if (!vgpu->vfio_device.kvm ||
 	    vgpu->vfio_device.kvm->mm != current->mm) {
 		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
-		goto undo_iommu;
+		return -ESRCH;
 	}
 
 	kvm_get_kvm(vgpu->vfio_device.kvm);
 
-	ret = -EEXIST;
 	if (__kvmgt_vgpu_exist(vgpu))
-		goto undo_iommu;
+		return -EEXIST;
 
 	vgpu->attached = true;
 
@@ -831,12 +787,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
 
 	atomic_set(&vgpu->released, 0);
 	return 0;
-
-undo_iommu:
-	vfio_unregister_notifier(vfio_dev, VFIO_IOMMU_NOTIFY,
-				 &vgpu->iommu_notifier);
-out:
-	return ret;
 }
 
 static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
@@ -853,8 +803,6 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
 static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
 {
 	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
-	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
-	int ret;
 
 	if (!vgpu->attached)
 		return;
@@ -864,11 +812,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
 
 	intel_gvt_release_vgpu(vgpu);
 
-	ret = vfio_unregister_notifier(&vgpu->vfio_device, VFIO_IOMMU_NOTIFY,
-				       &vgpu->iommu_notifier);
-	drm_WARN(&i915->drm, ret,
-		 "vfio_unregister_notifier for iommu failed: %d\n", ret);
-
 	debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
 
 	kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
@@ -1610,6 +1553,7 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
 	.write		= intel_vgpu_write,
 	.mmap		= intel_vgpu_mmap,
 	.ioctl		= intel_vgpu_ioctl,
+	.dma_unmap	= intel_vgpu_dma_unmap,
 };
 
 static int intel_vgpu_probe(struct mdev_device *mdev)
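The shape of the replacement mechanism, as a sketch (names hypothetical,
illustrative only): rather than registering a VFIO_IOMMU_NOTIFY notifier
block, a driver now supplies a dma_unmap callback in its vfio_device_ops and
drops any private mappings that overlap the unmapped range:

	static void example_dma_unmap(struct vfio_device *vfio_dev, u64 iova,
				      u64 length)
	{
		/* Invalidate driver-side DMA state in [iova, iova + length). */
	}

	static const struct vfio_device_ops example_dev_ops = {
		/* ... open_device, close_device, read, write, ioctl ... */
		.dma_unmap	= example_dma_unmap,
	};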
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
index 7a838e3d7c0f..420d89ba7f83 100644
--- a/drivers/s390/cio/vfio_ccw_async.c
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/vfio.h>
-#include <linux/mdev.h>
 
 #include "vfio_ccw_private.h"
 
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 0c2be9421ab7..7b02e97f4b29 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -11,6 +11,7 @@
 #include <linux/ratelimit.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/highmem.h>
 #include <linux/iommu.h>
 #include <linux/vfio.h>
 #include <asm/idals.h>
@@ -18,13 +19,11 @@
 #include "vfio_ccw_cp.h"
 #include "vfio_ccw_private.h"
 
-struct pfn_array {
-	/* Starting guest physical I/O address. */
-	unsigned long		pa_iova;
-	/* Array that stores PFNs of the pages need to pin. */
-	unsigned long		*pa_iova_pfn;
-	/* Array that receives PFNs of the pages pinned. */
-	unsigned long		*pa_pfn;
+struct page_array {
+	/* Array that stores pages need to pin. */
+	dma_addr_t		*pa_iova;
+	/* Array that receives the pinned pages. */
+	struct page		**pa_page;
 	/* Number of pages pinned from @pa_iova. */
 	int			pa_nr;
 };
@@ -37,116 +36,158 @@ struct ccwchain {
 	/* Count of the valid ccws in chain. */
 	int			ch_len;
 	/* Pinned PAGEs for the original data. */
-	struct pfn_array	*ch_pa;
+	struct page_array	*ch_pa;
 };
 
 /*
- * pfn_array_alloc() - alloc memory for PFNs
- * @pa: pfn_array on which to perform the operation
+ * page_array_alloc() - alloc memory for page array
+ * @pa: page_array on which to perform the operation
  * @iova: target guest physical address
  * @len: number of bytes that should be pinned from @iova
  *
- * Attempt to allocate memory for PFNs.
+ * Attempt to allocate memory for page array.
  *
- * Usage of pfn_array:
- * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
+ * Usage of page_array:
+ * We expect (pa_nr == 0) and (pa_iova == NULL), any field in
  * this structure will be filled in by this function.
 
 *
 * Returns:
- *         0 if PFNs are allocated
- * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
+ *         0 if page array is allocated
+ * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova is not NULL
 * -ENOMEM if alloc failed
 */
-static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
+static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
 {
 	int i;
 
-	if (pa->pa_nr || pa->pa_iova_pfn)
+	if (pa->pa_nr || pa->pa_iova)
 		return -EINVAL;
 
-	pa->pa_iova = iova;
-
 	pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	if (!pa->pa_nr)
 		return -EINVAL;
 
-	pa->pa_iova_pfn = kcalloc(pa->pa_nr,
-				  sizeof(*pa->pa_iova_pfn) +
-				  sizeof(*pa->pa_pfn),
-				  GFP_KERNEL);
-	if (unlikely(!pa->pa_iova_pfn)) {
+	pa->pa_iova = kcalloc(pa->pa_nr,
+			      sizeof(*pa->pa_iova) + sizeof(*pa->pa_page),
+			      GFP_KERNEL);
+	if (unlikely(!pa->pa_iova)) {
 		pa->pa_nr = 0;
 		return -ENOMEM;
 	}
-	pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
+	pa->pa_page = (struct page **)&pa->pa_iova[pa->pa_nr];
 
-	pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
-	pa->pa_pfn[0] = -1ULL;
+	pa->pa_iova[0] = iova;
+	pa->pa_page[0] = NULL;
 	for (i = 1; i < pa->pa_nr; i++) {
-		pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
-		pa->pa_pfn[i] = -1ULL;
+		pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
+		pa->pa_page[i] = NULL;
 	}
 
 	return 0;
 }
 
 /*
- * pfn_array_pin() - Pin user pages in memory
- * @pa: pfn_array on which to perform the operation
+ * page_array_unpin() - Unpin user pages in memory
+ * @pa: page_array on which to perform the operation
+ * @vdev: the vfio device to perform the operation
+ * @pa_nr: number of user pages to unpin
+ *
+ * Only unpin if any pages were pinned to begin with, i.e. pa_nr > 0,
+ * otherwise only clear pa->pa_nr
+ */
+static void page_array_unpin(struct page_array *pa,
+			     struct vfio_device *vdev, int pa_nr)
+{
+	int unpinned = 0, npage = 1;
+
+	while (unpinned < pa_nr) {
+		dma_addr_t *first = &pa->pa_iova[unpinned];
+		dma_addr_t *last = &first[npage];
+
+		if (unpinned + npage < pa_nr &&
+		    *first + npage * PAGE_SIZE == *last) {
+			npage++;
+			continue;
+		}
+
+		vfio_unpin_pages(vdev, *first, npage);
+		unpinned += npage;
+		npage = 1;
+	}
+
+	pa->pa_nr = 0;
+}
+
+/*
+ * page_array_pin() - Pin user pages in memory
+ * @pa: page_array on which to perform the operation
 * @mdev: the mediated device to perform pin operations
 *
 * Returns number of pages pinned upon success.
 * If the pin request partially succeeds, or fails completely,
 * all pages are left unpinned and a negative error value is returned.
 */
-static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev)
+static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
 {
+	int pinned = 0, npage = 1;
 	int ret = 0;
 
-	ret = vfio_pin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr,
-			     IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+	while (pinned < pa->pa_nr) {
+		dma_addr_t *first = &pa->pa_iova[pinned];
+		dma_addr_t *last = &first[npage];
 
-	if (ret < 0) {
-		goto err_out;
-	} else if (ret > 0 && ret != pa->pa_nr) {
-		vfio_unpin_pages(vdev, pa->pa_iova_pfn, ret);
-		ret = -EINVAL;
-		goto err_out;
+		if (pinned + npage < pa->pa_nr &&
+		    *first + npage * PAGE_SIZE == *last) {
+			npage++;
+			continue;
+		}
+
+		ret = vfio_pin_pages(vdev, *first, npage,
+				     IOMMU_READ | IOMMU_WRITE,
+				     &pa->pa_page[pinned]);
+		if (ret < 0) {
+			goto err_out;
+		} else if (ret > 0 && ret != npage) {
+			pinned += ret;
+			ret = -EINVAL;
+			goto err_out;
+		}
+		pinned += npage;
+		npage = 1;
 	}
 
 	return ret;
 
 err_out:
-	pa->pa_nr = 0;
-
+	page_array_unpin(pa, vdev, pinned);
 	return ret;
 }
 
 /* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct vfio_device *vdev)
+static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev)
 {
-	/* Only unpin if any pages were pinned to begin with */
-	if (pa->pa_nr)
-		vfio_unpin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr);
-	pa->pa_nr = 0;
-	kfree(pa->pa_iova_pfn);
+	page_array_unpin(pa, vdev, pa->pa_nr);
+	kfree(pa->pa_iova);
 }
 
-static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
+static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
 {
-	unsigned long iova_pfn = iova >> PAGE_SHIFT;
+	u64 iova_pfn_start = iova >> PAGE_SHIFT;
+	u64 iova_pfn_end = (iova + length - 1) >> PAGE_SHIFT;
+	u64 pfn;
 	int i;
 
-	for (i = 0; i < pa->pa_nr; i++)
-		if (pa->pa_iova_pfn[i] == iova_pfn)
+	for (i = 0; i < pa->pa_nr; i++) {
+		pfn = pa->pa_iova[i] >> PAGE_SHIFT;
+		if (pfn >= iova_pfn_start && pfn <= iova_pfn_end)
 			return true;
+	}
 
 	return false;
 }
 
-/* Create the list of IDAL words for a pfn_array. */
-static inline void pfn_array_idal_create_words(
-	struct pfn_array *pa,
-	unsigned long *idaws)
+/* Create the list of IDAL words for a page_array. */
+static inline void page_array_idal_create_words(struct page_array *pa,
+						unsigned long *idaws)
 {
 	int i;
 
@@ -159,10 +200,10 @@ static inline void pfn_array_idal_create_words(
 	 */
 
 	for (i = 0; i < pa->pa_nr; i++)
-		idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+		idaws[i] = page_to_phys(pa->pa_page[i]);
 
 	/* Adjust the first IDAW, since it may not start on a page boundary */
-	idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
+	idaws[0] += pa->pa_iova[0] & (PAGE_SIZE - 1);
 }
 
 static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
@@ -194,24 +235,24 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
 static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
 			   unsigned long n)
 {
-	struct pfn_array pa = {0};
-	u64 from;
+	struct page_array pa = {0};
 	int i, ret;
 	unsigned long l, m;
 
-	ret = pfn_array_alloc(&pa, iova, n);
+	ret = page_array_alloc(&pa, iova, n);
 	if (ret < 0)
 		return ret;
 
-	ret = pfn_array_pin(&pa, vdev);
+	ret = page_array_pin(&pa, vdev);
 	if (ret < 0) {
-		pfn_array_unpin_free(&pa, vdev);
+		page_array_unpin_free(&pa, vdev);
 		return ret;
 	}
 
 	l = n;
 	for (i = 0; i < pa.pa_nr; i++) {
-		from = pa.pa_pfn[i] << PAGE_SHIFT;
+		void *from = kmap_local_page(pa.pa_page[i]);
+
 		m = PAGE_SIZE;
 		if (i == 0) {
 			from += iova & (PAGE_SIZE - 1);
@@ -219,14 +260,15 @@ static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
 		}
 
 		m = min(l, m);
-		memcpy(to + (n - l), (void *)from, m);
+		memcpy(to + (n - l), from, m);
+		kunmap_local(from);
 
 		l -= m;
 		if (l == 0)
 			break;
 	}
 
-	pfn_array_unpin_free(&pa, vdev);
+	page_array_unpin_free(&pa, vdev);
 
 	return l;
 }
@@ -329,7 +371,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
 	chain->ch_ccw = (struct ccw1 *)data;
 
 	data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
-	chain->ch_pa = (struct pfn_array *)data;
+	chain->ch_pa = (struct page_array *)data;
 
 	chain->ch_len = len;
 
@@ -513,7 +555,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 	struct vfio_device *vdev =
 		&container_of(cp, struct vfio_ccw_private, cp)->vdev;
 	struct ccw1 *ccw;
-	struct pfn_array *pa;
+	struct page_array *pa;
 	u64 iova;
 	unsigned long *idaws;
 	int ret;
@@ -547,13 +589,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 	}
 
 	/*
-	 * Allocate an array of pfn's for pages to pin/translate.
+	 * Allocate an array of pages to pin/translate.
 	 * The number of pages is actually the count of the idaws
 	 * required for the data transfer, since we only only support
 	 * 4K IDAWs today.
 	 */
 	pa = chain->ch_pa + idx;
-	ret = pfn_array_alloc(pa, iova, bytes);
+	ret = page_array_alloc(pa, iova, bytes);
 	if (ret < 0)
 		goto out_free_idaws;
 
@@ -564,21 +606,21 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 			goto out_unpin;
 
 		/*
-		 * Copy guest IDAWs into pfn_array, in case the memory they
+		 * Copy guest IDAWs into page_array, in case the memory they
 		 * occupy is not contiguous.
 		 */
 		for (i = 0; i < idaw_nr; i++)
-			pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
+			pa->pa_iova[i] = idaws[i];
 	} else {
 		/*
-		 * No action is required here; the iova addresses in pfn_array
-		 * were initialized sequentially in pfn_array_alloc() beginning
+		 * No action is required here; the iova addresses in page_array
+		 * were initialized sequentially in page_array_alloc() beginning
 		 * with the contents of ccw->cda.
 		 */
 	}
 
 	if (ccw_does_data_transfer(ccw)) {
-		ret = pfn_array_pin(pa, vdev);
+		ret = page_array_pin(pa, vdev);
 		if (ret < 0)
 			goto out_unpin;
 	} else {
@@ -588,13 +630,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 	ccw->cda = (__u32) virt_to_phys(idaws);
 	ccw->flags |= CCW_FLAG_IDA;
 
-	/* Populate the IDAL with pinned/translated addresses from pfn */
-	pfn_array_idal_create_words(pa, idaws);
+	/* Populate the IDAL with pinned/translated addresses from page */
+	page_array_idal_create_words(pa, idaws);
 
 	return 0;
 
 out_unpin:
-	pfn_array_unpin_free(pa, vdev);
+	page_array_unpin_free(pa, vdev);
 out_free_idaws:
 	kfree(idaws);
 out_init:
@@ -700,7 +742,7 @@ void cp_free(struct channel_program *cp)
 	cp->initialized = false;
 	list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
 		for (i = 0; i < chain->ch_len; i++) {
-			pfn_array_unpin_free(chain->ch_pa + i, vdev);
+			page_array_unpin_free(chain->ch_pa + i, vdev);
 			ccwchain_cda_free(chain, i);
 		}
 		ccwchain_free(chain);
@@ -862,11 +904,12 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
 * cp_iova_pinned() - check if an iova is pinned for a ccw chain.
 * @cp: channel_program on which to perform the operation
 * @iova: the iova to check
+ * @length: the length to check from @iova
 *
 * If the @iova is currently pinned for the ccw chain, return true;
 * else return false.
 */
-bool cp_iova_pinned(struct channel_program *cp, u64 iova)
+bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length)
 {
 	struct ccwchain *chain;
 	int i;
@@ -876,7 +919,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
 
 	list_for_each_entry(chain, &cp->ccwchain_list, next) {
 		for (i = 0; i < chain->ch_len; i++)
-			if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
+			if (page_array_iova_pinned(chain->ch_pa + i, iova, length))
 				return true;
 	}
 
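Taken together, the new helpers give vfio_ccw_cp.c the lifecycle that
copy_from_iova() above already follows; a condensed sketch (illustrative
only, error handling trimmed to the essentials):

	static int example_pin_range(struct vfio_device *vdev, u64 iova,
				     unsigned int len)
	{
		struct page_array pa = {0};
		int ret;

		ret = page_array_alloc(&pa, iova, len);
		if (ret < 0)
			return ret;

		/* Pins in bursts of iova-contiguous pages. */
		ret = page_array_pin(&pa, vdev);
		if (ret < 0) {
			/* Nothing is left pinned; this only frees the array. */
			page_array_unpin_free(&pa, vdev);
			return ret;
		}

		/* ... translate via page_to_phys(pa.pa_page[i]) ... */

		page_array_unpin_free(&pa, vdev);
		return 0;
	}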
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index e4c436199b4c..54d26e242533 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -41,11 +41,11 @@ struct channel_program {
 	struct ccw1 *guest_cp;
 };
 
-extern int cp_init(struct channel_program *cp, union orb *orb);
-extern void cp_free(struct channel_program *cp);
-extern int cp_prefetch(struct channel_program *cp);
-extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
-extern void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
-extern bool cp_iova_pinned(struct channel_program *cp, u64 iova);
+int cp_init(struct channel_program *cp, union orb *orb);
+void cp_free(struct channel_program *cp);
+int cp_prefetch(struct channel_program *cp);
+union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
+void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
+bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length);
 
 #endif
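The added @length parameter lets a DMA-unmap path test a whole range with a
single call; a hypothetical handler built on the prototype above (the real
wiring lives outside this excerpt):

	static void example_ccw_dma_unmap(struct vfio_ccw_private *private,
					  u64 iova, u64 length)
	{
		/* React only if the unmapped range backs an active program. */
		if (!cp_iova_pinned(&private->cp, iova, length))
			return;

		/* ... quiesce the subchannel before the mapping disappears ... */
	}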
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index ee182cfb467d..86d9e428357b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -14,7 +14,6 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/slab.h>
-#include <linux/uuid.h>
 #include <linux/mdev.h>
 
 #include <asm/isc.h>
@@ -42,13 +41,6 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
 	DECLARE_COMPLETION_ONSTACK(completion);
 	int iretry, ret = 0;
 
-	spin_lock_irq(sch->lock);
-	if (!sch->schib.pmcw.ena)
-		goto out_unlock;
-	ret = cio_disable_subchannel(sch);
-	if (ret != -EBUSY)
-		goto out_unlock;
-
 	iretry = 255;
 	do {
 
@@ -75,9 +67,7 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
 		spin_lock_irq(sch->lock);
 		ret = cio_disable_subchannel(sch);
 	} while (ret == -EBUSY);
-out_unlock:
-	private->state = VFIO_CCW_STATE_NOT_OPER;
-	spin_unlock_irq(sch->lock);
+
 	return ret;
 }
 
@@ -107,9 +97,10 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 	/*
 	 * Reset to IDLE only if processing of a channel program
 	 * has finished. Do not overwrite a possible processing
-	 * state if the final interrupt was for HSCH or CSCH.
+	 * state if the interrupt was unsolicited, or if the final
+	 * interrupt was for HSCH or CSCH.
 	 */
-	if (private->mdev && cp_is_finished)
+	if (cp_is_finished)
 		private->state = VFIO_CCW_STATE_IDLE;
 
 	if (private->io_trigger)
@@ -147,7 +138,7 @@ static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
 	private->sch = sch;
 	mutex_init(&private->io_mutex);
-	private->state = VFIO_CCW_STATE_NOT_OPER;
+	private->state = VFIO_CCW_STATE_STANDBY;
 	INIT_LIST_HEAD(&private->crw);
 	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
 	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
@@ -231,26 +222,15 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
 
 	dev_set_drvdata(&sch->dev, private);
 
-	spin_lock_irq(sch->lock);
-	sch->isc = VFIO_CCW_ISC;
-	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
-	spin_unlock_irq(sch->lock);
+	ret = mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
 	if (ret)
 		goto out_free;
 
-	private->state = VFIO_CCW_STATE_STANDBY;
-
-	ret = vfio_ccw_mdev_reg(sch);
-	if (ret)
-		goto out_disable;
-
 	VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
 			   sch->schid.cssid, sch->schid.ssid,
 			   sch->schid.sch_no);
 	return 0;
 
-out_disable:
-	cio_disable_subchannel(sch);
 out_free:
 	dev_set_drvdata(&sch->dev, NULL);
 	vfio_ccw_free_private(private);
@@ -261,8 +241,7 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
 {
 	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
 
-	vfio_ccw_sch_quiesce(sch);
-	vfio_ccw_mdev_unreg(sch);
+	mdev_unregister_device(&sch->dev);
 
 	dev_set_drvdata(&sch->dev, NULL);
 
@@ -275,7 +254,10 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
 static void vfio_ccw_sch_shutdo
