Diffstat (limited to 'drivers/s390/crypto/vfio_ap_ops.c')
-rw-r--r-- | drivers/s390/crypto/vfio_ap_ops.c | 1441
1 file changed, 1126 insertions, 315 deletions
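The largest part of this diff replaces the old single matrix_dev->lock with the three-lock scheme documented in the new get_update_locks_for_* helpers: matrix_dev->guests_lock, then kvm->lock, then matrix_dev->mdevs_lock, released in reverse order, which is what allows adapters, domains and control domains to be assigned or unassigned while a guest is running. The stand-alone C sketch below only models that acquire/release discipline with pthread mutexes; the struct layouts, the user-space types and the main() driver are assumptions made for illustration and are not part of the kernel patch.

/* lock_order_model.c - illustrative user-space model of the locking order; NOT the driver code. */
#include <pthread.h>
#include <stdio.h>

struct kvm {
	pthread_mutex_t lock;                 /* models kvm->lock */
};

struct ap_matrix_mdev {
	struct kvm *kvm;                      /* NULL when no guest is attached */
};

static pthread_mutex_t guests_lock = PTHREAD_MUTEX_INITIALIZER; /* models matrix_dev->guests_lock */
static pthread_mutex_t mdevs_lock  = PTHREAD_MUTEX_INITIALIZER; /* models matrix_dev->mdevs_lock  */

/* Acquire outer-to-inner: guests_lock, then kvm->lock (if a guest is attached), then mdevs_lock. */
static void get_update_locks_for_mdev(struct ap_matrix_mdev *m)
{
	pthread_mutex_lock(&guests_lock);
	if (m && m->kvm)
		pthread_mutex_lock(&m->kvm->lock);
	pthread_mutex_lock(&mdevs_lock);
}

/* Release in the exact reverse (inner-to-outer) order. */
static void release_update_locks_for_mdev(struct ap_matrix_mdev *m)
{
	pthread_mutex_unlock(&mdevs_lock);
	if (m && m->kvm)
		pthread_mutex_unlock(&m->kvm->lock);
	pthread_mutex_unlock(&guests_lock);
}

int main(void)
{
	struct kvm guest;
	struct ap_matrix_mdev mdev = { .kvm = &guest };

	pthread_mutex_init(&guest.lock, NULL);

	/* Shape of a sysfs assign/unassign handler under the new scheme. */
	get_update_locks_for_mdev(&mdev);
	printf("matrix and shadow APCB updated under all three locks\n");
	release_update_locks_for_mdev(&mdev);

	pthread_mutex_destroy(&guest.lock);
	return 0;
}

Holding every update path behind one such acquire/release pair is what lets the assign/unassign store functions in this patch drop the old "return -EBUSY while a KVM guest is running" checks and instead hot plug or hot unplug queues into the guest's APCB directly.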
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 75cd92c291e3..6c8c41fac4e1 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -26,44 +26,193 @@ #define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough" #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device" -static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev); +#define AP_QUEUE_ASSIGNED "assigned" +#define AP_QUEUE_UNASSIGNED "unassigned" +#define AP_QUEUE_IN_USE "in use" + +static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable); static struct vfio_ap_queue *vfio_ap_find_queue(int apqn); static const struct vfio_device_ops vfio_ap_matrix_dev_ops; +static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, unsigned int retry); -static int match_apqn(struct device *dev, const void *data) +/** + * get_update_locks_for_kvm: Acquire the locks required to dynamically update a + * KVM guest's APCB in the proper order. + * + * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB. + * + * The proper locking order is: + * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM + * guest's APCB. + * 2. kvm->lock: required to update a guest's APCB + * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev + * + * Note: If @kvm is NULL, the KVM lock will not be taken. + */ +static inline void get_update_locks_for_kvm(struct kvm *kvm) { - struct vfio_ap_queue *q = dev_get_drvdata(dev); + mutex_lock(&matrix_dev->guests_lock); + if (kvm) + mutex_lock(&kvm->lock); + mutex_lock(&matrix_dev->mdevs_lock); +} - return (q->apqn == *(int *)(data)) ? 1 : 0; +/** + * release_update_locks_for_kvm: Release the locks used to dynamically update a + * KVM guest's APCB in the proper order. + * + * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB. + * + * The proper unlocking order is: + * 1. matrix_dev->mdevs_lock + * 2. kvm->lock + * 3. matrix_dev->guests_lock + * + * Note: If @kvm is NULL, the KVM lock will not be released. + */ +static inline void release_update_locks_for_kvm(struct kvm *kvm) +{ + mutex_unlock(&matrix_dev->mdevs_lock); + if (kvm) + mutex_unlock(&kvm->lock); + mutex_unlock(&matrix_dev->guests_lock); } /** - * vfio_ap_get_queue - retrieve a queue with a specific APQN from a list - * @matrix_mdev: the associated mediated matrix - * @apqn: The queue APQN + * get_update_locks_for_mdev: Acquire the locks required to dynamically update a + * KVM guest's APCB in the proper order. + * + * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP + * configuration data to use to update a KVM guest's APCB. * - * Retrieve a queue with a specific APQN from the list of the - * devices of the vfio_ap_drv. - * Verify that the APID and the APQI are set in the matrix. + * The proper locking order is: + * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM + * guest's APCB. + * 2. matrix_mdev->kvm->lock: required to update a guest's APCB + * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev * - * Return: the pointer to the associated vfio_ap_queue + * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM + * lock will not be taken. 
*/ -static struct vfio_ap_queue *vfio_ap_get_queue( +static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev) +{ + mutex_lock(&matrix_dev->guests_lock); + if (matrix_mdev && matrix_mdev->kvm) + mutex_lock(&matrix_mdev->kvm->lock); + mutex_lock(&matrix_dev->mdevs_lock); +} + +/** + * release_update_locks_for_mdev: Release the locks used to dynamically update a + * KVM guest's APCB in the proper order. + * + * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP + * configuration data to use to update a KVM guest's APCB. + * + * The proper unlocking order is: + * 1. matrix_dev->mdevs_lock + * 2. matrix_mdev->kvm->lock + * 3. matrix_dev->guests_lock + * + * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM + * lock will not be released. + */ +static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev) +{ + mutex_unlock(&matrix_dev->mdevs_lock); + if (matrix_mdev && matrix_mdev->kvm) + mutex_unlock(&matrix_mdev->kvm->lock); + mutex_unlock(&matrix_dev->guests_lock); +} + +/** + * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and + * acquire the locks required to update the APCB of + * the KVM guest to which the mdev is attached. + * + * @apqn: the APQN of a queue device. + * + * The proper locking order is: + * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM + * guest's APCB. + * 2. matrix_mdev->kvm->lock: required to update a guest's APCB + * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev + * + * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock + * will not be taken. + * + * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn + * is not assigned to an ap_matrix_mdev. + */ +static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn) +{ + struct ap_matrix_mdev *matrix_mdev; + + mutex_lock(&matrix_dev->guests_lock); + + list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { + if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) && + test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) { + if (matrix_mdev->kvm) + mutex_lock(&matrix_mdev->kvm->lock); + + mutex_lock(&matrix_dev->mdevs_lock); + + return matrix_mdev; + } + } + + mutex_lock(&matrix_dev->mdevs_lock); + + return NULL; +} + +/** + * get_update_locks_for_queue: get the locks required to update the APCB of the + * KVM guest to which the matrix mdev linked to a + * vfio_ap_queue object is attached. + * + * @q: a pointer to a vfio_ap_queue object. + * + * The proper locking order is: + * 1. q->matrix_dev->guests_lock: required to use the KVM pointer to update a + * KVM guest's APCB. + * 2. q->matrix_mdev->kvm->lock: required to update a guest's APCB + * 3. matrix_dev->mdevs_lock: required to access data stored in matrix_mdev + * + * Note: if @queue is not linked to an ap_matrix_mdev object, the KVM lock + * will not be taken. 
+ */ +static inline void get_update_locks_for_queue(struct vfio_ap_queue *q) +{ + mutex_lock(&matrix_dev->guests_lock); + if (q->matrix_mdev && q->matrix_mdev->kvm) + mutex_lock(&q->matrix_mdev->kvm->lock); + mutex_lock(&matrix_dev->mdevs_lock); +} + +/** + * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a + * hash table of queues assigned to a matrix mdev + * @matrix_mdev: the matrix mdev + * @apqn: The APQN of a queue device + * + * Return: the pointer to the vfio_ap_queue struct representing the queue or + * NULL if the queue is not assigned to @matrix_mdev + */ +static struct vfio_ap_queue *vfio_ap_mdev_get_queue( struct ap_matrix_mdev *matrix_mdev, int apqn) { struct vfio_ap_queue *q; - if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm)) - return NULL; - if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) - return NULL; - - q = vfio_ap_find_queue(apqn); - if (q) - q->matrix_mdev = matrix_mdev; + hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode, + apqn) { + if (q && q->apqn == apqn) + return q; + } - return q; + return NULL; } /** @@ -180,7 +329,6 @@ static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) status.response_code); end_free: vfio_ap_free_aqic_resources(q); - q->matrix_mdev = NULL; return status; } @@ -399,10 +547,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; } - mutex_lock(&matrix_dev->lock); + mutex_lock(&matrix_dev->mdevs_lock); + if (!vcpu->kvm->arch.crypto.pqap_hook) { VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n", __func__, apqn); + goto out_unlock; } @@ -418,7 +568,7 @@ static int handle_pqap(struct kvm_vcpu *vcpu) goto out_unlock; } - q = vfio_ap_get_queue(matrix_mdev, apqn); + q = vfio_ap_mdev_get_queue(matrix_mdev, apqn); if (!q) { VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n", __func__, AP_QID_CARD(apqn), @@ -437,7 +587,7 @@ static int handle_pqap(struct kvm_vcpu *vcpu) out_unlock: memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus)); vcpu->run->s.regs.gprs[1] >>= 32; - mutex_unlock(&matrix_dev->lock); + mutex_unlock(&matrix_dev->mdevs_lock); return 0; } @@ -449,6 +599,91 @@ static void vfio_ap_matrix_init(struct ap_config_info *info, matrix->adm_max = info->apxa ? info->Nd : 15; } +static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev) +{ + if (matrix_mdev->kvm) + kvm_arch_crypto_set_masks(matrix_mdev->kvm, + matrix_mdev->shadow_apcb.apm, + matrix_mdev->shadow_apcb.aqm, + matrix_mdev->shadow_apcb.adm); +} + +static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev) +{ + DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS); + + bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS); + bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm, + (unsigned long *)matrix_dev->info.adm, AP_DOMAINS); + + return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, + AP_DOMAINS); +} + +/* + * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev + * to ensure no queue devices are passed through to + * the guest that are not bound to the vfio_ap + * device driver. + * + * @matrix_mdev: the matrix mdev whose matrix is to be filtered. + * + * Note: If an APQN referencing a queue device that is not bound to the vfio_ap + * driver, its APID will be filtered from the guest's APCB. The matrix + * structure precludes filtering an individual APQN, so its APID will be + * filtered. 
+ * + * Return: a boolean value indicating whether the KVM guest's APCB was changed + * by the filtering or not. + */ +static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm, + struct ap_matrix_mdev *matrix_mdev) +{ + unsigned long apid, apqi, apqn; + DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES); + DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS); + struct vfio_ap_queue *q; + + bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES); + bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS); + vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb); + + /* + * Copy the adapters, domains and control domains to the shadow_apcb + * from the matrix mdev, but only those that are assigned to the host's + * AP configuration. + */ + bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm, + (unsigned long *)matrix_dev->info.apm, AP_DEVICES); + bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm, + (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS); + + for_each_set_bit_inv(apid, apm, AP_DEVICES) { + for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) { + /* + * If the APQN is not bound to the vfio_ap device + * driver, then we can't assign it to the guest's + * AP configuration. The AP architecture won't + * allow filtering of a single APQN, so let's filter + * the APID since an adapter represents a physical + * hardware device. + */ + apqn = AP_MKQID(apid, apqi); + q = vfio_ap_mdev_get_queue(matrix_mdev, apqn); + if (!q || q->reset_rc) { + clear_bit_inv(apid, + matrix_mdev->shadow_apcb.apm); + break; + } + } + } + + return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, + AP_DEVICES) || + !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, + AP_DOMAINS); +} + static int vfio_ap_mdev_probe(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev; @@ -468,20 +703,19 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev) matrix_mdev->mdev = mdev; vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix); matrix_mdev->pqap_hook = handle_pqap; - mutex_lock(&matrix_dev->lock); - list_add(&matrix_mdev->node, &matrix_dev->mdev_list); - mutex_unlock(&matrix_dev->lock); + vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb); + hash_init(matrix_mdev->qtable.queues); ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev); if (ret) goto err_list; dev_set_drvdata(&mdev->dev, matrix_mdev); + mutex_lock(&matrix_dev->mdevs_lock); + list_add(&matrix_mdev->node, &matrix_dev->mdev_list); + mutex_unlock(&matrix_dev->mdevs_lock); return 0; err_list: - mutex_lock(&matrix_dev->lock); - list_del(&matrix_mdev->node); - mutex_unlock(&matrix_dev->lock); vfio_uninit_group_dev(&matrix_mdev->vdev); kfree(matrix_mdev); err_dec_available: @@ -489,16 +723,62 @@ err_dec_available: return ret; } +static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev, + struct vfio_ap_queue *q) +{ + if (q) { + q->matrix_mdev = matrix_mdev; + hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn); + } +} + +static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn) +{ + struct vfio_ap_queue *q; + + q = vfio_ap_find_queue(apqn); + vfio_ap_mdev_link_queue(matrix_mdev, q); +} + +static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q) +{ + hash_del(&q->mdev_qnode); +} + +static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q) +{ + q->matrix_mdev = NULL; +} + +static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev) +{ + struct vfio_ap_queue *q; + 
unsigned long apid, apqi; + + for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) { + for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, + AP_DOMAINS) { + q = vfio_ap_mdev_get_queue(matrix_mdev, + AP_MKQID(apid, apqi)); + if (q) + q->matrix_mdev = NULL; + } + } +} + static void vfio_ap_mdev_remove(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev); vfio_unregister_group_dev(&matrix_mdev->vdev); - mutex_lock(&matrix_dev->lock); - vfio_ap_mdev_reset_queues(matrix_mdev); + mutex_lock(&matrix_dev->guests_lock); + mutex_lock(&matrix_dev->mdevs_lock); + vfio_ap_mdev_reset_queues(&matrix_mdev->qtable); + vfio_ap_mdev_unlink_fr_queues(matrix_mdev); list_del(&matrix_mdev->node); - mutex_unlock(&matrix_dev->lock); + mutex_unlock(&matrix_dev->mdevs_lock); + mutex_unlock(&matrix_dev->guests_lock); vfio_uninit_group_dev(&matrix_mdev->vdev); kfree(matrix_mdev); atomic_inc(&matrix_dev->available_instances); @@ -547,141 +827,48 @@ static struct attribute_group *vfio_ap_mdev_type_groups[] = { NULL, }; -struct vfio_ap_queue_reserved { - unsigned long *apid; - unsigned long *apqi; - bool reserved; -}; +#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \ + "already assigned to %s" -/** - * vfio_ap_has_queue - determines if the AP queue containing the target in @data - * - * @dev: an AP queue device - * @data: a struct vfio_ap_queue_reserved reference - * - * Flags whether the AP queue device (@dev) has a queue ID containing the APQN, - * apid or apqi specified in @data: - * - * - If @data contains both an apid and apqi value, then @data will be flagged - * as reserved if the APID and APQI fields for the AP queue device matches - * - * - If @data contains only an apid value, @data will be flagged as - * reserved if the APID field in the AP queue device matches - * - * - If @data contains only an apqi value, @data will be flagged as - * reserved if the APQI field in the AP queue device matches - * - * Return: 0 to indicate the input to function succeeded. Returns -EINVAL if - * @data does not contain either an apid or apqi. 
- */ -static int vfio_ap_has_queue(struct device *dev, void *data) +static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev, + unsigned long *apm, + unsigned long *aqm) { - struct vfio_ap_queue_reserved *qres = data; - struct ap_queue *ap_queue = to_ap_queue(dev); - ap_qid_t qid; - unsigned long id; - - if (qres->apid && qres->apqi) { - qid = AP_MKQID(*qres->apid, *qres->apqi); - if (qid == ap_queue->qid) - qres->reserved = true; - } else if (qres->apid && !qres->apqi) { - id = AP_QID_CARD(ap_queue->qid); - if (id == *qres->apid) - qres->reserved = true; - } else if (!qres->apid && qres->apqi) { - id = AP_QID_QUEUE(ap_queue->qid); - if (id == *qres->apqi) - qres->reserved = true; - } else { - return -EINVAL; - } - - return 0; -} - -/** - * vfio_ap_verify_queue_reserved - verifies that the AP queue containing - * @apid or @aqpi is reserved - * - * @apid: an AP adapter ID - * @apqi: an AP queue index - * - * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device - * driver according to the following rules: - * - * - If both @apid and @apqi are not NULL, then there must be an AP queue - * device bound to the vfio_ap driver with the APQN identified by @apid and - * @apqi - * - * - If only @apid is not NULL, then there must be an AP queue device bound - * to the vfio_ap driver with an APQN containing @apid - * - * - If only @apqi is not NULL, then there must be an AP queue device bound - * to the vfio_ap driver with an APQN containing @apqi - * - * Return: 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL. - */ -static int vfio_ap_verify_queue_reserved(unsigned long *apid, - unsigned long *apqi) -{ - int ret; - struct vfio_ap_queue_reserved qres; - - qres.apid = apid; - qres.apqi = apqi; - qres.reserved = false; - - ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL, - &qres, vfio_ap_has_queue); - if (ret) - return ret; - - if (qres.reserved) - return 0; - - return -EADDRNOTAVAIL; -} - -static int -vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev, - unsigned long apid) -{ - int ret; - unsigned long apqi; - unsigned long nbits = matrix_mdev->matrix.aqm_max + 1; - - if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits) - return vfio_ap_verify_queue_reserved(&apid, NULL); - - for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) { - ret = vfio_ap_verify_queue_reserved(&apid, &apqi); - if (ret) - return ret; - } + unsigned long apid, apqi; + const struct device *dev = mdev_dev(matrix_mdev->mdev); + const char *mdev_name = dev_name(dev); - return 0; + for_each_set_bit_inv(apid, apm, AP_DEVICES) + for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) + dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name); } /** - * vfio_ap_mdev_verify_no_sharing - verifies that the AP matrix is not configured + * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs * - * @matrix_mdev: the mediated matrix device + * @mdev_apm: mask indicating the APIDs of the APQNs to be verified + * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified * - * Verifies that the APQNs derived from the cross product of the AP adapter IDs - * and AP queue indexes comprising the AP matrix are not configured for another + * Verifies that each APQN derived from the Cartesian product of a bitmap of + * AP adapter IDs and AP queue indexes is not configured for any matrix * mediated device. AP queue sharing is not allowed. * - * Return: 0 if the APQNs are not shared; otherwise returns -EADDRINUSE. 
+ * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE. */ -static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev) +static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm, + unsigned long *mdev_aqm) { - struct ap_matrix_mdev *lstdev; + struct ap_matrix_mdev *matrix_mdev; DECLARE_BITMAP(apm, AP_DEVICES); DECLARE_BITMAP(aqm, AP_DOMAINS); - list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) { - if (matrix_mdev == lstdev) + list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { + /* + * If the input apm and aqm are fields of the matrix_mdev + * object, then move on to the next matrix_mdev. + */ + if (mdev_apm == matrix_mdev->matrix.apm && + mdev_aqm == matrix_mdev->matrix.aqm) continue; memset(apm, 0, sizeof(apm)); @@ -691,14 +878,16 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev) * We work on full longs, as we can only exclude the leftover * bits in non-inverse order. The leftover is all zeros. */ - if (!bitmap_and(apm, matrix_mdev->matrix.apm, - lstdev->matrix.apm, AP_DEVICES)) + if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm, + AP_DEVICES)) continue; - if (!bitmap_and(aqm, matrix_mdev->matrix.aqm, - lstdev->matrix.aqm, AP_DOMAINS)) + if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm, + AP_DOMAINS)) continue; + vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm); + return -EADDRINUSE; } @@ -706,6 +895,41 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev) } /** + * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are + * not reserved for the default zcrypt driver and + * are not assigned to another mdev. + * + * @matrix_mdev: the mdev to which the APQNs being validated are assigned. + * + * Return: One of the following values: + * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function, + * most likely -EBUSY indicating the ap_perms_mutex lock is already held. + * o EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the + * zcrypt default driver. + * o EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev + * o A zero indicating validation succeeded. + */ +static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev) +{ + if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm, + matrix_mdev->matrix.aqm)) + return -EADDRNOTAVAIL; + + return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm, + matrix_mdev->matrix.aqm); +} + +static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev, + unsigned long apid) +{ + unsigned long apqi; + + for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) + vfio_ap_mdev_link_apqn(matrix_mdev, + AP_MKQID(apid, apqi)); +} + +/** * assign_adapter_store - parses the APID from @buf and sets the * corresponding bit in the mediated matrix device's APM * @@ -734,6 +958,10 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev) * An APQN derived from the cross product of the APID being assigned * and the APQIs previously assigned is being used by another mediated * matrix device + * + * 5. -EAGAIN + * A lock required to validate the mdev's AP configuration could not + * be obtained. 
*/ static ssize_t assign_adapter_store(struct device *dev, struct device_attribute *attr, @@ -741,15 +969,11 @@ static ssize_t assign_adapter_store(struct device *dev, { int ret; unsigned long apid; + DECLARE_BITMAP(apm_delta, AP_DEVICES); struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - mutex_lock(&matrix_dev->lock); - - /* If the KVM guest is running, disallow assignment of adapter */ - if (matrix_mdev->kvm) { - ret = -EBUSY; - goto done; - } + mutex_lock(&ap_perms_mutex); + get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &apid); if (ret) @@ -760,33 +984,97 @@ static ssize_t assign_adapter_store(struct device *dev, goto done; } - /* - * Set the bit in the AP mask (APM) corresponding to the AP adapter - * number (APID). The bits in the mask, from most significant to least - * significant bit, correspond to APIDs 0-255. - */ - ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid); - if (ret) + set_bit_inv(apid, matrix_mdev->matrix.apm); + + ret = vfio_ap_mdev_validate_masks(matrix_mdev); + if (ret) { + clear_bit_inv(apid, matrix_mdev->matrix.apm); goto done; + } - set_bit_inv(apid, matrix_mdev->matrix.apm); + vfio_ap_mdev_link_adapter(matrix_mdev, apid); + memset(apm_delta, 0, sizeof(apm_delta)); + set_bit_inv(apid, apm_delta); - ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev); - if (ret) - goto share_err; + if (vfio_ap_mdev_filter_matrix(apm_delta, + matrix_mdev->matrix.aqm, matrix_mdev)) + vfio_ap_mdev_update_guest_apcb(matrix_mdev); ret = count; - goto done; - -share_err: - clear_bit_inv(apid, matrix_mdev->matrix.apm); done: - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_mdev(matrix_mdev); + mutex_unlock(&ap_perms_mutex); return ret; } static DEVICE_ATTR_WO(assign_adapter); +static struct vfio_ap_queue +*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev, + unsigned long apid, unsigned long apqi) +{ + struct vfio_ap_queue *q = NULL; + + q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi)); + /* If the queue is assigned to the matrix mdev, unlink it. */ + if (q) + vfio_ap_unlink_queue_fr_mdev(q); + + return q; +} + +/** + * vfio_ap_mdev_unlink_adapter - unlink all queues associated with unassigned + * adapter from the matrix mdev to which the + * adapter was assigned. + * @matrix_mdev: the matrix mediated device to which the adapter was assigned. + * @apid: the APID of the unassigned adapter. + * @qtable: table for storing queues associated with unassigned adapter. 
+ */ +static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev, + unsigned long apid, + struct ap_queue_table *qtable) +{ + unsigned long apqi; + struct vfio_ap_queue *q; + + for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) { + q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi); + + if (q && qtable) { + if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && + test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) + hash_add(qtable->queues, &q->mdev_qnode, + q->apqn); + } + } +} + +static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev, + unsigned long apid) +{ + int loop_cursor; + struct vfio_ap_queue *q; + struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL); + + hash_init(qtable->queues); + vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable); + + if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) { + clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); + vfio_ap_mdev_update_guest_apcb(matrix_mdev); + } + + vfio_ap_mdev_reset_queues(qtable); + + hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) { + vfio_ap_unlink_mdev_fr_queue(q); + hash_del(&q->mdev_qnode); + } + + kfree(qtable); +} + /** * unassign_adapter_store - parses the APID from @buf and clears the * corresponding bit in the mediated matrix device's APM @@ -810,13 +1098,7 @@ static ssize_t unassign_adapter_store(struct device *dev, unsigned long apid; struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - mutex_lock(&matrix_dev->lock); - - /* If the KVM guest is running, disallow unassignment of adapter */ - if (matrix_mdev->kvm) { - ret = -EBUSY; - goto done; - } + get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &apid); if (ret) @@ -828,31 +1110,22 @@ static ssize_t unassign_adapter_store(struct device *dev, } clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm); + vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid); ret = count; done: - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_mdev(matrix_mdev); return ret; } static DEVICE_ATTR_WO(unassign_adapter); -static int -vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev, - unsigned long apqi) +static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev, + unsigned long apqi) { - int ret; unsigned long apid; - unsigned long nbits = matrix_mdev->matrix.apm_max + 1; - - if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits) - return vfio_ap_verify_queue_reserved(NULL, &apqi); - for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) { - ret = vfio_ap_verify_queue_reserved(&apid, &apqi); - if (ret) - return ret; - } - - return 0; + for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) + vfio_ap_mdev_link_apqn(matrix_mdev, + AP_MKQID(apid, apqi)); } /** @@ -884,6 +1157,10 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev, * An APQN derived from the cross product of the APQI being assigned * and the APIDs previously assigned is being used by another mediated * matrix device + * + * 5. -EAGAIN + * The lock required to validate the mdev's AP configuration could not + * be obtained. 
*/ static ssize_t assign_domain_store(struct device *dev, struct device_attribute *attr, @@ -891,47 +1168,89 @@ static ssize_t assign_domain_store(struct device *dev, { int ret; unsigned long apqi; + DECLARE_BITMAP(aqm_delta, AP_DOMAINS); struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - unsigned long max_apqi = matrix_mdev->matrix.aqm_max; - mutex_lock(&matrix_dev->lock); - - /* If the KVM guest is running, disallow assignment of domain */ - if (matrix_mdev->kvm) { - ret = -EBUSY; - goto done; - } + mutex_lock(&ap_perms_mutex); + get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &apqi); if (ret) goto done; - if (apqi > max_apqi) { + + if (apqi > matrix_mdev->matrix.aqm_max) { ret = -ENODEV; goto done; } - ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi); - if (ret) + set_bit_inv(apqi, matrix_mdev->matrix.aqm); + + ret = vfio_ap_mdev_validate_masks(matrix_mdev); + if (ret) { + clear_bit_inv(apqi, matrix_mdev->matrix.aqm); goto done; + } - set_bit_inv(apqi, matrix_mdev->matrix.aqm); + vfio_ap_mdev_link_domain(matrix_mdev, apqi); + memset(aqm_delta, 0, sizeof(aqm_delta)); + set_bit_inv(apqi, aqm_delta); - ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev); - if (ret) - goto share_err; + if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta, + matrix_mdev)) + vfio_ap_mdev_update_guest_apcb(matrix_mdev); ret = count; - goto done; - -share_err: - clear_bit_inv(apqi, matrix_mdev->matrix.aqm); done: - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_mdev(matrix_mdev); + mutex_unlock(&ap_perms_mutex); return ret; } static DEVICE_ATTR_WO(assign_domain); +static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev, + unsigned long apqi, + struct ap_queue_table *qtable) +{ + unsigned long apid; + struct vfio_ap_queue *q; + + for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) { + q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi); + + if (q && qtable) { + if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && + test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) + hash_add(qtable->queues, &q->mdev_qnode, + q->apqn); + } + } +} + +static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev, + unsigned long apqi) +{ + int loop_cursor; + struct vfio_ap_queue *q; + struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL); + + hash_init(qtable->queues); + vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable); + + if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) { + clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm); + vfio_ap_mdev_update_guest_apcb(matrix_mdev); + } + + vfio_ap_mdev_reset_queues(qtable); + + hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) { + vfio_ap_unlink_mdev_fr_queue(q); + hash_del(&q->mdev_qnode); + } + + kfree(qtable); +} /** * unassign_domain_store - parses the APQI from @buf and clears the @@ -956,13 +1275,7 @@ static ssize_t unassign_domain_store(struct device *dev, unsigned long apqi; struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - mutex_lock(&matrix_dev->lock); - - /* If the KVM guest is running, disallow unassignment of domain */ - if (matrix_mdev->kvm) { - ret = -EBUSY; - goto done; - } + get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &apqi); if (ret) @@ -974,10 +1287,11 @@ static ssize_t unassign_domain_store(struct device *dev, } clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm); + vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi); ret = count; done: - mutex_unlock(&matrix_dev->lock); + 
release_update_locks_for_mdev(matrix_mdev); return ret; } static DEVICE_ATTR_WO(unassign_domain); @@ -1004,13 +1318,7 @@ static ssize_t assign_control_domain_store(struct device *dev, unsigned long id; struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - mutex_lock(&matrix_dev->lock); - - /* If the KVM guest is running, disallow assignment of control domain */ - if (matrix_mdev->kvm) { - ret = -EBUSY; - goto done; - } + get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &id); if (ret) @@ -1027,9 +1335,12 @@ static ssize_t assign_control_domain_store(struct device *dev, * number of control domains that can be assigned. */ set_bit_inv(id, matrix_mdev->matrix.adm); + if (vfio_ap_mdev_filter_cdoms(matrix_mdev)) + vfio_ap_mdev_update_guest_apcb(matrix_mdev); + ret = count; done: - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_mdev(matrix_mdev); return ret; } static DEVICE_ATTR_WO(assign_control_domain); @@ -1055,28 +1366,28 @@ static ssize_t unassign_control_domain_store(struct device *dev, int ret; unsigned long domid; struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - unsigned long max_domid = matrix_mdev->matrix.adm_max; - mutex_lock(&matrix_dev->lock); - - /* If a KVM guest is running, disallow unassignment of control domain */ - if (matrix_mdev->kvm) { - ret = -EBUSY; - goto done; - } + get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &domid); if (ret) goto done; - if (domid > max_domid) { + + if (domid > matrix_mdev->matrix.adm_max) { ret = -ENODEV; goto done; } |