[RFT][PATCH v2 6/9] vfio/ccw: Change pa_pfn list to pa_iova list
Eric Farman
farman at linux.ibm.com
Fri Jul 8 20:26:23 UTC 2022
On Tue, 2022-07-05 at 23:27 -0700, Nicolin Chen wrote:
> The vfio_ccw_cp code maintains both an iova list and its PFN list
> because the vfio_pin/unpin_pages API wanted a PFN list. Since
> vfio_pin/unpin_pages() now accepts "iova", maintain only the pa_iova
> list and rename all "pfn_array" strings to "page_array", so as to
> simplify the code.
>
> Signed-off-by: Nicolin Chen <nicolinc at nvidia.com>
Reviewed-by: Eric Farman <farman at linux.ibm.com>
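
For anyone skimming the archive: the point of the rename is that
vfio_pin_pages()/vfio_unpin_pages() now take the guest IOVA (a dma_addr_t)
directly, so the driver no longer has to carry a shifted PFN copy of the
same addresses. A minimal before/after view of one pin call site, lifted
from the hunks below (illustrative excerpt, not part of the patch):

    /* before: iova kept as a PFN and shifted back up for the API */
    ret = vfio_pin_pages(vdev, *first << PAGE_SHIFT, npage,
                         IOMMU_READ | IOMMU_WRITE, &pa->pa_pfn[pinned]);

    /* after: iova kept as a dma_addr_t and passed through as-is */
    ret = vfio_pin_pages(vdev, *first, npage,
                         IOMMU_READ | IOMMU_WRITE, &pa->pa_pfn[pinned]);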
> ---
> drivers/s390/cio/vfio_ccw_cp.c | 135 ++++++++++++++++-----------------
> 1 file changed, 64 insertions(+), 71 deletions(-)
>
> diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
> index a739262f988d..3854c3d573f5 100644
> --- a/drivers/s390/cio/vfio_ccw_cp.c
> +++ b/drivers/s390/cio/vfio_ccw_cp.c
> @@ -18,11 +18,9 @@
> #include "vfio_ccw_cp.h"
> #include "vfio_ccw_private.h"
>
> -struct pfn_array {
> - /* Starting guest physical I/O address. */
> - unsigned long pa_iova;
> - /* Array that stores PFNs of the pages need to pin. */
> - unsigned long *pa_iova_pfn;
> +struct page_array {
> + /* Array that stores pages need to pin. */
> + dma_addr_t *pa_iova;
> /* Array that receives PFNs of the pages pinned. */
> unsigned long *pa_pfn;
> /* Number of pages pinned from @pa_iova. */
> @@ -37,53 +35,50 @@ struct ccwchain {
> /* Count of the valid ccws in chain. */
> int ch_len;
> /* Pinned PAGEs for the original data. */
> - struct pfn_array *ch_pa;
> + struct page_array *ch_pa;
> };
>
> /*
> - * pfn_array_alloc() - alloc memory for PFNs
> - * @pa: pfn_array on which to perform the operation
> + * page_array_alloc() - alloc memory for page array
> + * @pa: page_array on which to perform the operation
> * @iova: target guest physical address
> * @len: number of bytes that should be pinned from @iova
> *
> - * Attempt to allocate memory for PFNs.
> + * Attempt to allocate memory for page array.
> *
> - * Usage of pfn_array:
> - * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
> + * Usage of page_array:
> + * We expect (pa_nr == 0) and (pa_iova == NULL), any field in
> * this structure will be filled in by this function.
> *
> * Returns:
> - * 0 if PFNs are allocated
> - * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
> + * 0 if page array is allocated
> + * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova is not NULL
> * -ENOMEM if alloc failed
> */
> -static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
> +static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
> {
> int i;
>
> - if (pa->pa_nr || pa->pa_iova_pfn)
> + if (pa->pa_nr || pa->pa_iova)
> return -EINVAL;
>
> - pa->pa_iova = iova;
> -
> pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
> if (!pa->pa_nr)
> return -EINVAL;
>
> - pa->pa_iova_pfn = kcalloc(pa->pa_nr,
> - sizeof(*pa->pa_iova_pfn) +
> - sizeof(*pa->pa_pfn),
> - GFP_KERNEL);
> - if (unlikely(!pa->pa_iova_pfn)) {
> + pa->pa_iova = kcalloc(pa->pa_nr,
> + sizeof(*pa->pa_iova) + sizeof(*pa->pa_pfn),
> + GFP_KERNEL);
> + if (unlikely(!pa->pa_iova)) {
> pa->pa_nr = 0;
> return -ENOMEM;
> }
> - pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
> + pa->pa_pfn = (unsigned long *)&pa->pa_iova[pa->pa_nr];
>
> - pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
> + pa->pa_iova[0] = iova;
> pa->pa_pfn[0] = -1ULL;
> for (i = 1; i < pa->pa_nr; i++) {
> - pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
> + pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
> pa->pa_pfn[i] = -1ULL;
> }
>
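
A note on the kcalloc() in the hunk above: the iova and pfn arrays still
share a single allocation of pa_nr slots, each sized
sizeof(dma_addr_t) + sizeof(unsigned long), with the dma_addr_t entries
first and the unsigned long pfn entries starting right behind them; that
is what the (unsigned long *)&pa->pa_iova[pa->pa_nr] cast expresses. A
small userspace sketch of the same layout trick (made-up names, calloc()
standing in for kcalloc(), illustrative only):

    #include <stdint.h>
    #include <stdlib.h>

    typedef uint64_t demo_dma_addr_t;      /* stand-in for the kernel dma_addr_t */

    struct demo_page_array {
            demo_dma_addr_t *pa_iova;      /* guest iova of each page to pin */
            unsigned long *pa_pfn;         /* host pfn filled in after pinning */
            int pa_nr;
    };

    static int demo_page_array_alloc(struct demo_page_array *pa, int nr)
    {
            /* one buffer holds both arrays: nr iovas followed by nr pfns */
            pa->pa_iova = calloc(nr, sizeof(*pa->pa_iova) + sizeof(*pa->pa_pfn));
            if (!pa->pa_iova)
                    return -1;
            pa->pa_pfn = (unsigned long *)&pa->pa_iova[nr];
            pa->pa_nr = nr;
            return 0;
    }

    int main(void)
    {
            struct demo_page_array pa;
            int ret = demo_page_array_alloc(&pa, 4);

            free(pa.pa_iova);
            return ret;
    }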
> @@ -91,30 +86,30 @@ static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
> }
>
> /*
> - * pfn_array_unpin() - Unpin user pages in memory
> - * @pa: pfn_array on which to perform the operation
> + * page_array_unpin() - Unpin user pages in memory
> + * @pa: page_array on which to perform the operation
> * @vdev: the vfio device to perform the operation
> * @pa_nr: number of user pages to unpin
> *
> * Only unpin if any pages were pinned to begin with, i.e. pa_nr > 0,
> * otherwise only clear pa->pa_nr
> */
> -static void pfn_array_unpin(struct pfn_array *pa,
> - struct vfio_device *vdev, int pa_nr)
> +static void page_array_unpin(struct page_array *pa,
> + struct vfio_device *vdev, int pa_nr)
> {
> int unpinned = 0, npage = 1;
>
> while (unpinned < pa_nr) {
> - unsigned long *first = &pa->pa_iova_pfn[unpinned];
> - unsigned long *last = &first[npage];
> + dma_addr_t *first = &pa->pa_iova[unpinned];
> + dma_addr_t *last = &first[npage];
>
> if (unpinned + npage < pa_nr &&
> - *first + npage == *last) {
> + *first + npage * PAGE_SIZE == *last) {
> npage++;
> continue;
> }
>
> - vfio_unpin_pages(vdev, *first << PAGE_SHIFT, npage);
> + vfio_unpin_pages(vdev, *first, npage);
> unpinned += npage;
> npage = 1;
> }
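
The unpin loop above (and the pin loop in the next hunk) keeps the existing
coalescing behaviour, just expressed in bytes now: consecutive iovas that
are exactly PAGE_SIZE apart get folded into one vfio_unpin_pages() /
vfio_pin_pages() call. A standalone sketch of that coalescing, with a
printf() standing in for the vfio call (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SIZE 0x1000UL

    static void demo_unpin_all(const uint64_t *pa_iova, int pa_nr)
    {
            int unpinned = 0, npage = 1;

            while (unpinned < pa_nr) {
                    const uint64_t *first = &pa_iova[unpinned];
                    const uint64_t *last = &first[npage];

                    if (unpinned + npage < pa_nr &&
                        *first + npage * DEMO_PAGE_SIZE == *last) {
                            npage++;        /* still contiguous, grow the batch */
                            continue;
                    }
                    printf("unpin %d page(s) at 0x%llx\n",
                           npage, (unsigned long long)*first);
                    unpinned += npage;
                    npage = 1;
            }
    }

    int main(void)
    {
            /* prints "unpin 3 page(s) at 0x1000" then "unpin 1 page(s) at 0x8000" */
            const uint64_t iovas[] = { 0x1000, 0x2000, 0x3000, 0x8000 };

            demo_unpin_all(iovas, 4);
            return 0;
    }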
> @@ -123,30 +118,30 @@ static void pfn_array_unpin(struct pfn_array *pa,
> }
>
> /*
> - * pfn_array_pin() - Pin user pages in memory
> - * @pa: pfn_array on which to perform the operation
> + * page_array_pin() - Pin user pages in memory
> + * @pa: page_array on which to perform the operation
> * @mdev: the mediated device to perform pin operations
> *
> * Returns number of pages pinned upon success.
> * If the pin request partially succeeds, or fails completely,
> * all pages are left unpinned and a negative error value is returned.
> */
> -static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev)
> +static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
> {
> int pinned = 0, npage = 1;
> int ret = 0;
>
> while (pinned < pa->pa_nr) {
> - unsigned long *first = &pa->pa_iova_pfn[pinned];
> - unsigned long *last = &first[npage];
> + dma_addr_t *first = &pa->pa_iova[pinned];
> + dma_addr_t *last = &first[npage];
>
> if (pinned + npage < pa->pa_nr &&
> - *first + npage == *last) {
> + *first + npage * PAGE_SIZE == *last) {
> npage++;
> continue;
> }
>
> - ret = vfio_pin_pages(vdev, *first << PAGE_SHIFT, npage,
> + ret = vfio_pin_pages(vdev, *first, npage,
> IOMMU_READ | IOMMU_WRITE,
> &pa->pa_pfn[pinned]);
> if (ret < 0) {
> @@ -163,32 +158,30 @@ static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev)
> return ret;
>
> err_out:
> - pfn_array_unpin(pa, vdev, pinned);
> + page_array_unpin(pa, vdev, pinned);
> return ret;
> }
>
> /* Unpin the pages before releasing the memory. */
> -static void pfn_array_unpin_free(struct pfn_array *pa, struct vfio_device *vdev)
> +static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev)
> {
> - pfn_array_unpin(pa, vdev, pa->pa_nr);
> - kfree(pa->pa_iova_pfn);
> + page_array_unpin(pa, vdev, pa->pa_nr);
> + kfree(pa->pa_iova);
> }
>
> -static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
> +static bool page_array_iova_pinned(struct page_array *pa, unsigned long iova)
> {
> - unsigned long iova_pfn = iova >> PAGE_SHIFT;
> int i;
>
> for (i = 0; i < pa->pa_nr; i++)
> - if (pa->pa_iova_pfn[i] == iova_pfn)
> + if (pa->pa_iova[i] == iova)
> return true;
>
> return false;
> }
> -/* Create the list of IDAL words for a pfn_array. */
> -static inline void pfn_array_idal_create_words(
> - struct pfn_array *pa,
> - unsigned long *idaws)
> +/* Create the list of IDAL words for a page_array. */
> +static inline void page_array_idal_create_words(struct page_array *pa,
> + unsigned long *idaws)
> {
> int i;
>
> @@ -204,7 +197,7 @@ static inline void pfn_array_idal_create_words(
> idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
>
> /* Adjust the first IDAW, since it may not start on a page boundary */
> - idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
> + idaws[0] += pa->pa_iova[0] & (PAGE_SIZE - 1);
> }
>
> static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
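
One detail worth keeping in mind for the page_array_idal_create_words()
hunk: the IDAWs are built from the pinned host PFNs, and only the first
IDAW keeps the intra-page offset of the original guest address. A tiny
worked example with made-up numbers (illustrative only, not from the
patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_shift = 12;               /* 4K pages */
            unsigned long pfn = 0x50000UL;               /* pinned host pfn of page 0 */
            unsigned long iova0 = 0x20001800UL;          /* guest iova, 0x800 into its page */
            unsigned long idaw0 = pfn << page_shift;     /* 0x50000000 */

            idaw0 += iova0 & ((1UL << page_shift) - 1);  /* 0x50000800 */
            printf("idaws[0] = 0x%lx\n", idaw0);         /* later IDAWs stay page-aligned */
            return 0;
    }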
> @@ -236,18 +229,18 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
> static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
> unsigned long n)
> {
> - struct pfn_array pa = {0};
> + struct page_array pa = {0};
> u64 from;
> int i, ret;
> unsigned long l, m;
>
> - ret = pfn_array_alloc(&pa, iova, n);
> + ret = page_array_alloc(&pa, iova, n);
> if (ret < 0)
> return ret;
>
> - ret = pfn_array_pin(&pa, vdev);
> + ret = page_array_pin(&pa, vdev);
> if (ret < 0) {
> - pfn_array_unpin_free(&pa, vdev);
> + page_array_unpin_free(&pa, vdev);
> return ret;
> }
>
> @@ -268,7 +261,7 @@ static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
> break;
> }
>
> - pfn_array_unpin_free(&pa, vdev);
> + page_array_unpin_free(&pa, vdev);
>
> return l;
> }
> @@ -371,7 +364,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
> chain->ch_ccw = (struct ccw1 *)data;
>
> data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
> - chain->ch_pa = (struct pfn_array *)data;
> + chain->ch_pa = (struct page_array *)data;
>
> chain->ch_len = len;
>
> @@ -555,7 +548,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
> struct vfio_device *vdev =
> &container_of(cp, struct vfio_ccw_private, cp)->vdev;
> struct ccw1 *ccw;
> - struct pfn_array *pa;
> + struct page_array *pa;
> u64 iova;
> unsigned long *idaws;
> int ret;
> @@ -589,13 +582,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
> }
>
> /*
> - * Allocate an array of pfn's for pages to pin/translate.
> + * Allocate an array of pages to pin/translate.
> * The number of pages is actually the count of the idaws
> * required for the data transfer, since we only only support
> * 4K IDAWs today.
> */
> pa = chain->ch_pa + idx;
> - ret = pfn_array_alloc(pa, iova, bytes);
> + ret = page_array_alloc(pa, iova, bytes);
> if (ret < 0)
> goto out_free_idaws;
>
> @@ -606,21 +599,21 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
> goto out_unpin;
>
> /*
> - * Copy guest IDAWs into pfn_array, in case the memory they
> + * Copy guest IDAWs into page_array, in case the memory they
> * occupy is not contiguous.
> */
> for (i = 0; i < idaw_nr; i++)
> - pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
> + pa->pa_iova[i] = idaws[i];
> } else {
> /*
> - * No action is required here; the iova addresses in pfn_array
> - * were initialized sequentially in pfn_array_alloc() beginning
> + * No action is required here; the iova addresses in page_array
> + * were initialized sequentially in page_array_alloc() beginning
> * with the contents of ccw->cda.
> */
> }
>
> if (ccw_does_data_transfer(ccw)) {
> - ret = pfn_array_pin(pa, vdev);
> + ret = page_array_pin(pa, vdev);
> if (ret < 0)
> goto out_unpin;
> } else {
> @@ -630,13 +623,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
> ccw->cda = (__u32) virt_to_phys(idaws);
> ccw->flags |= CCW_FLAG_IDA;
>
> - /* Populate the IDAL with pinned/translated addresses from pfn */
> - pfn_array_idal_create_words(pa, idaws);
> + /* Populate the IDAL with pinned/translated addresses from page */
> + page_array_idal_create_words(pa, idaws);
>
> return 0;
>
> out_unpin:
> - pfn_array_unpin_free(pa, vdev);
> + page_array_unpin_free(pa, vdev);
> out_free_idaws:
> kfree(idaws);
> out_init:
> @@ -742,7 +735,7 @@ void cp_free(struct channel_program *cp)
> cp->initialized = false;
> list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
> for (i = 0; i < chain->ch_len; i++) {
> - pfn_array_unpin_free(chain->ch_pa + i, vdev);
> + page_array_unpin_free(chain->ch_pa + i, vdev);
> ccwchain_cda_free(chain, i);
> }
> ccwchain_free(chain);
> @@ -918,7 +911,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
>
> list_for_each_entry(chain, &cp->ccwchain_list, next) {
> for (i = 0; i < chain->ch_len; i++)
> - if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
> + if (page_array_iova_pinned(chain->ch_pa + i, iova))
> return true;
> }
>