[PATCH net-next v22 03/13] netdev: support binding dma-buf to netdevice
Jakub Kicinski
kuba at kernel.org
Wed Aug 28 02:08:05 UTC 2024
On Sun, 25 Aug 2024 04:15:01 +0000 Mina Almasry wrote:
> +u32 dev_get_min_mp_channel_count(const struct net_device *dev)
> +{
> + u32 i, max = 0;
> +
> + ASSERT_RTNL();
> +
> + for (i = 0; i < dev->real_num_rx_queues; i++)
> + if (dev->_rx[i].mp_params.mp_priv)
> + /* The channel count is the idx plus 1. */
> + max = i + 1;
Invert the loop so you're walking from the highest index down; then you can
return i + 1 as soon as you hit a queue with an mp bound, and return 0
after the loop.
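Something like this, perhaps (untested):

	for (i = dev->real_num_rx_queues; i--; )
		if (dev->_rx[i].mp_params.mp_priv)
			/* The channel count is the idx plus 1. */
			return i + 1;

	return 0;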
> + return max;
> +}
> +
> /**
> * dev_index_reserve() - allocate an ifindex in a namespace
> * @net: the applicable net namespace
> diff --git a/net/core/devmem.c b/net/core/devmem.c
> +#include <linux/types.h>
> +#include <linux/mm.h>
> +#include <linux/netdevice.h>
> +#include <trace/events/page_pool.h>
> +#include <net/netdev_rx_queue.h>
> +#include <net/page_pool/types.h>
> +#include <net/page_pool/helpers.h>
> +#include <linux/genalloc.h>
> +#include <linux/dma-buf.h>
> +#include <net/devmem.h>
> +#include <net/netdev_queues.h>
Please sort include files alphabetically.
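FWIW, sorted they would come out as:

	#include <linux/dma-buf.h>
	#include <linux/genalloc.h>
	#include <linux/mm.h>
	#include <linux/netdevice.h>
	#include <linux/types.h>
	#include <net/devmem.h>
	#include <net/netdev_queues.h>
	#include <net/netdev_rx_queue.h>
	#include <net/page_pool/helpers.h>
	#include <net/page_pool/types.h>
	#include <trace/events/page_pool.h>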
> +#if defined(CONFIG_DMA_SHARED_BUFFER) && defined(CONFIG_GENERIC_ALLOCATOR)
Could you create a hidden Kconfig symbol for this feature and use it to make
building this entire file conditional? A hidden Kconfig entry has no prompt
and no help text, like config NET_DEVLINK, but it can still have
dependencies.
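Something along these lines, maybe (NET_DEVMEM is just a placeholder name):

	config NET_DEVMEM
		def_bool y
		depends on DMA_SHARED_BUFFER
		depends on GENERIC_ALLOCATOR

plus the matching obj-$(CONFIG_NET_DEVMEM) += devmem.o rule in
net/core/Makefile, so the #if defined() checks in the .c file can go away.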
> +void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
> +{
> + struct netdev_rx_queue *rxq;
> + unsigned long xa_idx;
> + unsigned int rxq_idx;
> +
> + if (binding->list.next)
> + list_del(&binding->list);
> +
> + xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
> + if (rxq->mp_params.mp_priv == binding) {
WARN_ON(rxq->mp_params.mp_priv != binding) ?
We know we're bound to this queue, nobody should be able to replace
the mp, right?
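IOW, if that invariant holds, the loop body could be flattened to something
like (sketch):

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		WARN_ON(rxq->mp_params.mp_priv != binding);
		rxq->mp_params.mp_priv = NULL;

		rxq_idx = get_netdev_rx_queue_index(rxq);
		WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
	}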
> + rxq->mp_params.mp_priv = NULL;
> +
> + rxq_idx = get_netdev_rx_queue_index(rxq);
> +
> + WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
> + }
> + }
> +
> + xa_erase(&net_devmem_dmabuf_bindings, binding->id);
> +
> + net_devmem_dmabuf_binding_put(binding);
> +}
> +
> +int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
> + struct net_devmem_dmabuf_binding *binding)
> +{
> + struct netdev_rx_queue *rxq;
> + u32 xa_idx;
> + int err;
> +
> + if (rxq_idx >= dev->real_num_rx_queues)
> + return -ERANGE;
> +
> + rxq = __netif_get_rx_queue(dev, rxq_idx);
> + if (rxq->mp_params.mp_priv)
> + return -EEXIST;
> +
> +#ifdef CONFIG_XDP_SOCKETS
> + if (rxq->pool)
> + return -EEXIST;
Make this -EBUSY instead, plus an extack message like "designated queue
already in use by AF_XDP".
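That is, assuming an extack gets plumbed into this helper (it isn't in the
current signature), something like:

	if (rxq->pool) {
		NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
		return -EBUSY;
	}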
> +#endif
> +
> + if (dev_xdp_prog_count(dev))
> + return -EEXIST;
This also needs an extack, but since it's not queue-specific, shouldn't it
live inside net_devmem_bind_dmabuf()? Or do you anticipate this function
being reused by non-dmabuf code?
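Wherever it ends up, with an extack it would look roughly like this (the
message text is only an example):

	if (dev_xdp_prog_count(dev)) {
		NL_SET_ERR_MSG(extack, "unable to bind dmabuf to a device with an XDP program attached");
		return -EEXIST;
	}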
> +void dev_dmabuf_uninstall(struct net_device *dev)
> +{
> + struct net_devmem_dmabuf_binding *binding;
> + struct netdev_rx_queue *rxq;
> + unsigned long xa_idx;
> + unsigned int i;
> +
> + for (i = 0; i < dev->real_num_rx_queues; i++) {
> + binding = dev->_rx[i].mp_params.mp_priv;
> + if (!binding)
> + continue;
> +
> + xa_for_each(&binding->bound_rxqs, xa_idx, rxq)
> + if (rxq == &dev->_rx[i])
> + xa_erase(&binding->bound_rxqs, xa_idx);
Add a break; after the xa_erase() - I don't think we can store the same
queue twice.
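i.e. (sketch):

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq)
		if (rxq == &dev->_rx[i]) {
			xa_erase(&binding->bound_rxqs, xa_idx);
			break;
		}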
> + }
> +}
> +#endif
> diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
> index 2d726e65211d..269faa37f84e 100644
> --- a/net/core/netdev-genl.c
> +++ b/net/core/netdev-genl.c
> @@ -10,6 +10,7 @@
> #include <net/netdev_rx_queue.h>
> #include <net/netdev_queues.h>
> #include <net/busy_poll.h>
> +#include <net/devmem.h>
include order
> + return genlmsg_reply(rsp, info);
Should we goto err_unbind if genlmsg_reply() fails?
Shouldn't really happen unless the socket is full, but it's simple enough to fix.
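Something like (err_unbind being whatever label already unwinds the binding
in this function):

	err = genlmsg_reply(rsp, info);
	if (err)
		goto err_unbind;

	return 0;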