[Intel-gfx] [PATCH 1/5] drm/i915: Add ability for tracking buffer objects per client
Iddamsetty, Aravind
aravind.iddamsetty at intel.com
Tue Jul 11 07:48:00 UTC 2023
On 10-07-2023 18:50, Tvrtko Ursulin wrote:
>
> On 10/07/2023 11:44, Iddamsetty, Aravind wrote:
>> On 07-07-2023 18:32, Tvrtko Ursulin wrote:
>>> From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
>>>
>>> In order to show per client memory usage, let's add some infrastructure
>>> which enables tracking buffer objects owned by clients.
>>>
>>> We add a per client list protected by a new per client lock, and to
>>> support delayed destruction (post client exit) we make tracked objects
>>> hold references to the owning client.
>>>
>>> Also, object memory region teardown is moved to the existing RCU free
>>> callback to allow safe dereference from the fdinfo RCU read section.
>>>
>>> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
>>> ---
>>> drivers/gpu/drm/i915/gem/i915_gem_object.c | 13 +++++--
>>> .../gpu/drm/i915/gem/i915_gem_object_types.h | 12 +++++++
>>> drivers/gpu/drm/i915/i915_drm_client.c | 36 +++++++++++++++++++
>>> drivers/gpu/drm/i915/i915_drm_client.h | 32 +++++++++++++++++
>>> 4 files changed, 90 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
>>> index 97ac6fb37958..3dc4fbb67d2b 100644
>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
>>> @@ -105,6 +105,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
>>> INIT_LIST_HEAD(&obj->mm.link);
>>> +#ifdef CONFIG_PROC_FS
>>> + INIT_LIST_HEAD(&obj->client_link);
>>> +#endif
>>> +
>>> INIT_LIST_HEAD(&obj->lut_list);
>>> spin_lock_init(&obj->lut_lock);
>>> @@ -292,6 +296,10 @@ void __i915_gem_free_object_rcu(struct rcu_head *head)
>>> container_of(head, typeof(*obj), rcu);
>>> struct drm_i915_private *i915 = to_i915(obj->base.dev);
>>> + /* We need to keep this alive for RCU read access from fdinfo. */
>>> + if (obj->mm.n_placements > 1)
>>> + kfree(obj->mm.placements);
>>> +
>>> i915_gem_object_free(obj);
>>> GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
>>> @@ -388,9 +396,6 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
>>> if (obj->ops->release)
>>> obj->ops->release(obj);
>>> - if (obj->mm.n_placements > 1)
>>> - kfree(obj->mm.placements);
>>> -
>>> if (obj->shares_resv_from)
>>> i915_vm_resv_put(obj->shares_resv_from);
>>> @@ -441,6 +446,8 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
>>> GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
>>> + i915_drm_client_remove_object(obj);
>>> +
>>> /*
>>> * Before we free the object, make sure any pure RCU-only
>>> * read-side critical sections are complete, e.g.
>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
>>> index e72c57716bee..8de2b91b3edf 100644
>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
>>> @@ -300,6 +300,18 @@ struct drm_i915_gem_object {
>>> */
>>> struct i915_address_space *shares_resv_from;
>>> +#ifdef CONFIG_PROC_FS
>>> + /**
>>> + * @client: @i915_drm_client which created the object
>>> + */
>>> + struct i915_drm_client *client;
>>> +
>>> + /**
>>> + * @client_link: Link into @i915_drm_client.objects_list
>>> + */
>>> + struct list_head client_link;
>>> +#endif
>>> +
>>> union {
>>> struct rcu_head rcu;
>>> struct llist_node freed;
>>> diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
>>> index 2a44b3876cb5..2e5e69edc0f9 100644
>>> --- a/drivers/gpu/drm/i915/i915_drm_client.c
>>> +++ b/drivers/gpu/drm/i915/i915_drm_client.c
>>> @@ -28,6 +28,10 @@ struct i915_drm_client *i915_drm_client_alloc(void)
>>> kref_init(&client->kref);
>>> spin_lock_init(&client->ctx_lock);
>>> INIT_LIST_HEAD(&client->ctx_list);
>>> +#ifdef CONFIG_PROC_FS
>>> + spin_lock_init(&client->objects_lock);
>>> + INIT_LIST_HEAD(&client->objects_list);
>>> +#endif
>>> return client;
>>> }
>>> @@ -108,4 +112,36 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file)
>>> for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
>>> show_client_class(p, i915, file_priv->client, i);
>>> }
>>> +
>>> +void i915_drm_client_add_object(struct i915_drm_client *client,
>>> + struct drm_i915_gem_object *obj)
>>> +{
>>> + unsigned long flags;
>>> +
>>> + GEM_WARN_ON(obj->client);
>>> + GEM_WARN_ON(!list_empty(&obj->client_link));
>>> +
>>> + spin_lock_irqsave(&client->objects_lock, flags);
>>> + obj->client = i915_drm_client_get(client);
>>> + list_add_tail_rcu(&obj->client_link, &client->objects_list);
>>> + spin_unlock_irqrestore(&client->objects_lock, flags);
>>> +}
>>
>> Would it be nice to mention that we use this client infra only to track
>> internal objects, while the user-created ones are tracked through
>> file->object_idr, added at handle creation time?
>
> In this series it is indeed only used for that.
>
> But it would be nicer to use it to track everything, so fdinfo readers
> would not be hitting the idr lock, which would avoid injecting latency
> into real DRM clients.
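
Agreed, that would be nicer. Just to check my understanding of the reader
side: I assume a later patch in the series walks the new list under RCU
when printing the fdinfo memory stats, roughly along these lines (my own
sketch, not taken from this series):

	struct drm_i915_gem_object *obj;
	u64 total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(obj, &client->objects_list, client_link)
		total += obj->base.size; /* plus a per-region breakdown */
	rcu_read_unlock();

which I guess is also why the placements kfree() has to move into the RCU
free callback.
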
>
> The only fly in the ointment IMO is that I needed that drm core helper
> to be able to track dmabuf imports. Possibly something for flink too,
> did not look into that yet.
Wouldn't dmabuf be tracked via object_idr, as a new handle is created for it?
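
At least going by drm_prime.c, the import path ends up in the same handle
bookkeeping as a normal GEM create, roughly (call chain from memory, not
verbatim):

	drm_gem_prime_fd_to_handle(dev, file_priv, prime_fd, &handle)
	  -> drm_gem_handle_create_tail(file_priv, obj, &handle)
	       -> idr_alloc(&file_priv->object_idr, ...)

so if the idr ever becomes the source of truth for fdinfo, imports should
be covered as well.
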
Thanks,
Aravind.
>
> In light of all that, I can mention it in the cover letter next time
> round. It is a bit stale anyway (the cover letter).
>
> Regards,
>
> Tvrtko
>
>>> +bool i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
>>> +{
>>> + struct i915_drm_client *client = fetch_and_zero(&obj->client);
>>> + unsigned long flags;
>>> +
>>> + /* Object may not be associated with a client. */
>>> + if (!client)
>>> + return false;
>>> +
>>> + spin_lock_irqsave(&client->objects_lock, flags);
>>> + list_del_rcu(&obj->client_link);
>>> + spin_unlock_irqrestore(&client->objects_lock, flags);
>>> +
>>> + i915_drm_client_put(client);
>>> +
>>> + return true;
>>> +}
>>> #endif
>>> diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
>>> index 67816c912bca..5f58fdf7dcb8 100644
>>> --- a/drivers/gpu/drm/i915/i915_drm_client.h
>>> +++ b/drivers/gpu/drm/i915/i915_drm_client.h
>>> @@ -12,6 +12,9 @@
>>> #include <uapi/drm/i915_drm.h>
>>> +#include "i915_file_private.h"
>>> +#include "gem/i915_gem_object_types.h"
>>> +
>>> #define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE
>>> struct drm_file;
>>> @@ -25,6 +28,20 @@ struct i915_drm_client {
>>> spinlock_t ctx_lock; /* For add/remove from ctx_list. */
>>> struct list_head ctx_list; /* List of contexts belonging to client. */
>>> +#ifdef CONFIG_PROC_FS
>>> + /**
>>> + * @objects_lock: lock protecting @objects_list
>>> + */
>>> + spinlock_t objects_lock;
>>> +
>>> + /**
>>> + * @objects_list: list of objects created by this client
>>> + *
>>> + * Protected by @objects_lock.
>>> + */
>>> + struct list_head objects_list;
>>> +#endif
>>> +
>>> /**
>>> * @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
>>> */
>>> @@ -49,4 +66,19 @@ struct i915_drm_client *i915_drm_client_alloc(void);
>>> void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file);
>>> +#ifdef CONFIG_PROC_FS
>>> +void i915_drm_client_add_object(struct i915_drm_client *client,
>>> + struct drm_i915_gem_object *obj);
>>> +bool i915_drm_client_remove_object(struct drm_i915_gem_object *obj);
>>> +#else
>>> +static inline void i915_drm_client_add_object(struct i915_drm_client *client,
>>> + struct drm_i915_gem_object *obj)
>>> +{
>>> +}
>>> +
>>> +static inline bool i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
>>> +{
>>> +	return false;
>>> +}
>>> +#endif
>>> +
>>> #endif /* !__I915_DRM_CLIENT_H__ */
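
P.S. One more question on the add/remove pairing, to make sure I read it
right: the creating side (for now only the internal/driver-owned object
paths, as discussed above) would do something like

	/* hypothetical call site, only for illustration */
	obj = i915_gem_object_create_internal(i915, size);
	if (!IS_ERR(obj))
		i915_drm_client_add_object(file_priv->client, obj);

and the matching i915_drm_client_remove_object(), plus the final put of
the client reference, then happen from i915_gem_free_object(), so callers
never have to untrack objects themselves?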