[RFC PATCH 1/4] gpu: dxgkrnl: core code

Sasha Levin <sashal@kernel.org>
Tue May 19 16:32:31 UTC 2020


Add support for a Hyper-V based vGPU implementation that exposes the
DirectX API to Linux userspace: the user mode interface definitions
(d3dkmthk.h), the ioctl handlers (ioctl.c), the VM bus channel to the
host (dxgvmbus.c), per-process and adapter/device lifetime management
(dxgprocess.c, dxgadapter.c) and the handle manager (hmgr.c).
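
For illustration, a minimal and entirely hypothetical userspace sketch
of the ioctl interface defined in d3dkmthk.h; it assumes the driver
exposes its character device as /dev/dxg and that the structure
definitions are visible to userspace:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "d3dkmthk.h"	/* LX_DXENUMADAPTERS2 and friends */

	int main(void)
	{
		struct d3dkmt_adapterinfo info[D3DKMT_ADAPTERS_MAX];
		struct d3dkmt_enumadapters2 args = {
			.num_adapters = D3DKMT_ADAPTERS_MAX,
			.adapters = info,
		};
		int fd = open("/dev/dxg", O_RDWR);

		if (fd < 0)
			return 1;
		/* on success the driver writes back the adapter count */
		if (ioctl(fd, LX_DXENUMADAPTERS2, &args) == 0)
			printf("%u adapter(s)\n", args.num_adapters);
		close(fd);
		return 0;
	}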

Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/gpu/dxgkrnl/Kconfig      |   10 +
 drivers/gpu/dxgkrnl/Makefile     |   12 +
 drivers/gpu/dxgkrnl/d3dkmthk.h   | 1635 +++++++++
 drivers/gpu/dxgkrnl/dxgadapter.c | 1399 ++++++++
 drivers/gpu/dxgkrnl/dxgkrnl.h    |  913 ++++++
 drivers/gpu/dxgkrnl/dxgmodule.c  |  692 ++++
 drivers/gpu/dxgkrnl/dxgprocess.c |  355 ++
 drivers/gpu/dxgkrnl/dxgvmbus.c   | 2955 +++++++++++++++++
 drivers/gpu/dxgkrnl/dxgvmbus.h   |  859 +++++
 drivers/gpu/dxgkrnl/hmgr.c       |  593 ++++
 drivers/gpu/dxgkrnl/hmgr.h       |  107 +
 drivers/gpu/dxgkrnl/ioctl.c      | 5269 ++++++++++++++++++++++++++++++
 drivers/gpu/dxgkrnl/misc.c       |  280 ++
 drivers/gpu/dxgkrnl/misc.h       |  288 ++
 14 files changed, 15367 insertions(+)
 create mode 100644 drivers/gpu/dxgkrnl/Kconfig
 create mode 100644 drivers/gpu/dxgkrnl/Makefile
 create mode 100644 drivers/gpu/dxgkrnl/d3dkmthk.h
 create mode 100644 drivers/gpu/dxgkrnl/dxgadapter.c
 create mode 100644 drivers/gpu/dxgkrnl/dxgkrnl.h
 create mode 100644 drivers/gpu/dxgkrnl/dxgmodule.c
 create mode 100644 drivers/gpu/dxgkrnl/dxgprocess.c
 create mode 100644 drivers/gpu/dxgkrnl/dxgvmbus.c
 create mode 100644 drivers/gpu/dxgkrnl/dxgvmbus.h
 create mode 100644 drivers/gpu/dxgkrnl/hmgr.c
 create mode 100644 drivers/gpu/dxgkrnl/hmgr.h
 create mode 100644 drivers/gpu/dxgkrnl/ioctl.c
 create mode 100644 drivers/gpu/dxgkrnl/misc.c
 create mode 100644 drivers/gpu/dxgkrnl/misc.h

diff --git a/drivers/gpu/dxgkrnl/Kconfig b/drivers/gpu/dxgkrnl/Kconfig
new file mode 100644
index 000000000000..8bfec781b9a7
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/Kconfig
@@ -0,0 +1,10 @@
+#
+# dxgkrnl configuration
+#
+
+config DXGKRNL
+	tristate "Microsoft virtual GPU support"
+	depends on HYPERV
+	help
+	  This driver supports the Microsoft virtual GPU exposed to a
+	  guest by Hyper-V, making the DirectX (D3DKMT) interface
+	  available to Linux userspace.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called dxgkrnl.
+
diff --git a/drivers/gpu/dxgkrnl/Makefile b/drivers/gpu/dxgkrnl/Makefile
new file mode 100644
index 000000000000..11505a153d9d
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the Microsoft virtual GPU (dxgkrnl) driver.
+
+# Uncomment to enable printing debug messages by default
+#ccflags-y := -DDEBUG
+
+obj-$(CONFIG_DXGKRNL)	+= dxgkrnl.o
+dxgkrnl-y		:= dxgmodule.o hmgr.o misc.o dxgadapter.o ioctl.o \
+			   dxgvmbus.o dxgprocess.o
diff --git a/drivers/gpu/dxgkrnl/d3dkmthk.h b/drivers/gpu/dxgkrnl/d3dkmthk.h
new file mode 100644
index 000000000000..ed9a72986e57
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/d3dkmthk.h
@@ -0,0 +1,1635 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ *   Iouri Tarassov <iourit@microsoft.com>
+ *
+ * Dxgkrnl Graphics Port Driver user mode interface
+ *
+ */
+
+#ifndef _D3DKMTHK_H
+#define _D3DKMTHK_H
+
+#include "misc.h"
+
+#define D3DDDI_MAX_WRITTEN_PRIMARIES		16
+#define D3DDDI_MAX_MPO_PRESENT_DIRTY_RECTS	0xFFF
+
+#define D3DKMT_CREATEALLOCATION_MAX		1024
+#define D3DKMT_ADAPTERS_MAX			64
+#define D3DDDI_MAX_BROADCAST_CONTEXT		64
+#define D3DDDI_MAX_OBJECT_WAITED_ON		32
+#define D3DDDI_MAX_OBJECT_SIGNALED		32
+
+struct d3dkmt_adapterinfo {
+	d3dkmt_handle			adapter_handle;
+	struct winluid			adapter_luid;
+	uint				num_sources;
+	winbool				present_move_regions_preferred;
+};
+
+struct d3dkmt_enumadapters2 {
+	uint				num_adapters;
+	struct d3dkmt_adapterinfo	*adapters;
+};
+
+struct d3dkmt_closeadapter {
+	d3dkmt_handle			adapter_handle;
+};
+
+struct d3dkmt_openadapterfromluid {
+	struct winluid			adapter_luid;
+	d3dkmt_handle			adapter_handle;
+};
+
+struct d3dddi_allocationlist {
+	d3dkmt_handle			allocation;
+	union {
+		struct {
+			uint		write_operation		:1;
+			uint		do_not_retire_instance	:1;
+			uint		offer_priority		:3;
+			uint		reserved		:27;
+		};
+		uint			value;
+	};
+};
+
+struct d3dddi_patchlocationlist {
+	uint				allocation_index;
+	union {
+		struct {
+			uint		slot_id:24;
+			uint		reserved:8;
+		};
+		uint			value;
+	};
+	uint				driver_id;
+	uint				allocation_offset;
+	uint				patch_offset;
+	uint				split_offset;
+};
+
+struct d3dkmt_createdeviceflags {
+	uint				legacy_mode:1;
+	uint				request_vsync:1;
+	uint				disable_gpu_timeout:1;
+	uint				reserved:29;
+};
+
+struct d3dkmt_createdevice {
+	union {
+		d3dkmt_handle		adapter;
+		void			*adapter_pointer;
+	};
+	struct d3dkmt_createdeviceflags	flags;
+	d3dkmt_handle			device;
+	void				*command_buffer;
+	uint				command_buffer_size;
+	struct d3dddi_allocationlist	*allocation_list;
+	uint				allocation_list_size;
+	struct d3dddi_patchlocationlist	*patch_location_list;
+	uint				patch_location_list_size;
+};
+
+struct d3dkmt_destroydevice {
+	d3dkmt_handle			device;
+};
+
+enum d3dkmt_clienthint {
+	D3DKMT_CLIENTHINT_UNKNOWN	= 0,
+	D3DKMT_CLIENTHINT_OPENGL	= 1,
+	D3DKMT_CLIENTHINT_CDD		= 2,
+	D3DKMT_CLIENTHINT_DX7		= 7,
+	D3DKMT_CLIENTHINT_DX8		= 8,
+	D3DKMT_CLIENTHINT_DX9		= 9,
+	D3DKMT_CLIENTHINT_DX10		= 10,
+};
+
+struct d3dddi_createcontextflags {
+	union {
+		struct {
+			uint		null_rendering:1;
+			uint		initial_data:1;
+			uint		disable_gpu_timeout:1;
+			uint		synchronization_only:1;
+			uint		hw_queue_supported:1;
+			uint		reserved:27;
+		};
+		uint			value;
+	};
+};
+
+struct d3dkmt_createcontext {
+	d3dkmt_handle			device;
+	uint				node_ordinal;
+	uint				engine_affinity;
+	struct d3dddi_createcontextflags flags;
+	void				*priv_drv_data;
+	uint				priv_drv_data_size;
+	enum d3dkmt_clienthint		client_hint;
+	d3dkmt_handle			context;
+	void				*command_buffer;
+	uint				command_buffer_size;
+	struct d3dddi_allocationlist	*allocation_list;
+	uint				allocation_list_size;
+	struct d3dddi_patchlocationlist	*patch_location_list;
+	uint				patch_location_list_size;
+	d3dgpu_virtual_address		obsolete;
+};
+
+struct d3dkmt_destroycontext {
+	d3dkmt_handle			context;
+};
+
+struct d3dkmt_createcontextvirtual {
+	d3dkmt_handle			device;
+	uint				node_ordinal;
+	uint				engine_affinity;
+	struct d3dddi_createcontextflags flags;
+	void				*priv_drv_data;
+	uint				priv_drv_data_size;
+	enum d3dkmt_clienthint		client_hint;
+	d3dkmt_handle			context;
+};
+
+struct d3dddi_createhwcontextflags {
+	union {
+		struct {
+			uint		reserved:32;
+		};
+		uint			value;
+	};
+};
+
+struct d3dddi_createhwqueueflags {
+	union {
+		struct {
+			uint		disable_gpu_timeout:1;
+			uint		reserved:31;
+		};
+		uint			value;
+	};
+};
+
+enum d3dddi_pagingqueue_priority {
+	D3DDDI_PAGINGQUEUE_PRIORITY_BELOW_NORMAL	= -1,
+	D3DDDI_PAGINGQUEUE_PRIORITY_NORMAL		= 0,
+	D3DDDI_PAGINGQUEUE_PRIORITY_ABOVE_NORMAL	= 1,
+};
+
+struct d3dkmt_createpagingqueue {
+	d3dkmt_handle			device;
+	enum d3dddi_pagingqueue_priority priority;
+	d3dkmt_handle			paging_queue;
+	d3dkmt_handle			sync_object;
+	void				*fence_cpu_virtual_address;
+	uint				physical_adapter_index;
+};
+
+struct d3dddi_destroypagingqueue {
+	d3dkmt_handle			paging_queue;
+};
+
+struct d3dkmt_renderflags {
+	uint				resize_command_buffer:1;
+	uint				resize_allocation_list:1;
+	uint				resize_patch_location_list:1;
+	uint				null_rendering:1;
+	uint				present_redirected:1;
+	uint				render_km:1;
+	uint				render_km_readback:1;
+	uint				reserved:25;
+};
+
+struct d3dkmt_render {
+	union {
+		d3dkmt_handle		device;
+		d3dkmt_handle		context;
+	};
+	uint				command_offset;
+	uint				command_length;
+	uint				allocation_count;
+	uint				patch_location_count;
+	void				*new_command_buffer;
+	uint				new_command_buffer_size;
+	struct d3dddi_allocationlist	*new_allocation_list;
+	uint				new_allocation_list_size;
+	struct d3dddi_patchlocationlist	*new_patch_location_list;
+	uint				new_patch_location_list_size;
+	struct d3dkmt_renderflags	flags;
+	uint64_t			present_history_token;
+	uint				broadcast_context_count;
+	d3dkmt_handle			broadcast_context[D3DDDI_MAX_BROADCAST_CONTEXT];
+	uint				queued_buffer_count;
+	d3dgpu_virtual_address		obsolete;
+	void				*priv_drv_data;
+	uint				priv_drv_data_size;
+};
+
+enum d3dkmt_standardallocationtype {
+	D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP	= 1,
+};
+
+struct d3dkmt_standardallocation_existingheap {
+	uint64_t			size;
+};
+
+struct d3dkmt_createstandardallocationflags {
+	union {
+		struct {
+			uint		reserved:32;
+		};
+		uint			value;
+	};
+};
+
+struct d3dkmt_createstandardallocation {
+	enum d3dkmt_standardallocationtype		type;
+	struct d3dkmt_standardallocation_existingheap	existing_heap_data;
+	struct d3dkmt_createstandardallocationflags	flags;
+};
+
+struct d3dddi_allocationinfo2 {
+	d3dkmt_handle			allocation;
+	union {
+		winhandle		section;
+		const void		*sysmem;
+	};
+	void				*priv_drv_data;
+	uint				priv_drv_data_size;
+	uint				vidpn_source_id;
+	union {
+		struct {
+			uint		primary:1;
+			uint		stereo:1;
+			uint		override_priority:1;
+			uint		reserved:29;
+		};
+		uint			value;
+	} flags;
+	d3dgpu_virtual_address		gpu_virtual_address;
+	union {
+		uint			priority;
+		uint64_t		unused;
+	};
+	uint64_t			reserved[5];
+};
+
+struct d3dkmt_createallocationflags {
+	union {
+		struct {
+			uint		create_resource:1;
+			uint		create_shared:1;
+			uint		non_secure:1;
+			uint		create_protected:1;
+			uint		restrict_shared_access:1;
+			uint		existing_sysmem:1;
+			uint		nt_security_sharing:1;
+			uint		read_only:1;
+			uint		create_write_combined:1;
+			uint		create_cached:1;
+			uint		swap_chain_back_buffer:1;
+			uint		cross_adapter:1;
+			uint		open_cross_adapter:1;
+			uint		partial_shared_creation:1;
+			uint		zeroed:1;
+			uint		write_watch:1;
+			uint		standard_allocation:1;
+			uint		existing_section:1;
+			uint		reserved:14;
+		};
+		uint			value;
+	};
+};
+
+struct d3dkmt_createallocation {
+	d3dkmt_handle			device;
+	d3dkmt_handle			resource;
+	d3dkmt_handle			global_share;
+	const void			*private_runtime_data;
+	uint				private_runtime_data_size;
+	union {
+		struct d3dkmt_createstandardallocation *standard_allocation;
+		const void		*priv_drv_data;
+	};
+	uint				priv_drv_data_size;
+	uint				alloc_count;
+	struct d3dddi_allocationinfo2	*allocation_info;
+	struct d3dkmt_createallocationflags flags;
+	winhandle			private_runtime_resource_handle;
+};
+
+struct d3dddicb_destroyallocation2flags {
+	union {
+		struct {
+			uint		assume_not_in_use:1;
+			uint		synchronous_destroy:1;
+			uint		reserved:29;
+			uint		system_use_only:1;
+		};
+		uint			value;
+	};
+};
+
+struct d3dkmt_destroyallocation2 {
+	d3dkmt_handle			device;
+	d3dkmt_handle			resource;
+	const d3dkmt_handle		*allocations;
+	uint				alloc_count;
+	struct d3dddicb_destroyallocation2flags flags;
+};
+
+struct d3dddi_makeresident_flags {
+	union {
+		struct {
+			uint		cant_trim_further:1;
+			uint		must_succeed:1;
+			uint		reserved:30;
+		};
+		uint			value;
+	};
+};
+
+struct d3dddi_makeresident {
+	d3dkmt_handle			paging_queue;
+	uint				alloc_count;
+	const d3dkmt_handle		*allocation_list;
+	const uint			*priority_list;
+	struct d3dddi_makeresident_flags flags;
+	uint64_t			paging_fence_value;
+	uint64_t			num_bytes_to_trim;
+};
+
+struct d3dddi_evict_flags {
+	union {
+		struct {
+			uint		evict_only_if_necessary:1;
+			uint		not_written_to:1;
+			uint		reserved:30;
+		};
+		uint			value;
+	};
+};
+
+struct d3dkmt_evict {
+	d3dkmt_handle			device;
+	uint				alloc_count;
+	const d3dkmt_handle		*allocations;
+	struct d3dddi_evict_flags	flags;
+	uint64_t			num_bytes_to_trim;
+};
+
+struct d3dddigpuva_protection_type {
+	union {
+		struct {
+			uint64_t	write:1;
+			uint64_t	execute:1;
+			uint64_t	zero:1;
+			uint64_t	no_access:1;
+			uint64_t	system_use_only:1;
+			uint64_t	reserved:59;
+		};
+		uint64_t		value;
+	};
+};
+
+enum d3dddi_updategpuvirtualaddress_operation_type {
+	D3DDDI_UPDATEGPUVIRTUALADDRESS_MAP		= 0,
+	D3DDDI_UPDATEGPUVIRTUALADDRESS_UNMAP		= 1,
+	D3DDDI_UPDATEGPUVIRTUALADDRESS_COPY		= 2,
+	D3DDDI_UPDATEGPUVIRTUALADDRESS_MAP_PROTECT	= 3,
+};
+
+struct d3dddi_updategpuvirtualaddress_operation {
+	enum d3dddi_updategpuvirtualaddress_operation_type operation;
+	union {
+		struct {
+			d3dgpu_virtual_address	base_address;
+			d3dgpu_size_t		size;
+			d3dkmt_handle		allocation;
+			d3dgpu_size_t		allocation_offset;
+			d3dgpu_size_t		allocation_size;
+		} map;
+		struct {
+			d3dgpu_virtual_address	base_address;
+			d3dgpu_size_t		size;
+			d3dkmt_handle		allocation;
+			d3dgpu_size_t		allocation_offset;
+			d3dgpu_size_t		allocation_size;
+			struct d3dddigpuva_protection_type protection;
+			uint64_t		driver_protection;
+		} map_protect;
+		struct {
+			d3dgpu_virtual_address	base_address;
+			d3dgpu_size_t		size;
+			struct d3dddigpuva_protection_type protection;
+		} unmap;
+		struct {
+			d3dgpu_virtual_address	source_address;
+			d3dgpu_size_t		size;
+			d3dgpu_virtual_address	dest_address;
+		} copy;
+	};
+};
+
+enum d3dddigpuva_reservation_type {
+	D3DDDIGPUVA_RESERVE_NO_ACCESS		= 0,
+	D3DDDIGPUVA_RESERVE_ZERO		= 1,
+	D3DDDIGPUVA_RESERVE_NO_COMMIT		= 2
+};
+
+struct d3dkmt_updategpuvirtualaddress {
+	d3dkmt_handle				device;
+	d3dkmt_handle				context;
+	d3dkmt_handle				fence_object;
+	uint					num_operations;
+	struct d3dddi_updategpuvirtualaddress_operation *operations;
+	uint					reserved0;
+	uint64_t				reserved1;
+	uint64_t				fence_value;
+	union {
+		struct {
+			uint			do_not_wait:1;
+			uint			reserved:31;
+		};
+		uint				value;
+	} flags;
+};
+
+struct d3dddi_mapgpuvirtualaddress {
+	d3dkmt_handle				paging_queue;
+	d3dgpu_virtual_address			base_address;
+	d3dgpu_virtual_address			minimum_address;
+	d3dgpu_virtual_address			maximum_address;
+	d3dkmt_handle				allocation;
+	d3dgpu_size_t				offset_in_pages;
+	d3dgpu_size_t				size_in_pages;
+	struct d3dddigpuva_protection_type	protection;
+	uint64_t				driver_protection;
+	uint					reserved0;
+	uint64_t				reserved1;
+	d3dgpu_virtual_address			virtual_address;
+	uint64_t				paging_fence_value;
+};
+
+struct d3dddi_reservegpuvirtualaddress {
+	d3dkmt_handle				adapter;
+	d3dgpu_virtual_address			base_address;
+	d3dgpu_virtual_address			minimum_address;
+	d3dgpu_virtual_address			maximum_address;
+	d3dgpu_size_t				size;
+	enum d3dddigpuva_reservation_type	reservation_type;
+	uint64_t				driver_protection;
+	d3dgpu_virtual_address			virtual_address;
+	uint64_t				paging_fence_value;
+};
+
+struct d3dkmt_freegpuvirtualaddress {
+	d3dkmt_handle				adapter;
+	d3dgpu_virtual_address			base_address;
+	d3dgpu_size_t				size;
+};
+
+enum d3dkmt_memory_segment_group {
+	D3DKMT_MEMORY_SEGMENT_GROUP_LOCAL	= 0,
+	D3DKMT_MEMORY_SEGMENT_GROUP_NON_LOCAL	= 1
+};
+
+struct d3dkmt_queryvideomemoryinfo {
+	winhandle				process;
+	d3dkmt_handle				adapter;
+	enum d3dkmt_memory_segment_group	memory_segment_group;
+	uint64_t				budget;
+	uint64_t				current_usage;
+	uint64_t				current_reservation;
+	uint64_t				available_for_reservation;
+	uint					physical_adapter_index;
+};
+
+enum qai_driverversion {
+	KMT_DRIVERVERSION_WDDM_1_0		= 1000,
+	KMT_DRIVERVERSION_WDDM_1_1_PRERELEASE	= 1102,
+	KMT_DRIVERVERSION_WDDM_1_1		= 1105,
+	KMT_DRIVERVERSION_WDDM_1_2		= 1200,
+	KMT_DRIVERVERSION_WDDM_1_3		= 1300,
+	KMT_DRIVERVERSION_WDDM_2_0		= 2000,
+	KMT_DRIVERVERSION_WDDM_2_1		= 2100,
+	KMT_DRIVERVERSION_WDDM_2_2		= 2200,
+	KMT_DRIVERVERSION_WDDM_2_3		= 2300,
+	KMT_DRIVERVERSION_WDDM_2_4		= 2400,
+	KMT_DRIVERVERSION_WDDM_2_5		= 2500,
+	KMT_DRIVERVERSION_WDDM_2_6		= 2600,
+	KMT_DRIVERVERSION_WDDM_2_7		= 2700
+};
+
+struct d3dkmt_adaptertype {
+	union {
+		struct {
+			uint			render_supported:1;
+			uint			display_supported:1;
+			uint			software_device:1;
+			uint			post_device:1;
+			uint			hybrid_discrete:1;
+			uint			hybrid_integrated:1;
+			uint			indirect_display_device:1;
+			uint			paravirtualized:1;
+			uint			acg_supported:1;
+			uint			support_set_timings_from_vidpn:1;
+			uint			detachable:1;
+			uint			compute_only:1;
+			uint			prototype:1;
+			uint			reserved:19;
+		};
+		uint				value;
+	};
+};
+
+enum kmtqueryadapterinfotype {
+	KMTQAITYPE_UMDRIVERPRIVATE				= 0,
+	KMTQAITYPE_UMDRIVERNAME					= 1,
+	KMTQAITYPE_UMOPENGLINFO					= 2,
+	KMTQAITYPE_GETSEGMENTSIZE				= 3,
+	KMTQAITYPE_ADAPTERGUID					= 4,
+	KMTQAITYPE_FLIPQUEUEINFO				= 5,
+	KMTQAITYPE_ADAPTERADDRESS				= 6,
+	KMTQAITYPE_SETWORKINGSETINFO				= 7,
+	KMTQAITYPE_ADAPTERREGISTRYINFO				= 8,
+	KMTQAITYPE_CURRENTDISPLAYMODE				= 9,
+	KMTQAITYPE_MODELIST					= 10,
+	KMTQAITYPE_CHECKDRIVERUPDATESTATUS			= 11,
+	KMTQAITYPE_VIRTUALADDRESSINFO				= 12,
+	KMTQAITYPE_DRIVERVERSION				= 13,
+	KMTQAITYPE_ADAPTERTYPE					= 15,
+	KMTQAITYPE_OUTPUTDUPLCONTEXTSCOUNT			= 16,
+	KMTQAITYPE_WDDM_1_2_CAPS				= 17,
+	KMTQAITYPE_UMD_DRIVER_VERSION				= 18,
+	KMTQAITYPE_DIRECTFLIP_SUPPORT				= 19,
+	KMTQAITYPE_MULTIPLANEOVERLAY_SUPPORT			= 20,
+	KMTQAITYPE_DLIST_DRIVER_NAME				= 21,
+	KMTQAITYPE_WDDM_1_3_CAPS				= 22,
+	KMTQAITYPE_MULTIPLANEOVERLAY_HUD_SUPPORT		= 23,
+	KMTQAITYPE_WDDM_2_0_CAPS				= 24,
+	KMTQAITYPE_NODEMETADATA					= 25,
+	KMTQAITYPE_CPDRIVERNAME					= 26,
+	KMTQAITYPE_XBOX						= 27,
+	KMTQAITYPE_INDEPENDENTFLIP_SUPPORT			= 28,
+	KMTQAITYPE_MIRACASTCOMPANIONDRIVERNAME			= 29,
+	KMTQAITYPE_PHYSICALADAPTERCOUNT				= 30,
+	KMTQAITYPE_PHYSICALADAPTERDEVICEIDS			= 31,
+	KMTQAITYPE_DRIVERCAPS_EXT				= 32,
+	KMTQAITYPE_QUERY_MIRACAST_DRIVER_TYPE			= 33,
+	KMTQAITYPE_QUERY_GPUMMU_CAPS				= 34,
+	KMTQAITYPE_QUERY_MULTIPLANEOVERLAY_DECODE_SUPPORT	= 35,
+	KMTQAITYPE_QUERY_HW_PROTECTION_TEARDOWN_COUNT		= 36,
+	KMTQAITYPE_QUERY_ISBADDRIVERFORHWPROTECTIONDISABLED	= 37,
+	KMTQAITYPE_MULTIPLANEOVERLAY_SECONDARY_SUPPORT		= 38,
+	KMTQAITYPE_INDEPENDENTFLIP_SECONDARY_SUPPORT		= 39,
+	KMTQAITYPE_PANELFITTER_SUPPORT				= 40,
+	KMTQAITYPE_PHYSICALADAPTERPNPKEY			= 41,
+	KMTQAITYPE_GETSEGMENTGROUPSIZE				= 42,
+	KMTQAITYPE_MPO3DDI_SUPPORT				= 43,
+	KMTQAITYPE_HWDRM_SUPPORT				= 44,
+	KMTQAITYPE_MPOKERNELCAPS_SUPPORT			= 45,
+	KMTQAITYPE_MULTIPLANEOVERLAY_STRETCH_SUPPORT		= 46,
+	KMTQAITYPE_GET_DEVICE_VIDPN_OWNERSHIP_INFO		= 47,
+	KMTQAITYPE_QUERYREGISTRY				= 48,
+	KMTQAITYPE_KMD_DRIVER_VERSION				= 49,
+	KMTQAITYPE_BLOCKLIST_KERNEL				= 50,
+	KMTQAITYPE_BLOCKLIST_RUNTIME				= 51,
+	KMTQAITYPE_ADAPTERGUID_RENDER				= 52,
+	KMTQAITYPE_ADAPTERADDRESS_RENDER			= 53,
+	KMTQAITYPE_ADAPTERREGISTRYINFO_RENDER			= 54,
+	KMTQAITYPE_CHECKDRIVERUPDATESTATUS_RENDER		= 55,
+	KMTQAITYPE_DRIVERVERSION_RENDER				= 56,
+	KMTQAITYPE_ADAPTERTYPE_RENDER				= 57,
+	KMTQAITYPE_WDDM_1_2_CAPS_RENDER				= 58,
+	KMTQAITYPE_WDDM_1_3_CAPS_RENDER				= 59,
+	KMTQAITYPE_QUERY_ADAPTER_UNIQUE_GUID			= 60,
+	KMTQAITYPE_NODEPERFDATA					= 61,
+	KMTQAITYPE_ADAPTERPERFDATA				= 62,
+	KMTQAITYPE_ADAPTERPERFDATA_CAPS				= 63,
+	KMTQUITYPE_GPUVERSION					= 64,
+	KMTQAITYPE_DRIVER_DESCRIPTION				= 65,
+	KMTQAITYPE_DRIVER_DESCRIPTION_RENDER			= 66,
+	KMTQAITYPE_SCANOUT_CAPS					= 67,
+	KMTQAITYPE_PARAVIRTUALIZATION_RENDER			= 68,
+};
+
+struct d3dkmt_queryadapterinfo {
+	d3dkmt_handle			adapter;
+	enum kmtqueryadapterinfotype	type;
+	void				*private_data;
+	uint				private_data_size;
+};
+
+enum d3dkmt_escapetype {
+	D3DKMT_ESCAPE_DRIVERPRIVATE	= 0,
+	D3DKMT_ESCAPE_VIDMM		= 1,
+	D3DKMT_ESCAPE_VIDSCH		= 3,
+	D3DKMT_ESCAPE_DEVICE		= 4,
+	D3DKMT_ESCAPE_DRT_TEST		= 8,
+};
+
+enum d3dkmt_drt_test_command {
+	D3DKMT_DRT_TEST_COMMAND_HANDLETABLE = 39,
+};
+
+struct d3dkmt_drt_escape_head {
+	uint				signature;
+	uint				buffer_size;
+	enum d3dkmt_drt_test_command	command;
+};
+
+enum d3dkmt_ht_command {
+	D3DKMT_HT_COMMAND_ALLOC,
+	D3DKMT_HT_COMMAND_FREE,
+	D3DKMT_HT_COMMAND_ASSIGN,
+	D3DKMT_HT_COMMAND_GET,
+	D3DKMT_HT_COMMAND_DESTROY,
+};
+
+struct d3dkmt_ht_desc {
+	struct d3dkmt_drt_escape_head	head;
+	enum d3dkmt_ht_command		command;
+	uint				index;
+	d3dkmt_handle			handle;
+	uint				object_type;
+	void				*object;
+};
+
+struct d3dddi_escapeflags {
+	union {
+		struct {
+			uint		hardware_access:1;
+			uint		device_status_query:1;
+			uint		change_frame_latency:1;
+			uint		no_adapter_synchronization:1;
+			uint		reserved:1;
+			uint		virtual_machine_data:1;
+			uint		driver_known_escape:1;
+			uint		driver_common_escape:1;
+			uint		reserved2:24;
+		};
+		uint			value;
+	};
+};
+
+struct d3dkmt_escape {
+	d3dkmt_handle			adapter;
+	d3dkmt_handle			device;
+	enum d3dkmt_escapetype		type;
+	struct d3dddi_escapeflags	flags;
+	void				*priv_drv_data;
+	uint				priv_drv_data_size;
+	d3dkmt_handle			context;
+};
+
+enum dxgk_render_pipeline_stage {
+	DXGK_RENDER_PIPELINE_STAGE_UNKNOWN		= 0,
+	DXGK_RENDER_PIPELINE_STAGE_INPUT_ASSEMBLER	= 1,
+	DXGK_RENDER_PIPELINE_STAGE_VERTEX_SHADER	= 2,
+	DXGK_RENDER_PIPELINE_STAGE_GEOMETRY_SHADER	= 3,
+	DXGK_RENDER_PIPELINE_STAGE_STREAM_OUTPUT	= 4,
+	DXGK_RENDER_PIPELINE_STAGE_RASTERIZER		= 5,
+	DXGK_RENDER_PIPELINE_STAGE_PIXEL_SHADER		= 6,
+	DXGK_RENDER_PIPELINE_STAGE_OUTPUT_MERGER	= 7,
+};
+
+enum dxgk_page_fault_flags {
+	DXGK_PAGE_FAULT_WRITE			= 0x1,
+	DXGK_PAGE_FAULT_FENCE_INVALID		= 0x2,
+	DXGK_PAGE_FAULT_ADAPTER_RESET_REQUIRED	= 0x4,
+	DXGK_PAGE_FAULT_ENGINE_RESET_REQUIRED	= 0x8,
+	DXGK_PAGE_FAULT_FATAL_HARDWARE_ERROR	= 0x10,
+	DXGK_PAGE_FAULT_IOMMU			= 0x20,
+	DXGK_PAGE_FAULT_HW_CONTEXT_VALID	= 0x40,
+	DXGK_PAGE_FAULT_PROCESS_HANDLE_VALID	= 0x80,
+};
+
+enum dxgk_general_error_code {
+	DXGK_GENERAL_ERROR_PAGE_FAULT		= 0,
+	DXGK_GENERAL_ERROR_INVALID_INSTRUCTION	= 1,
+};
+
+struct dxgk_fault_error_code {
+	union {
+		struct {
+			uint	is_device_specific_code:1;
+			enum dxgk_general_error_code general_error_code:31;
+		};
+		struct {
+			uint	is_device_specific_code_reserved_bit:1;
+			uint	device_specific_code:31;
+		};
+	};
+};
+
+enum d3dkmt_deviceexecution_state {
+	D3DKMT_DEVICEEXECUTION_ACTIVE			= 1,
+	D3DKMT_DEVICEEXECUTION_RESET			= 2,
+	D3DKMT_DEVICEEXECUTION_HUNG			= 3,
+	D3DKMT_DEVICEEXECUTION_STOPPED			= 4,
+	D3DKMT_DEVICEEXECUTION_ERROR_OUTOFMEMORY	= 5,
+	D3DKMT_DEVICEEXECUTION_ERROR_DMAFAULT		= 6,
+	D3DKMT_DEVICEEXECUTION_ERROR_DMAPAGEFAULT	= 7,
+};
+
+struct d3dkmt_devicereset_state {
+	union {
+		struct {
+			uint	desktop_switched:1;
+			uint	reserved:31;
+		};
+		uint		value;
+	};
+};
+
+struct d3dkmt_present_stats {
+	uint		present_count;
+	uint		present_refresh_count;
+	uint		sync_refresh_count;
+	uint64_t	sync_qpc_time;
+	uint64_t	sync_gpu_time;
+};
+
+struct d3dkmt_devicepresent_state {
+	uint				vidpn_source_id;
+	struct d3dkmt_present_stats	present_stats;
+};
+
+struct d3dkmt_present_stats_dwm {
+	uint		present_count;
+	uint		present_refresh_count;
+	uint64_t	present_qpc_time;
+	uint		sync_refresh_count;
+	uint64_t	sync_qpc_time;
+	uint		custom_present_duration;
+};
+
+struct d3dkmt_devicepagefault_state {
+	uint64_t			faulted_primitive_api_sequence_number;
+	enum dxgk_render_pipeline_stage	faulted_pipeline_stage;
+	uint				faulted_bind_table_entry;
+	enum dxgk_page_fault_flags	page_fault_flags;
+	struct dxgk_fault_error_code	fault_error_code;
+	d3dgpu_virtual_address		faulted_virtual_address;
+};
+
+struct d3dkmt_devicepresent_state_dwm {
+	uint				vidpn_source_id;
+	struct d3dkmt_present_stats_dwm	present_stats;
+};
+
+struct d3dkmt_devicepresent_queue_state {
+	uint	vidpn_source_id;
+	bool	queued_present_limit_reached;
+};
+
+enum d3dkmt_devicestate_type {
+	D3DKMT_DEVICESTATE_EXECUTION		= 1,
+	D3DKMT_DEVICESTATE_PRESENT		= 2,
+	D3DKMT_DEVICESTATE_RESET		= 3,
+	D3DKMT_DEVICESTATE_PRESENT_DWM		= 4,
+	D3DKMT_DEVICESTATE_PAGE_FAULT		= 5,
+	D3DKMT_DEVICESTATE_PRESENT_QUEUE	= 6,
+};
+
+struct d3dkmt_getdevicestate {
+	d3dkmt_handle					device;
+	enum d3dkmt_devicestate_type			state_type;
+	union {
+		enum d3dkmt_deviceexecution_state	execution_state;
+		struct d3dkmt_devicepresent_state	present_state;
+		struct d3dkmt_devicereset_state		reset_state;
+		struct d3dkmt_devicepresent_state_dwm	present_state_dwm;
+		struct d3dkmt_devicepagefault_state	page_fault_state;
+		struct d3dkmt_devicepresent_queue_state	present_queue_state;
+	};
+};
+
+enum d3dkmdt_gdisurfacetype {
+	D3DKMDT_GDISURFACE_INVALID				= 0,
+	D3DKMDT_GDISURFACE_TEXTURE				= 1,
+	D3DKMDT_GDISURFACE_STAGING_CPUVISIBLE			= 2,
+	D3DKMDT_GDISURFACE_STAGING				= 3,
+	D3DKMDT_GDISURFACE_LOOKUPTABLE				= 4,
+	D3DKMDT_GDISURFACE_EXISTINGSYSMEM			= 5,
+	D3DKMDT_GDISURFACE_TEXTURE_CPUVISIBLE			= 6,
+	D3DKMDT_GDISURFACE_TEXTURE_CROSSADAPTER			= 7,
+	D3DKMDT_GDISURFACE_TEXTURE_CPUVISIBLE_CROSSADAPTER	= 8,
+};
+
+struct d3dddi_rational {
+	uint	numerator;
+	uint	denominator;
+};
+
+enum d3dddiformat {
+	D3DDDIFMT_UNKNOWN = 0,
+};
+
+struct d3dkmdt_gdisurfacedata {
+	uint				width;
+	uint				height;
+	uint				format;
+	enum d3dkmdt_gdisurfacetype	type;
+	uint				flags;
+	uint				pitch;
+};
+
+struct d3dkmdt_stagingsurfacedata {
+	uint	width;
+	uint	height;
+	uint	pitch;
+};
+
+struct d3dkmdt_sharedprimarysurfacedata {
+	uint			width;
+	uint			height;
+	enum d3dddiformat	format;
+	struct d3dddi_rational	refresh_rate;
+	uint			vidpn_source_id;
+};
+
+struct d3dkmdt_shadowsurfacedata {
+	uint			width;
+	uint			height;
+	enum d3dddiformat	format;
+	uint			pitch;
+};
+
+enum d3dkmdt_standardallocationtype {
+	D3DKMDT_STANDARDALLOCATION_SHAREDPRIMARYSURFACE	= 1,
+	D3DKMDT_STANDARDALLOCATION_SHADOWSURFACE	= 2,
+	D3DKMDT_STANDARDALLOCATION_STAGINGSURFACE	= 3,
+	D3DKMDT_STANDARDALLOCATION_GDISURFACE		= 4,
+};
+
+struct d3dddi_synchronizationobject_flags {
+	union {
+		struct {
+			uint	shared:1;
+			uint	nt_security_sharing:1;
+			uint	cross_adapter:1;
+			uint	top_of_pipeline:1;
+			uint	no_signal:1;
+			uint	no_wait:1;
+			uint	no_signal_max_value_on_tdr:1;
+			uint	no_gpu_access:1;
+			uint	reserved:23;
+		};
+		uint		value;
+	};
+};
+
+enum d3dddi_synchronizationobject_type {
+	D3DDDI_SYNCHRONIZATION_MUTEX		= 1,
+	D3DDDI_SEMAPHORE			= 2,
+	D3DDDI_FENCE				= 3,
+	D3DDDI_CPU_NOTIFICATION			= 4,
+	D3DDDI_MONITORED_FENCE			= 5,
+	D3DDDI_PERIODIC_MONITORED_FENCE		= 6,
+	D3DDDI_SYNCHRONIZATION_TYPE_LIMIT
+};
+
+struct d3dddi_synchronizationobjectinfo2 {
+	enum d3dddi_synchronizationobject_type	type;
+	struct d3dddi_synchronizationobject_flags flags;
+	union {
+		struct {
+			winbool			initial_state;
+		} synchronization_mutex;
+
+		struct {
+			uint			max_count;
+			uint			initial_count;
+		} semaphore;
+
+		struct {
+			uint64_t		fence_value;
+		} fence;
+
+		struct {
+			winhandle		event;
+		} cpu_notification;
+
+		struct {
+			uint64_t		initial_fence_value;
+			void			*fence_cpu_virtual_address;
+			d3dgpu_virtual_address	fence_gpu_virtual_address;
+			uint			engine_affinity;
+		} monitored_fence;
+
+		struct periodic_monitored_fence_t {
+			d3dkmt_handle		adapter;
+			uint			vidpn_target_id;
+			uint64_t		time;
+			void			*fence_cpu_virtual_address;
+			d3dgpu_virtual_address	fence_gpu_virtual_address;
+			uint			engine_affinity;
+		} periodic_monitored_fence;
+
+		struct {
+			uint64_t		reserved[8];
+		} reserved;
+	};
+	d3dkmt_handle				shared_handle;
+};
+
+struct d3dkmt_createsynchronizationobject2 {
+	d3dkmt_handle				device;
+	struct d3dddi_synchronizationobjectinfo2 info;
+	d3dkmt_handle				sync_object;
+};
+
+struct d3dkmt_waitforsynchronizationobject2 {
+	d3dkmt_handle				context;
+	uint					object_count;
+	d3dkmt_handle				object_array[D3DDDI_MAX_OBJECT_WAITED_ON];
+	union {
+		struct {
+			uint64_t		fence_value;
+		} fence;
+		uint64_t			reserved[8];
+	};
+};
+
+struct d3dddicb_signalflags {
+	union {
+		struct {
+			uint			signal_at_submission:1;
+			uint			enqueue_cpu_event:1;
+			uint			allow_fence_rewind:1;
+			uint			reserved:28;
+			uint			DXGK_SIGNAL_FLAG_INTERNAL0:1;
+		};
+		uint				value;
+	};
+};
+
+struct d3dkmt_signalsynchronizationobject2 {
+	d3dkmt_handle				context;
+	uint					object_count;
+	d3dkmt_handle				object_array[D3DDDI_MAX_OBJECT_SIGNALED];
+	struct d3dddicb_signalflags		flags;
+	uint					context_count;
+	d3dkmt_handle				contexts[D3DDDI_MAX_BROADCAST_CONTEXT];
+	union {
+		struct {
+			uint64_t		fence_value;
+		} fence;
+		winhandle			cpu_event_handle;
+		uint64_t			reserved[8];
+	};
+};
+
+struct d3dddi_waitforsynchronizationobjectfromcpu_flags {
+	union {
+		struct {
+			uint			wait_any:1;
+			uint			reserved:31;
+		};
+		uint				value;
+	};
+};
+
+struct d3dkmt_waitforsynchronizationobjectfromcpu {
+	d3dkmt_handle				device;
+	uint					object_count;
+	d3dkmt_handle				*objects;
+	uint64_t				*fence_values;
+	winhandle				async_event;
+	struct d3dddi_waitforsynchronizationobjectfromcpu_flags flags;
+};
+
+struct d3dkmt_signalsynchronizationobjectfromcpu {
+	d3dkmt_handle				device;
+	uint					object_count;
+	d3dkmt_handle				*objects;
+	uint64_t				*fence_values;
+	struct d3dddicb_signalflags		flags;
+};
+
+struct d3dkmt_waitforsynchronizationobjectfromgpu {
+	d3dkmt_handle				context;
+	uint					object_count;
+	d3dkmt_handle				*objects;
+	union {
+		uint64_t			*monitored_fence_values;
+		uint64_t			fence_value;
+		uint64_t			reserved[8];
+	};
+};
+
+struct d3dkmt_signalsynchronizationobjectfromgpu {
+	d3dkmt_handle				context;
+	uint					object_count;
+	d3dkmt_handle				*objects;
+	union {
+		uint64_t			*monitored_fence_values;
+		uint64_t			reserved[8];
+	};
+};
+
+struct d3dkmt_signalsynchronizationobjectfromgpu2 {
+	uint					object_count;
+	d3dkmt_handle				*objects;
+	struct d3dddicb_signalflags		flags;
+	uint					context_count;
+	d3dkmt_handle				*contexts;
+	union {
+		uint64_t			fence_value;
+		winhandle			cpu_event_handle;
+		uint64_t			*monitored_fence_values;
+		uint64_t			reserved[8];
+	};
+};
+
+struct d3dkmt_destroysynchronizationobject {
+	d3dkmt_handle				sync_object;
+};
+
+struct d3dkmt_opensynchronizationobject {
+	d3dkmt_handle				shared_handle;
+	d3dkmt_handle				sync_object;
+	uint64_t				reserved[8];
+};
+
+struct d3dkmt_submitcommandflags {
+	uint					null_rendering:1;
+	uint					present_redirected:1;
+	uint					reserved:30;
+};
+
+struct d3dkmt_submitcommand {
+	d3dgpu_virtual_address			command_buffer;
+	uint					command_length;
+	struct d3dkmt_submitcommandflags	flags;
+	uint64_t				present_history_token;
+	uint					broadcast_context_count;
+	d3dkmt_handle				broadcast_context[D3DDDI_MAX_BROADCAST_CONTEXT];
+	void					*priv_drv_data;
+	uint					priv_drv_data_size;
+	uint					num_primaries;
+	d3dkmt_handle				written_primaries[D3DDDI_MAX_WRITTEN_PRIMARIES];
+	uint					num_history_buffers;
+	d3dkmt_handle				*history_buffer_array;
+};
+
+struct d3dkmt_submitcommandtohwqueue {
+	d3dkmt_handle				hwqueue;
+	uint64_t				hwqueue_progress_fence_id;
+	d3dgpu_virtual_address			command_buffer;
+	uint					command_length;
+	uint					priv_drv_data_size;
+	void					*priv_drv_data;
+	uint					num_primaries;
+	d3dkmt_handle				*written_primaries;
+};
+
+struct d3dkmt_setcontextschedulingpriority {
+	d3dkmt_handle				context;
+	int					priority;
+};
+
+struct d3dkmt_setcontextinprocessschedulingpriority {
+	d3dkmt_handle				context;
+	int					priority;
+};
+
+struct d3dkmt_getcontextschedulingpriority {
+	d3dkmt_handle				context;
+	int					priority;
+};
+
+struct d3dkmt_getcontextinprocessschedulingpriority {
+	d3dkmt_handle				context;
+	int					priority;
+};
+
+struct d3dkmt_setallocationpriority {
+	d3dkmt_handle				device;
+	d3dkmt_handle				resource;
+	const d3dkmt_handle			*allocation_list;
+	uint					allocation_count;
+	const uint				*priorities;
+};
+
+struct d3dkmt_getallocationpriority {
+	d3dkmt_handle				device;
+	d3dkmt_handle				resource;
+	const d3dkmt_handle			*allocation_list;
+	uint					allocation_count;
+	uint					*priorities;
+};
+
+enum d3dkmt_allocationresidencystatus {
+	D3DKMT_ALLOCATIONRESIDENCYSTATUS_RESIDENTINGPUMEMORY	= 1,
+	D3DKMT_ALLOCATIONRESIDENCYSTATUS_RESIDENTINSHAREDMEMORY	= 2,
+	D3DKMT_ALLOCATIONRESIDENCYSTATUS_NOTRESIDENT		= 3,
+};
+
+struct d3dkmt_queryallocationresidency {
+	d3dkmt_handle				device;
+	d3dkmt_handle				resource;
+	d3dkmt_handle				*allocations;
+	uint					allocation_count;
+	enum d3dkmt_allocationresidencystatus	*residency_status;
+};
+
+struct d3dddicb_lock2flags {
+	union {
+		struct {
+			uint			reserved:32;
+		};
+		uint				value;
+	};
+};
+
+struct d3dkmt_lock2 {
+	d3dkmt_handle				device;
+	d3dkmt_handle				allocation;
+	struct d3dddicb_lock2flags		flags;
+	void					*data;
+};
+
+struct d3dkmt_unlock2 {
+	d3dkmt_handle				device;
+	d3dkmt_handle				allocation;
+};
+
+enum d3dkmt_device_error_reason {
+	D3DKMT_DEVICE_ERROR_REASON_GENERIC	= 0x80000000,
+	D3DKMT_DEVICE_ERROR_REASON_DRIVER_ERROR	= 0x80000006,
+};
+
+struct d3dkmt_markdeviceaserror {
+	d3dkmt_handle				device;
+	enum d3dkmt_device_error_reason		reason;
+};
+
+struct d3dddi_updateallocproperty_flags {
+	union {
+		struct {
+			uint			accessed_physically:1;
+			uint			reserved:31;
+		};
+		uint				value;
+	};
+};
+
+struct d3dddi_segmentpreference {
+	union {
+		struct {
+			uint			segment_id0:5;
+			uint			direction0:1;
+			uint			segment_id1:5;
+			uint			direction1:1;
+			uint			segment_id2:5;
+			uint			direction2:1;
+			uint			segment_id3:5;
+			uint			direction3:1;
+			uint			segment_id4:5;
+			uint			direction4:1;
+			uint			reserved:2;
+		};
+		uint				value;
+	};
+};
+
+struct d3dddi_updateallocproperty {
+	d3dkmt_handle				paging_queue;
+	d3dkmt_handle				allocation;
+	uint					supported_segment_set;
+	struct d3dddi_segmentpreference		preferred_segment;
+	struct d3dddi_updateallocproperty_flags	flags;
+	uint64_t				paging_fence_value;
+	union {
+		struct {
+			uint			set_accessed_physically:1;
+			uint			set_supported_segment_set:1;
+			uint			set_preferred_segment:1;
+			uint			reserved:29;
+		};
+		uint				property_mask_value;
+	};
+};
+
+enum d3dkmt_offer_priority {
+	D3DKMT_OFFER_PRIORITY_LOW	= 1,
+	D3DKMT_OFFER_PRIORITY_NORMAL	= 2,
+	D3DKMT_OFFER_PRIORITY_HIGH	= 3,
+	D3DKMT_OFFER_PRIORITY_AUTO	= 4,
+};
+
+struct d3dkmt_offer_flags {
+	union {
+		struct {
+			uint	offer_immediately:1;
+			uint	allow_decommit:1;
+			uint	reserved:30;
+		};
+		uint		value;
+	};
+};
+
+struct d3dkmt_offerallocations {
+	d3dkmt_handle		device;
+	d3dkmt_handle		*resources;
+	const d3dkmt_handle	*allocations;
+	uint			allocation_count;
+	enum d3dkmt_offer_priority priority;
+	struct d3dkmt_offer_flags flags;
+};
+
+enum d3dddi_reclaim_result {
+	D3DDDI_RECLAIM_RESULT_OK		= 0,
+	D3DDDI_RECLAIM_RESULT_DISCARDED		= 1,
+	D3DDDI_RECLAIM_RESULT_NOT_COMMITTED	= 2,
+};
+
+struct d3dkmt_reclaimallocations2 {
+	d3dkmt_handle		paging_queue;
+	uint			allocation_count;
+	d3dkmt_handle		*resources;
+	d3dkmt_handle		*allocations;
+	union {
+		winbool		*discarded;
+		enum d3dddi_reclaim_result *results;
+	};
+	uint64_t		paging_fence_value;
+};
+
+struct d3dkmt_changevideomemoryreservation {
+	winhandle		process;
+	d3dkmt_handle		adapter;
+	enum d3dkmt_memory_segment_group memory_segment_group;
+	uint64_t		reservation;
+	uint			physical_adapter_index;
+};
+
+struct d3dkmt_createhwcontext {
+	d3dkmt_handle		device;
+	uint			node_ordinal;
+	uint			engine_affinity;
+	struct d3dddi_createhwcontextflags flags;
+	uint			priv_drv_data_size;
+	void			*priv_drv_data;
+	d3dkmt_handle		context;
+};
+
+struct d3dkmt_destroyhwcontext {
+	d3dkmt_handle		context;
+};
+
+struct d3dkmt_createhwqueue {
+	d3dkmt_handle		context;
+	struct d3dddi_createhwqueueflags flags;
+	uint			priv_drv_data_size;
+	void			*priv_drv_data;
+	d3dkmt_handle		queue;
+	d3dkmt_handle		queue_progress_fence;
+	void			*queue_progress_fence_cpu_va;
+	d3dgpu_virtual_address	queue_progress_fence_gpu_va;
+};
+
+struct d3dkmt_destroyhwqueue {
+	d3dkmt_handle		queue;
+};
+
+struct d3dkmt_submitwaitforsyncobjectstohwqueue {
+	d3dkmt_handle		hwqueue;
+	uint			object_count;
+	d3dkmt_handle		*objects;
+	uint64_t		*fence_values;
+};
+
+struct d3dkmt_submitsignalsyncobjectstohwqueue {
+	struct d3dddicb_signalflags flags;
+	uint			hwqueue_count;
+	d3dkmt_handle		*hwqueues;
+	uint			object_count;
+	d3dkmt_handle		*objects;
+	uint64_t		*fence_values;
+};
+
+struct dxgk_gpuclockdata_flags {
+	union {
+		struct {
+			uint	context_management_processor:1;
+			uint	reserved:31;
+		};
+		uint		value;
+	};
+};
+
+struct dxgk_gpuclockdata {
+	uint64_t		gpu_frequency;
+	uint64_t		gpu_clock_counter;
+	uint64_t		cpu_clock_counter;
+	struct dxgk_gpuclockdata_flags flags;
+} __packed;
+
+struct d3dkmt_queryclockcalibration {
+	d3dkmt_handle		adapter;
+	uint			node_ordinal;
+	uint			physical_adapter_index;
+	struct dxgk_gpuclockdata clock_data;
+};
+
+struct d3dkmt_flushheaptransitions {
+	d3dkmt_handle		adapter;
+};
+
+struct d3dkmt_getsharedresourceadapterluid {
+	d3dkmt_handle		global_share;
+	winhandle		handle;
+	struct winluid		adapter_luid;
+};
+
+struct d3dkmt_invalidatecache {
+	d3dkmt_handle		device;
+	d3dkmt_handle		allocation;
+	uint64_t		offset;
+	uint64_t		length;
+};
+
+struct d3dddi_openallocationinfo2 {
+	d3dkmt_handle		allocation;
+	void			*priv_drv_data;
+	uint			priv_drv_data_size;
+	d3dgpu_virtual_address	gpu_va;
+	uint64_t		reserved[6];
+};
+
+struct d3dkmt_opensyncobjectfromnthandle {
+	winhandle		nt_handle;
+	d3dkmt_handle		sync_object;
+};
+
+struct d3dkmt_opensyncobjectfromnthandle2 {
+	winhandle		nt_handle;
+	d3dkmt_handle		device;
+	struct d3dddi_synchronizationobject_flags flags;
+	d3dkmt_handle		sync_object;
+	union {
+		struct {
+			void	*fence_value_cpu_va;
+			d3dgpu_virtual_address fence_value_gpu_va;
+			uint	engine_affinity;
+		} monitored_fence;
+		uint64_t	reserved[8];
+	};
+};
+
+struct d3dkmt_openresource {
+	d3dkmt_handle		device;
+	d3dkmt_handle		global_share;
+	uint			allocation_count;
+	struct d3dddi_openallocationinfo2 *open_alloc_info;
+	void			*private_runtime_data;
+	int			private_runtime_data_size;
+	void			*resource_priv_drv_data;
+	uint			resource_priv_drv_data_size;
+	void			*total_priv_drv_data;
+	uint			total_priv_drv_data_size;
+	d3dkmt_handle		resource;
+};
+
+struct d3dkmt_openresourcefromnthandle {
+	d3dkmt_handle		device;
+	winhandle		nt_handle;
+	uint			allocation_count;
+	struct d3dddi_openallocationinfo2 *open_alloc_info;
+	int			private_runtime_data_size;
+	void			*private_runtime_data;
+	uint			resource_priv_drv_data_size;
+	void			*resource_priv_drv_data;
+	uint			total_priv_drv_data_size;
+	void			*total_priv_drv_data;
+	d3dkmt_handle		resource;
+	d3dkmt_handle		keyed_mutex;
+	void			*keyed_mutex_private_data;
+	uint			keyed_mutex_private_data_size;
+	d3dkmt_handle		sync_object;
+};
+
+struct d3dkmt_queryresourceinfofromnthandle {
+	d3dkmt_handle		device;
+	winhandle		nt_handle;
+	void			*private_runtime_data;
+	uint			private_runtime_data_size;
+	uint			total_priv_drv_data_size;
+	uint			resource_priv_drv_data_size;
+	uint			allocation_count;
+};
+
+struct d3dkmt_queryresourceinfo {
+	d3dkmt_handle		device;
+	d3dkmt_handle		global_share;
+	void			*private_runtime_data;
+	uint			private_runtime_data_size;
+	uint			total_priv_drv_data_size;
+	uint			resource_priv_drv_data_size;
+	uint			allocation_count;
+};
+
+struct d3dkmt_shareobjects {
+	uint			object_count;
+	const d3dkmt_handle	*objects;	/* per-process DXG handle */
+	void			*object_attr;	/* security attributes */
+	uint			desired_access;
+	winhandle		*shared_handle;	/* output file descriptor */
+};
+
+union d3dkmt_enumadapters_filter {
+	struct {
+		uint64_t	include_compute_only:1;
+		uint64_t	include_display_only:1;
+		uint64_t	reserved:62;
+	};
+	uint64_t		value;
+};
+
+struct d3dkmt_enumadapters3 {
+	union d3dkmt_enumadapters_filter	filter;
+	uint					adapter_count;
+	struct d3dkmt_adapterinfo		*adapters;
+};
+
+/*
+ * Dxgkrnl Graphics Port Driver ioctl definitions
+ *
+ */
+
+#define LX_IOCTL_DIR_WRITE 0x1
+#define LX_IOCTL_DIR_READ  0x2
+
+#define LX_IOCTL_DIR(_ioctl)	(((_ioctl) >> 30) & 0x3)
+#define LX_IOCTL_SIZE(_ioctl)	(((_ioctl) >> 16) & 0x3FFF)
+#define LX_IOCTL_TYPE(_ioctl)	(((_ioctl) >> 8) & 0xFF)
+#define LX_IOCTL_CODE(_ioctl)	(((_ioctl) >> 0) & 0xFF)
+
+#define LX_IOCTL(_dir, _size, _type, _code) (	\
+	(((uint)(_dir) & 0x3) << 30) |		\
+	(((uint)(_size) & 0x3FFF) << 16) |	\
+	(((uint)(_type) & 0xFF) << 8) |		\
+	(((uint)(_code) & 0xFF) << 0))
+
+#define LX_IO(_type, _code) LX_IOCTL(0, 0, (_type), (_code))
+#define LX_IOR(_type, _code, _size)	\
+	LX_IOCTL(LX_IOCTL_DIR_READ, (_size), (_type), (_code))
+#define LX_IOW(_type, _code, _size)	\
+	LX_IOCTL(LX_IOCTL_DIR_WRITE, (_size), (_type), (_code))
+#define LX_IOWR(_type, _code, _size)	\
+	LX_IOCTL(LX_IOCTL_DIR_WRITE |	\
+	LX_IOCTL_DIR_READ, (_size), (_type), (_code))
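+
+/*
+ * This encoding (code in bits 0..7, type in bits 8..15, size in bits
+ * 16..29, direction in bits 30..31) is the same as the kernel's _IOC()
+ * layout, so LX_IOWR(0x47, ...) produces the same values as
+ * _IOWR(0x47, ...).
+ */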
+
+#define LX_DXOPENADAPTERFROMLUID	\
+	LX_IOWR(0x47, 0x01, sizeof(struct d3dkmt_openadapterfromluid))
+#define LX_DXCREATEDEVICE		\
+	LX_IOWR(0x47, 0x02, sizeof(struct d3dkmt_createdevice))
+#define LX_DXCREATECONTEXT		\
+	LX_IOWR(0x47, 0x03, sizeof(struct d3dkmt_createcontext))
+#define LX_DXCREATECONTEXTVIRTUAL	\
+	LX_IOWR(0x47, 0x04, sizeof(struct d3dkmt_createcontextvirtual))
+#define LX_DXDESTROYCONTEXT		\
+	LX_IOWR(0x47, 0x05, sizeof(struct d3dkmt_destroycontext))
+#define LX_DXCREATEALLOCATION		\
+	LX_IOWR(0x47, 0x06, sizeof(struct d3dkmt_createallocation))
+#define LX_DXCREATEPAGINGQUEUE		\
+	LX_IOWR(0x47, 0x07, sizeof(struct d3dkmt_createpagingqueue))
+#define LX_DXRESERVEGPUVIRTUALADDRESS	\
+	LX_IOWR(0x47, 0x08, sizeof(struct d3dddi_reservegpuvirtualaddress))
+#define LX_DXQUERYADAPTERINFO		\
+	LX_IOWR(0x47, 0x09, sizeof(struct d3dkmt_queryadapterinfo))
+#define LX_DXQUERYVIDEOMEMORYINFO	\
+	LX_IOWR(0x47, 0x0a, sizeof(struct d3dkmt_queryvideomemoryinfo))
+#define LX_DXMAKERESIDENT		\
+	LX_IOWR(0x47, 0x0b, sizeof(struct d3dddi_makeresident))
+#define LX_DXMAPGPUVIRTUALADDRESS	\
+	LX_IOWR(0x47, 0x0c, sizeof(struct d3dddi_mapgpuvirtualaddress))
+#define LX_DXESCAPE			\
+	LX_IOWR(0x47, 0x0d, sizeof(struct d3dkmt_escape))
+#define LX_DXGETDEVICESTATE		\
+	LX_IOWR(0x47, 0x0e, sizeof(struct d3dkmt_getdevicestate))
+#define LX_DXSUBMITCOMMAND		\
+	LX_IOWR(0x47, 0x0f, sizeof(struct d3dkmt_submitcommand))
+#define LX_DXCREATESYNCHRONIZATIONOBJECT \
+	LX_IOWR(0x47, 0x10, sizeof(struct d3dkmt_createsynchronizationobject2))
+#define LX_DXSIGNALSYNCHRONIZATIONOBJECT \
+	LX_IOWR(0x47, 0x11, sizeof(struct d3dkmt_signalsynchronizationobject2))
+#define LX_DXWAITFORSYNCHRONIZATIONOBJECT \
+	LX_IOWR(0x47, 0x12, sizeof(struct d3dkmt_waitforsynchronizationobject2))
+#define LX_DXDESTROYALLOCATION2		\
+	LX_IOWR(0x47, 0x13, sizeof(struct d3dkmt_destroyallocation2))
+#define LX_DXENUMADAPTERS2		\
+	LX_IOWR(0x47, 0x14, sizeof(struct d3dkmt_enumadapters2))
+#define LX_DXCLOSEADAPTER		\
+	LX_IOWR(0x47, 0x15, sizeof(struct d3dkmt_closeadapter))
+#define LX_DXCHANGEVIDEOMEMORYRESERVATION \
+	LX_IOWR(0x47, 0x16, sizeof(struct d3dkmt_changevideomemoryreservation))
+#define LX_DXCREATEHWCONTEXT		\
+	LX_IOWR(0x47, 0x17, sizeof(struct d3dkmt_createhwcontext))
+#define LX_DXCREATEHWQUEUE		\
+	LX_IOWR(0x47, 0x18, sizeof(struct d3dkmt_createhwqueue))
+#define LX_DXDESTROYDEVICE		\
+	LX_IOWR(0x47, 0x19, sizeof(struct d3dkmt_destroydevice))
+#define LX_DXDESTROYHWCONTEXT		\
+	LX_IOWR(0x47, 0x1a, sizeof(struct d3dkmt_destroyhwcontext))
+#define LX_DXDESTROYHWQUEUE		\
+	LX_IOWR(0x47, 0x1b, sizeof(struct d3dkmt_destroyhwqueue))
+#define LX_DXDESTROYPAGINGQUEUE		\
+	LX_IOWR(0x47, 0x1c, sizeof(struct d3dddi_destroypagingqueue))
+#define LX_DXDESTROYSYNCHRONIZATIONOBJECT \
+	LX_IOWR(0x47, 0x1d, sizeof(struct d3dkmt_destroysynchronizationobject))
+#define LX_DXEVICT			\
+	LX_IOWR(0x47, 0x1e, sizeof(struct d3dkmt_evict))
+#define LX_DXFLUSHHEAPTRANSITIONS	\
+	LX_IOWR(0x47, 0x1f, sizeof(struct d3dkmt_flushheaptransitions))
+#define LX_DXFREEGPUVIRTUALADDRESS	\
+	LX_IOWR(0x47, 0x20, sizeof(struct d3dkmt_freegpuvirtualaddress))
+#define LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY \
+	LX_IOWR(0x47, 0x21,		\
+	sizeof(struct d3dkmt_getcontextinprocessschedulingpriority))
+#define LX_DXGETCONTEXTSCHEDULINGPRIORITY \
+	LX_IOWR(0x47, 0x22, sizeof(struct d3dkmt_getcontextschedulingpriority))
+#define LX_DXGETSHAREDRESOURCEADAPTERLUID \
+	LX_IOWR(0x47, 0x23, sizeof(struct d3dkmt_getsharedresourceadapterluid))
+#define LX_DXINVALIDATECACHE		\
+	LX_IOWR(0x47, 0x24, sizeof(struct d3dkmt_invalidatecache))
+#define LX_DXLOCK2			\
+	LX_IOWR(0x47, 0x25, sizeof(struct d3dkmt_lock2))
+#define LX_DXMARKDEVICEASERROR		\
+	LX_IOWR(0x47, 0x26, sizeof(struct d3dkmt_markdeviceaserror))
+#define LX_DXOFFERALLOCATIONS		\
+	LX_IOWR(0x47, 0x27, sizeof(struct d3dkmt_offerallocations))
+#define LX_DXOPENRESOURCE		\
+	LX_IOWR(0x47, 0x28, sizeof(struct d3dkmt_openresource))
+#define LX_DXOPENSYNCHRONIZATIONOBJECT	\
+	LX_IOWR(0x47, 0x29, sizeof(struct d3dkmt_opensynchronizationobject))
+#define LX_DXQUERYALLOCATIONRESIDENCY	\
+	LX_IOWR(0x47, 0x2a, sizeof(struct d3dkmt_queryallocationresidency))
+#define LX_DXQUERYRESOURCEINFO		\
+	LX_IOWR(0x47, 0x2b, sizeof(struct d3dkmt_queryresourceinfo))
+#define LX_DXRECLAIMALLOCATIONS2	\
+	LX_IOWR(0x47, 0x2c, sizeof(struct d3dkmt_reclaimallocations2))
+#define LX_DXRENDER			\
+	LX_IOWR(0x47, 0x2d, sizeof(struct d3dkmt_render))
+#define LX_DXSETALLOCATIONPRIORITY	\
+	LX_IOWR(0x47, 0x2e, sizeof(struct d3dkmt_setallocationpriority))
+#define LX_DXSETCONTEXTINPROCESSSCHEDULINGPRIORITY \
+	LX_IOWR(0x47, 0x2f,		\
+	sizeof(struct d3dkmt_setcontextinprocessschedulingpriority))
+#define LX_DXSETCONTEXTSCHEDULINGPRIORITY \
+	LX_IOWR(0x47, 0x30, sizeof(struct d3dkmt_setcontextschedulingpriority))
+#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU \
+	LX_IOWR(0x47, 0x31,		\
+	sizeof(struct d3dkmt_signalsynchronizationobjectfromcpu))
+#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU \
+	LX_IOWR(0x47, 0x32,		\
+	sizeof(struct d3dkmt_signalsynchronizationobjectfromgpu))
+#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2 \
+	LX_IOWR(0x47, 0x33,		\
+	sizeof(struct d3dkmt_signalsynchronizationobjectfromgpu2))
+#define LX_DXSUBMITCOMMANDTOHWQUEUE	\
+	LX_IOWR(0x47, 0x34, sizeof(struct d3dkmt_submitcommandtohwqueue))
+#define LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE \
+	LX_IOWR(0x47, 0x35,		\
+	sizeof(struct d3dkmt_submitsignalsyncobjectstohwqueue))
+#define LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE \
+	LX_IOWR(0x47, 0x36,		\
+	sizeof(struct d3dkmt_submitwaitforsyncobjectstohwqueue))
+#define LX_DXUNLOCK2			\
+	LX_IOWR(0x47, 0x37, sizeof(struct d3dkmt_unlock2))
+#define LX_DXUPDATEALLOCPROPERTY	\
+	LX_IOWR(0x47, 0x38, sizeof(struct d3dddi_updateallocproperty))
+#define LX_DXUPDATEGPUVIRTUALADDRESS	\
+	LX_IOWR(0x47, 0x39, sizeof(struct d3dkmt_updategpuvirtualaddress))
+#define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \
+	LX_IOWR(0x47, 0x3a,		\
+	sizeof(struct d3dkmt_waitforsynchronizationobjectfromcpu))
+#define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \
+	LX_IOWR(0x47, 0x3b,		\
+	sizeof(struct d3dkmt_waitforsynchronizationobjectfromgpu))
+#define LX_DXGETALLOCATIONPRIORITY	\
+	LX_IOWR(0x47, 0x3c, sizeof(struct d3dkmt_getallocationpriority))
+#define LX_DXQUERYCLOCKCALIBRATION	\
+	LX_IOWR(0x47, 0x3d, sizeof(struct d3dkmt_queryclockcalibration))
+#define LX_DXENUMADAPTERS3		\
+	LX_IOWR(0x47, 0x3e, sizeof(struct d3dkmt_enumadapters3))
+#define LX_DXSHAREOBJECTS		\
+	LX_IOWR(0x47, 0x3f, sizeof(struct d3dkmt_shareobjects))
+#define LX_DXOPENSYNCOBJECTFROMNTHANDLE2 \
+	LX_IOWR(0x47, 0x40, sizeof(struct d3dkmt_opensyncobjectfromnthandle2))
+#define LX_DXQUERYRESOURCEINFOFROMNTHANDLE \
+	LX_IOWR(0x47, 0x41, sizeof(struct d3dkmt_queryresourceinfofromnthandle))
+#define LX_DXOPENRESOURCEFROMNTHANDLE	\
+	LX_IOWR(0x47, 0x42, sizeof(struct d3dkmt_openresourcefromnthandle))
+
+#define LX_IO_MAX 0x42
+
+#endif /* _D3DKMTHK_H */
diff --git a/drivers/gpu/dxgkrnl/dxgadapter.c b/drivers/gpu/dxgkrnl/dxgadapter.c
new file mode 100644
index 000000000000..aa50aa5335b3
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/dxgadapter.c
@@ -0,0 +1,1399 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ *   Iouri Tarassov <iourit@microsoft.com>
+ *
+ * Dxgkrnl Graphics Port Driver
+ * dxgadapter implementation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/hyperv.h>
+#include <linux/pagemap.h>
+#include <linux/eventfd.h>
+
+#include "dxgkrnl.h"
+
+int dxgadapter_init(struct dxgadapter *adapter, struct hv_device *hdev)
+{
+	int ret = 0;
+
+	guid_to_luid(&hdev->channel->offermsg.offer.if_instance,
+		     &adapter->luid);
+	TRACE_DEBUG(1, "%s: %x:%x %p %pUb\n",
+		    __func__, adapter->luid.b, adapter->luid.a, hdev->channel,
+		    &hdev->channel->offermsg.offer.if_instance);
+
+	adapter->adapter_state = DXGADAPTER_STATE_STOPPED;
+	refcount_set(&adapter->refcount, 1);
+	init_rwsem(&adapter->core_lock);
+
+	INIT_LIST_HEAD(&adapter->adapter_process_list_head);
+	INIT_LIST_HEAD(&adapter->shared_resource_list_head);
+	INIT_LIST_HEAD(&adapter->adapter_shared_syncobj_list_head);
+	INIT_LIST_HEAD(&adapter->syncobj_list_head);
+	init_rwsem(&adapter->shared_resource_list_lock);
+
+	ret = dxgvmbuschannel_init(&adapter->channel, hdev);
+	if (ret)
+		goto cleanup;
+
+	adapter->channel.adapter = adapter;
+
+	ret = dxgvmb_send_open_adapter(adapter);
+	if (ret) {
+		pr_err("dxgvmb_send_open_adapter failed: %d\n", ret);
+		goto cleanup;
+	}
+
+	adapter->adapter_state = DXGADAPTER_STATE_ACTIVE;
+
+	ret = dxgvmb_send_get_internal_adapter_info(adapter);
+	if (ret) {
+		pr_err("get_internal_adapter_info failed: %d\n", ret);
+		goto cleanup;
+	}
+
+cleanup:
+
+	return ret;
+}
+
+void dxgadapter_stop(struct dxgadapter *adapter)
+{
+	struct dxgprocess_adapter *entry;
+
+	dxgglobal_acquire_process_adapter_lock();
+
+	list_for_each_entry(entry, &adapter->adapter_process_list_head,
+			    adapter_process_list_entry) {
+		dxgprocess_adapter_stop(entry);
+	}
+
+	dxgglobal_release_process_adapter_lock();
+
+	if (dxgadapter_acquire_lock_exclusive(adapter) == 0) {
+		dxgvmb_send_close_adapter(adapter);
+		dxgadapter_release_lock_exclusive(adapter);
+	}
+	dxgvmbuschannel_destroy(&adapter->channel);
+}
+
+void dxgadapter_destroy(struct dxgadapter *adapter)
+{
+	TRACE_DEBUG(1, "%s %p\n", __func__, adapter);
+	dxgmem_free(NULL, DXGMEM_ADAPTER, adapter);
+}
+
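+/*
+ * Take a reference on an adapter. Returns false when the reference count
+ * has already dropped to zero, i.e. the adapter is being destroyed.
+ */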
+bool dxgadapter_acquire_reference(struct dxgadapter *adapter)
+{
+	return refcount_inc_not_zero(&adapter->refcount);
+}
+
+void dxgadapter_release_reference(struct dxgadapter *adapter)
+{
+	if (refcount_dec_and_test(&adapter->refcount))
+		dxgadapter_destroy(adapter);
+}
+
+bool dxgadapter_is_active(struct dxgadapter *adapter)
+{
+	return adapter->adapter_state == DXGADAPTER_STATE_ACTIVE;
+}
+
+/* Protected by dxgglobal_acquire_process_adapter_lock */
+void dxgadapter_add_process(struct dxgadapter *adapter,
+			    struct dxgprocess_adapter *process_info)
+{
+	TRACE_DEBUG(1, "%s %p %p", __func__, adapter, process_info);
+	list_add_tail(&process_info->adapter_process_list_entry,
+		      &adapter->adapter_process_list_head);
+}
+
+void dxgadapter_remove_process(struct dxgprocess_adapter *process_info)
+{
+	TRACE_DEBUG(1, "%s %p %p", __func__,
+		    process_info->adapter, process_info);
+	list_del(&process_info->adapter_process_list_entry);
+	process_info->adapter_process_list_entry.next = NULL;
+	process_info->adapter_process_list_entry.prev = NULL;
+}
+
+void dxgadapter_add_shared_resource(struct dxgadapter *adapter,
+				    struct dxgsharedresource *object)
+{
+	/* Adapter lock is held */
+	down_write(&adapter->shared_resource_list_lock);
+	list_add_tail(&object->shared_resource_list_entry,
+		      &adapter->shared_resource_list_head);
+	up_write(&adapter->shared_resource_list_lock);
+}
+
+void dxgadapter_remove_shared_resource(struct dxgadapter *adapter,
+				       struct dxgsharedresource *object)
+{
+	down_write(&adapter->shared_resource_list_lock);
+	if (object->shared_resource_list_entry.next) {
+		list_del(&object->shared_resource_list_entry);
+		object->shared_resource_list_entry.next = NULL;
+	}
+	up_write(&adapter->shared_resource_list_lock);
+}
+
+void dxgadapter_add_shared_syncobj(struct dxgadapter *adapter,
+				   struct dxgsharedsyncobject *object)
+{
+	down_write(&adapter->shared_resource_list_lock);
+	list_add_tail(&object->adapter_shared_syncobj_list_entry,
+		      &adapter->adapter_shared_syncobj_list_head);
+	up_write(&adapter->shared_resource_list_lock);
+}
+
+void dxgadapter_remove_shared_syncobj(struct dxgadapter *adapter,
+				      struct dxgsharedsyncobject *object)
+{
+	down_write(&adapter->shared_resource_list_lock);
+	if (object->adapter_shared_syncobj_list_entry.next) {
+		list_del(&object->adapter_shared_syncobj_list_entry);
+		object->adapter_shared_syncobj_list_entry.next = NULL;
+	}
+	up_write(&adapter->shared_resource_list_lock);
+}
+
+void dxgadapter_add_syncobj(struct dxgadapter *adapter,
+			    struct dxgsyncobject *object)
+{
+	down_write(&adapter->shared_resource_list_lock);
+	list_add_tail(&object->syncobj_list_entry, &adapter->syncobj_list_head);
+	up_write(&adapter->shared_resource_list_lock);
+}
+
+void dxgadapter_remove_syncobj(struct dxgsyncobject *object)
+{
+	down_write(&object->adapter->shared_resource_list_lock);
+	if (object->syncobj_list_entry.next) {
+		list_del(&object->syncobj_list_entry);
+		object->syncobj_list_entry.next = NULL;
+	}
+	up_write(&object->adapter->shared_resource_list_lock);
+}
+
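+/*
+ * Acquire the adapter lock exclusively. Fails with STATUS_DEVICE_REMOVED
+ * when the adapter is no longer in the active state.
+ */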
+int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter)
+{
+	TRACE_DEBUG(1, "%s", __func__);
+	dxglockorder_acquire(DXGLOCK_ADAPTER);
+	down_write(&adapter->core_lock);
+	if (adapter->adapter_state != DXGADAPTER_STATE_ACTIVE) {
+		dxgadapter_release_lock_exclusive(adapter);
+		return STATUS_DEVICE_REMOVED;
+	}
+	return 0;
+}
+
+void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter)
+{
+	dxglockorder_acquire(DXGLOCK_ADAPTER);
+	down_write(&adapter->core_lock);
+}
+
+void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter)
+{
+	TRACE_DEBUG(1, "%s", __func__);
+	up_write(&adapter->core_lock);
+	dxglockorder_release(DXGLOCK_ADAPTER);
+}
+
+int dxgadapter_acquire_lock_shared(struct dxgadapter *adapter)
+{
+	TRACE_DEBUG(1, "%s", __func__);
+	dxglockorder_acquire(DXGLOCK_ADAPTER);
+	down_read(&adapter->core_lock);
+	if (adapter->adapter_state == DXGADAPTER_STATE_ACTIVE)
+		return 0;
+	dxgadapter_release_lock_shared(adapter);
+	return STATUS_DEVICE_REMOVED;
+}
+
+void dxgadapter_release_lock_shared(struct dxgadapter *adapter)
+{
+	TRACE_DEBUG(1, "dxgadapter_release_lock\n");
+	up_read(&adapter->core_lock);
+	dxglockorder_release(DXGLOCK_ADAPTER);
+}
+
+struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter,
+				   struct dxgprocess *process)
+{
+	struct dxgdevice *device = dxgmem_alloc(process, DXGMEM_DEVICE,
+						sizeof(struct dxgdevice));
+	if (device) {
+		refcount_set(&device->refcount, 1);
+		device->adapter = adapter;
+		device->process = process;
+		dxgadapter_acquire_reference(adapter);
+		INIT_LIST_HEAD(&device->context_list_head);
+		INIT_LIST_HEAD(&device->alloc_list_head);
+		INIT_LIST_HEAD(&device->resource_list_head);
+		init_rwsem(&device->device_lock);
+		init_rwsem(&device->context_list_lock);
+		init_rwsem(&device->alloc_list_lock);
+		INIT_LIST_HEAD(&device->pqueue_list_head);
+		INIT_LIST_HEAD(&device->syncobj_list_head);
+		device->object_state = DXGOBJECTSTATE_CREATED;
+
+		dxgprocess_adapter_add_device(process, adapter, device);
+	}
+	return device;
+}
+
+void dxgdevice_stop(struct dxgdevice *device)
+{
+	struct dxgallocation *alloc;
+	struct dxgpagingqueue *pqueue;
+	struct dxgsyncobject *syncobj;
+
+	TRACE_DEBUG(1, "%s: DXGKDEBUG %p", __func__, device);
+	dxgdevice_acquire_alloc_list_lock(device);
+	list_for_each_entry(alloc, &device->alloc_list_head, alloc_list_entry) {
+		dxgallocation_stop(alloc);
+	}
+	dxgdevice_release_alloc_list_lock(device);
+
+	hmgrtable_lock(&device->process->handle_table, DXGLOCK_EXCL);
+	list_for_each_entry(pqueue, &device->pqueue_list_head,
+			    pqueue_list_entry) {
+		dxgpagingqueue_stop(pqueue);
+	}
+	list_for_each_entry(syncobj, &device->syncobj_list_head,
+			    syncobj_list_entry) {
+		dxgsyncobject_stop(syncobj);
+	}
+	hmgrtable_unlock(&device->process->handle_table, DXGLOCK_EXCL);
+	TRACE_DEBUG(1, "%s: end %p\n", __func__, device);
+}
+
+void dxgdevice_mark_destroyed(struct dxgdevice *device)
+{
+	down_write(&device->device_lock);
+	device->object_state = DXGOBJECTSTATE_DESTROYED;
+	up_write(&device->device_lock);
+}
+
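+/*
+ * Destroy a device and everything it owns: sync objects, allocations,
+ * resources, contexts and paging queues are torn down first; the guest
+ * handle is then freed before the host is asked to destroy the device.
+ */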
+void dxgdevice_destroy(struct dxgdevice *device)
+{
+	struct dxgprocess *process = device->process;
+	struct dxgadapter *adapter = device->adapter;
+	d3dkmt_handle device_handle = 0;
+
+	TRACE_DEBUG(1, "%s: %p\n", __func__, device);
+
+	down_write(&device->device_lock);
+
+	if (device->object_state != DXGOBJECTSTATE_ACTIVE)
+		goto cleanup;
+
+	device->object_state = DXGOBJECTSTATE_DESTROYED;
+
+	dxgdevice_stop(device);
+
+	dxgdevice_acquire_alloc_list_lock(device);
+
+	while (!list_empty(&device->syncobj_list_head)) {
+		struct dxgsyncobject *syncobj =
+		    list_first_entry(&device->syncobj_list_head,
+				     struct dxgsyncobject,
+				     syncobj_list_entry);
+		list_del(&syncobj->syncobj_list_entry);
+		syncobj->syncobj_list_entry.next = NULL;
+		dxgdevice_release_alloc_list_lock(device);
+
+		dxgsyncobject_destroy(process, syncobj);
+
+		dxgdevice_acquire_alloc_list_lock(device);
+	}
+
+	{
+		struct dxgallocation *alloc;
+		struct dxgallocation *tmp;
+
+		TRACE_DEBUG(1, "destroying allocations\n");
+		list_for_each_entry_safe(alloc, tmp, &device->alloc_list_head,
+					 alloc_list_entry) {
+			dxgallocation_destroy(alloc);
+		}
+	}
+
+	{
+		struct dxgresource *resource;
+		struct dxgresource *tmp;
+
+		TRACE_DEBUG(1, "destroying resources\n");
+		list_for_each_entry_safe(resource, tmp,
+					 &device->resource_list_head,
+					 resource_list_entry) {
+			dxgresource_destroy(resource);
+		}
+	}
+
+	dxgdevice_release_alloc_list_lock(device);
+
+	{
+		struct dxgcontext *context;
+		struct dxgcontext *tmp;
+
+		TRACE_DEBUG(1, "destroying contexts\n");
+		dxgdevice_acquire_context_list_lock(device);
+		list_for_each_entry_safe(context, tmp,
+					 &device->context_list_head,
+					 context_list_entry) {
+			dxgcontext_destroy(process, context);
+		}
+		dxgdevice_release_context_list_lock(device);
+	}
+
+	{
+		struct dxgpagingqueue *tmp;
+		struct dxgpagingqueue *pqueue;
+
+		TRACE_DEBUG(1, "destroying paging queues\n");
+		list_for_each_entry_safe(pqueue, tmp, &device->pqueue_list_head,
+					 pqueue_list_entry) {
+			dxgpagingqueue_destroy(pqueue);
+		}
+	}
+
+	/* Guest handles need to be released before the host handles */
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	if (device->handle_valid) {
+		hmgrtable_free_handle(&process->handle_table,
+				      HMGRENTRY_TYPE_DXGDEVICE, device->handle);
+		device_handle = device->handle;
+		device->handle_valid = 0;
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+	if (device_handle) {
+		up_write(&device->device_lock);
+		if (dxgadapter_acquire_lock_shared(adapter) == 0) {
+			dxgvmb_send_destroy_device(adapter, process,
+						   device_handle);
+			dxgadapter_release_lock_shared(adapter);
+		}
+		down_write(&device->device_lock);
+	}
+
+cleanup:
+
+	if (device->adapter) {
+		dxgprocess_adapter_remove_device(device);
+		dxgadapter_release_reference(device->adapter);
+	}
+
+	up_write(&device->device_lock);
+
+	dxgdevice_release_reference(device);
+	TRACE_DEBUG(1, "dxgdevice_destroy_end\n");
+}
+
+int dxgdevice_acquire_lock_shared(struct dxgdevice *device)
+{
+	down_read(&device->device_lock);
+	if (!dxgdevice_is_active(device)) {
+		up_read(&device->device_lock);
+		return STATUS_DEVICE_REMOVED;
+	}
+	return 0;
+}
+
+void dxgdevice_release_lock_shared(struct dxgdevice *device)
+{
+	up_read(&device->device_lock);
+}
+
+bool dxgdevice_is_active(struct dxgdevice *device)
+{
+	return device->object_state == DXGOBJECTSTATE_ACTIVE;
+}
+
+void dxgdevice_acquire_context_list_lock(struct dxgdevice *device)
+{
+	dxglockorder_acquire(DXGLOCK_DEVICE_CONTEXTLIST);
+	down_write(&device->context_list_lock);
+}
+
+void dxgdevice_release_context_list_lock(struct dxgdevice *device)
+{
+	up_write(&device->context_list_lock);
+	dxglockorder_release(DXGLOCK_DEVICE_CONTEXTLIST);
+}
+
+void dxgdevice_acquire_alloc_list_lock(struct dxgdevice *device)
+{
+	dxglockorder_acquire(DXGLOCK_DEVICE_ALLOCLIST);
+	down_write(&device->alloc_list_lock);
+}
+
+void dxgdevice_release_alloc_list_lock(struct dxgdevice *device)
+{
+	up_write(&device->alloc_list_lock);
+	dxglockorder_release(DXGLOCK_DEVICE_ALLOCLIST);
+}
+
+void dxgdevice_acquire_alloc_list_lock_shared(struct dxgdevice *device)
+{
+	dxglockorder_acquire(DXGLOCK_DEVICE_ALLOCLIST);
+	down_read(&device->alloc_list_lock);
+}
+
+void dxgdevice_release_alloc_list_lock_shared(struct dxgdevice *device)
+{
+	up_read(&device->alloc_list_lock);
+	dxglockorder_release(DXGLOCK_DEVICE_ALLOCLIST);
+}
+
+void dxgdevice_add_context(struct dxgdevice *device, struct dxgcontext *context)
+{
+	down_write(&device->context_list_lock);
+	list_add_tail(&context->context_list_entry, &device->context_list_head);
+	up_write(&device->context_list_lock);
+}
+
+void dxgdevice_remove_context(struct dxgdevice *device,
+			      struct dxgcontext *context)
+{
+	if (context->context_list_entry.next) {
+		list_del(&context->context_list_entry);
+		context->context_list_entry.next = NULL;
+	}
+}
+
+void dxgdevice_add_alloc(struct dxgdevice *device, struct dxgallocation *alloc)
+{
+	dxgdevice_acquire_alloc_list_lock(device);
+	list_add_tail(&alloc->alloc_list_entry, &device->alloc_list_head);
+	dxgdevice_acquire_reference(device);
+	alloc->owner.device = device;
+	dxgdevice_release_alloc_list_lock(device);
+}
+
+void dxgdevice_remove_alloc(struct dxgdevice *device,
+			    struct dxgallocation *alloc)
+{
+	if (alloc->alloc_list_entry.next) {
+		list_del(&alloc->alloc_list_entry);
+		alloc->alloc_list_entry.next = NULL;
+		dxgdevice_release_reference(device);
+	}
+}
+
+void dxgdevice_remove_alloc_safe(struct dxgdevice *device,
+				 struct dxgallocation *alloc)
+{
+	dxgdevice_acquire_alloc_list_lock(device);
+	dxgdevice_remove_alloc(device, alloc);
+	dxgdevice_release_alloc_list_lock(device);
+}
+
+void dxgdevice_add_resource(struct dxgdevice *device, struct dxgresource *res)
+{
+	dxgdevice_acquire_alloc_list_lock(device);
+	list_add_tail(&res->resource_list_entry, &device->resource_list_head);
+	dxgdevice_acquire_reference(device);
+	dxgdevice_release_alloc_list_lock(device);
+}
+
+void dxgdevice_remove_resource(struct dxgdevice *device,
+			       struct dxgresource *res)
+{
+	if (res->resource_list_entry.next) {
+		list_del(&res->resource_list_entry);
+		res->resource_list_entry.next = NULL;
+		dxgdevice_release_reference(device);
+	}
+}
+
+struct dxgsharedresource *dxgsharedresource_create(struct dxgadapter *adapter)
+{
+	struct dxgsharedresource *resource = dxgmem_alloc(NULL,
+							  DXGMEM_SHAREDRESOURCE,
+							  sizeof(struct
+								 dxgsharedresource));
+	if (resource) {
+		INIT_LIST_HEAD(&resource->resource_list_head);
+		refcount_set(&resource->refcount, 1);
+		dxgmutex_init(&resource->fd_mutex, DXGLOCK_FDMUTEX);
+		resource->adapter = adapter;
+	}
+	return resource;
+}
+
+bool dxgsharedresource_acquire_reference(struct dxgsharedresource *resource)
+{
+	return refcount_inc_not_zero(&resource->refcount);
+}
+
+void dxgsharedresource_release_reference(struct dxgsharedresource *resource)
+{
+	if (!refcount_dec_and_test(&resource->refcount))
+		return;
+	if (resource->global_handle)
+		hmgrtable_free_handle_safe(&dxgglobal->handle_table,
+					   HMGRENTRY_TYPE_DXGSHAREDRESOURCE,
+					   resource->global_handle);
+	if (resource->runtime_private_data)
+		dxgmem_free(NULL, DXGMEM_RUNTIMEPRIVATE,
+			    resource->runtime_private_data);
+	if (resource->resource_private_data)
+		dxgmem_free(NULL, DXGMEM_RESOURCEPRIVATE,
+			    resource->resource_private_data);
+	if (resource->alloc_private_data_sizes)
+		dxgmem_free(NULL, DXGMEM_ALLOCPRIVATE,
+			    resource->alloc_private_data_sizes);
+	if (resource->alloc_private_data)
+		dxgmem_free(NULL, DXGMEM_ALLOCPRIVATE,
+			    resource->alloc_private_data);
+	dxgmem_free(NULL, DXGMEM_SHAREDRESOURCE, resource);
+}
+
+void dxgsharedresource_add_resource(struct dxgsharedresource *shared_resource,
+				    struct dxgresource *resource)
+{
+	down_write(&shared_resource->adapter->shared_resource_list_lock);
+	TRACE_DEBUG(1, "%s: %p %p", __func__, shared_resource, resource);
+	list_add_tail(&resource->shared_resource_list_entry,
+		      &shared_resource->resource_list_head);
+	dxgsharedresource_acquire_reference(shared_resource);
+	dxgresource_acquire_reference(resource);
+	resource->shared_owner = shared_resource;
+	up_write(&shared_resource->adapter->shared_resource_list_lock);
+}
+
+void dxgsharedresource_remove_resource(struct dxgsharedresource
+				       *shared_resource,
+				       struct dxgresource *resource)
+{
+	down_write(&shared_resource->adapter->shared_resource_list_lock);
+	TRACE_DEBUG(1, "%s: %p %p", __func__, shared_resource, resource);
+	if (resource->shared_resource_list_entry.next) {
+		list_del(&resource->shared_resource_list_entry);
+		resource->shared_resource_list_entry.next = NULL;
+		dxgsharedresource_release_reference(shared_resource);
+		resource->shared_owner = NULL;
+		dxgresource_release_reference(resource);
+	}
+	up_write(&shared_resource->adapter->shared_resource_list_lock);
+}
+
+struct dxgresource *dxgresource_create(struct dxgdevice *device)
+{
+	struct dxgresource *resource = dxgmem_alloc(device->process,
+						    DXGMEM_RESOURCE,
+						    sizeof(struct dxgresource));
+	if (resource) {
+		refcount_set(&resource->refcount, 1);
+		resource->device = device;
+		resource->process = device->process;
+		resource->object_state = DXGOBJECTSTATE_ACTIVE;
+		dxgmutex_init(&resource->resource_mutex, DXGLOCK_RESOURCE);
+		INIT_LIST_HEAD(&resource->alloc_list_head);
+		dxgdevice_add_resource(device, resource);
+	}
+	return resource;
+}
+
+void dxgresource_free_handle(struct dxgresource *resource)
+{
+	struct dxgallocation *alloc;
+
+	if (resource->handle_valid) {
+		hmgrtable_free_handle_safe(&resource->device->process->
+					   handle_table,
+					   HMGRENTRY_TYPE_DXGRESOURCE,
+					   resource->handle);
+		resource->handle_valid = 0;
+	}
+	list_for_each_entry(alloc, &resource->alloc_list_head, alloc_list_entry) {
+		dxgallocation_free_handle(alloc);
+	}
+}
+
+void dxgresource_destroy(struct dxgresource *resource)
+{
+	/* device->alloc_list_lock is held */
+	struct dxgallocation *alloc;
+	struct dxgallocation *tmp;
+	struct d3dkmt_destroyallocation2 args = { };
+	int destroyed = test_and_set_bit(0, &resource->flags);
+	struct dxgdevice *device = resource->device;
+
+	if (!destroyed) {
+		dxgresource_free_handle(resource);
+		if (resource->handle) {
+			args.device = device->handle;
+			args.resource = resource->handle;
+			args.flags.assume_not_in_use = 1;
+			dxgvmb_send_destroy_allocation(device->process,
+						       device,
+						       &device->adapter->
+						       channel, &args, NULL);
+			resource->handle = 0;
+		}
+		list_for_each_entry_safe(alloc, tmp, &resource->alloc_list_head,
+					 alloc_list_entry) {
+			dxgallocation_destroy(alloc);
+		}
+		dxgdevice_remove_resource(device, resource);
+		if (resource->shared_owner) {
+			dxgsharedresource_remove_resource(resource->
+							  shared_owner,
+							  resource);
+			resource->shared_owner = NULL;
+		}
+	}
+	dxgresource_release_reference(resource);
+}
+
+void dxgresource_acquire_reference(struct dxgresource *resource)
+{
+	refcount_inc_not_zero(&resource->refcount);
+}
+
+void dxgresource_release_reference(struct dxgresource *resource)
+{
+	if (refcount_dec_and_test(&resource->refcount))
+		dxgmem_free(resource->process, DXGMEM_RESOURCE, resource);
+}
+
+bool dxgresource_is_active(struct dxgresource *resource)
+{
+	return resource->object_state == DXGOBJECTSTATE_ACTIVE;
+}
+
+int dxgresource_add_alloc(struct dxgresource *resource,
+			  struct dxgallocation *alloc)
+{
+	int ret = STATUS_DEVICE_REMOVED;
+	struct dxgdevice *device = resource->device;
+
+	dxgdevice_acquire_alloc_list_lock(device);
+	if (dxgresource_is_active(resource)) {
+		list_add_tail(&alloc->alloc_list_entry,
+			      &resource->alloc_list_head);
+		alloc->owner.resource = resource;
+		ret = 0;
+	}
+	alloc->resource_owner = 1;
+	dxgdevice_release_alloc_list_lock(device);
+	return ret;
+}
+
+void dxgresource_remove_alloc(struct dxgresource *resource,
+			      struct dxgallocation *alloc)
+{
+	if (alloc->alloc_list_entry.next) {
+		list_del(&alloc->alloc_list_entry);
+		alloc->alloc_list_entry.next = NULL;
+	}
+}
+
+void dxgresource_remove_alloc_safe(struct dxgresource *resource,
+				   struct dxgallocation *alloc)
+{
+	dxgdevice_acquire_alloc_list_lock(resource->device);
+	dxgresource_remove_alloc(resource, alloc);
+	dxgdevice_release_alloc_list_lock(resource->device);
+}
+
+bool dxgdevice_acquire_reference(struct dxgdevice *device)
+{
+	return refcount_inc_not_zero(&device->refcount);
+}
+
+void dxgdevice_release_reference(struct dxgdevice *device)
+{
+	if (refcount_dec_and_test(&device->refcount))
+		dxgmem_free(device->process, DXGMEM_DEVICE, device);
+}
+
+void dxgdevice_add_paging_queue(struct dxgdevice *device,
+				struct dxgpagingqueue *entry)
+{
+	dxgdevice_acquire_alloc_list_lock(device);
+	list_add_tail(&entry->pqueue_list_entry, &device->pqueue_list_head);
+	dxgdevice_release_alloc_list_lock(device);
+}
+
+void dxgdevice_remove_paging_queue(struct dxgpagingqueue *pqueue)
+{
+	struct dxgdevice *device = pqueue->device;
+
+	dxgdevice_acquire_alloc_list_lock(device);
+	if (pqueue->pqueue_list_entry.next) {
+		list_del(&pqueue->pqueue_list_entry);
+		pqueue->pqueue_list_entry.next = NULL;
+	}
+	dxgdevice_release_alloc_list_lock(device);
+}
+
+void dxgdevice_add_syncobj(struct dxgdevice *device,
+			   struct dxgsyncobject *syncobj)
+{
+	dxgdevice_acquire_alloc_list_lock(device);
+	list_add_tail(&syncobj->syncobj_list_entry, &device->syncobj_list_head);
+	dxgsyncobject_acquire_reference(syncobj);
+	dxgdevice_release_alloc_list_lock(device);
+}
+
+void dxgdevice_remove_syncobj(struct dxgsyncobject *entry)
+{
+	struct dxgdevice *device = entry->device;
+
+	dxgdevice_acquire_alloc_list_lock(device);
+	if (entry->syncobj_list_entry.next) {
+		list_del(&entry->syncobj_list_entry);
+		entry->syncobj_list_entry.next = NULL;
+		dxgsyncobject_release_reference(entry);
+	}
+	dxgdevice_release_alloc_list_lock(device);
+	dxgdevice_release_reference(device);
+	entry->device = NULL;
+}
+
+struct dxgcontext *dxgcontext_create(struct dxgdevice *device)
+{
+	struct dxgcontext *context = dxgmem_alloc(device->process,
+						  DXGMEM_CONTEXT,
+						  sizeof(struct dxgcontext));
+	if (context) {
+		refcount_set(&context->refcount, 1);
+		context->device = device;
+		context->process = device->process;
+		context->device_handle = device->handle;
+		dxgdevice_acquire_reference(device);
+		INIT_LIST_HEAD(&context->hwqueue_list_head);
+		init_rwsem(&context->hwqueue_list_lock);
+		dxgdevice_add_context(device, context);
+		context->object_state = DXGOBJECTSTATE_ACTIVE;
+	}
+	return context;
+}
+
+/*
+ * Called when the device context list lock is held
+ */
+void dxgcontext_destroy(struct dxgprocess *process, struct dxgcontext *context)
+{
+	struct dxghwqueue *hwqueue;
+	struct dxghwqueue *tmp;
+
+	TRACE_DEBUG(1, "%s %p\n", __func__, context);
+	context->object_state = DXGOBJECTSTATE_DESTROYED;
+	if (context->device) {
+		if (context->handle) {
+			hmgrtable_free_handle_safe(&context->process->
+						   handle_table,
+						   HMGRENTRY_TYPE_DXGCONTEXT,
+						   context->handle);
+		}
+		dxgdevice_remove_context(context->device, context);
+		dxgdevice_release_reference(context->device);
+	}
+	list_for_each_entry_safe(hwqueue, tmp, &context->hwqueue_list_head,
+				 hwqueue_list_entry) {
+		dxghwqueue_destroy(process, hwqueue);
+	}
+	dxgcontext_release_reference(context);
+}
+
+void dxgcontext_destroy_safe(struct dxgprocess *process,
+			     struct dxgcontext *context)
+{
+	struct dxgdevice *device = context->device;
+
+	dxgdevice_acquire_context_list_lock(device);
+	dxgcontext_destroy(process, context);
+	dxgdevice_release_context_list_lock(device);
+}
+
+bool dxgcontext_is_active(struct dxgcontext *context)
+{
+	return context->object_state == DXGOBJECTSTATE_ACTIVE;
+}
+
+bool dxgcontext_acquire_reference(struct dxgcontext *context)
+{
+	return refcount_inc_not_zero(&context->refcount);
+}
+
+void dxgcontext_release_reference(struct dxgcontext *context)
+{
+	if (refcount_dec_and_test(&context->refcount))
+		dxgmem_free(context->process, DXGMEM_CONTEXT, context);
+}
+
+int dxgcontext_add_hwqueue(struct dxgcontext *context,
+			   struct dxghwqueue *hwqueue)
+{
+	int ret = 0;
+
+	down_write(&context->hwqueue_list_lock);
+	if (dxgcontext_is_active(context))
+		list_add_tail(&hwqueue->hwqueue_list_entry,
+			      &context->hwqueue_list_head);
+	else
+		ret = STATUS_DEVICE_REMOVED;
+	up_write(&context->hwqueue_list_lock);
+	return ret;
+}
+
+void dxgcontext_remove_hwqueue(struct dxgcontext *context,
+			       struct dxghwqueue *hwqueue)
+{
+	if (hwqueue->hwqueue_list_entry.next) {
+		list_del(&hwqueue->hwqueue_list_entry);
+		hwqueue->hwqueue_list_entry.next = NULL;
+	}
+}
+
+void dxgcontext_remove_hwqueue_safe(struct dxgcontext *context,
+				    struct dxghwqueue *hwqueue)
+{
+	down_write(&context->hwqueue_list_lock);
+	dxgcontext_remove_hwqueue(context, hwqueue);
+	up_write(&context->hwqueue_list_lock);
+}
+
+struct dxgallocation *dxgallocation_create(struct dxgprocess *process)
+{
+	struct dxgallocation *alloc = dxgmem_alloc(process, DXGMEM_ALLOCATION,
+						   sizeof(struct
+							  dxgallocation));
+	if (alloc)
+		alloc->process = process;
+	return alloc;
+}
+
+void dxgallocation_stop(struct dxgallocation *alloc)
+{
+	if (alloc->pages) {
+		release_pages(alloc->pages, alloc->num_pages);
+		dxgmem_free(alloc->process, DXGMEM_ALLOCATION, alloc->pages);
+		alloc->pages = NULL;
+	}
+	dxgprocess_ht_lock_exclusive_down(alloc->process);
+	if (alloc->cpu_address_mapped) {
+		dxg_unmap_iospace(alloc->cpu_address,
+				  alloc->num_pages << PAGE_SHIFT);
+		alloc->cpu_address_mapped = false;
+		alloc->cpu_address = NULL;
+	}
+	dxgprocess_ht_lock_exclusive_up(alloc->process);
+}
+
+void dxgallocation_free_handle(struct dxgallocation *alloc)
+{
+	dxgprocess_ht_lock_exclusive_down(alloc->process);
+	if (alloc->handle_valid) {
+		hmgrtable_free_handle(&alloc->process->handle_table,
+				      HMGRENTRY_TYPE_DXGALLOCATION,
+				      alloc->alloc_handle);
+		alloc->handle_valid = 0;
+	}
+	dxgprocess_ht_lock_exclusive_up(alloc->process);
+}
+
+void dxgallocation_destroy(struct dxgallocation *alloc)
+{
+	struct dxgprocess *process = alloc->process;
+	struct d3dkmt_destroyallocation2 args = { };
+
+	dxgallocation_stop(alloc);
+	if (alloc->resource_owner)
+		dxgresource_remove_alloc(alloc->owner.resource, alloc);
+	else if (alloc->owner.device)
+		dxgdevice_remove_alloc(alloc->owner.device, alloc);
+	dxgallocation_free_handle(alloc);
+	if (alloc->alloc_handle && !alloc->resource_owner) {
+		args.device = alloc->owner.device->handle;
+		args.alloc_count = 1;
+		args.flags.assume_not_in_use = 1;
+		dxgvmb_send_destroy_allocation(process,
+					       alloc->owner.device,
+					       &alloc->owner.device->adapter->
+					       channel, &args,
+					       &alloc->alloc_handle);
+	}
+	if (alloc->gpadl) {
+		TRACE_DEBUG(1, "Teardown gpadl %d", alloc->gpadl);
+		vmbus_teardown_gpadl(dxgglobal_get_vmbus(), alloc->gpadl);
+		TRACE_DEBUG(1, "Teardown gpadl end");
+		alloc->gpadl = 0;
+	}
+	if (alloc->priv_drv_data)
+		dxgmem_free(alloc->process, DXGMEM_ALLOCPRIVATE,
+			    alloc->priv_drv_data);
+	if (alloc->cpu_address_mapped)
+		pr_err("Alloc IO space is mapped: %p", alloc);
+	dxgmem_free(alloc->process, DXGMEM_ALLOCATION, alloc);
+}
+
+struct dxgpagingqueue *dxgpagingqueue_create(struct dxgdevice *device)
+{
+	struct dxgpagingqueue *pqueue;
+
+	pqueue = dxgmem_alloc(device->process, DXGMEM_PQUEUE, sizeof(*pqueue));
+	if (pqueue) {
+		pqueue->device = device;
+		pqueue->process = device->process;
+		pqueue->device_handle = device->handle;
+		dxgdevice_add_paging_queue(device, pqueue);
+	}
+	return pqueue;
+}
+
+void dxgpagingqueue_stop(struct dxgpagingqueue *pqueue)
+{
+	if (pqueue->mapped_address) {
+		int ret = dxg_unmap_iospace(pqueue->mapped_address, PAGE_SIZE);
+
+		UNUSED(ret);
+		TRACE_DEBUG(1, "fence is unmapped %d %p",
+			    ret, pqueue->mapped_address);
+		pqueue->mapped_address = NULL;
+	}
+}
+
+void dxgpagingqueue_destroy(struct dxgpagingqueue *pqueue)
+{
+	struct dxgprocess *process = pqueue->process;
+
+	TRACE_DEBUG(1, "%s %p %x\n", __func__, pqueue, pqueue->handle);
+
+	dxgpagingqueue_stop(pqueue);
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	if (pqueue->handle) {
+		hmgrtable_free_handle(&process->handle_table,
+				      HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+				      pqueue->handle);
+		pqueue->handle = 0;
+	}
+	if (pqueue->syncobj_handle) {
+		hmgrtable_free_handle(&process->handle_table,
+				      HMGRENTRY_TYPE_MONITOREDFENCE,
+				      pqueue->syncobj_handle);
+		pqueue->syncobj_handle = 0;
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+	if (pqueue->device)
+		dxgdevice_remove_paging_queue(pqueue);
+	dxgmem_free(process, DXGMEM_PQUEUE, pqueue);
+}
+
+struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process,
+						     struct dxgadapter *adapter)
+{
+	struct dxgprocess_adapter *adapter_info = dxgmem_alloc(process,
+							       DXGMEM_PROCESS_ADAPTER,
+							       sizeof
+							       (*adapter_info));
+	if (adapter_info) {
+		if (!dxgadapter_acquire_reference(adapter)) {
+			pr_err("failed to acquire adapter reference\n");
+			goto cleanup;
+		}
+		adapter_info->adapter = adapter;
+		adapter_info->process = process;
+		adapter_info->refcount = 1;
+		dxgmutex_init(&adapter_info->device_list_mutex,
+			      DXGLOCK_PROCESSADAPTERDEVICELIST);
+		INIT_LIST_HEAD(&adapter_info->device_list_head);
+		list_add_tail(&adapter_info->process_adapter_list_entry,
+			      &process->process_adapter_list_head);
+		dxgadapter_add_process(adapter, adapter_info);
+	}
+	return adapter_info;
+cleanup:
+	if (adapter_info)
+		dxgmem_free(process, DXGMEM_PROCESS_ADAPTER, adapter_info);
+	return NULL;
+}
+
+void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info)
+{
+	struct dxgdevice *device;
+
+	dxgmutex_lock(&adapter_info->device_list_mutex);
+	list_for_each_entry(device, &adapter_info->device_list_head,
+			    device_list_entry) {
+		dxgdevice_stop(device);
+	}
+	dxgmutex_unlock(&adapter_info->device_list_mutex);
+}
+
+void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info)
+{
+	struct dxgdevice *device;
+
+	dxgmutex_lock(&adapter_info->device_list_mutex);
+	while (!list_empty(&adapter_info->device_list_head)) {
+		device = list_first_entry(&adapter_info->device_list_head,
+					  struct dxgdevice, device_list_entry);
+		list_del(&device->device_list_entry);
+		device->device_list_entry.next = NULL;
+		dxgmutex_unlock(&adapter_info->device_list_mutex);
+		dxgdevice_destroy(device);
+		dxgmutex_lock(&adapter_info->device_list_mutex);
+	}
+	dxgmutex_unlock(&adapter_info->device_list_mutex);
+
+	dxgadapter_remove_process(adapter_info);
+	dxgadapter_release_reference(adapter_info->adapter);
+	list_del(&adapter_info->process_adapter_list_entry);
+	dxgmem_free(adapter_info->process, DXGMEM_PROCESS_ADAPTER,
+		    adapter_info);
+}
+
+/*
+ * Must be called when dxgglobal::process_adapter_mutex is held
+ */
+void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter_info)
+{
+	TRACE_DEBUG(1, "%s %p %d",
+		    __func__, adapter_info, adapter_info->refcount);
+	adapter_info->refcount--;
+	if (adapter_info->refcount == 0)
+		dxgprocess_adapter_destroy(adapter_info);
+}
+
+int dxgprocess_adapter_add_device(struct dxgprocess *process,
+				  struct dxgadapter *adapter,
+				  struct dxgdevice *device)
+{
+	struct dxgprocess_adapter *entry;
+	struct dxgprocess_adapter *adapter_info = NULL;
+	int ret = 0;
+
+	dxgglobal_acquire_process_adapter_lock();
+
+	list_for_each_entry(entry, &process->process_adapter_list_head,
+			    process_adapter_list_entry) {
+		if (entry->adapter == adapter) {
+			adapter_info = entry;
+			break;
+		}
+	}
+	if (adapter_info == NULL) {
+		pr_err("failed to find process adapter info\n");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	dxgmutex_lock(&adapter_info->device_list_mutex);
+	list_add_tail(&device->device_list_entry,
+		      &adapter_info->device_list_head);
+	device->adapter_info = adapter_info;
+	dxgmutex_unlock(&adapter_info->device_list_mutex);
+
+cleanup:
+
+	dxgglobal_release_process_adapter_lock();
+	return ret;
+}
+
+void dxgprocess_adapter_remove_device(struct dxgdevice *device)
+{
+	TRACE_DEBUG(1, "%s %p\n", __func__, device);
+	dxgmutex_lock(&device->adapter_info->device_list_mutex);
+	if (device->device_list_entry.next) {
+		list_del(&device->device_list_entry);
+		device->device_list_entry.next = NULL;
+	}
+	dxgmutex_unlock(&device->adapter_info->device_list_mutex);
+}
+
+struct dxgsharedsyncobject *dxgsharedsyncobj_create(struct dxgadapter *adapter,
+						    struct dxgsyncobject *so)
+{
+	struct dxgsharedsyncobject *syncobj;
+
+	syncobj = dxgmem_alloc(NULL, DXGMEM_SHAREDSYNCOBJ, sizeof(*syncobj));
+	if (syncobj) {
+		refcount_set(&syncobj->refcount, 1);
+		INIT_LIST_HEAD(&syncobj->shared_syncobj_list_head);
+		syncobj->adapter = adapter;
+		syncobj->type = so->type;
+		syncobj->monitored_fence = so->monitored_fence;
+		dxgadapter_add_shared_syncobj(adapter, syncobj);
+		dxgadapter_acquire_reference(adapter);
+		init_rwsem(&syncobj->syncobj_list_lock);
+		dxgmutex_init(&syncobj->fd_mutex, DXGLOCK_FDMUTEX);
+	}
+	return syncobj;
+}
+
+bool dxgsharedsyncobj_acquire_reference(struct dxgsharedsyncobject *syncobj)
+{
+	TRACE_DEBUG(1, "%s 0x%p %d", __func__, syncobj,
+		    refcount_read(&syncobj->refcount));
+	return refcount_inc_not_zero(&syncobj->refcount);
+}
+
+void dxgsharedsyncobj_release_reference(struct dxgsharedsyncobject *syncobj)
+{
+	TRACE_DEBUG(1, "%s 0x%p %d", __func__, syncobj,
+		    refcount_read(&syncobj->refcount));
+	if (refcount_dec_and_test(&syncobj->refcount)) {
+		TRACE_DEBUG(1, "Destroying");
+		if (syncobj->global_shared_handle) {
+			hmgrtable_lock(&dxgglobal->handle_table, DXGLOCK_EXCL);
+			hmgrtable_free_handle(&dxgglobal->handle_table,
+					      HMGRENTRY_TYPE_DXGSYNCOBJECT,
+					      syncobj->global_shared_handle);
+			hmgrtable_unlock(&dxgglobal->handle_table,
+					 DXGLOCK_EXCL);
+		}
+		if (syncobj->adapter) {
+			dxgadapter_remove_shared_syncobj(syncobj->adapter,
+							 syncobj);
+			dxgadapter_release_reference(syncobj->adapter);
+		}
+		dxgmem_free(NULL, DXGMEM_SHAREDSYNCOBJ, syncobj);
+	}
+	TRACE_DEBUG(1, "%s end", __func__);
+}
+
+void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *shared,
+				  struct dxgsyncobject *syncobj)
+{
+	TRACE_DEBUG(1, "%s 0x%p 0x%p", __func__, shared, syncobj);
+	dxgsharedsyncobj_acquire_reference(shared);
+	down_write(&shared->syncobj_list_lock);
+	list_add(&syncobj->shared_syncobj_list_entry,
+		 &shared->shared_syncobj_list_head);
+	syncobj->shared_owner = shared;
+	up_write(&shared->syncobj_list_lock);
+}
+
+void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *shared,
+				     struct dxgsyncobject *syncobj)
+{
+	TRACE_DEBUG(1, "%s 0x%p", __func__, shared);
+	down_write(&shared->syncobj_list_lock);
+	list_del(&syncobj->shared_syncobj_list_entry);
+	up_write(&shared->syncobj_list_lock);
+}
+
+struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process,
+					   struct dxgdevice *device,
+					   struct dxgadapter *adapter,
+					   enum
+					   d3dddi_synchronizationobject_type
+					   type,
+					   struct
+					   d3dddi_synchronizationobject_flags
+					   flags)
+{
+	struct dxgsyncobject *syncobj;
+
+	syncobj = dxgmem_alloc(process, DXGMEM_SYNCOBJ, sizeof(*syncobj));
+	if (syncobj == NULL)
+		goto cleanup;
+	syncobj->type = type;
+	syncobj->process = process;
+	switch (type) {
+	case D3DDDI_MONITORED_FENCE:
+	case D3DDDI_PERIODIC_MONITORED_FENCE:
+		syncobj->monitored_fence = 1;
+		break;
+	case D3DDDI_CPU_NOTIFICATION:
+		syncobj->cpu_event = 1;
+		syncobj->host_event = dxgmem_alloc(process, DXGMEM_HOSTEVENT,
+						   sizeof(struct dxghostevent));
+		if (syncobj->host_event == NULL)
+			goto cleanup;
+		break;
+	default:
+		break;
+	}
+	if (flags.shared) {
+		syncobj->shared = 1;
+		if (flags.nt_security_sharing)
+			syncobj->shared_nt = 1;
+	}
+
+	refcount_set(&syncobj->refcount, 1);
+
+	if (syncobj->monitored_fence) {
+		syncobj->device = device;
+		syncobj->device_handle = device->handle;
+		dxgdevice_acquire_reference(device);
+		dxgdevice_add_syncobj(device, syncobj);
+	} else {
+		dxgadapter_add_syncobj(adapter, syncobj);
+	}
+	syncobj->adapter = adapter;
+	dxgadapter_acquire_reference(adapter);
+
+	TRACE_DEBUG(1, "%s 0x%p\n", __func__, syncobj);
+	return syncobj;
+cleanup:
+	if (syncobj) {
+		if (syncobj->host_event)
+			dxgmem_free(process, DXGMEM_HOSTEVENT,
+				    syncobj->host_event);
+		dxgmem_free(process, DXGMEM_SYNCOBJ, syncobj);
+	}
+	return NULL;
+}
+
+void dxgsyncobject_destroy(struct dxgprocess *process,
+			   struct dxgsyncobject *syncobj)
+{
+	int destroyed;
+
+	TRACE_DEBUG(1, "%s 0x%p", __func__, syncobj);
+
+	dxgsyncobject_stop(syncobj);
+
+	destroyed = test_and_set_bit(0, &syncobj->flags);
+	if (!destroyed) {
+		TRACE_DEBUG(1, "Deleting handle: %x", syncobj->handle);
+		hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+		if (syncobj->handle) {
+			hmgrtable_free_handle(&process->handle_table,
+					      HMGRENTRY_TYPE_DXGSYNCOBJECT,
+					      syncobj->handle);
+			syncobj->handle = 0;
+			dxgsyncobject_release_reference(syncobj);
+		}
+		hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+		if (syncobj->cpu_event) {
+			if (syncobj->host_event->cpu_event) {
+				eventfd_ctx_put(syncobj->host_event->cpu_event);
+				if (syncobj->host_event->event_id) {
+					dxgglobal_remove_host_event(syncobj->
+								    host_event);
+				}
+				syncobj->host_event->cpu_event = NULL;
+			}
+		}
+		if (syncobj->monitored_fence)
+			dxgdevice_remove_syncobj(syncobj);
+		else
+			dxgadapter_remove_syncobj(syncobj);
+		if (syncobj->adapter) {
+			dxgadapter_release_reference(syncobj->adapter);
+			syncobj->adapter = NULL;
+		}
+	}
+	dxgsyncobject_release_reference(syncobj);
+
+	TRACE_DEBUG(1, "%s end", __func__);
+}
+
+void dxgsyncobject_stop(struct dxgsyncobject *syncobj)
+{
+	int stopped = test_and_set_bit(1, &syncobj->flags);
+
+	TRACE_DEBUG(1, "%s", __func__);
+	if (!stopped) {
+		TRACE_DEBUG(1, "stopping");
+		if (syncobj->monitored_fence) {
+			if (syncobj->mapped_address) {
+				int ret =
+				    dxg_unmap_iospace(syncobj->mapped_address,
+						      PAGE_SIZE);
+
+				(void)ret;
+				TRACE_DEBUG(1, "fence is unmapped %d %p\n",
+					    ret, syncobj->mapped_address);
+				syncobj->mapped_address = NULL;
+			}
+		}
+	}
+	TRACE_DEBUG(1, "%s end", __func__);
+}
+
+void dxgsyncobject_acquire_reference(struct dxgsyncobject *syncobj)
+{
+	TRACE_DEBUG(1, "%s 0x%p %d",
+		    __func__, syncobj, refcount_read(&syncobj->refcount));
+	refcount_inc_not_zero(&syncobj->refcount);
+}
+
+void dxgsyncobject_release_reference(struct dxgsyncobject *syncobj)
+{
+	TRACE_DEBUG(1, "%s 0x%p %d",
+		    __func__, syncobj, refcount_read(&syncobj->refcount));
+	if (refcount_dec_and_test(&syncobj->refcount)) {
+		if (syncobj->shared_owner) {
+			dxgsharedsyncobj_remove_syncobj(syncobj->shared_owner,
+							syncobj);
+			dxgsharedsyncobj_release_reference(syncobj->
+							   shared_owner);
+		}
+		if (syncobj->host_event)
+			dxgmem_free(syncobj->process, DXGMEM_HOSTEVENT,
+				    syncobj->host_event);
+		dxgmem_free(syncobj->process, DXGMEM_SYNCOBJ, syncobj);
+	}
+}
+
+struct dxghwqueue *dxghwqueue_create(struct dxgcontext *context)
+{
+	struct dxgprocess *process = context->device->process;
+	struct dxghwqueue *hwqueue =
+	    dxgmem_alloc(process, DXGMEM_HWQUEUE, sizeof(*hwqueue));
+	if (hwqueue) {
+		refcount_set(&hwqueue->refcount, 1);
+		hwqueue->context = context;
+		hwqueue->process = process;
+		hwqueue->device_handle = context->device->handle;
+		if (dxgcontext_add_hwqueue(context, hwqueue)) {
+			dxghwqueue_release_reference(hwqueue);
+			hwqueue = NULL;
+		} else {
+			dxgcontext_acquire_reference(context);
+		}
+	}
+	return hwqueue;
+}
+
+void dxghwqueue_destroy(struct dxgprocess *process, struct dxghwqueue *hwqueue)
+{
+	TRACE_DEBUG(1, "%s %p\n", __func__, hwqueue);
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	if (hwqueue->handle) {
+		hmgrtable_free_handle(&process->handle_table,
+				      HMGRENTRY_TYPE_DXGHWQUEUE,
+				      hwqueue->handle);
+		hwqueue->handle = 0;
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+	if (hwqueue->progress_fence_mapped_address) {
+		dxg_unmap_iospace(hwqueue->progress_fence_mapped_address,
+				  PAGE_SIZE);
+		hwqueue->progress_fence_mapped_address = NULL;
+	}
+	dxgcontext_remove_hwqueue_safe(hwqueue->context, hwqueue);
+
+	dxgcontext_release_reference(hwqueue->context);
+	dxghwqueue_release_reference(hwqueue);
+}
+
+bool dxghwqueue_acquire_reference(struct dxghwqueue *hwqueue)
+{
+	return refcount_inc_not_zero(&hwqueue->refcount);
+}
+
+void dxghwqueue_release_reference(struct dxghwqueue *hwqueue)
+{
+	if (refcount_dec_and_test(&hwqueue->refcount))
+		dxgmem_free(hwqueue->process, DXGMEM_HWQUEUE, hwqueue);
+}
diff --git a/drivers/gpu/dxgkrnl/dxgkrnl.h b/drivers/gpu/dxgkrnl/dxgkrnl.h
new file mode 100644
index 000000000000..20be1b5053c7
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/dxgkrnl.h
@@ -0,0 +1,913 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ *   Iouri Tarassov <iourit at microsoft.com>
+ *
+ * Dxgkrnl Graphics Port Driver headers
+ *
+ */
+
+#ifndef _DXGKRNL_H
+#define _DXGKRNL_H
+
+#include <linux/uuid.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/semaphore.h>
+#include <linux/refcount.h>
+#include <linux/rwsem.h>
+#include <linux/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/gfp.h>
+#include <linux/cdev.h>
+
+struct dxgprocess;
+struct dxgadapter;
+struct dxgdevice;
+struct dxgcontext;
+struct dxgallocation;
+struct dxgresource;
+struct dxgsharedresource;
+struct dxgsyncobject;
+struct dxgsharedsyncobject;
+struct dxghwqueue;
+
+#include "misc.h"
+#include "hmgr.h"
+#include "d3dkmthk.h"
+
+struct dxgk_device_types {
+	uint post_device:1;
+	uint post_device_certain:1;
+	uint software_device:1;
+	uint soft_gpu_device:1;
+	uint warp_device:1;
+	uint bdd_device:1;
+	uint support_miracast:1;
+	uint mismatched_lda:1;
+	uint indirect_display_device:1;
+	uint xbox_one_device:1;
+	uint child_id_support_dwm_clone:1;
+	uint child_id_support_dwm_clone2:1;
+	uint has_internal_panel:1;
+	uint rfx_vgpu_device:1;
+	uint virtual_render_device:1;
+	uint support_preserve_boot_display:1;
+	uint is_uefi_frame_buffer:1;
+	uint removable_device:1;
+	uint virtual_monitor_device:1;
+};
+
+enum dxgobjectstate {
+	DXGOBJECTSTATE_CREATED,
+	DXGOBJECTSTATE_ACTIVE,
+	DXGOBJECTSTATE_STOPPED,
+	DXGOBJECTSTATE_DESTROYED,
+};
+
+struct dxgvmbuschannel {
+	struct vmbus_channel	*channel;
+	struct hv_device	*hdev;
+	struct dxgadapter	*adapter;
+	spinlock_t		packet_list_mutex;
+	struct list_head	packet_list_head;
+	struct kmem_cache	*packet_cache;
+	atomic64_t		packet_request_id;
+};
+
+int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev);
+void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch);
+void dxgvmbuschannel_receive(void *ctx);
+
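+/*
+ * Illustrative sketch (an assumption, not a usage mandated by this header):
+ * a channel is initialized against an hv_device and torn down with its
+ * counterpart, following the 0-on-success convention used in this driver:
+ *
+ *	if (dxgvmbuschannel_init(&adapter->channel, hdev) == 0) {
+ *		...
+ *		dxgvmbuschannel_destroy(&adapter->channel);
+ *	}
+ */
+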
+struct dxgpagingqueue {
+	struct dxgdevice	*device;
+	struct dxgprocess	*process;
+	struct list_head	pqueue_list_entry;
+	d3dkmt_handle		device_handle;
+	d3dkmt_handle		handle;
+	d3dkmt_handle		syncobj_handle;
+	void			*mapped_address;
+};
+
+/*
+ * This structure describes an event, which will be signaled by
+ * a message from the host.
+ */
+struct dxghostevent {
+	struct list_head	host_event_list_entry;
+	u64			event_id;
+	struct dxgprocess	*process;
+	struct eventfd_ctx	*cpu_event;
+	struct completion	*completion_event;
+	bool			destroy_after_signal;
+	bool			remove_from_list;
+};
+
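+/*
+ * Illustrative sketch (assumed flow, built from the dxgglobal helpers
+ * declared later in this header): registering an event, which the host
+ * will later signal by id:
+ *
+ *	event->event_id = dxgglobal_new_host_event_id();
+ *	dxgglobal_add_host_event(event);
+ *	...send event->event_id to the host in a VM bus message...
+ */
+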
+struct dxgpagingqueue *dxgpagingqueue_create(struct dxgdevice *device);
+void dxgpagingqueue_destroy(struct dxgpagingqueue *pqueue);
+void dxgpagingqueue_stop(struct dxgpagingqueue *pqueue);
+
+/*
+ * This is a GPU synchronization object, which is used to synchronize
+ * execution between GPU contexts/hardware queues or to track GPU execution
+ * progress. A dxgsyncobject is created when somebody creates a syncobject
+ * or opens a shared syncobject.
+ * A syncobject belongs to an adapter, unless it is a cross-adapter object.
+ * Cross-adapter syncobjects are currently not implemented.
+ *
+ * D3DDDI_MONITORED_FENCE and D3DDDI_PERIODIC_MONITORED_FENCE are called
+ * "device" syncobjects, because they belong to a device (dxgdevice).
+ * Device syncobjects are inserted into a list in dxgdevice.
+ *
+ * A syncobject can be "shared", meaning that it could be opened by many
+ * processes.
+ *
+ * Shared syncobjects are inserted into a list in their owner
+ * (dxgsharedsyncobject).
+ * A syncobject can be shared by using a global handle or by using an
+ * "NT security handle".
+ * When global handle sharing is used, the handle is created during object
+ * creation.
+ * When "NT security" is used, the handle for sharing is created by calling
+ * dxgk_share_objects. On Linux an "NT handle" is represented by a file
+ * descriptor, which points to a dxgsharedsyncobject.
+ */
+struct dxgsyncobject {
+	refcount_t refcount;
+	enum d3dddi_synchronizationobject_type	type;
+	/*
+	 * List entry in dxgdevice for device sync objects.
+	 * List entry in dxgadapter for other objects.
+	 */
+	struct list_head		syncobj_list_entry;
+	/* List entry in the dxgsharedsyncobject for shared syncobjects */
+	struct list_head		shared_syncobj_list_entry;
+	/* Adapter the syncobject belongs to. NULL for stopped sync objects. */
+	struct dxgadapter		*adapter;
+	/*
+	 * Pointer to the device which was used to create the object.
+	 * This is NULL for non-device syncobjects.
+	 */
+	struct dxgdevice		*device;
+	struct dxgprocess		*process;
+	/* Used by D3DDDI_CPU_NOTIFICATION objects */
+	struct dxghostevent		*host_event;
+	/* Owner object for shared syncobjects */
+	struct dxgsharedsyncobject	*shared_owner;
+	/* CPU virtual address of the fence value for "device" syncobjects */
+	void				*mapped_address;
+	/* Handle in the process handle table */
+	d3dkmt_handle			handle;
+	/* Cached handle of the device. Used to avoid device dereference. */
+	d3dkmt_handle			device_handle;
+	union {
+		struct {
+			/* Must be the first bit */
+			uint		destroyed:1;
+			/* Must be the second bit */
+			uint		stopped:1;
+			/* device syncobject */
+			uint		monitored_fence:1;
+			uint		cpu_event:1;
+			uint		shared:1;
+			/* shared using file descriptor */
+			uint		shared_nt:1;
+			uint		reserved:26;
+		};
+		long			flags;
+	};
+};
+
+/*
+ * This object is used as the parent of all sync objects created for a
+ * shared syncobject. When a shared syncobject is created without NT
+ * security, the handle in the global handle table will point to this
+ * object.
+ */
+struct dxgsharedsyncobject {
+	refcount_t refcount;
+	/* Referenced by file descriptors */
+	int				host_shared_handle_nt_reference;
+	/*
+	 * Handle in the global handle table. It is zero for NT
+	 * security syncobjects
+	 */
+	d3dkmt_handle			global_shared_handle;
+	/* Corresponding handle in the host global handle table */
+	d3dkmt_handle			host_shared_handle;
+	/*
+	 * When the sync object is shared by NT handle, this is the
+	 * corresponding handle in the host
+	 */
+	d3dkmt_handle			host_shared_handle_nt;
+	/* Protects access to host_shared_handle_nt */
+	struct dxgmutex			fd_mutex;
+	struct rw_semaphore		syncobj_list_lock;
+	struct list_head		shared_syncobj_list_head;
+	struct list_head		adapter_shared_syncobj_list_entry;
+	struct dxgadapter		*adapter;
+	enum d3dddi_synchronizationobject_type type;
+	uint				monitored_fence:1;
+};
+
+struct dxgsharedsyncobject *dxgsharedsyncobj_create(struct dxgadapter *adapter,
+						    struct dxgsyncobject
+						    *syncobj);
+bool dxgsharedsyncobj_acquire_reference(struct dxgsharedsyncobject *syncobj);
+void dxgsharedsyncobj_release_reference(struct dxgsharedsyncobject *syncobj);
+void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *sharedsyncobj,
+				  struct dxgsyncobject *syncobj);
+void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *sharedsyncobj,
+				     struct dxgsyncobject *syncobj);
+
+struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process,
+					   struct dxgdevice *device,
+					   struct dxgadapter *adapter,
+					   enum
+					   d3dddi_synchronizationobject_type
+					   type,
+					   struct
+					   d3dddi_synchronizationobject_flags
+					   flags);
+void dxgsyncobject_destroy(struct dxgprocess *process,
+			   struct dxgsyncobject *syncobj);
+void dxgsyncobject_stop(struct dxgsyncobject *syncobj);
+void dxgsyncobject_acquire_reference(struct dxgsyncobject *syncobj);
+void dxgsyncobject_release_reference(struct dxgsyncobject *syncobj);
+
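+/*
+ * Illustrative sketch (not part of the driver): lifecycle of a monitored
+ * fence using the helpers above; 'process', 'device' and 'adapter' are
+ * assumed to be valid and active:
+ *
+ *	struct d3dddi_synchronizationobject_flags flags = {};
+ *	struct dxgsyncobject *so;
+ *
+ *	so = dxgsyncobject_create(process, device, adapter,
+ *				  D3DDDI_MONITORED_FENCE, flags);
+ *	if (so) {
+ *		...use the object; so->mapped_address holds the fence
+ *		   value address once mapped...
+ *		dxgsyncobject_destroy(process, so);
+ *	}
+ */
+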
+extern struct device *dxgglobaldev;
+
+struct dxgglobal {
+	struct dxgvmbuschannel	channel;
+	struct delayed_work	dwork;
+	struct hv_device	*hdev;
+	u32			num_adapters;
+	struct resource		*mem;
+	u64			mmiospace_base;
+	u64			mmiospace_size;
+	dev_t			device_devt;
+	struct class		*device_class;
+	struct device		*device;
+	struct cdev		device_cdev;
+	struct dxgmutex		device_mutex;
+
+	/* list of created processes */
+	struct list_head	plisthead;
+	struct dxgmutex		plistmutex;
+
+	/* list of created adapters */
+	struct list_head	adapter_list_head;
+	struct rw_semaphore	adapter_list_lock;
+
+	/* List of all current threads for lock order tracking. */
+	struct mutex		thread_info_mutex;
+	struct list_head	thread_info_list_head;
+
+	/* protects access to the global VM bus channel */
+	struct rw_semaphore	channel_lock;
+
+	/* protects the dxgprocess_adapter lists */
+	struct dxgmutex		process_adapter_mutex;
+
+	/* list of events waiting to be signaled by the host */
+	struct list_head	host_event_list_head;
+	spinlock_t		host_event_list_mutex;
+	atomic64_t		host_event_id;
+
+	/* Handle table for shared objects */
+	struct hmgrtable	handle_table;
+
+	bool			cdev_initialized;
+	bool			devt_initialized;
+	bool			vmbus_registered;
+};
+
+extern struct dxgglobal		*dxgglobal;
+
+void dxgglobal_acquire_adapter_list_lock(enum dxglockstate state);
+void dxgglobal_release_adapter_list_lock(enum dxglockstate state);
+struct vmbus_channel *dxgglobal_get_vmbus(void);
+struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void);
+void dxgglobal_acquire_process_adapter_lock(void);
+void dxgglobal_release_process_adapter_lock(void);
+void dxgglobal_add_host_event(struct dxghostevent *hostevent);
+void dxgglobal_remove_host_event(struct dxghostevent *hostevent);
+u64 dxgglobal_new_host_event_id(void);
+void dxgglobal_signal_host_event(u64 event_id);
+struct dxghostevent *dxgglobal_get_host_event(u64 event_id);
+int dxgglobal_acquire_channel_lock(void);
+void dxgglobal_release_channel_lock(void);
+
+/*
+ * Describes adapter information for each process
+ */
+struct dxgprocess_adapter {
+	/* Entry in dxgadapter::adapter_process_list_head */
+	struct list_head	adapter_process_list_entry;
+	/* Entry in dxgprocess::process_adapter_list_head */
+	struct list_head	process_adapter_list_entry;
+	/* List of all dxgdevice objects created for the process on adapter */
+	struct list_head	device_list_head;
+	struct dxgmutex		device_list_mutex;
+	struct dxgadapter	*adapter;
+	struct dxgprocess	*process;
+	int			refcount;
+};
+
+struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process,
+						     struct dxgadapter
+						     *adapter);
+void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter);
+int dxgprocess_adapter_add_device(struct dxgprocess *process,
+				  struct dxgadapter *adapter,
+				  struct dxgdevice *device);
+void dxgprocess_adapter_remove_device(struct dxgdevice *device);
+void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info);
+void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info);
+
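+/*
+ * Illustrative sketch (not part of the driver): dxgprocess_adapter_release()
+ * must be called with dxgglobal->process_adapter_mutex held, as noted in
+ * dxgadapter.c:
+ *
+ *	dxgglobal_acquire_process_adapter_lock();
+ *	dxgprocess_adapter_release(adapter_info);
+ *	dxgglobal_release_process_adapter_lock();
+ */
+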
+struct dxgprocess {
+	/*
+	 * Process list entry in dxgglobal.
+	 * Protected by the dxgglobal->plistmutex.
+	 */
+	struct list_head	plistentry;
+	struct task_struct	*process;
+	pid_t			pid;
+	pid_t			tgid;
+	/* how many times the process was opened */
+	int			refcount;
+	/*
+	 * This handle table is used for all objects except dxgadapter.
+	 * The handle table lock order is higher than the local_handle_table
+	 * lock.
+	 */
+	struct hmgrtable	handle_table;
+	/*
+	 * This handle table is used for dxgadapter objects.
+	 * The handle table lock order is lowest.
+	 */
+	struct hmgrtable	local_handle_table;
+	d3dkmt_handle		host_handle;
+
+	/* List of opened adapters (dxgprocess_adapter) */
+	struct list_head	process_adapter_list_head;
+
+	struct hmgrtable	*test_handle_table[2];
+	atomic_t		dxg_memory[DXGMEM_LAST];
+	struct dxgmutex		process_mutex;
+};
+
+struct dxgprocess *dxgprocess_create(void);
+void dxgprocess_destroy(struct dxgprocess *process);
+void dxgprocess_release_reference(struct dxgprocess *process);
+int dxgprocess_open_adapter(struct dxgprocess *process,
+			    struct dxgadapter *adapter, d3dkmt_handle *handle);
+int dxgprocess_close_adapter(struct dxgprocess *process, d3dkmt_handle handle);
+struct dxgadapter *dxgprocess_get_adapter(struct dxgprocess *process,
+					  d3dkmt_handle handle);
+struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process,
+						d3dkmt_handle handle);
+struct dxgdevice *dxgprocess_device_by_handle(struct dxgprocess *process,
+					      d3dkmt_handle handle);
+struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process,
+						     enum hmgrentry_type t,
+						     d3dkmt_handle handle);
+void dxgprocess_ht_lock_shared_down(struct dxgprocess *process);
+void dxgprocess_ht_lock_shared_up(struct dxgprocess *process);
+void dxgprocess_ht_lock_exclusive_down(struct dxgprocess *process);
+void dxgprocess_ht_lock_exclusive_up(struct dxgprocess *process);
+struct dxgprocess_adapter *dxgprocess_get_adapter_info(struct dxgprocess
+						       *process,
+						       struct dxgadapter
+						       *adapter);
+
+enum dxgadapter_state {
+	DXGADAPTER_STATE_ACTIVE		= 0,
+	DXGADAPTER_STATE_STOPPED	= 1,
+};
+
+/*
+ * This object represents the graphics adapter.
+ * Objects which take a reference on the adapter:
+ * - dxgglobal
+ * - dxgdevice
+ * - adapter handle (d3dkmt_handle)
+ */
+struct dxgadapter {
+	struct rw_semaphore	core_lock;
+	struct rw_semaphore	adapter_process_list_lock;
+	refcount_t refcount;
+	/* Entry in the list of adapters in dxgglobal */
+	struct list_head	adapter_list_entry;
+	/* The list of dxgprocess_adapter entries */
+	struct list_head	adapter_process_list_head;
+	/* List of all dxgsharedresource objects */
+	struct list_head	shared_resource_list_head;
+	/* List of all dxgsharedsyncobject objects */
+	struct list_head	adapter_shared_syncobj_list_head;
+	/* List of all non-device dxgsyncobject objects */
+	struct list_head	syncobj_list_head;
+	/* This lock protects shared resource and syncobject lists */
+	struct rw_semaphore	shared_resource_list_lock;
+	struct winluid		luid;
+	struct dxgvmbuschannel	channel;
+	d3dkmt_handle		host_handle;
+	enum dxgadapter_state	adapter_state;
+	struct winluid		host_adapter_luid;
+	winwchar		device_description[80];
+	winwchar		device_instance_id[W_MAX_PATH];
+};
+
+int dxgadapter_init(struct dxgadapter *adapter, struct hv_device *hdev);
+bool dxgadapter_is_active(struct dxgadapter *adapter);
+void dxgadapter_stop(struct dxgadapter *adapter);
+void dxgadapter_destroy(struct dxgadapter *adapter);
+bool dxgadapter_acquire_reference(struct dxgadapter *adapter);
+void dxgadapter_release_reference(struct dxgadapter *adapter);
+int dxgadapter_acquire_lock_shared(struct dxgadapter *adapter);
+void dxgadapter_release_lock_shared(struct dxgadapter *adapter);
+int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter);
+void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter);
+void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter);
+void dxgadapter_add_shared_resource(struct dxgadapter *adapter,
+				    struct dxgsharedresource *res);
+void dxgadapter_remove_shared_resource(struct dxgadapter *adapter,
+				       struct dxgsharedresource *res);
+void dxgadapter_add_shared_syncobj(struct dxgadapter *adapter,
+				   struct dxgsharedsyncobject *so);
+void dxgadapter_remove_shared_syncobj(struct dxgadapter *adapter,
+				      struct dxgsharedsyncobject *so);
+void dxgadapter_add_syncobj(struct dxgadapter *adapter,
+			    struct dxgsyncobject *so);
+void dxgadapter_remove_syncobj(struct dxgsyncobject *so);
+void dxgadapter_add_process(struct dxgadapter *adapter,
+			    struct dxgprocess_adapter *process_info);
+void dxgadapter_remove_process(struct dxgprocess_adapter *process_info);
+
+/*
+ * This object represents a device.
+ * The following objects take a reference on the device:
+ * - dxgcontext
+ * - device handle (d3dkmt_handle)
+ */
+struct dxgdevice {
+	enum dxgobjectstate	object_state;
+	/* Device takes reference on the adapter */
+	struct dxgadapter	*adapter;
+	struct dxgprocess_adapter *adapter_info;
+	struct dxgprocess	*process;
+	/* Entry in the dxgprocess_adapter device list */
+	struct list_head	device_list_entry;
+	refcount_t		refcount;
+	/* Protects destruction of the device object */
+	struct rw_semaphore	device_lock;
+	struct rw_semaphore	context_list_lock;
+	struct list_head	context_list_head;
+	/* List of device allocations */
+	struct rw_semaphore	alloc_list_lock;
+	struct list_head	alloc_list_head;
+	struct list_head	resource_list_head;
+	/* List of paging queues. Protected by process handle table lock. */
+	struct list_head	pqueue_list_head;
+	struct list_head	syncobj_list_head;
+	d3dkmt_handle		handle;
+	uint			handle_valid;
+};
+
+struct dxgdevice *dxgdevice_create(struct dxgadapter *a, struct dxgprocess *p);
+void dxgdevice_destroy(struct dxgdevice *device);
+void dxgdevice_stop(struct dxgdevice *device);
+int dxgdevice_acquire_lock_shared(struct dxgdevice *dev);
+void dxgdevice_release_lock_shared(struct dxgdevice *dev);
+bool dxgdevice_acquire_reference(struct dxgdevice *dev);
+void dxgdevice_release_reference(struct dxgdevice *dev);
+void dxgdevice_add_context(struct dxgdevice *dev, struct dxgcontext *ctx);
+void dxgdevice_remove_context(struct dxgdevice *dev, struct dxgcontext *ctx);
+void dxgdevice_add_alloc(struct dxgdevice *dev, struct dxgallocation *a);
+void dxgdevice_remove_alloc(struct dxgdevice *dev, struct dxgallocation *a);
+void dxgdevice_remove_alloc_safe(struct dxgdevice *dev,
+				 struct dxgallocation *a);
+void dxgdevice_add_resource(struct dxgdevice *dev, struct dxgresource *res);
+void dxgdevice_remove_resource(struct dxgdevice *dev, struct dxgresource *res);
+void dxgdevice_add_paging_queue(struct dxgdevice *dev,
+				struct dxgpagingqueue *pqueue);
+void dxgdevice_remove_paging_queue(struct dxgpagingqueue *pqueue);
+void dxgdevice_add_syncobj(struct dxgdevice *dev, struct dxgsyncobject *so);
+void dxgdevice_remove_syncobj(struct dxgsyncobject *so);
+bool dxgdevice_is_active(struct dxgdevice *dev);
+void dxgdevice_acquire_context_list_lock(struct dxgdevice *dev);
+void dxgdevice_release_context_list_lock(struct dxgdevice *dev);
+void dxgdevice_acquire_alloc_list_lock(struct dxgdevice *dev);
+void dxgdevice_release_alloc_list_lock(struct dxgdevice *dev);
+void dxgdevice_acquire_alloc_list_lock_shared(struct dxgdevice *dev);
+void dxgdevice_release_alloc_list_lock_shared(struct dxgdevice *dev);
+
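+/*
+ * Illustrative sketch (not part of the driver): the device lock protects
+ * the device against concurrent destruction, so callers must handle
+ * removal:
+ *
+ *	if (dxgdevice_acquire_lock_shared(device) == 0) {
+ *		...the device is guaranteed to stay active here...
+ *		dxgdevice_release_lock_shared(device);
+ *	}
+ */
+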
+/*
+ * This object represents an execution context of a device.
+ */
+struct dxgcontext {
+	enum dxgobjectstate	object_state;
+	struct dxgdevice	*device;
+	struct dxgprocess	*process;
+	/* entry in the device context list */
+	struct list_head	context_list_entry;
+	struct list_head	hwqueue_list_head;
+	struct rw_semaphore	hwqueue_list_lock;
+	refcount_t		refcount;
+	d3dkmt_handle		handle;
+	d3dkmt_handle		device_handle;
+};
+
+struct dxgcontext *dxgcontext_create(struct dxgdevice *dev);
+void dxgcontext_destroy(struct dxgprocess *pr, struct dxgcontext *ctx);
+void dxgcontext_destroy_safe(struct dxgprocess *pr, struct dxgcontext *ctx);
+bool dxgcontext_acquire_reference(struct dxgcontext *ctx);
+void dxgcontext_release_reference(struct dxgcontext *ctx);
+int dxgcontext_add_hwqueue(struct dxgcontext *ctx, struct dxghwqueue *hq);
+void dxgcontext_remove_hwqueue(struct dxgcontext *ctx, struct dxghwqueue *hq);
+void dxgcontext_remove_hwqueue_safe(struct dxgcontext *ctx,
+				    struct dxghwqueue *hq);
+bool dxgcontext_is_active(struct dxgcontext *ctx);
+
+/*
+ * This object represents a hardware execution queue of a device.
+ */
+struct dxghwqueue {
+	/* entry in the context hw queue list */
+	struct list_head	hwqueue_list_entry;
+	refcount_t		refcount;
+	struct dxgcontext	*context;
+	struct dxgprocess	*process;
+	d3dkmt_handle		progress_fence_sync_object;
+	d3dkmt_handle		handle;
+	d3dkmt_handle		device_handle;
+	void			*progress_fence_mapped_address;
+};
+
+struct dxghwqueue *dxghwqueue_create(struct dxgcontext *ctx);
+void dxghwqueue_destroy(struct dxgprocess *pr, struct dxghwqueue *hq);
+bool dxghwqueue_acquire_reference(struct dxghwqueue *hq);
+void dxghwqueue_release_reference(struct dxghwqueue *hq);
+
+/*
+ * A shared resource object is created to track the list of dxgresource
+ * objects, which are opened for the same underlying shared resource.
+ * There are two types of resource sharing:
+ * - sharing by using a global handle (nt_security is false).
+ *   The global handle is a handle in the handle table of dxgglobal. It
+ *   points to a dxgsharedresource object. dxgk_open_resource() creates a
+ *   dxgresource object using dxgsharedresource.
+ * - sharing by using a file descriptor handle (nt_security is true).
+ *   The FD is created by calling dxgk_share_objects and providing the
+ *   handle of the dxgsharedresource. The FD points to a dxgresource
+ *   object, which is created by calling dxgk_open_resource_nt. The
+ *   dxgresource object is referenced by the FD.
+ *
+ * The object is referenced by every dxgresource in its list.
+ */
+struct dxgsharedresource {
+	/* Every dxgresource object in the resource list takes a reference */
+	refcount_t		refcount;
+	struct dxgadapter	*adapter;
+	/* List of dxgresource objects, opened for the shared resource. */
+	/* Protected by dxgadapter::shared_resource_list_lock */
+	struct list_head	resource_list_head;
+	/* Entry in the list of dxgsharedresource in dxgadapter */
+	/* Protected by dxgadapter::shared_resource_list_lock */
+	struct list_head	shared_resource_list_entry;
+	struct dxgmutex		fd_mutex;
+	/* Referenced by file descriptors */
+	int			host_shared_handle_nt_reference;
+	/* Handle in the dxgglobal handle table, when nt_security is not used */
+	d3dkmt_handle		global_handle;
+	/* Corresponding global handle in the host */
+	d3dkmt_handle		host_shared_handle;
+	/*
+	 * When the sync object is shared by NT handle, this is the
+	 * corresponding handle in the host
+	 */
+	d3dkmt_handle		host_shared_handle_nt;
+	/* Values below are computed when the resource is sealed */
+	uint			runtime_private_data_size;
+	uint			alloc_private_data_size;
+	uint			resource_private_data_size;
+	uint			allocation_count;
+	union {
+		struct {
+			/* Referenced by file descriptor */
+			uint	nt_security:1;
+			/* Cannot add new allocations */
+			uint	sealed:1;
+			uint	reserved:30;
+		};
+		long		flags;
+	};
+	uint			*alloc_private_data_sizes;
+	uint8_t			*alloc_private_data;
+	uint8_t			*runtime_private_data;
+	uint8_t			*resource_private_data;
+};
+
+struct dxgsharedresource *dxgsharedresource_create(struct dxgadapter *adapter);
+bool dxgsharedresource_acquire_reference(struct dxgsharedresource *res);
+void dxgsharedresource_release_reference(struct dxgsharedresource *res);
+void dxgsharedresource_add_resource(struct dxgsharedresource *sres,
+				    struct dxgresource *res);
+void dxgsharedresource_remove_resource(struct dxgsharedresource *sres,
+				       struct dxgresource *res);
+
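+/*
+ * Illustrative sketch (not part of the driver): pairing the helpers above
+ * when a dxgresource is attached to and later detached from a shared
+ * resource:
+ *
+ *	struct dxgsharedresource *shared = dxgsharedresource_create(adapter);
+ *
+ *	if (shared) {
+ *		dxgsharedresource_add_resource(shared, resource);
+ *		...
+ *		dxgsharedresource_remove_resource(shared, resource);
+ *		dxgsharedresource_release_reference(shared);
+ *	}
+ */
+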
+struct dxgresource {
+	refcount_t		refcount;
+	enum dxgobjectstate	object_state;
+	d3dkmt_handle		handle;
+	struct list_head	alloc_list_head;
+	struct list_head	resource_list_entry;
+	struct list_head	shared_resource_list_entry;
+	struct dxgdevice	*device;
+	struct dxgprocess	*process;
+	/* Protects adding allocations to resource and resource destruction */
+	struct dxgmutex		resource_mutex;
+	winhandle		private_runtime_handle;
+	union {
+		struct {
+			uint	destroyed:1;	/* Must be the first */
+			uint	handle_valid:1;
+			uint	reserved:30;
+		};
+		long		flags;
+	};
+	/* Owner of the shared resource */
+	struct dxgsharedresource *shared_owner;
+};
+
+struct dxgresource *dxgresource_create(struct dxgdevice *dev);
+void dxgresource_destroy(struct dxgresource *res);
+void dxgresource_free_handle(struct dxgresource *res);
+void dxgresource_acquire_reference(struct dxgresource *res);
+void dxgresource_release_reference(struct dxgresource *res);
+int dxgresource_add_alloc(struct dxgresource *res, struct dxgallocation *a);
+void dxgresource_remove_alloc(struct dxgresource *res, struct dxgallocation *a);
+void dxgresource_remove_alloc_safe(struct dxgresource *res,
+				   struct dxgallocation *a);
+bool dxgresource_is_active(struct dxgresource *res);
+
+struct privdata {
+	uint data_size;
+	uint8_t data[1];
+};
+
+struct dxgallocation {
+	/* Entry in the device list or resource list (when resource exists) */
+	struct list_head		alloc_list_entry;
+	/* Allocation owner */
+	union {
+		struct dxgdevice	*device;
+		struct dxgresource	*resource;
+	} owner;
+	struct dxgprocess		*process;
+	/* Pointer to private driver data desc. Used for shared resources */
+	struct privdata			*priv_drv_data;
+	d3dkmt_handle			alloc_handle;
+	/* Set to 1 when allocation belongs to resource. */
+	uint				resource_owner:1;
+	/* Set to 1 when 'cpu_address' is mapped to the IO space. */
+	uint				cpu_address_mapped:1;
+	/* Set to 1 when the allocation is mapped as cached */
+	uint				cached:1;
+	uint				handle_valid:1;
+	/* GPADL address list for existing sysmem allocations */
+	uint				gpadl;
+	/* Number of pages in the 'pages' array */
+	uint				num_pages;
+	/*
+	 * Number of times dxgk_lock2 was called for the allocation while it
+	 * is mapped to the IO space.
+	 */
+	uint				cpu_address_refcount;
+	/*
+	 * CPU address from the existing sysmem allocation, or
+	 * mapped to the CPU visible backing store in the IO space
+	 */
+	void				*cpu_address;
+	/* Describes pages for the existing sysmem allocation */
+	struct page			**pages;
+};
+
+struct dxgallocation *dxgallocation_create(struct dxgprocess *process);
+void dxgallocation_stop(struct dxgallocation *a);
+void dxgallocation_destroy(struct dxgallocation *a);
+void dxgallocation_free_handle(struct dxgallocation *a);
+
+void ioctl_desc_init(void);
+long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2);
+long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2);
+
+int dxg_unmap_iospace(void *va, uint size);
+int dxg_copy_from_user(void *to, const void __user *from, unsigned long len);
+int dxg_copy_to_user(void __user *to, const void *from, unsigned long len);
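+/* A Windows LUID occupies the first 8 bytes of the instance GUID */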
+static inline void guid_to_luid(guid_t *guid, struct winluid *luid)
+{
+	*luid = *(struct winluid *)&guid->b[0];
+}
+
+/*
+ * VM bus interface
+ */
+int dxgvmb_send_set_iospace_region(u64 start, u64 len, u32 shared_mem_gpadl);
+int dxgvmb_send_create_process(struct dxgprocess *process);
+int dxgvmb_send_destroy_process(d3dkmt_handle process);
+int dxgvmb_send_open_adapter(struct dxgadapter *adapter);
+int dxgvmb_send_close_adapter(struct dxgadapter *adapter);
+int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter);
+d3dkmt_handle dxgvmb_send_create_device(struct dxgadapter *adapter,
+					struct dxgprocess *process,
+					struct d3dkmt_createdevice *args);
+int dxgvmb_send_destroy_device(struct dxgadapter *adapter,
+			       struct dxgprocess *process, d3dkmt_handle h);
+d3dkmt_handle dxgvmb_send_create_context(struct dxgadapter *adapter,
+					 struct dxgprocess *process,
+					 struct d3dkmt_createcontextvirtual
+					 *args);
+int dxgvmb_send_destroy_context(struct dxgadapter *adapter,
+				struct dxgprocess *process, d3dkmt_handle h);
+int dxgvmb_send_create_paging_queue(struct dxgprocess *pr,
+				    struct dxgvmbuschannel *ch,
+				    struct dxgdevice *dev,
+				    struct d3dkmt_createpagingqueue *args,
+				    struct dxgpagingqueue *pq);
+int dxgvmb_send_destroy_paging_queue(struct dxgprocess *process,
+				     struct dxgvmbuschannel *ch,
+				     d3dkmt_handle h);
+int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev,
+				  struct d3dkmt_createallocation *args,
+				  struct d3dkmt_createallocation *__user
+				  input_args, struct dxgresource *res,
+				  struct dxgallocation **allocs,
+				  struct d3dddi_allocationinfo2 *alloc_info,
+				  struct d3dkmt_createstandardallocation
+				  *standard_alloc);
+int dxgvmb_send_destroy_allocation(struct dxgprocess *pr, struct dxgdevice *dev,
+				   struct dxgvmbuschannel *ch,
+				   struct d3dkmt_destroyallocation2 *args,
+				   d3dkmt_handle *alloc_handles);
+int dxgvmb_send_make_resident(struct dxgprocess *pr, struct dxgdevice *dev,
+			      struct dxgvmbuschannel *ch,
+			      struct d3dddi_makeresident *args);
+int dxgvmb_send_evict(struct dxgprocess *pr, struct dxgvmbuschannel *ch,
+		      struct d3dkmt_evict *args);
+int dxgvmb_send_submit_command(struct dxgprocess *pr,
+			       struct dxgvmbuschannel *ch,
+			       struct d3dkmt_submitcommand *args);
+int dxgvmb_send_map_gpu_va(struct dxgprocess *pr, d3dkmt_handle h,
+			   struct dxgvmbuschannel *ch,
+			   struct d3dddi_mapgpuvirtualaddress *args);
+int dxgvmb_send_reserve_gpu_va(struct dxgprocess *pr,
+			       struct dxgvmbuschannel *ch,
+			       struct d3dddi_reservegpuvirtualaddress *args);
+int dxgvmb_send_free_gpu_va(struct dxgprocess *pr, struct dxgvmbuschannel *ch,
+			    struct d3dkmt_freegpuvirtualaddress *args);
+int dxgvmb_send_update_gpu_va(struct dxgprocess *pr, struct dxgvmbuschannel *ch,
+			      struct d3dkmt_updategpuvirtualaddress *args);
+int dxgvmb_send_create_sync_object(struct dxgprocess *pr,
+				   struct dxgvmbuschannel *ch,
+				   struct d3dkmt_createsynchronizationobject2
+				   *args, struct dxgsyncobject *so);
+int dxgvmb_send_destroy_sync_object(struct dxgprocess *pr, d3dkmt_handle h);
+int dxgvmb_send_signal_sync_object(struct dxgprocess *process,
+				   struct dxgvmbuschannel *channel,
+				   struct d3dddicb_signalflags flags,
+				   uint64_t legacy_fence_value,
+				   d3dkmt_handle context,
+				   uint object_count, d3dkmt_handle *object,
+				   uint context_count, d3dkmt_handle *contexts,
+				   uint fence_count, uint64_t *fences,
+				   struct eventfd_ctx *cpu_event,
+				   d3dkmt_handle device);
+int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process,
+				     struct dxgvmbuschannel *channel,
+				     d3dkmt_handle context, uint object_count,
+				     d3dkmt_handle *objects, uint64_t *fences,
+				     bool legacy_fence);
+int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process,
+				     struct dxgvmbuschannel *channel,
+				     struct
+				     d3dkmt_waitforsynchronizationobjectfromcpu
+				     *args, u64 cpu_event);
+int dxgvmb_send_lock2(struct dxgprocess *process,
+		      struct dxgvmbuschannel *channel,
+		      struct d3dkmt_lock2 *args,
+		      struct d3dkmt_lock2 *__user outargs);
+int dxgvmb_send_unlock2(struct dxgprocess *process,
+			struct dxgvmbuschannel *channel,
+			struct d3dkmt_unlock2 *args);
+int dxgvmb_send_update_alloc_property(struct dxgprocess *process,
+				      struct dxgvmbuschannel *channel,
+				      struct d3dddi_updateallocproperty *args,
+				      struct d3dddi_updateallocproperty *__user
+				      inargs);
+int dxgvmb_send_mark_device_as_error(struct dxgprocess *process,
+				     struct dxgvmbuschannel *channel,
+				     struct d3dkmt_markdeviceaserror *args);
+int dxgvmb_send_set_allocation_priority(struct dxgprocess *process,
+					struct dxgvmbuschannel *channel,
+					struct d3dkmt_setallocationpriority
+					*args);
+int dxgvmb_send_get_allocation_priority(struct dxgprocess *process,
+					struct dxgvmbuschannel *channel,
+					struct d3dkmt_getallocationpriority
+					*args);
+int dxgvmb_send_set_context_scheduling_priority(struct dxgprocess *process,
+						struct dxgvmbuschannel *channel,
+						d3dkmt_handle context,
+						int priority, bool in_process);
+int dxgvmb_send_get_context_scheduling_priority(struct dxgprocess *process,
+						struct dxgvmbuschannel *channel,
+						d3dkmt_handle context,
+						int *priority, bool in_process);
+int dxgvmb_send_offer_allocations(struct dxgprocess *process,
+				  struct dxgvmbuschannel *channel,
+				  struct d3dkmt_offerallocations *args);
+int dxgvmb_send_reclaim_allocations(struct dxgprocess *process,
+				    struct dxgvmbuschannel *channel,
+				    d3dkmt_handle device,
+				    struct d3dkmt_reclaimallocations2 *args,
+				    uint64_t * __user paging_fence_value);
+int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process,
+					  struct dxgvmbuschannel *channel,
+					  d3dkmt_handle other_process,
+					  struct
+					  d3dkmt_changevideomemoryreservation
+					  *args);
+int dxgvmb_send_create_hwqueue(struct dxgprocess *process,
+			       struct dxgvmbuschannel *channel,
+			       struct d3dkmt_createhwqueue *args,
+			       struct d3dkmt_createhwqueue *__user inargs,
+			       struct dxghwqueue *hq);
+int dxgvmb_send_destroy_hwqueue(struct dxgprocess *process,
+				struct dxgvmbuschannel *channel,
+				d3dkmt_handle handle);
+int dxgvmb_send_query_adapter_info(struct dxgprocess *process,
+				   struct dxgvmbuschannel *channel,
+				   struct d3dkmt_queryadapterinfo *args);
+int dxgvmb_send_submit_command_to_hwqueue(struct dxgprocess *process,
+					  struct dxgvmbuschannel *channel,
+					  struct d3dkmt_submitcommandtohwqueue
+					  *args);
+int dxgvmb_send_query_clock_calibration(struct dxgprocess *process,
+					struct dxgvmbuschannel *channel,
+					struct d3dkmt_queryclockcalibration
+					*args,
+					struct d3dkmt_queryclockcalibration
+					*__user inargs);
+int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process,
+				       struct dxgvmbuschannel *channel,
+				       struct d3dkmt_flushheaptransitions
+				       *args);
+int dxgvmb_send_open_sync_object(struct dxgprocess *process,
+				 struct dxgvmbuschannel *channel,
+				 d3dkmt_handle h, d3dkmt_handle *ph);
+int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process,
+				    struct dxgvmbuschannel *channel,
+				    struct d3dkmt_opensyncobjectfromnthandle2
+				    *args, struct dxgsyncobject *syncobj);
+int dxgvmb_send_query_alloc_residency(struct dxgprocess *process,
+				      struct dxgvmbuschannel *channel,
+				      struct d3dkmt_queryallocationresidency
+				      *args);
+int dxgvmb_send_escape(struct dxgprocess *process,
+		       struct dxgvmbuschannel *channel,
+		       struct d3dkmt_escape *args);
+int dxgvmb_send_query_vidmem_info(struct dxgprocess *process,
+				  struct dxgvmbuschannel *channel,
+				  struct d3dkmt_queryvideomemoryinfo *args,
+				  struct d3dkmt_queryvideomemoryinfo *__user
+				  inargs);
+int dxgvmb_send_get_device_state(struct dxgprocess *process,
+				 struct dxgvmbuschannel *channel,
+				 struct d3dkmt_getdevicestate *args,
+				 struct d3dkmt_getdevicestate *__user inargs);
+int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process,
+					d3dkmt_handle object,
+					d3dkmt_handle *shared_handle);
+int dxgvmb_send_destroy_nt_shared_object(d3dkmt_handle shared_handle);
+int dxgvmb_send_open_resource(struct dxgprocess *process,
+			      struct dxgvmbuschannel *channel,
+			      d3dkmt_handle device, bool nt_security_sharing,
+			      d3dkmt_handle global_share, uint allocation_count,
+			      uint total_priv_drv_data_size,
+			      d3dkmt_handle *resource_handle,
+			      d3dkmt_handle *alloc_handles);
+int dxgvmb_send_get_standard_alloc_priv_data(struct dxgdevice *device,
+					     enum d3dkmdt_standardallocationtype
+					     alloc_type,
+					     struct d3dkmdt_gdisurfacedata
+					     *alloc_data,
+					     uint physical_adapter_index,
+					     uint *alloc_priv_driver_size,
+					     void *priv_alloc_data);
+
+#endif
diff --git a/drivers/gpu/dxgkrnl/dxgmodule.c b/drivers/gpu/dxgkrnl/dxgmodule.c
new file mode 100644
index 000000000000..cae13d22801a
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/dxgmodule.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ *   Iouri Tarassov <iourit at microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Interface with Linux kernel and the VM bus driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/eventfd.h>
+#include <linux/hyperv.h>
+
+#include "dxgkrnl.h"
+
+struct dxgglobal *dxgglobal;
+struct device *dxgglobaldev;
+
+#define DXGKRNL_VERSION			0x0001
+#define PCI_VENDOR_ID_MICROSOFT		0x1414
+#define PCI_DEVICE_ID_VIRTUAL_RENDER	0x008E
+
+//
+// Interface from dxgglobal
+//
+
+struct vmbus_channel *dxgglobal_get_vmbus(void)
+{
+	return dxgglobal->channel.channel;
+}
+
+struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void)
+{
+	return &dxgglobal->channel;
+}
+
+int dxgglobal_acquire_channel_lock(void)
+{
+	dxglockorder_acquire(DXGLOCK_GLOBAL_CHANNEL);
+	down_read(&dxgglobal->channel_lock);
+	if (dxgglobal->channel.channel == NULL) {
+		pr_err("The global channel is not initialized");
+		up_read(&dxgglobal->channel_lock);
+		dxglockorder_release(DXGLOCK_GLOBAL_CHANNEL);
+		return STATUS_DEVICE_REMOVED;
+	}
+	return 0;
+}
+
+void dxgglobal_release_channel_lock(void)
+{
+	up_read(&dxgglobal->channel_lock);
+	dxglockorder_release(DXGLOCK_GLOBAL_CHANNEL);
+}
+
+void dxgglobal_acquire_adapter_list_lock(enum dxglockstate state)
+{
+	TRACE_DEBUG(1, "%s", __func__);
+	dxglockorder_acquire(DXGLOCK_GLOBAL_ADAPTERLIST);
+	if (state == DXGLOCK_EXCL)
+		down_write(&dxgglobal->adapter_list_lock);
+	else
+		down_read(&dxgglobal->adapter_list_lock);
+}
+
+void dxgglobal_release_adapter_list_lock(enum dxglockstate state)
+{
+	TRACE_DEBUG(1, "%s", __func__);
+	if (state == DXGLOCK_EXCL)
+		up_write(&dxgglobal->adapter_list_lock);
+	else
+		up_read(&dxgglobal->adapter_list_lock);
+	dxglockorder_release(DXGLOCK_GLOBAL_ADAPTERLIST);
+}
+
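+/*
+ * Host events wrap a completion or an eventfd context, keyed by event_id.
+ * The host signals them with DXGK_VMBCOMMAND_SIGNALGUESTEVENT messages.
+ * The list is protected by a spinlock because it is also walked from the
+ * VM bus channel callback (dxgglobal_signal_host_event).
+ */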
+void dxgglobal_add_host_event(struct dxghostevent *event)
+{
+	spin_lock_irq(&dxgglobal->host_event_list_mutex);
+	list_add_tail(&event->host_event_list_entry,
+		      &dxgglobal->host_event_list_head);
+	spin_unlock_irq(&dxgglobal->host_event_list_mutex);
+}
+
+void dxgglobal_remove_host_event(struct dxghostevent *event)
+{
+	spin_lock_irq(&dxgglobal->host_event_list_mutex);
+	if (event->host_event_list_entry.next != NULL) {
+		list_del(&event->host_event_list_entry);
+		event->host_event_list_entry.next = NULL;
+	}
+	spin_unlock_irq(&dxgglobal->host_event_list_mutex);
+}
+
+void dxgglobal_signal_host_event(u64 event_id)
+{
+	struct dxghostevent *event;
+	unsigned long flags;
+
+	TRACE_DEBUG(1, "%s %lld\n", __func__, event_id);
+
+	spin_lock_irqsave(&dxgglobal->host_event_list_mutex, flags);
+	list_for_each_entry(event, &dxgglobal->host_event_list_head,
+			    host_event_list_entry) {
+		if (event->event_id == event_id) {
+			TRACE_DEBUG(1, "found event to signal %lld\n",
+				    event_id);
+			if (event->remove_from_list ||
+			    event->destroy_after_signal) {
+				list_del(&event->host_event_list_entry);
+				event->host_event_list_entry.next = NULL;
+				TRACE_DEBUG(1, "removing event from list\n");
+			}
+			if (event->cpu_event) {
+				TRACE_DEBUG(1, "signal cpu event\n");
+				eventfd_signal(event->cpu_event, 1);
+				if (event->destroy_after_signal)
+					eventfd_ctx_put(event->cpu_event);
+			} else {
+				TRACE_DEBUG(1, "signal completion\n");
+				complete(event->completion_event);
+			}
+			if (event->destroy_after_signal) {
+				TRACE_DEBUG(1, "destroying event %p\n", event);
+				dxgmem_free(event->process,
+					    DXGMEM_EVENT, event);
+			}
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dxgglobal->host_event_list_mutex, flags);
+	TRACE_DEBUG(1, "dxgglobal_signal_host_event_end %lld\n", event_id);
+}
+
+struct dxghostevent *dxgglobal_get_host_event(u64 event_id)
+{
+	struct dxghostevent *entry;
+	struct dxghostevent *event = NULL;
+
+	spin_lock_irq(&dxgglobal->host_event_list_mutex);
+	list_for_each_entry(entry, &dxgglobal->host_event_list_head,
+			    host_event_list_entry) {
+		if (entry->event_id == event_id) {
+			list_del(&entry->host_event_list_entry);
+			entry->host_event_list_entry.next = NULL;
+			event = entry;
+			break;
+		}
+	}
+	spin_unlock_irq(&dxgglobal->host_event_list_mutex);
+	return event;
+}
+
+u64 dxgglobal_new_host_event_id(void)
+{
+	return atomic64_inc_return(&dxgglobal->host_event_id);
+}
+
+void dxgglobal_acquire_process_adapter_lock(void)
+{
+	dxgmutex_lock(&dxgglobal->process_adapter_mutex);
+}
+
+void dxgglobal_release_process_adapter_lock(void)
+{
+	dxgmutex_unlock(&dxgglobal->process_adapter_mutex);
+}
+
+/*
+ * File operations
+ */
+
+static struct dxgprocess *dxgglobal_get_current_process(void)
+{
+	/*
+	 * Find the DXG process for the current process.
+	 * A new process is created if necessary.
+	 */
+	struct dxgprocess *process = NULL;
+	struct dxgprocess *entry = NULL;
+
+	dxgmutex_lock(&dxgglobal->plistmutex);
+	list_for_each_entry(entry, &dxgglobal->plisthead, plistentry) {
+		/* All threads of a process have the same thread group ID */
+		if (entry->process->tgid == current->tgid) {
+			entry->refcount++;
+			process = entry;
+			TRACE_DEBUG(1, "found dxgprocess entry\n");
+			break;
+		}
+	}
+	dxgmutex_unlock(&dxgglobal->plistmutex);
+
+	if (process == NULL)
+		process = dxgprocess_create();
+
+	return process;
+}
+
+static int dxgk_open(struct inode *n, struct file *f)
+{
+	int ret = 0;
+	struct dxgprocess *process;
+	struct dxgthreadinfo *thread;
+
+	TRACE_DEBUG2(1, 0, "%s %p %d %d",
+		     __func__, f, current->pid, current->tgid);
+
+	thread = dxglockorder_get_thread();
+
+	/* Find/create a dxgprocess structure for this process */
+	process = dxgglobal_get_current_process();
+
+	if (process) {
+		f->private_data = process;
+	} else {
+		TRACE_DEBUG(1, "cannot create dxgprocess for open\n");
+		ret = -EBADF;
+	}
+
+	dxglockorder_put_thread(thread);
+	TRACE_DEBUG2(1, 0, "%s end %x", __func__, ret);
+	return ret;
+}
+
+static int dxgk_release(struct inode *n, struct file *f)
+{
+	struct dxgthreadinfo *thread;
+	struct dxgprocess *process;
+
+	process = (struct dxgprocess *)f->private_data;
+	TRACE_DEBUG2(1, 0, "%s %p, %p", __func__, f, process);
+
+	if (process == NULL)
+		return -EINVAL;
+
+	thread = dxglockorder_get_thread();
+
+	dxgprocess_release_reference(process);
+
+	dxglockorder_check_empty(thread);
+	dxglockorder_put_thread(thread);
+
+	f->private_data = NULL;
+	TRACE_DEBUG2(1, 0, "%s end", __func__);
+	return 0;
+}
+
+static ssize_t dxgk_read(struct file *f, char __user *s, size_t len,
+			 loff_t *o)
+{
+	TRACE_DEBUG(1, "file read\n");
+	return 0;
+}
+
+static ssize_t dxgk_write(struct file *f, const char __user *s, size_t len,
+			  loff_t *o)
+{
+	TRACE_DEBUG(1, "file write\n");
+	return len;
+}
+
+const struct file_operations dxgk_fops = {
+	.owner = THIS_MODULE,
+	.open = dxgk_open,
+	.release = dxgk_release,
+	.compat_ioctl = dxgk_compat_ioctl,
+	.unlocked_ioctl = dxgk_unlocked_ioctl,
+	.write = dxgk_write,
+	.read = dxgk_read,
+};
+
+/*
+ * Interface with the VM bus driver
+ */
+
+static int dxgglobal_getiospace(struct dxgglobal *dxgglobal)
+{
+	/* Get mmio space for the global channel */
+	struct hv_device *hdev = dxgglobal->hdev;
+	struct vmbus_channel *channel = hdev->channel;
+	resource_size_t pot_start = 0;
+	resource_size_t pot_end = -1;
+	int ret;
+
+	dxgglobal->mmiospace_size = channel->offermsg.offer.mmio_megabytes;
+	if (dxgglobal->mmiospace_size == 0) {
+		TRACE_DEBUG(1, "zero mmio space is offered\n");
+		return STATUS_NO_MEMORY;
+	}
+	dxgglobal->mmiospace_size <<= 20;
+	TRACE_DEBUG(1, "mmio offered: %llx\n", dxgglobal->mmiospace_size);
+
+	ret = vmbus_allocate_mmio(&dxgglobal->mem, hdev, pot_start, pot_end,
+				  dxgglobal->mmiospace_size, 0x10000, false);
+	if (ret) {
+		pr_err("Unable to allocate mmio memory: %d\n", ret);
+		return ret;
+	}
+	dxgglobal->mmiospace_size = dxgglobal->mem->end -
+	    dxgglobal->mem->start + 1;
+	dxgglobal->mmiospace_base = dxgglobal->mem->start;
+	TRACE_DEBUG(1, "mmio allocated %llx  %llx %llx %llx\n",
+		    dxgglobal->mmiospace_base,
+		    dxgglobal->mmiospace_size,
+		    dxgglobal->mem->start, dxgglobal->mem->end);
+
+	return 0;
+}
+
+static char *dxg_devnode(struct device *dev, umode_t *mode)
+{
+	/* Set read/write permissions to the DXG device */
+	if (mode)
+		*mode = 0666;
+	return NULL;
+}
+
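+/*
+ * Initializes the global channel: opens the VM bus channel, claims the
+ * MMIO region offered by the host, reports the region back to the host
+ * and creates the /dev/dxg character device for user mode.
+ */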
+static int dxgglobal_init_global_channel(struct hv_device *hdev)
+{
+	int ret = 0;
+
+	TRACE_DEBUG(1, "%s %x  %x", __func__, hdev->vendor_id, hdev->device_id);
+	{
+		TRACE_DEBUG(1, "device type   : %pUb\n", &hdev->dev_type);
+		TRACE_DEBUG(1, "device channel: %pUb %p primary: %p\n",
+			    &hdev->channel->offermsg.offer.if_type,
+			    hdev->channel, hdev->channel->primary_channel);
+	}
+
+	if (dxgglobal->hdev) {
+		/* This device should appear only once */
+		pr_err("dxgglobal already initialized\n");
+		ret = -EBADE;
+		goto error;
+	}
+
+	dxgglobal->hdev = hdev;
+
+	ret = dxgvmbuschannel_init(&dxgglobal->channel, hdev);
+	if (ret) {
+		pr_err("dxgvmbuschannel_init failed: %d\n", ret);
+		goto error;
+	}
+
+	ret = dxgglobal_getiospace(dxgglobal);
+	if (ret) {
+		pr_err("getiospace failed: %d\n", ret);
+		goto error;
+	}
+
+	ret = dxgvmb_send_set_iospace_region(dxgglobal->mmiospace_base,
+					     dxgglobal->mmiospace_size, 0);
+	if (ret) {
+		pr_err("send_set_iospace_region failed\n");
+		goto error;
+	}
+
+	hv_set_drvdata(hdev, dxgglobal);
+
+	if (alloc_chrdev_region(&dxgglobal->device_devt, 0, 1, "dxgkrnl") < 0) {
+		pr_err("alloc_chrdev_region failed\n");
+		ret = -ENODEV;
+		goto error;
+	}
+	dxgglobal->devt_initialized = true;
+	dxgglobal->device_class = class_create(THIS_MODULE, "dxgkdrv");
+	if (IS_ERR(dxgglobal->device_class)) {
+		pr_err("class_create failed\n");
+		ret = PTR_ERR(dxgglobal->device_class);
+		dxgglobal->device_class = NULL;
+		goto error;
+	}
+	dxgglobal->device_class->devnode = dxg_devnode;
+	dxgglobal->device = device_create(dxgglobal->device_class, NULL,
+					  dxgglobal->device_devt, NULL, "dxg");
+	if (IS_ERR(dxgglobal->device)) {
+		pr_err("device_create failed\n");
+		ret = PTR_ERR(dxgglobal->device);
+		dxgglobal->device = NULL;
+		goto error;
+	}
+	dxgglobaldev = dxgglobal->device;
+	cdev_init(&dxgglobal->device_cdev, &dxgk_fops);
+	ret = cdev_add(&dxgglobal->device_cdev, dxgglobal->device_devt, 1);
+	if (ret < 0) {
+		pr_err("cdev_add failed: %d\n", ret);
+		goto error;
+	}
+	dxgglobal->cdev_initialized = true;
+
+error:
+	return ret;
+}
+
+static void dxgglobal_destroy_global_channel(void)
+{
+	dxglockorder_acquire(DXGLOCK_GLOBAL_CHANNEL);
+	down_write(&dxgglobal->channel_lock);
+
+	TRACE_DEBUG(1, "%s", __func__);
+
+	if (dxgglobal->cdev_initialized) {
+		cdev_del(&dxgglobal->device_cdev);
+		dxgglobal->cdev_initialized = false;
+	}
+	if (dxgglobal->device) {
+		device_destroy(dxgglobal->device_class, dxgglobal->device_devt);
+		dxgglobal->device = NULL;
+		dxgglobaldev = NULL;
+	}
+	if (dxgglobal->device_class) {
+		class_destroy(dxgglobal->device_class);
+		dxgglobal->device_class = NULL;
+	}
+	if (dxgglobal->devt_initialized) {
+		unregister_chrdev_region(dxgglobal->device_devt, 1);
+		dxgglobal->devt_initialized = false;
+	}
+	if (dxgglobal->mem) {
+		vmbus_free_mmio(dxgglobal->mmiospace_base,
+				dxgglobal->mmiospace_size);
+		dxgglobal->mem = NULL;
+	}
+
+	dxgvmbuschannel_destroy(&dxgglobal->channel);
+
+	if (dxgglobal->hdev) {
+		hv_set_drvdata(dxgglobal->hdev, NULL);
+		dxgglobal->hdev = NULL;
+	}
+
+	TRACE_DEBUG(1, "%s end\n", __func__);
+
+	up_write(&dxgglobal->channel_lock);
+	dxglockorder_release(DXGLOCK_GLOBAL_CHANNEL);
+}
+
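+/*
+ * Called when a per-vGPU VM bus channel is offered by the host. Creates
+ * a dxgadapter object for the channel and adds it to the global adapter
+ * list.
+ */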
+static int dxgglobal_create_adapter(struct hv_device *hdev)
+{
+	struct dxgadapter *adapter;
+	int ret;
+
+	TRACE_DEBUG(1, "%s", __func__);
+
+	adapter = dxgmem_alloc(NULL, DXGMEM_ADAPTER, sizeof(struct dxgadapter));
+	if (adapter == NULL) {
+		pr_err("failed to allocate dxgadapter\n");
+		return STATUS_NO_MEMORY;
+	}
+
+	ret = dxgadapter_init(adapter, hdev);
+	if (ret) {
+		dxgadapter_stop(adapter);
+		dxgadapter_release_reference(adapter);
+	} else {
+		dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL);
+
+		TRACE_DEBUG(1, "new adapter added %p\n", adapter);
+
+		list_add_tail(&adapter->adapter_list_entry,
+			      &dxgglobal->adapter_list_head);
+		dxgglobal->num_adapters++;
+
+		dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL);
+	}
+
+	TRACE_DEBUG(1, "%s end: %d\n", __func__, ret);
+	return ret;
+}
+
+static void dxgglobal_stop_adapter(struct hv_device *hdev)
+{
+	struct dxgadapter *adapter = NULL;
+	struct dxgadapter *entry;
+	struct winluid luid;
+
+	guid_to_luid(&hdev->channel->offermsg.offer.if_instance, &luid);
+
+	TRACE_DEBUG(1, "%s: %x:%x\n", __func__, luid.b, luid.a);
+
+	dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL);
+
+	list_for_each_entry(entry, &dxgglobal->adapter_list_head,
+			    adapter_list_entry) {
+		if (*(u64 *) &luid == *(u64 *) &entry->luid) {
+			adapter = entry;
+			break;
+		}
+	}
+
+	if (adapter)
+		list_del(&adapter->adapter_list_entry);
+
+	dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL);
+
+	if (adapter) {
+		dxgadapter_stop(adapter);
+		dxgadapter_release_reference(adapter);
+	} else {
+		pr_err("Adapter not found\n");
+	}
+
+	TRACE_DEBUG(1, "%s end", __func__);
+}
+
+static const struct hv_vmbus_device_id id_table[] = {
+	/* Per GPU Device GUID */
+	{ HV_GPUP_DXGK_VGPU_GUID },
+	/* Global Dxgkrnl channel for the virtual machine */
+	{ HV_GPUP_DXGK_GLOBAL_GUID },
+	{ }
+};
+
+static int dxg_probe_device(struct hv_device *hdev,
+			    const struct hv_vmbus_device_id *dev_id)
+{
+	int ret = 0;
+	struct dxgthreadinfo *thread = dxglockorder_get_thread();
+
+	dxgmutex_lock(&dxgglobal->device_mutex);
+
+	TRACE_DEBUG(1, "probe_device\n");
+
+	if (uuid_le_cmp(hdev->dev_type, id_table[0].guid) == 0) {
+		/* This is a new virtual GPU channel */
+		ret = dxgglobal_create_adapter(hdev);
+	} else if (uuid_le_cmp(hdev->dev_type, id_table[1].guid) == 0) {
+		/* This is the global Dxgkrnl channel */
+		ret = dxgglobal_init_global_channel(hdev);
+		if (ret) {
+			dxgglobal_destroy_global_channel();
+			goto error;
+		}
+	} else {
+		/* Unknown device type */
+		pr_err("probe: unknown device type\n");
+		ret = -EBADE;
+		goto error;
+	}
+
+error:
+
+	TRACE_DEBUG(1, "probe_device end\n");
+
+	dxgmutex_unlock(&dxgglobal->device_mutex);
+
+	dxglockorder_put_thread(thread);
+
+	return ret;
+}
+
+static int dxg_remove_device(struct hv_device *hdev)
+{
+	int ret = 0;
+	struct dxgthreadinfo *thread;
+
+	TRACE_DEBUG(1, "%s\n", __func__);
+
+	thread = dxglockorder_get_thread();
+	dxgmutex_lock(&dxgglobal->device_mutex);
+
+	if (uuid_le_cmp(hdev->dev_type, id_table[0].guid) == 0) {
+		TRACE_DEBUG(1, "Remove virtual GPU\n");
+		dxgglobal_stop_adapter(hdev);
+	} else if (uuid_le_cmp(hdev->dev_type, id_table[1].guid) == 0) {
+		TRACE_DEBUG(1, "Remove global channel device\n");
+		dxgglobal_destroy_global_channel();
+	} else {
+		/* Unknown device type */
+		pr_err("remove: unknown device type\n");
+		ret = -EBADE;
+	}
+
+	dxgmutex_unlock(&dxgglobal->device_mutex);
+	dxglockorder_put_thread(thread);
+	return ret;
+}
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
+static struct hv_driver dxg_drv = {
+	.name = KBUILD_MODNAME,
+	.id_table = id_table,
+	.probe = dxg_probe_device,
+	.remove = dxg_remove_device,
+	.driver = {
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+};
+
+/*
+ * Interface with Linux kernel
+ */
+
+static int dxgglobal_create(void)
+{
+	int ret = 0;
+
+	TRACE_DEBUG(1, "%s", __func__);
+
+	dxgglobal = dxgmem_alloc(NULL, DXGMEM_GLOBAL, sizeof(struct dxgglobal));
+	if (!dxgglobal) {
+		pr_err("no memory for dxgglobal\n");
+		return STATUS_NO_MEMORY;
+	}
+
+	INIT_LIST_HEAD(&dxgglobal->plisthead);
+	dxgmutex_init(&dxgglobal->plistmutex, DXGLOCK_PROCESSLIST);
+	dxgmutex_init(&dxgglobal->device_mutex, DXGLOCK_GLOBAL_DEVICE);
+	dxgmutex_init(&dxgglobal->process_adapter_mutex,
+		      DXGLOCK_PROCESSADAPTER);
+
+	INIT_LIST_HEAD(&dxgglobal->thread_info_list_head);
+	mutex_init(&dxgglobal->thread_info_mutex);
+
+	INIT_LIST_HEAD(&dxgglobal->adapter_list_head);
+	init_rwsem(&dxgglobal->adapter_list_lock);
+
+	init_rwsem(&dxgglobal->channel_lock);
+
+	INIT_LIST_HEAD(&dxgglobal->host_event_list_head);
+	spin_lock_init(&dxgglobal->host_event_list_mutex);
+	atomic64_set(&dxgglobal->host_event_id, 1);
+
+	hmgrtable_init(&dxgglobal->handle_table, NULL);
+
+	TRACE_DEBUG(1, "dxgglobal_init end\n");
+	return ret;
+}
+
+static void dxgglobal_destroy(void)
+{
+	if (dxgglobal) {
+		TRACE_DEBUG(1, "%s\n", __func__);
+
+		if (dxgglobal->vmbus_registered)
+			vmbus_driver_unregister(&dxg_drv);
+
+		dxgglobal_destroy_global_channel();
+		hmgrtable_destroy(&dxgglobal->handle_table);
+
+		dxgmem_free(NULL, DXGMEM_GLOBAL, dxgglobal);
+		dxgglobal = NULL;
+		TRACE_DEBUG(1, "%s end\n", __func__);
+	}
+}
+
+static int __init dxg_drv_init(void)
+{
+	int ret;
+
+	pr_info("%s Version: %x", __func__, DXGKRNL_VERSION);
+
+	ret = dxgglobal_create();
+	if (ret) {
+		pr_err("dxgglobal_init failed");
+		return STATUS_NO_MEMORY;
+	}
+
+	ret = vmbus_driver_register(&dxg_drv);
+	if (ret) {
+		pr_err("vmbus_driver_register failed: %d", ret);
+		return ret;
+	}
+	dxgglobal->vmbus_registered = true;
+
+	ioctl_desc_init();
+
+	return 0;
+}
+
+static void __exit dxg_drv_exit(void)
+{
+	struct dxgthreadinfo *thread;
+
+	TRACE_DEBUG(1, "%s\n", __func__);
+
+	thread = dxglockorder_get_thread();
+	dxgglobal_destroy();
+	thread->lock_held = true;	/* No need to acquire internal locks */
+	dxglockorder_put_thread(thread);
+	dxgmem_check(NULL, DXGMEM_GLOBAL);
+
+	TRACE_DEBUG(1, "%s end\n", __func__);
+}
+
+module_init(dxg_drv_init);
+module_exit(dxg_drv_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microsoft Dxgkrnl virtual GPU Driver");
diff --git a/drivers/gpu/dxgkrnl/dxgprocess.c b/drivers/gpu/dxgkrnl/dxgprocess.c
new file mode 100644
index 000000000000..fae2076631fd
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/dxgprocess.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ *   Iouri Tarassov <iourit at microsoft.com>
+ *
+ * Dxgkrnl Graphics Port Driver
+ * dxgprocess implementation
+ *
+ */
+
+#include "dxgkrnl.h"
+
+/*
+ * Creates a new dxgprocess object and adds it to the global process list.
+ * The caller must not hold dxgglobal->plistmutex; it is acquired here to
+ * insert the new entry into the list.
+ */
+struct dxgprocess *dxgprocess_create(void)
+{
+	struct dxgprocess *process;
+	int ret;
+
+	TRACE_DEBUG(1, "%s", __func__);
+
+	process = dxgmem_alloc(NULL, DXGMEM_PROCESS, sizeof(struct dxgprocess));
+	if (process == NULL) {
+		pr_err("failed to allocate dxgprocess\n");
+	} else {
+		TRACE_DEBUG(1, "new dxgprocess created\n");
+		process->process = current;
+		process->pid = current->pid;
+		process->tgid = current->tgid;
+		dxgmutex_init(&process->process_mutex, DXGLOCK_PROCESSMUTEX);
+		ret = dxgvmb_send_create_process(process);
+		if (ret) {
+			TRACE_DEBUG(1, "dxgvmb_send_create_process failed\n");
+			dxgmem_free(NULL, DXGMEM_PROCESS, process);
+			process = NULL;
+		} else {
+			INIT_LIST_HEAD(&process->plistentry);
+			process->refcount = 1;
+
+			dxgmutex_lock(&dxgglobal->plistmutex);
+			list_add_tail(&process->plistentry,
+				      &dxgglobal->plisthead);
+			dxgmutex_unlock(&dxgglobal->plistmutex);
+
+			hmgrtable_init(&process->handle_table, process);
+			hmgrtable_init(&process->local_handle_table, process);
+			INIT_LIST_HEAD(&process->process_adapter_list_head);
+		}
+	}
+	return process;
+}
+
+void dxgprocess_destroy(struct dxgprocess *process)
+{
+	int i;
+	enum hmgrentry_type t;
+	d3dkmt_handle h;
+	void *o;
+	struct dxgsyncobject *syncobj;
+	struct dxgprocess_adapter *entry;
+	struct dxgprocess_adapter *tmp;
+
+	TRACE_DEBUG(1, "%s", __func__);
+
+	/* Destroy all adapter state */
+	dxgglobal_acquire_process_adapter_lock();
+	list_for_each_entry_safe(entry, tmp,
+				 &process->process_adapter_list_head,
+				 process_adapter_list_entry) {
+		dxgprocess_adapter_destroy(entry);
+	}
+	dxgglobal_release_process_adapter_lock();
+
+	i = 0;
+	while (hmgrtable_next_entry(&process->local_handle_table,
+				    &i, &t, &h, &o)) {
+		switch (t) {
+		case HMGRENTRY_TYPE_DXGADAPTER:
+			dxgprocess_close_adapter(process, h);
+			break;
+		default:
+			pr_err("invalid entry in local handle table %d", t);
+			break;
+		}
+	}
+
+	i = 0;
+	while (hmgrtable_next_entry(&process->handle_table, &i, &t, &h, &o)) {
+		switch (t) {
+		case HMGRENTRY_TYPE_DXGSYNCOBJECT:
+			TRACE_DEBUG(1, "Destroy syncobj: %p %d", o, i);
+			syncobj = o;
+			syncobj->handle = 0;
+			dxgsyncobject_destroy(process, syncobj);
+			break;
+		default:
+			pr_err("invalid entry in handle table %d", t);
+			break;
+		}
+	}
+
+	hmgrtable_destroy(&process->handle_table);
+	hmgrtable_destroy(&process->local_handle_table);
+
+	for (i = 0; i < 2; i++) {
+		if (process->test_handle_table[i]) {
+			hmgrtable_destroy(process->test_handle_table[i]);
+			dxgmem_free(process, DXGMEM_HANDLE_TABLE,
+				    process->test_handle_table[i]);
+			process->test_handle_table[i] = NULL;
+		}
+	}
+
+	TRACE_DEBUG(1, "%s end", __func__);
+}
+
+/*
+ * Releases a reference on the process object. The object is destroyed
+ * and removed from the process list when the count reaches zero.
+ */
+void dxgprocess_release_reference(struct dxgprocess *process)
+{
+	TRACE_DEBUG(1, "%s %d", __func__, process->refcount);
+	dxgmutex_lock(&dxgglobal->plistmutex);
+	process->refcount--;
+	if (process->refcount == 0) {
+		list_del(&process->plistentry);
+		process->plistentry.next = NULL;
+		dxgmutex_unlock(&dxgglobal->plistmutex);
+
+		dxgprocess_destroy(process);
+
+		if (process->host_handle)
+			dxgvmb_send_destroy_process(process->host_handle);
+		dxgmem_check(process, DXGMEM_PROCESS);
+		dxgmem_free(NULL, DXGMEM_PROCESS, process);
+	} else {
+		dxgmutex_unlock(&dxgglobal->plistmutex);
+	}
+}
+
+struct dxgprocess_adapter *dxgprocess_get_adapter_info(struct dxgprocess
+						       *process,
+						       struct dxgadapter
+						       *adapter)
+{
+	struct dxgprocess_adapter *entry;
+
+	list_for_each_entry(entry, &process->process_adapter_list_head,
+			    process_adapter_list_entry) {
+		if (adapter == entry->adapter) {
+			TRACE_DEBUG(1, "found process adapter info %p", entry);
+			return entry;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Dxgprocess takes references on dxgadapter and dxgprocess_adapter.
+ */
+int dxgprocess_open_adapter(struct dxgprocess *process,
+			    struct dxgadapter *adapter, d3dkmt_handle *h)
+{
+	int ret = 0;
+	struct dxgprocess_adapter *adapter_info;
+	d3dkmt_handle handle;
+
+	*h = 0;
+	adapter_info = dxgprocess_get_adapter_info(process, adapter);
+	if (adapter_info == NULL) {
+		TRACE_DEBUG(1, "creating new process adapter info\n");
+		adapter_info = dxgprocess_adapter_create(process, adapter);
+		if (adapter_info == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+	} else {
+		adapter_info->refcount++;
+	}
+
+	handle = hmgrtable_alloc_handle_safe(&process->local_handle_table,
+					     adapter, HMGRENTRY_TYPE_DXGADAPTER,
+					     true);
+	if (handle) {
+		*h = handle;
+	} else {
+		pr_err("failed to create adapter handle\n");
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+cleanup:
+
+	if (ret) {
+		if (adapter_info) {
+			dxgglobal_acquire_process_adapter_lock();
+			dxgprocess_adapter_release(adapter_info);
+			dxgglobal_release_process_adapter_lock();
+		}
+	}
+
+	return ret;
+}
+
+int dxgprocess_close_adapter(struct dxgprocess *process, d3dkmt_handle handle)
+{
+	struct dxgadapter *adapter;
+	struct dxgprocess_adapter *adapter_info;
+	int ret = 0;
+
+	if (handle == 0)
+		return 0;
+
+	hmgrtable_lock(&process->local_handle_table, DXGLOCK_EXCL);
+	adapter = dxgprocess_get_adapter(process, handle);
+	if (adapter)
+		hmgrtable_free_handle(&process->local_handle_table,
+				      HMGRENTRY_TYPE_DXGADAPTER, handle);
+	hmgrtable_unlock(&process->local_handle_table, DXGLOCK_EXCL);
+
+	if (adapter) {
+		adapter_info = dxgprocess_get_adapter_info(process, adapter);
+		if (adapter_info) {
+			dxgglobal_acquire_process_adapter_lock();
+			dxgprocess_adapter_release(adapter_info);
+			dxgglobal_release_process_adapter_lock();
+		} else {
+			ret = STATUS_INVALID_PARAMETER;
+		}
+	} else {
+		pr_err("%s failed %x", __func__, handle);
+		ret = STATUS_INVALID_PARAMETER;
+	}
+
+	return ret;
+}
+
+struct dxgadapter *dxgprocess_get_adapter(struct dxgprocess *process,
+					  d3dkmt_handle handle)
+{
+	struct dxgadapter *adapter;
+
+	adapter = hmgrtable_get_object_by_type(&process->local_handle_table,
+					       HMGRENTRY_TYPE_DXGADAPTER,
+					       handle);
+	if (adapter == NULL)
+		pr_err("%s failed %x\n", __func__, handle);
+	return adapter;
+}
+
+/*
+ * Gets the adapter object from the process handle table.
+ * A reference is taken on the returned adapter object.
+ * The function acquires the handle table lock in shared mode.
+ */
+struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process,
+						d3dkmt_handle handle)
+{
+	struct dxgadapter *adapter;
+
+	hmgrtable_lock(&process->local_handle_table, DXGLOCK_SHARED);
+	adapter = hmgrtable_get_object_by_type(&process->local_handle_table,
+					       HMGRENTRY_TYPE_DXGADAPTER,
+					       handle);
+	if (adapter == NULL)
+		pr_err("adapter_by_handle failed %x\n", handle);
+	else if (!dxgadapter_acquire_reference(adapter)) {
+		pr_err("failed to acquire adapter reference\n");
+		adapter = NULL;
+	}
+	hmgrtable_unlock(&process->local_handle_table, DXGLOCK_SHARED);
+	return adapter;
+}
+
+struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process,
+						     enum hmgrentry_type t,
+						     d3dkmt_handle handle)
+{
+	struct dxgdevice *device = NULL;
+	void *obj;
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+	obj = hmgrtable_get_object_by_type(&process->handle_table, t, handle);
+	if (obj) {
+		d3dkmt_handle device_handle = 0;
+
+		switch (t) {
+		case HMGRENTRY_TYPE_DXGDEVICE:
+			device = obj;
+			break;
+		case HMGRENTRY_TYPE_DXGCONTEXT:
+			device_handle =
+			    ((struct dxgcontext *)obj)->device_handle;
+			break;
+		case HMGRENTRY_TYPE_DXGPAGINGQUEUE:
+			device_handle =
+			    ((struct dxgpagingqueue *)obj)->device_handle;
+			break;
+		case HMGRENTRY_TYPE_DXGHWQUEUE:
+			device_handle =
+			    ((struct dxghwqueue *)obj)->device_handle;
+			break;
+		default:
+			pr_err("invalid handle type: %d\n", t);
+			break;
+		}
+		if (device == NULL)
+			device = hmgrtable_get_object_by_type(
+					&process->handle_table,
+					HMGRENTRY_TYPE_DXGDEVICE,
+					device_handle);
+		if (device && !dxgdevice_acquire_reference(device))
+			device = NULL;
+	}
+	if (device == NULL)
+		pr_err("device_by_handle failed: %d %x\n", t, handle);
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+	return device;
+}
+
+struct dxgdevice *dxgprocess_device_by_handle(struct dxgprocess *process,
+					      d3dkmt_handle handle)
+{
+	return dxgprocess_device_by_object_handle(process,
+						  HMGRENTRY_TYPE_DXGDEVICE,
+						  handle);
+}
+
+void dxgprocess_ht_lock_shared_down(struct dxgprocess *process)
+{
+	hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+}
+
+void dxgprocess_ht_lock_shared_up(struct dxgprocess *process)
+{
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+}
+
+void dxgprocess_ht_lock_exclusive_down(struct dxgprocess *process)
+{
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+}
+
+void dxgprocess_ht_lock_exclusive_up(struct dxgprocess *process)
+{
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+}
diff --git a/drivers/gpu/dxgkrnl/dxgvmbus.c b/drivers/gpu/dxgkrnl/dxgvmbus.c
new file mode 100644
index 000000000000..f903d659c237
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/dxgvmbus.c
@@ -0,0 +1,2955 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ *   Iouri Tarassov <iourit at microsoft.com>
+ *
+ * Dxgkrnl Graphics Port Driver
+ * VM bus interface implementation
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/eventfd.h>
+#include <linux/hyperv.h>
+#include <linux/mman.h>
+
+#include "dxgkrnl.h"
+#include "dxgvmbus.h"
+
+/*
+ * The interface version is used to ensure that the host and the guest use the
+ * same VM bus protocol. It needs to be incremented every time the VM bus
+ * interface changes. DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION is
+ * incremented each time the earlier versions of the interface are no longer
+ * compatible with the current version.
+ */
+const uint DXGK_VMBUS_INTERFACE_VERSION = 27;
+const uint DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION = 16;
+
+#define RING_BUFSIZE (256 * 1024)
+
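+/*
+ * Tracks a single pending request to the host. The sender blocks on
+ * 'wait' until the completion packet with the matching request_id (the
+ * VM bus transaction id) arrives and the result is copied to 'buffer'.
+ */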
+struct dxgvmbuspacket {
+	struct list_head packet_list_entry;
+	u64 request_id;
+	struct completion wait;
+	void *buffer;
+	u32 buffer_length;
+	int status;
+};
+
+int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev)
+{
+	int ret;
+
+	ch->hdev = hdev;
+	spin_lock_init(&ch->packet_list_mutex);
+	INIT_LIST_HEAD(&ch->packet_list_head);
+	atomic64_set(&ch->packet_request_id, 0);
+
+	ch->packet_cache = kmem_cache_create("DXGK packet cache",
+					     sizeof(struct dxgvmbuspacket), 0,
+					     0, NULL);
+	if (ch->packet_cache == NULL) {
+		pr_err("packet_cache alloc failed");
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	ret = vmbus_open(hdev->channel, RING_BUFSIZE, RING_BUFSIZE,
+			 NULL, 0, dxgvmbuschannel_receive, ch);
+	if (ret) {
+		pr_err("vmbus_open failed: %d", ret);
+		goto cleanup;
+	}
+
+	ch->channel = hdev->channel;
+
+cleanup:
+
+	return ret;
+}
+
+void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch)
+{
+	kmem_cache_destroy(ch->packet_cache);
+	ch->packet_cache = NULL;
+
+	if (ch->channel) {
+		vmbus_close(ch->channel);
+		ch->channel = NULL;
+	}
+}
+
+static inline void command_vm_to_host_init0(struct dxgkvmb_command_vm_to_host
+					    *command)
+{
+	command->command_type = DXGK_VMBCOMMAND_INVALID_VM_TO_HOST;
+	command->process = 0;
+	command->command_id = 0;
+	command->channel_type = DXGKVMB_VM_TO_HOST;
+}
+
+static inline void command_vm_to_host_init1(struct dxgkvmb_command_vm_to_host
+					    *command,
+					    enum dxgkvmb_commandtype_global
+					    type)
+{
+	command->command_type = type;
+	command->process = 0;
+	command->command_id = 0;
+	command->channel_type = DXGKVMB_VM_TO_HOST;
+}
+
+void signal_guest_event(struct dxgkvmb_command_host_to_vm *packet,
+			u32 packet_length)
+{
+	struct dxgkvmb_command_signalguestevent *command = (void *)packet;
+
+	TRACE_DEBUG(1, "%s global packet", __func__);
+
+	if (packet_length < sizeof(struct dxgkvmb_command_signalguestevent)) {
+		pr_err("invalid packet size");
+		return;
+	}
+	if (command->event == 0) {
+		pr_err("invalid event pointer");
+		return;
+	}
+	dxgglobal_signal_host_event(command->event);
+}
+
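+/*
+ * Inband (host to guest) messages are expected only on the global channel
+ * (channel->adapter == NULL). Adapter channels carry only completions.
+ */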
+void process_inband_packet(struct dxgvmbuschannel *channel,
+			   struct vmpacket_descriptor *desc)
+{
+	u32 packet_length = hv_pkt_datalen(desc);
+
+	if (channel->adapter == NULL) {
+		if (packet_length < sizeof(struct dxgkvmb_command_host_to_vm)) {
+			pr_err("Invalid global packet");
+		} else {
+			struct dxgkvmb_command_host_to_vm *packet =
+			    hv_pkt_data(desc);
+			TRACE_DEBUG(1, "global packet %d",
+				    packet->command_type);
+			switch (packet->command_type) {
+			case DXGK_VMBCOMMAND_SETGUESTDATA:
+				break;
+			case DXGK_VMBCOMMAND_SIGNALGUESTEVENT:
+			case DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE:
+				signal_guest_event(packet, packet_length);
+				break;
+			case DXGK_VMBCOMMAND_SENDWNFNOTIFICATION:
+				break;
+			default:
+				pr_err("unexpected host message %d",
+					   packet->command_type);
+			}
+		}
+	} else {
+		pr_err("Unexpected packet for adapter channel");
+	}
+}
+
+void process_completion_packet(struct dxgvmbuschannel *channel,
+			       struct vmpacket_descriptor *desc)
+{
+	struct dxgvmbuspacket *packet = NULL;
+	struct dxgvmbuspacket *entry;
+	u32 packet_length = hv_pkt_datalen(desc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&channel->packet_list_mutex, flags);
+	list_for_each_entry(entry, &channel->packet_list_head,
+			    packet_list_entry) {
+		if (desc->trans_id == entry->request_id) {
+			packet = entry;
+			list_del(&packet->packet_list_entry);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&channel->packet_list_mutex, flags);
+
+	if (packet) {
+		if (packet->buffer_length) {
+			if (packet_length < packet->buffer_length) {
+				TRACE_DEBUG(1, "invalid size %d Expected:%d",
+					    packet_length,
+					    packet->buffer_length);
+				packet->status = STATUS_BUFFER_OVERFLOW;
+			} else {
+				memcpy(packet->buffer, hv_pkt_data(desc),
+				       packet->buffer_length);
+			}
+		}
+		complete(&packet->wait);
+	} else {
+		pr_err("did not find packet to complete");
+	}
+}
+
+/* Receive callback for messages from the host */
+void dxgvmbuschannel_receive(void *ctx)
+{
+	struct dxgvmbuschannel *channel = ctx;
+	struct vmpacket_descriptor *desc;
+
+	TRACE_DEBUG(1, "%s %p", __func__, channel->adapter);
+	foreach_vmbus_pkt(desc, channel->channel) {
+		TRACE_DEFINE(u32 packet_length = hv_pkt_datalen(desc);)
+		TRACE_DEBUG(1, "next packet (id, size, type): %llu %d %d",
+			    desc->trans_id, packet_length, desc->type);
+		if (desc->type == VM_PKT_COMP) {
+			process_completion_packet(channel, desc);
+		} else {
+			if (desc->type != VM_PKT_DATA_INBAND)
+				pr_err("unexpected packet type");
+			process_inband_packet(channel, desc);
+		}
+	}
+}
+
+int dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel,
+			 void *command,
+			 u32 cmd_size, void *result, u32 result_size)
+{
+	int ret = 0;
+	unsigned long t;
+	struct dxgvmbuspacket *packet = NULL;
+
+	if (cmd_size > DXG_MAX_VM_BUS_PACKET_SIZE ||
+	    result_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		pr_err("%s invalid data size", __func__);
+		return STATUS_INVALID_PARAMETER;
+	}
+
+	packet = kmem_cache_alloc(channel->packet_cache, GFP_KERNEL);
+	if (packet == NULL) {
+		pr_err("kmem_cache_alloc failed");
+		return STATUS_NO_MEMORY;
+	}
+
+	if (channel->adapter == NULL) {
+		TRACE_DEFINE(struct dxgkvmb_command_vm_to_host *cmd = command;)
+		TRACE_DEBUG(1, "send_sync_msg global: %d %p %d %d",
+			    cmd->command_type, command, cmd_size, result_size);
+	} else {
+		TRACE_DEFINE(struct dxgkvmb_command_vgpu_to_host *cmd = command;)
+		TRACE_DEBUG(1, "send_sync_msg adapter: %d %p %d %d",
+			    cmd->command_type, command, cmd_size, result_size);
+	}
+
+	packet->request_id = atomic64_inc_return(&channel->packet_request_id);
+	init_completion(&packet->wait);
+	packet->buffer = result;
+	packet->buffer_length = result_size;
+	packet->status = 0;
+	spin_lock_irq(&channel->packet_list_mutex);
+	list_add_tail(&packet->packet_list_entry, &channel->packet_list_head);
+	spin_unlock_irq(&channel->packet_list_mutex);
+
+	ret = vmbus_sendpacket(channel->channel, command, cmd_size,
+			       packet->request_id, VM_PKT_DATA_INBAND,
+			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+	if (ret) {
+		pr_err("vmbus_sendpacket failed");
+		goto cleanup;
+	}
+
+	TRACE_DEBUG(1, "waiting for completion: %llu", packet->request_id);
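+	/* Wait up to 1000 seconds (1000 * HZ jiffies) for the host reply */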
+	t = wait_for_completion_timeout(&packet->wait, (1000 * HZ));
+	if (!t) {
+		TRACE_DEBUG(1, "timeout waiting for completion");
+		ret = STATUS_TIMEOUT;
+	} else {
+		TRACE_DEBUG(1, "completion done: %llu %x",
+			    packet->request_id, packet->status);
+		if (!NT_SUCCESS(packet->status))
+			ret = packet->status;
+	}
+
+cleanup:
+
+	kmem_cache_free(channel->packet_cache, packet);
+	return ret;
+}
+
+static ntstatus dxgvmb_send_sync_msg_ntstatus(struct dxgvmbuschannel *channel,
+					      void *command, u32 cmd_size)
+{
+	ntstatus status = STATUS_SUCCESS;
+	int ret = dxgvmb_send_sync_msg(channel, command, cmd_size,
+				       &status, sizeof(ntstatus));
+	if (ret)
+		status = STATUS_UNSUCCESSFUL;
+	return status;
+}
+
+static int check_iospace_address(unsigned long address, uint size)
+{
+	if (address < dxgglobal->mmiospace_base ||
+	    size > dxgglobal->mmiospace_size ||
+	    address > (dxgglobal->mmiospace_base +
+		       dxgglobal->mmiospace_size - size)) {
+		pr_err("invalid iospace address %lx", address);
+		return STATUS_INVALID_PARAMETER;
+	}
+	return 0;
+}
+
+int dxg_unmap_iospace(void *va, uint size)
+{
+	int ret = 0;
+
+	TRACE_DEBUG(1, "%s %p %x", __func__, va, size);
+
+	/*
+	 * When an app calls exit(), dxgkrnl is called to close the device
+	 * with current->mm equal to NULL.
+	 */
+	if (current->mm) {
+		ret = vm_munmap((unsigned long)va, size);
+		if (ret)
+			pr_err("vm_munmap failed %d", ret);
+	}
+	return ret;
+}
+
+static uint8_t *dxg_map_iospace(uint64_t iospace_address, uint size,
+				unsigned long protection, bool cached)
+{
+	struct vm_area_struct *vma;
+	unsigned long va;
+	int ret = 0;
+
+	TRACE_DEBUG(1, "%s: %llx %x %lx",
+		    __func__, iospace_address, size, protection);
+	if (check_iospace_address(iospace_address, size)) {
+		pr_err("%s: invalid address", __func__);
+		return NULL;
+	}
+
+	va = vm_mmap(NULL, 0, size, protection, MAP_SHARED | MAP_ANONYMOUS, 0);
+	if ((long)va <= 0) {
+		pr_err("vm_mmap failed %lx %d", va, size);
+		return NULL;
+	}
+
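+	/*
+	 * Replace the anonymous backing of the new VMA with the requested
+	 * IO space pfn range, write-combined unless 'cached' is set.
+	 */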
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma(current->mm, (unsigned long)va);
+	if (vma) {
+		pgprot_t prot = vma->vm_page_prot;
+
+		if (!cached)
+			prot = pgprot_writecombine(prot);
+		TRACE_DEBUG(1, "vma: %lx %lx %lx",
+			    vma->vm_start, vma->vm_end, va);
+		vma->vm_pgoff = iospace_address >> PAGE_SHIFT;
+		ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+					 size, prot);
+		if (ret)
+			pr_err("io_remap_pfn_range failed: %d", ret);
+	} else {
+		pr_err("failed to find vma: %p %lx", vma, va);
+		ret = STATUS_NO_MEMORY;
+	}
+	up_read(&current->mm->mmap_sem);
+
+	if (ret) {
+		dxg_unmap_iospace((void *)va, size);
+		return NULL;
+	}
+	TRACE_DEBUG(1, "%s end: %lx", __func__, va);
+	return (uint8_t *) va;
+}
+
+/*
+ * Messages to the host
+ */
+
+int dxgvmb_send_set_iospace_region(u64 start, u64 len, u32 shared_mem_gpadl)
+{
+	ntstatus status;
+	struct dxgkvmb_command_setiospaceregion command = { };
+	int ret = dxgglobal_acquire_channel_lock();
+
+	if (ret)
+		goto cleanup;
+
+	command_vm_to_host_init1(&command.hdr,
+				 DXGK_VMBCOMMAND_SETIOSPACEREGION);
+	command.start = start;
+	command.length = len;
+	command.shared_page_gpadl = shared_mem_gpadl;
+	status = dxgvmb_send_sync_msg_ntstatus(&dxgglobal->channel, &command,
+					       sizeof(command));
+	if (!NT_SUCCESS(status)) {
+		pr_err("send_set_iospace_region failed %x", status);
+		ret = status;
+	}
+
+	dxgglobal_release_channel_lock();
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_create_process(struct dxgprocess *process)
+{
+	int ret = 0;
+	struct dxgkvmb_command_createprocess command = { 0 };
+	struct dxgkvmb_command_createprocess_return result = { 0 };
+	char s[W_MAX_PATH];
+	int i;
+
+	ret = dxgglobal_acquire_channel_lock();
+	if (ret)
+		goto cleanup;
+
+	TRACE_DEBUG(1, "%s", __func__);
+	command_vm_to_host_init1(&command.hdr, DXGK_VMBCOMMAND_CREATEPROCESS);
+	command.process = process;
+	command.process_id = process->process->pid;
+	command.linux_process = 1;
+	s[0] = 0;
+	__get_task_comm(s, W_MAX_PATH, process->process);
+	for (i = 0; i < W_MAX_PATH; i++) {
+		command.process_name[i] = s[i];
+		if (s[i] == 0)
+			break;
+	}
+
+	TRACE_DEBUG(1, "create_process msg %d %d",
+		    command.hdr.command_type, (u32) sizeof(command));
+	ret = dxgvmb_send_sync_msg(&dxgglobal->channel, &command,
+				   sizeof(command), &result, sizeof(result));
+	if (ret) {
+		pr_err("create_process failed %d", ret);
+	} else if (result.hprocess == 0) {
+		pr_err("create_process returned 0 handle");
+		ret = STATUS_INTERNAL_ERROR;
+	} else {
+		process->host_handle = result.hprocess;
+		TRACE_DEBUG(1, "create_process returned %x",
+			    process->host_handle);
+	}
+
+	dxgglobal_release_channel_lock();
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_destroy_process(d3dkmt_handle process)
+{
+	ntstatus status;
+	struct dxgkvmb_command_destroyprocess command = { 0 };
+
+	status = dxgglobal_acquire_channel_lock();
+	if (!NT_SUCCESS(status))
+		goto cleanup;
+	command_vm_to_host_init2(&command.hdr, DXGK_VMBCOMMAND_DESTROYPROCESS,
+				 process);
+	status = dxgvmb_send_sync_msg_ntstatus(&dxgglobal->channel,
+					       &command, sizeof(command));
+	dxgglobal_release_channel_lock();
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
+int dxgvmb_send_open_adapter(struct dxgadapter *adapter)
+{
+	int ret;
+	struct dxgkvmb_command_openadapter command = { };
+	struct dxgkvmb_command_openadapter_return result = { };
+
+	command_vgpu_to_host_init1(&command.hdr, DXGK_VMBCOMMAND_OPENADAPTER);
+	command.vmbus_interface_version = DXGK_VMBUS_INTERFACE_VERSION;
+	command.vmbus_last_compatible_interface_version =
+	    DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION;
+
+	ret = dxgvmb_send_sync_msg(&adapter->channel, &command, sizeof(command),
+				   &result, sizeof(result));
+	if (ret)
+		goto cleanup;
+
+	ret = result.status;
+	adapter->host_handle = result.host_adapter_handle;
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_close_adapter(struct dxgadapter *adapter)
+{
+	int ret = 0;
+	struct dxgkvmb_command_closeadapter command;
+
+	command_vgpu_to_host_init1(&command.hdr, DXGK_VMBCOMMAND_CLOSEADAPTER);
+	command.host_handle = adapter->host_handle;
+
+	ret = dxgvmb_send_sync_msg(&adapter->channel, &command, sizeof(command),
+				   NULL, 0);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter)
+{
+	int ret = 0;
+	struct dxgkvmb_command_getinternaladapterinfo command = { };
+	struct dxgkvmb_command_getinternaladapterinfo_return result = { };
+
+	command_vgpu_to_host_init1(&command.hdr,
+				   DXGK_VMBCOMMAND_GETINTERNALADAPTERINFO);
+
+	ret = dxgvmb_send_sync_msg(&adapter->channel, &command, sizeof(command),
+				   &result, sizeof(result));
+	if (NT_SUCCESS(ret)) {
+		adapter->host_adapter_luid = result.host_adapter_luid;
+		wcsncpy(adapter->device_description, result.device_description,
+			sizeof(adapter->device_description) / sizeof(winwchar));
+		wcsncpy(adapter->device_instance_id, result.device_instance_id,
+			sizeof(adapter->device_instance_id) / sizeof(winwchar));
+	}
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+d3dkmt_handle dxgvmb_send_create_device(struct dxgadapter *adapter,
+					struct dxgprocess *process,
+					struct d3dkmt_createdevice *args)
+{
+	int ret;
+	struct dxgkvmb_command_createdevice command = { };
+	uint cmd_size = sizeof(command);
+	struct dxgkvmb_command_createdevice_return result = { };
+
+	command_vgpu_to_host_init2(&command.hdr, DXGK_VMBCOMMAND_CREATEDEVICE,
+				   process->host_handle);
+	command.flags = args->flags;
+	ret = dxgvmb_send_sync_msg(&adapter->channel, &command, cmd_size,
+				   &result, sizeof(result));
+	if (ret)
+		result.device = 0;
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return result.device;
+}
+
+int dxgvmb_send_destroy_device(struct dxgadapter *adapter,
+			       struct dxgprocess *process, d3dkmt_handle h)
+{
+	ntstatus status;
+	struct dxgkvmb_command_destroydevice command = { };
+	uint cmd_size = sizeof(command);
+
+	command_vgpu_to_host_init2(&command.hdr, DXGK_VMBCOMMAND_DESTROYDEVICE,
+				   process->host_handle);
+	command.device = h;
+
+	status = dxgvmb_send_sync_msg_ntstatus(&adapter->channel, &command,
+					       cmd_size);
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
+d3dkmt_handle dxgvmb_send_create_context(struct dxgadapter *adapter,
+					 struct dxgprocess *process,
+					 struct d3dkmt_createcontextvirtual
+					 *args)
+{
+	struct dxgkvmb_command_createcontextvirtual *command = NULL;
+	uint cmd_size;
+	int ret;
+	d3dkmt_handle context = 0;
+
+	if (args->priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		pr_err("invalid priv_drv_data_size");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	cmd_size = sizeof(struct dxgkvmb_command_createcontextvirtual) +
+	    args->priv_drv_data_size - 1;
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		pr_err("failed to allocate memory for command");
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_CREATECONTEXTVIRTUAL,
+				   process->host_handle);
+	command->device = args->device;
+	command->node_ordinal = args->node_ordinal;
+	command->engine_affinity = args->engine_affinity;
+	command->flags = args->flags;
+	command->client_hint = args->client_hint;
+	command->priv_drv_data_size = args->priv_drv_data_size;
+	if (args->priv_drv_data_size) {
+		ret = dxg_copy_from_user(command->priv_drv_data,
+					 args->priv_drv_data,
+					 args->priv_drv_data_size);
+		if (ret)
+			goto cleanup;
+	}
+	/* Input command is returned back as output */
+	ret = dxgvmb_send_sync_msg(&adapter->channel, command, cmd_size,
+				   command, cmd_size);
+	if (ret) {
+		goto cleanup;
+	} else {
+		context = command->context;
+		if (args->priv_drv_data_size) {
+			ret = dxg_copy_to_user(args->priv_drv_data,
+					       command->priv_drv_data,
+					       args->priv_drv_data_size);
+			if (ret) {
+				dxgvmb_send_destroy_context(adapter, process,
+							    context);
+				context = 0;
+			}
+		}
+	}
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	return context;
+}
+
+int dxgvmb_send_destroy_context(struct dxgadapter *adapter,
+				struct dxgprocess *process, d3dkmt_handle h)
+{
+	ntstatus status;
+	struct dxgkvmb_command_destroycontext command = { };
+	uint cmd_size = sizeof(command);
+
+	command_vgpu_to_host_init2(&command.hdr, DXGK_VMBCOMMAND_DESTROYCONTEXT,
+				   process->host_handle);
+	command.context = h;
+
+	status = dxgvmb_send_sync_msg_ntstatus(&adapter->channel, &command,
+					       cmd_size);
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
+int dxgvmb_send_create_paging_queue(struct dxgprocess *process,
+				    struct dxgvmbuschannel *channel,
+				    struct dxgdevice *device,
+				    struct d3dkmt_createpagingqueue *args,
+				    struct dxgpagingqueue *pqueue)
+{
+	struct dxgkvmb_command_createpagingqueue_return result;
+	struct dxgkvmb_command_createpagingqueue command;
+	int ret;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_CREATEPAGINGQUEUE,
+				   process->host_handle);
+	command.args = *args;
+	args->paging_queue = 0;
+
+	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command), &result,
+				   sizeof(result));
+	if (ret) {
+		pr_err("send_create_paging_queue failed %x", ret);
+		goto cleanup;
+	}
+
+	args->paging_queue = result.paging_queue;
+	args->sync_object = result.sync_object;
+	args->fence_cpu_virtual_address =
+	    dxg_map_iospace(result.fence_storage_physical_address, PAGE_SIZE,
+			    PROT_READ | PROT_WRITE, true);
+	if (args->fence_cpu_virtual_address == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	pqueue->mapped_address = args->fence_cpu_virtual_address;
+	pqueue->handle = args->paging_queue;
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_destroy_paging_queue(struct dxgprocess *process,
+				     struct dxgvmbuschannel *channel,
+				     d3dkmt_handle h)
+{
+	int ret;
+	struct dxgkvmb_command_destroypagingqueue command;
+	uint cmd_size = sizeof(command);
+
+	ret = dxgglobal_acquire_channel_lock();
+	if (ret)
+		goto cleanup;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_DESTROYPAGINGQUEUE,
+				   process->host_handle);
+	command.paging_queue = h;
+
+	ret = dxgvmb_send_sync_msg(channel, &command, cmd_size, NULL, 0);
+
+	dxgglobal_release_channel_lock();
+
+cleanup:
+
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
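+/*
+ * Copies the global runtime private data, the standard allocation data or
+ * the global private driver data, and the per-allocation information from
+ * user mode into the variable size tail of the CREATEALLOCATION command.
+ */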
+static int copy_private_data(struct d3dkmt_createallocation *args,
+			     struct dxgkvmb_command_createallocation *command,
+			     struct d3dddi_allocationinfo2 *input_alloc_info,
+			     struct d3dkmt_createstandardallocation
+			     *standard_alloc)
+{
+	struct dxgkvmb_command_createallocation_allocinfo *alloc_info;
+	struct d3dddi_allocationinfo2 *input_alloc;
+	int ret = 0;
+	int i;
+	uint8_t *private_data_dest = (uint8_t *) &command[1] +
+	    (args->alloc_count *
+	     sizeof(struct dxgkvmb_command_createallocation_allocinfo));
+
+	if (args->private_runtime_data_size) {
+		ret = dxg_copy_from_user(private_data_dest,
+					 args->private_runtime_data,
+					 args->private_runtime_data_size);
+		if (ret)
+			goto cleanup;
+		private_data_dest += args->private_runtime_data_size;
+	}
+
+	if (args->flags.standard_allocation) {
+		TRACE_DEBUG2(1, 1, "private data offset %d",
+			     (uint) (private_data_dest - (uint8_t *) command));
+
+		args->priv_drv_data_size = sizeof(*standard_alloc);
+		memcpy(private_data_dest, standard_alloc,
+		       sizeof(*standard_alloc));
+		private_data_dest += args->priv_drv_data_size;
+	} else if (args->priv_drv_data_size) {
+		ret = dxg_copy_from_user(private_data_dest,
+					 args->priv_drv_data,
+					 args->priv_drv_data_size);
+		if (ret)
+			goto cleanup;
+		private_data_dest += args->priv_drv_data_size;
+	}
+
+	alloc_info = (void *)&command[1];
+	input_alloc = input_alloc_info;
+	if (input_alloc_info[0].sysmem)
+		command->flags.existing_sysmem = 1;
+	for (i = 0; i < args->alloc_count; i++) {
+		alloc_info->flags = input_alloc->flags.value;
+		alloc_info->vidpn_source_id = input_alloc->vidpn_source_id;
+		alloc_info->priv_drv_data_size =
+		    input_alloc->priv_drv_data_size;
+		if (input_alloc->priv_drv_data_size) {
+			ret = dxg_copy_from_user(private_data_dest,
+						 input_alloc->priv_drv_data,
+						 input_alloc->priv_drv_data_size);
+			if (ret)
+				goto cleanup;
+			private_data_dest += input_alloc->priv_drv_data_size;
+		}
+		alloc_info++;
+		input_alloc++;
+	}
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
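+/*
+ * Pins the pages of an existing user mode buffer and sends them to the
+ * host as a GPADL, which becomes the backing store of the allocation.
+ */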
+int create_existing_sysmem(struct dxgdevice *device,
+			   struct dxgkvmb_command_allocinfo_return *host_alloc,
+			   struct dxgallocation *dxgalloc,
+			   bool read_only,
+			   const void *sysmem)
+{
+	int ret = 0;
+	void *kmem = NULL;
+	struct dxgkvmb_command_setexistingsysmemstore set_store_command = { };
+	u64 alloc_size = host_alloc->allocation_size;
+	uint npages = alloc_size >> PAGE_SHIFT;
+	struct dxgvmbuschannel *channel = &device->adapter->channel;
+
+	/*
+	 * Create a guest physical address list and set it as the allocation
+	 * backing store in the host. This is done after creating the host
+	 * allocation, because the allocation size is only known at this
+	 * point.
+	 */
+
+	TRACE_DEBUG(2, "alloc size: %llu", alloc_size);
+
+	dxgalloc->cpu_address = (void *)sysmem;
+	dxgalloc->pages = dxgmem_alloc(dxgalloc->process, DXGMEM_ALLOCATION,
+				       npages * sizeof(void *));
+	if (dxgalloc->pages == NULL) {
+		pr_err("failed to allocate pages");
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	ret = get_user_pages_fast((unsigned long)sysmem, npages,
+				  read_only ? 0 : FOLL_WRITE,
+				  dxgalloc->pages);
+	if (ret != npages) {
+		pr_err("get_user_pages_fast failed: %d", ret);
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	kmem = vmap(dxgalloc->pages, npages, VM_MAP, PAGE_KERNEL);
+	if (kmem == NULL) {
+		pr_err("vmap failed");
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	ret = vmbus_establish_gpadl(dxgglobal_get_vmbus(), kmem,
+				    alloc_size, &dxgalloc->gpadl);
+	if (ret) {
+		pr_err("establish_gpadl failed: %d", ret);
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	TRACE_DEBUG(1, "New gpadl %d", dxgalloc->gpadl);
+
+	command_vgpu_to_host_init2(&set_store_command.hdr,
+				   DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE,
+				   device->process->host_handle);
+	set_store_command.device = device->handle;
+	set_store_command.allocation = host_alloc->allocation;
+	set_store_command.gpadl = dxgalloc->gpadl;
+	ret = dxgvmb_send_sync_msg_ntstatus(channel, &set_store_command,
+					    sizeof(set_store_command));
+	if (!NT_SUCCESS(ret)) {
+		pr_err("failed to set existing store: %x", ret);
+		goto cleanup;
+	}
+
+cleanup:
+	if (kmem)
+		vunmap(kmem);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
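+/*
+ * Assigns local handles, returned by the host for the new resource and
+ * allocations, in the process handle table. For a shared resource a
+ * global handle is allocated as well.
+ */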
+static int process_allocation_handles(struct dxgprocess *process,
+				      struct dxgdevice *device,
+				      struct d3dkmt_createallocation *args,
+				      struct
+				      dxgkvmb_command_createallocation_return
+				      *result, struct dxgallocation **dxgalloc,
+				      struct dxgresource *resource)
+{
+	int ret = 0, i;
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	if (args->flags.create_resource) {
+		ret = hmgrtable_assign_handle(&process->handle_table, resource,
+					      HMGRENTRY_TYPE_DXGRESOURCE,
+					      result->resource);
+		if (ret) {
+			pr_err("failed to assign resource handle %x",
+				   result->resource);
+		} else {
+			resource->handle = result->resource;
+			resource->handle_valid = 1;
+		}
+	}
+	for (i = 0; i < args->alloc_count; i++) {
+		struct dxgkvmb_command_allocinfo_return *host_alloc;
+
+		host_alloc = &result->allocation_info[i];
+		ret = hmgrtable_assign_handle(&process->handle_table,
+					      dxgalloc[i],
+					      HMGRENTRY_TYPE_DXGALLOCATION,
+					      host_alloc->allocation);
+		if (ret) {
+			pr_err("failed to assign alloc handle %x %d %d",
+				   host_alloc->allocation,
+				   args->alloc_count, i);
+			break;
+		}
+		dxgalloc[i]->alloc_handle = host_alloc->allocation;
+		dxgalloc[i]->handle_valid = 1;
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+	if (ret)
+		goto cleanup;
+
+	if (args->flags.create_shared && !args->flags.nt_security_sharing) {
+		struct dxgsharedresource *shared_resource =
+		    resource->shared_owner;
+		shared_resource->host_shared_handle = result->global_share;
+		shared_resource->global_handle =
+		    hmgrtable_alloc_handle_safe(&dxgglobal->handle_table,
+						shared_resource,
+						HMGRENTRY_TYPE_DXGSHAREDRESOURCE,
+						true);
+		if (shared_resource->global_handle == 0) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		args->global_share = shared_resource->global_handle;
+		TRACE_DEBUG(1, "Shared resource global handles: %x %x",
+			    shared_resource->global_handle,
+			    shared_resource->host_shared_handle);
+	}
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
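+/*
+ * Completes allocation creation in the guest: sets up the backing store
+ * for existing sysmem allocations, copies the handles and private data
+ * back to user mode and, on failure, destroys the host allocations using
+ * the preallocated destroy command.
+ */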
+static int create_local_allocations(struct dxgprocess *process,
+				    struct dxgdevice *device,
+				    struct d3dkmt_createallocation *args,
+				    struct d3dkmt_createallocation __user
+				    *input_args,
+				    struct d3dddi_allocationinfo2 *alloc_info,
+				    struct
+				    dxgkvmb_command_createallocation_return
+				    *result, struct dxgresource *resource,
+				    struct dxgallocation **dxgalloc,
+				    struct dxgkvmb_command_destroyallocation
+				    *destroy_buffer, uint destroy_buffer_size)
+{
+	int i;
+	int alloc_count = args->alloc_count;
+	uint8_t *alloc_private_data = NULL;
+	int ret = 0;
+	ntstatus status = STATUS_SUCCESS;
+	struct dxgvmbuschannel *channel = &device->adapter->channel;
+
+	/* Prepare the command to destroy allocation in case of failure */
+	command_vgpu_to_host_init2(&destroy_buffer->hdr,
+				   DXGK_VMBCOMMAND_DESTROYALLOCATION,
+				   process->host_handle);
+	destroy_buffer->device = args->device;
+	destroy_buffer->resource = args->resource;
+	destroy_buffer->alloc_count = alloc_count;
+	destroy_buffer->flags.assume_not_in_use = 1;
+	for (i = 0; i < alloc_count; i++) {
+		TRACE_DEBUG2(1, 1, "host allocation: %d %x",
+			     i, result->allocation_info[i].allocation);
+		destroy_buffer->allocations[i] =
+		    result->allocation_info[i].allocation;
+	}
+
+	if (args->flags.create_resource) {
+		TRACE_DEBUG(1, "created resource: %x", result->resource);
+		ret = dxg_copy_to_user(&input_args->resource, &result->resource,
+				       sizeof(d3dkmt_handle));
+		if (ret)
+			goto cleanup;
+	}
+
+	alloc_private_data = (uint8_t *) result +
+	    sizeof(struct dxgkvmb_command_createallocation_return) +
+	    sizeof(struct dxgkvmb_command_allocinfo_return) * (alloc_count - 1);
+
+	for (i = 0; i < alloc_count; i++) {
+		struct dxgkvmb_command_allocinfo_return *host_alloc;
+		struct d3dddi_allocationinfo2 *user_alloc;
+
+		host_alloc = &result->allocation_info[i];
+		user_alloc = &alloc_info[i];
+		if (user_alloc->sysmem) {
+			ret = create_existing_sysmem(device, host_alloc,
+						     dxgalloc[i],
+						     args->flags.read_only != 0,
+						     user_alloc->sysmem);
+			if (ret)
+				goto cleanup;
+		}
+		dxgalloc[i]->num_pages =
+		    host_alloc->allocation_size >> PAGE_SHIFT;
+		dxgalloc[i]->cached = host_alloc->allocation_flags.cached;
+		if (host_alloc->priv_drv_data_size) {
+			ret = dxg_copy_to_user(user_alloc->priv_drv_data,
+					       alloc_private_data,
+					       host_alloc->priv_drv_data_size);
+			if (ret)
+				goto cleanup;
+			alloc_private_data += host_alloc->priv_drv_data_size;
+		}
+		ret = dxg_copy_to_user(&args->allocation_info[i].allocation,
+				       &host_alloc->allocation,
+				       sizeof(d3dkmt_handle));
+		if (ret)
+			goto cleanup;
+	}
+
+	ret = process_allocation_handles(process, device, args, result,
+					 dxgalloc, resource);
+	if (ret)
+		goto cleanup;
+
+	ret = dxg_copy_to_user(&input_args->global_share, &args->global_share,
+			       sizeof(d3dkmt_handle));
+
+cleanup:
+
+	if (ret) {
+		/* Free local handles before freeing the handles in the host */
+		dxgdevice_acquire_alloc_list_lock(device);
+		if (dxgalloc)
+			for (i = 0; i < alloc_count; i++)
+				if (dxgalloc[i])
+					dxgallocation_free_handle(dxgalloc[i]);
+		if (resource && args->flags.create_resource)
+			dxgresource_free_handle(resource);
+		dxgdevice_release_alloc_list_lock(device);
+
+		/* Destroy allocations in the host to unmap gpadls */
+		status = dxgvmb_send_sync_msg_ntstatus(channel, destroy_buffer,
+						       destroy_buffer_size);
+		if (!NT_SUCCESS(status))
+			pr_err("failed to destroy allocations: %x", status);
+
+		dxgdevice_acquire_alloc_list_lock(device);
+		if (dxgalloc) {
+			for (i = 0; i < alloc_count; i++) {
+				if (dxgalloc[i]) {
+					dxgalloc[i]->alloc_handle = 0;
+					dxgallocation_destroy(dxgalloc[i]);
+					dxgalloc[i] = NULL;
+				}
+			}
+		}
+		if (resource && args->flags.create_resource) {
+			/*
+			 * Prevent the resource memory from being freed.
+			 * It will be freed in the top level function.
+			 */
+			dxgresource_acquire_reference(resource);
+			dxgresource_destroy(resource);
+		}
+		dxgdevice_release_alloc_list_lock(device);
+	}
+
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
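+/*
+ * Top level handler of allocation creation over VM bus. It validates the
+ * argument sizes, builds the variable size CREATEALLOCATION command and
+ * processes the result returned by the host.
+ */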
+int dxgvmb_send_create_allocation(struct dxgprocess *process,
+				  struct dxgdevice *device,
+				  struct d3dkmt_createallocation *args,
+				  struct d3dkmt_createallocation __user
+				  *input_args, struct dxgresource *resource,
+				  struct dxgallocation **dxgalloc,
+				  struct d3dddi_allocationinfo2 *alloc_info,
+				  struct d3dkmt_createstandardallocation
+				  *standard_alloc)
+{
+	struct dxgkvmb_command_createallocation *command = NULL;
+	struct dxgkvmb_command_destroyallocation *destroy_buffer = NULL;
+	struct dxgkvmb_command_createallocation_return *result = NULL;
+	int ret;
+	int i;
+	uint result_size = 0;
+	uint cmd_size = 0;
+	uint destroy_buffer_size = 0;
+	uint priv_drv_data_size;
+	struct dxgvmbuschannel *channel = &device->adapter->channel;
+
+	if (args->private_runtime_data_size >= DXG_MAX_VM_BUS_PACKET_SIZE ||
+	    args->priv_drv_data_size >= DXG_MAX_VM_BUS_PACKET_SIZE) {
+		ret = STATUS_BUFFER_OVERFLOW;
+		goto cleanup;
+	}
+
+	/*
+	 * Preallocate the buffer, which will be used for destruction in case
+	 * of a failure
+	 */
+	destroy_buffer_size = sizeof(struct dxgkvmb_command_destroyallocation) +
+	    args->alloc_count * sizeof(d3dkmt_handle);
+	destroy_buffer = dxgmem_alloc(process, DXGMEM_TMP, destroy_buffer_size);
+	if (destroy_buffer == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	/* Compute the total private driver size */
+
+	priv_drv_data_size = 0;
+
+	for (i = 0; i < args->alloc_count; i++) {
+		if (alloc_info[i].priv_drv_data_size >=
+		    DXG_MAX_VM_BUS_PACKET_SIZE) {
+			ret = STATUS_BUFFER_OVERFLOW;
+			goto cleanup;
+		}
+		priv_drv_data_size += alloc_info[i].priv_drv_data_size;
+		if (priv_drv_data_size >= DXG_MAX_VM_BUS_PACKET_SIZE) {
+			ret = STATUS_BUFFER_OVERFLOW;
+			goto cleanup;
+		}
+	}
+
+	/*
+	 * Private driver data for the result includes only per allocation
+	 * private data
+	 */
+	result_size = sizeof(struct dxgkvmb_command_createallocation_return) +
+	    (args->alloc_count - 1) *
+	    sizeof(struct dxgkvmb_command_allocinfo_return) +
+	    priv_drv_data_size;
+	result = dxgmem_alloc(process, DXGMEM_VMBUS, result_size);
+	if (result == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	/* Private drv data for the command includes the global private data */
+	priv_drv_data_size += args->priv_drv_data_size;
+
+	cmd_size = sizeof(struct dxgkvmb_command_createallocation) +
+	    args->alloc_count *
+	    sizeof(struct dxgkvmb_command_createallocation_allocinfo) +
+	    args->private_runtime_data_size + priv_drv_data_size;
+	if (cmd_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		ret = STATUS_BUFFER_OVERFLOW;
+		goto cleanup;
+	}
+
+	TRACE_DEBUG(1, "command size, driver_data_size %d %d %ld %ld",
+		    cmd_size, priv_drv_data_size,
+		    sizeof(struct dxgkvmb_command_createallocation),
+		    sizeof(struct dxgkvmb_command_createallocation_allocinfo));
+
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_CREATEALLOCATION,
+				   process->host_handle);
+	command->device = args->device;
+	command->flags = args->flags;
+	command->resource = args->resource;
+	command->private_runtime_resource_handle =
+	    args->private_runtime_resource_handle;
+	command->alloc_count = args->alloc_count;
+	command->private_runtime_data_size = args->private_runtime_data_size;
+	command->priv_drv_data_size = args->priv_drv_data_size;
+	if (args->flags.standard_allocation) {
+		/*
+		 * Flags.ExistingSysMem cannot be set from user mode, so it
+		 * needs to be set here.
+		 */
+		command->flags.existing_sysmem = 1;
+	}
+
+	ret = copy_private_data(args, command, alloc_info, standard_alloc);
+	if (ret)
+		goto cleanup;
+
+	ret = dxgvmb_send_sync_msg(channel, command, cmd_size,
+				   result, result_size);
+	if (ret) {
+		pr_err("send_create_allocation failed %x", ret);
+		goto cleanup;
+	}
+
+	ret = create_local_allocations(process, device, args, input_args,
+				       alloc_info, result, resource, dxgalloc,
+				       destroy_buffer, destroy_buffer_size);
+	if (ret)
+		goto cleanup;
+
+cleanup:
+
+	if (destroy_buffer)
+		dxgmem_free(process, DXGMEM_TMP, destroy_buffer);
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	if (result)
+		dxgmem_free(process, DXGMEM_VMBUS, result);
+
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_destroy_allocation(struct dxgprocess *process,
+				   struct dxgdevice *device,
+				   struct dxgvmbuschannel *channel,
+				   struct d3dkmt_destroyallocation2 *args,
+				   d3dkmt_handle *alloc_handles)
+{
+	struct dxgkvmb_command_destroyallocation *destroy_buffer = NULL;
+	uint destroy_buffer_size = 0;
+	int ret = 0;
+	int allocations_size = args->alloc_count * sizeof(d3dkmt_handle);
+
+	destroy_buffer_size = sizeof(struct dxgkvmb_command_destroyallocation) +
+	    allocations_size;
+	destroy_buffer = dxgmem_alloc(process, DXGMEM_TMP, destroy_buffer_size);
+	if (destroy_buffer == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	command_vgpu_to_host_init2(&destroy_buffer->hdr,
+				   DXGK_VMBCOMMAND_DESTROYALLOCATION,
+				   process->host_handle);
+	destroy_buffer->device = args->device;
+	destroy_buffer->resource = args->resource;
+	destroy_buffer->alloc_count = args->alloc_count;
+	destroy_buffer->flags = args->flags;
+	if (allocations_size)
+		memcpy(destroy_buffer->allocations, alloc_handles,
+		       allocations_size);
+
+	ret = dxgvmb_send_sync_msg_ntstatus(channel, destroy_buffer,
+					    destroy_buffer_size);
+
+cleanup:
+
+	if (destroy_buffer)
+		dxgmem_free(process, DXGMEM_TMP, destroy_buffer);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_make_resident(struct dxgprocess *process,
+			      struct dxgdevice *device,
+			      struct dxgvmbuschannel *channel,
+			      struct d3dddi_makeresident *args)
+{
+	int ret = 0;
+	uint cmd_size;
+	struct dxgkvmb_command_makeresident_return result = { };
+	struct dxgkvmb_command_makeresident *command = NULL;
+
+	cmd_size = (args->alloc_count - 1) * sizeof(d3dkmt_handle) +
+	    sizeof(struct dxgkvmb_command_makeresident);
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	ret = dxg_copy_from_user(command->allocations, args->allocation_list,
+				 args->alloc_count * sizeof(d3dkmt_handle));
+	if (ret)
+		goto cleanup;
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_MAKERESIDENT,
+				   process->host_handle);
+	command->alloc_count = args->alloc_count;
+	command->paging_queue = args->paging_queue;
+	if (device)
+		command->device = device->handle;
+	command->flags = args->flags;
+
+	ret = dxgvmb_send_sync_msg(channel, command, cmd_size,
+				   &result, sizeof(result));
+	if (ret) {
+		pr_err("send_make_resident failed %x", ret);
+		goto cleanup;
+	}
+
+	args->paging_fence_value = result.paging_fence_value;
+	args->num_bytes_to_trim = result.num_bytes_to_trim;
+	ret = result.status;
+
+cleanup:
+
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_evict(struct dxgprocess *process,
+		      struct dxgvmbuschannel *channel,
+		      struct d3dkmt_evict *args)
+{
+	int ret = 0;
+	uint cmd_size;
+	struct dxgkvmb_command_evict_return result = { };
+	struct dxgkvmb_command_evict *command = NULL;
+
+	cmd_size = (args->alloc_count - 1) * sizeof(d3dkmt_handle) +
+	    sizeof(struct dxgkvmb_command_evict);
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	ret = dxg_copy_from_user(command->allocations, args->allocations,
+				 args->alloc_count * sizeof(d3dkmt_handle));
+	if (ret)
+		goto cleanup;
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_EVICT, process->host_handle);
+	command->alloc_count = args->alloc_count;
+	command->device = args->device;
+	command->flags = args->flags;
+
+	ret = dxgvmb_send_sync_msg(channel, command, cmd_size,
+				   &result, sizeof(result));
+	if (ret) {
+		pr_err("send_evict failed %x", ret);
+		goto cleanup;
+	}
+	args->num_bytes_to_trim = result.num_bytes_to_trim;
+
+cleanup:
+
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_submit_command(struct dxgprocess *process,
+			       struct dxgvmbuschannel *channel,
+			       struct d3dkmt_submitcommand *args)
+{
+	int ret = 0;
+	uint cmd_size;
+	struct dxgkvmb_command_submitcommand *command = NULL;
+	uint hbufsize = args->num_history_buffers * sizeof(d3dkmt_handle);
+
+	cmd_size = sizeof(struct dxgkvmb_command_submitcommand) +
+	    hbufsize + args->priv_drv_data_size;
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	ret = dxg_copy_from_user(&command[1], args->history_buffer_array,
+				 hbufsize);
+	if (ret)
+		goto cleanup;
+	ret = dxg_copy_from_user((uint8_t *) &command[1] + hbufsize,
+				 args->priv_drv_data, args->priv_drv_data_size);
+	if (ret)
+		goto cleanup;
+
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_SUBMITCOMMAND,
+				   process->host_handle);
+	command->args = *args;
+
+	ret = dxgvmb_send_sync_msg_ntstatus(channel, command, cmd_size);
+
+cleanup:
+
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_map_gpu_va(struct dxgprocess *process,
+			   d3dkmt_handle device,
+			   struct dxgvmbuschannel *channel,
+			   struct d3dddi_mapgpuvirtualaddress *args)
+{
+	struct dxgkvmb_command_mapgpuvirtualaddress command;
+	struct dxgkvmb_command_mapgpuvirtualaddress_return result;
+	int ret = 0;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_MAPGPUVIRTUALADDRESS,
+				   process->host_handle);
+	command.args = *args;
+	command.device = device;
+
+	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command), &result,
+				   sizeof(result));
+	if (ret) {
+		pr_err("%s failed %x", __func__, ret);
+		goto cleanup;
+	}
+	args->virtual_address = result.virtual_address;
+	args->paging_fence_value = result.paging_fence_value;
+	ret = result.status;
+
+cleanup:
+
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_reserve_gpu_va(struct dxgprocess *process,
+			       struct dxgvmbuschannel *channel,
+			       struct d3dddi_reservegpuvirtualaddress *args)
+{
+	struct dxgkvmb_command_reservegpuvirtualaddress command;
+	struct dxgkvmb_command_reservegpuvirtualaddress_return result;
+	int ret = 0;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_RESERVEGPUVIRTUALADDRESS,
+				   process->host_handle);
+	command.args = *args;
+
+	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command), &result,
+				   sizeof(result));
+	args->virtual_address = result.virtual_address;
+
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_free_gpu_va(struct dxgprocess *process,
+			    struct dxgvmbuschannel *channel,
+			    struct d3dkmt_freegpuvirtualaddress *args)
+{
+	struct dxgkvmb_command_freegpuvirtualaddress command;
+	ntstatus status;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_FREEGPUVIRTUALADDRESS,
+				   process->host_handle);
+	command.args = *args;
+
+	status = dxgvmb_send_sync_msg_ntstatus(channel, &command,
+					       sizeof(command));
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
+int dxgvmb_send_update_gpu_va(struct dxgprocess *process,
+			      struct dxgvmbuschannel *channel,
+			      struct d3dkmt_updategpuvirtualaddress *args)
+{
+	struct dxgkvmb_command_updategpuvirtualaddress *command = NULL;
+	uint cmd_size;
+	uint op_size;
+	int ret = 0;
+
+	if (args->num_operations == 0 ||
+	    (DXG_MAX_VM_BUS_PACKET_SIZE /
+	     sizeof(struct d3dddi_updategpuvirtualaddress_operation)) <
+	    args->num_operations) {
+		ret = STATUS_INVALID_PARAMETER;
+		pr_err("Invalid number of operations: %d",
+			   args->num_operations);
+		goto cleanup;
+	}
+
+	op_size = args->num_operations *
+	    sizeof(struct d3dddi_updategpuvirtualaddress_operation);
+	cmd_size = sizeof(struct dxgkvmb_command_updategpuvirtualaddress) +
+	    op_size - sizeof(args->operations[0]);
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		pr_err("Failed to allocate command");
+		goto cleanup;
+	}
+
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_UPDATEGPUVIRTUALADDRESS,
+				   process->host_handle);
+	command->fence_value = args->fence_value;
+	command->device = args->device;
+	command->context = args->context;
+	command->fence_object = args->fence_object;
+	command->num_operations = args->num_operations;
+	command->flags = args->flags.value;
+	ret = dxg_copy_from_user(command->operations, args->operations,
+				 op_size);
+	if (ret)
+		goto cleanup;
+
+	ret = dxgvmb_send_sync_msg_ntstatus(channel, command, cmd_size);
+
+cleanup:
+
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+static void set_result(struct d3dkmt_createsynchronizationobject2 *args,
+		       d3dgpu_virtual_address fence_gpu_va, uint8_t *va)
+{
+	args->info.periodic_monitored_fence.fence_gpu_virtual_address =
+	    fence_gpu_va;
+	args->info.periodic_monitored_fence.fence_cpu_virtual_address = va;
+}
+
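+/*
+ * Creates a synchronization object on the host. For monitored fences the
+ * fence storage is mapped into the process address space.
+ */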
+int dxgvmb_send_create_sync_object(struct dxgprocess *process,
+				   struct dxgvmbuschannel *channel,
+				   struct d3dkmt_createsynchronizationobject2
+				   *args, struct dxgsyncobject *syncobj)
+{
+	struct dxgkvmb_command_createsyncobject_return result = { };
+	struct dxgkvmb_command_createsyncobject command = { };
+	int ret = 0;
+	uint8_t *va = NULL;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_CREATESYNCOBJECT,
+				   process->host_handle);
+	command.args = *args;
+	command.client_hint = 1;	/* CLIENTHINT_UMD */
+
+	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command), &result,
+				   sizeof(result));
+	if (ret) {
+		pr_err("%s failed %d", __func__, ret);
+		goto cleanup;
+	}
+	args->sync_object = result.sync_object;
+	if (syncobj->shared) {
+		if (result.global_sync_object == 0) {
+			pr_err("shared handle is 0");
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		args->info.shared_handle = result.global_sync_object;
+	}
+
+	if (syncobj->monitored_fence) {
+		va = dxg_map_iospace(result.fence_storage_address, PAGE_SIZE,
+				     PROT_READ | PROT_WRITE, true);
+		if (va == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		if (args->info.type == D3DDDI_MONITORED_FENCE) {
+			args->info.monitored_fence.fence_gpu_virtual_address =
+			    result.fence_gpu_va;
+			args->info.monitored_fence.fence_cpu_virtual_address =
+			    va;
+			{
+				unsigned long value;
+
+				TRACE_DEBUG(1, "fence cpu address: %p", va);
+				ret = dxg_copy_from_user(&value, va,
+							 sizeof(uint64_t));
+				if (ret)
+					pr_err("failed to read fence");
+				else
+					TRACE_DEBUG(1, "fence value: %lx",
+						    value);
+			}
+		} else {
+			set_result(args, result.fence_gpu_va, va);
+		}
+		syncobj->mapped_address = va;
+	}
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_destroy_sync_object(struct dxgprocess *process,
+				    d3dkmt_handle sync_object)
+{
+	struct dxgkvmb_command_destroysyncobject command = { };
+	ntstatus status;
+
+	status = dxgglobal_acquire_channel_lock();
+	if (status)
+		goto cleanup;
+
+	command_vm_to_host_init2(&command.hdr,
+				 DXGK_VMBCOMMAND_DESTROYSYNCOBJECT,
+				 process->host_handle);
+	command.sync_object = sync_object;
+
+	status = dxgvmb_send_sync_msg_ntstatus(dxgglobal_get_dxgvmbuschannel(),
+					       &command, sizeof(command));
+
+	dxgglobal_release_channel_lock();
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
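+/*
+ * Builds a variable size SIGNALSYNCOBJECT command from the object,
+ * context and fence value arrays. When enqueue_cpu_event is set, the
+ * host signals the guest CPU event instead of a device object.
+ */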
+int dxgvmb_send_signal_sync_object(struct dxgprocess *process,
+				   struct dxgvmbuschannel *channel,
+				   struct d3dddicb_signalflags flags,
+				   uint64_t legacy_fence_value,
+				   d3dkmt_handle context,
+				   uint object_count,
+				   d3dkmt_handle __user *objects,
+				   uint context_count,
+				   d3dkmt_handle __user *contexts,
+				   uint fence_count,
+				   uint64_t __user *fences,
+				   struct eventfd_ctx *cpu_event_handle,
+				   d3dkmt_handle device)
+{
+	int ret = 0;
+	struct dxgkvmb_command_signalsyncobject *command = NULL;
+	uint object_size = object_count * sizeof(d3dkmt_handle);
+	uint context_size = context_count * sizeof(d3dkmt_handle);
+	uint fence_size = fences ? fence_count * sizeof(uint64_t) : 0;
+	uint8_t *current_pos;
+	uint cmd_size = sizeof(struct dxgkvmb_command_signalsyncobject) +
+	    object_size + context_size + fence_size;
+
+	if (context)
+		cmd_size += sizeof(d3dkmt_handle);
+
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_SIGNALSYNCOBJECT,
+				   process->host_handle);
+
+	if (flags.enqueue_cpu_event)
+		command->cpu_event_handle = (winhandle) cpu_event_handle;
+	else
+		command->device = device;
+	command->flags = flags;
+	command->fence_value = legacy_fence_value;
+	command->object_count = object_count;
+	command->context_count = context_count;
+	current_pos = (uint8_t *) &command[1];
+	ret = dxg_copy_from_user(current_pos, objects, object_size);
+	if (ret) {
+		pr_err("Failed to read objects %p %d",
+			   objects, object_size);
+		goto cleanup;
+	}
+	current_pos += object_size;
+	if (context) {
+		command->context_count++;
+		*(d3dkmt_handle *) current_pos = context;
+		current_pos += sizeof(d3dkmt_handle);
+	}
+	if (context_size) {
+		ret = dxg_copy_from_user(current_pos, contexts, context_size);
+		if (ret) {
+			pr_err("Failed to read contexts %p %d",
+				   contexts, context_size);
+			goto cleanup;
+		}
+		current_pos += context_size;
+	}
+	if (fence_size) {
+		ret = dxg_copy_from_user(current_pos, fences, fence_size);
+		if (ret) {
+			pr_err("Failed to read fences %p %d",
+				   fences, fence_size);
+			goto cleanup;
+		}
+	}
+
+	ret = dxgvmb_send_sync_msg_ntstatus(channel, command, cmd_size);
+
+cleanup:
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process,
+				     struct dxgvmbuschannel *channel,
+				     struct
+				     d3dkmt_waitforsynchronizationobjectfromcpu
+				     *args, u64 cpu_event)
+{
+	int ret = 0;
+	struct dxgkvmb_command_waitforsyncobjectfromcpu *command = NULL;
+	uint object_size = args->object_count * sizeof(d3dkmt_handle);
+	uint fence_size = args->object_count * sizeof(uint64_t);
+	uint8_t *current_pos;
+	uint cmd_size = sizeof(*command) + object_size + fence_size;
+
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMCPU,
+				   process->host_handle);
+	command->device = args->device;
+	command->flags = args->flags;
+	command->object_count = args->object_count;
+	command->guest_event_pointer = (uint64_t) cpu_event;
+	current_pos = (uint8_t *) &command[1];
+	ret = dxg_copy_from_user(current_pos, args->objects, object_size);
+	if (ret)
+		goto cleanup;
+	current_pos += object_size;
+	ret = dxg_copy_from_user(current_pos, args->fence_values, fence_size);
+	if (ret)
+		goto cleanup;
+
+	ret = dxgvmb_send_sync_msg_ntstatus(channel, command, cmd_size);
+
+cleanup:
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process,
+				     struct dxgvmbuschannel *channel,
+				     d3dkmt_handle context, uint object_count,
+				     d3dkmt_handle *objects, uint64_t *fences,
+				     bool legacy_fence)
+{
+	ntstatus status;
+	struct dxgkvmb_command_waitforsyncobjectfromgpu *command = NULL;
+	uint fence_size = object_count * sizeof(uint64_t);
+	uint object_size = object_count * sizeof(d3dkmt_handle);
+	uint8_t *current_pos;
+	uint cmd_size = object_size + fence_size - sizeof(uint64_t) +
+	    sizeof(struct dxgkvmb_command_waitforsyncobjectfromgpu);
+
+	if (object_count == 0 || object_count > D3DDDI_MAX_OBJECT_WAITED_ON) {
+		status = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		status = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMGPU,
+				   process->host_handle);
+	command->context = context;
+	command->object_count = object_count;
+	command->legacy_fence_object = legacy_fence;
+	current_pos = (uint8_t *) command->fence_values;
+	memcpy(current_pos, fences, fence_size);
+	current_pos += fence_size;
+	memcpy(current_pos, objects, object_size);
+
+	status = dxgvmb_send_sync_msg_ntstatus(channel, command, cmd_size);
+
+cleanup:
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
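+/*
+ * Locks an allocation for CPU access and maps the CPU visible buffer,
+ * returned by the host, into the process address space.
+ */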
+int dxgvmb_send_lock2(struct dxgprocess *process,
+		      struct dxgvmbuschannel *channel,
+		      struct d3dkmt_lock2 *args,
+		      struct d3dkmt_lock2 __user *outargs)
+{
+	int ret = 0;
+	struct dxgkvmb_command_lock2 command = { };
+	struct dxgkvmb_command_lock2_return result = { };
+	struct dxgallocation *alloc = NULL;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_LOCK2, process->host_handle);
+	command.args = *args;
+
+	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command),
+				   &result, sizeof(result));
+	if (ret)
+		goto cleanup;
+	if (!NT_SUCCESS(result.status)) {
+		ret = result.status;
+		goto cleanup;
+	}
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+	alloc = hmgrtable_get_object_by_type(&process->handle_table,
+					     HMGRENTRY_TYPE_DXGALLOCATION,
+					     args->allocation);
+	if (alloc == NULL || alloc->cpu_address || alloc->cpu_address_refcount
+	    || alloc->cpu_address_mapped) {
+		pr_err("%s invalid alloc", __func__);
+		ret = STATUS_INVALID_PARAMETER;
+	} else {
+		args->data = dxg_map_iospace((uint64_t)
+					     result.cpu_visible_buffer_offset,
+					     alloc->num_pages << PAGE_SHIFT,
+					     PROT_READ | PROT_WRITE,
+					     alloc->cached);
+		if (args->data == NULL) {
+			ret = STATUS_NO_MEMORY;
+		} else {
+			ret = dxg_copy_to_user(&outargs->data, &args->data,
+					       sizeof(args->data));
+			if (!ret) {
+				alloc->cpu_address = args->data;
+				alloc->cpu_address_mapped = true;
+				alloc->cpu_address_refcount = 1;
+			} else {
+				dxg_unmap_iospace(alloc->cpu_address,
+						  alloc->num_pages <<
+						  PAGE_SHIFT);
+			}
+		}
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_unlock2(struct dxgprocess *process,
+			struct dxgvmbuschannel *channel,
+			struct d3dkmt_unlock2 *args)
+{
+	ntstatus status;
+	struct dxgkvmb_command_unlock2 command = { };
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_UNLOCK2,
+				   process->host_handle);
+	command.args = *args;
+
+	status = dxgvmb_send_sync_msg_ntstatus(channel, &command,
+					       sizeof(command));
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
+int dxgvmb_send_update_alloc_property(struct dxgprocess *process,
+				      struct dxgvmbuschannel *channel,
+				      struct d3dddi_updateallocproperty *args,
+				      struct d3dddi_updateallocproperty __user
+				      *inargs)
+{
+	int ret;
+	ntstatus status;
+	struct dxgkvmb_command_updateallocationproperty command = { };
+	struct dxgkvmb_command_updateallocationproperty_return result = { };
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_UPDATEALLOCATIONPROPERTY,
+				   process->host_handle);
+	command.args = *args;
+
+	status = dxgvmb_send_sync_msg(channel, &command, sizeof(command),
+				      &result, sizeof(result));
+	if (status == STATUS_PENDING) {
+		ret = dxg_copy_to_user(&inargs->paging_fence_value,
+				       &result.paging_fence_value,
+				       sizeof(uint64_t));
+		if (ret)
+			status = ret;
+	}
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
+int dxgvmb_send_mark_device_as_error(struct dxgprocess *process,
+				     struct dxgvmbuschannel *channel,
+				     struct d3dkmt_markdeviceaserror *args)
+{
+	struct dxgkvmb_command_markdeviceaserror command = { };
+	ntstatus status;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_MARKDEVICEASERROR,
+				   process->host_handle);
+	command.args = *args;
+	status = dxgvmb_send_sync_msg_ntstatus(channel, &command,
+					       sizeof(command));
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
+int dxgvmb_send_set_allocation_priority(struct dxgprocess *process,
+					struct dxgvmbuschannel *channel,
+					struct d3dkmt_setallocationpriority
+					*args)
+{
+	uint cmd_size = sizeof(struct dxgkvmb_command_setallocationpriority);
+	uint alloc_size = 0;
+	uint priority_size = 0;
+	struct dxgkvmb_command_setallocationpriority *command = NULL;
+	int ret = 0;
+	d3dkmt_handle *allocations;
+
+	if (args->allocation_count > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	if (args->resource) {
+		priority_size = sizeof(uint);
+		if (args->allocation_count != 0) {
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+	} else {
+		if (args->allocation_count == 0) {
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		alloc_size = args->allocation_count * sizeof(d3dkmt_handle);
+		cmd_size += alloc_size;
+		priority_size = sizeof(uint) * args->allocation_count;
+	}
+	cmd_size += priority_size;
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_SETALLOCATIONPRIORITY,
+				   process->host_handle);
+	command->device = args->device;
+	command->allocation_count = args->allocation_count;
+	command->resource = args->resource;
+	allocations = (d3dkmt_handle *) &command[1];
+	ret = dxg_copy_from_user(allocations, args->allocation_list,
+				 alloc_size);
+	if (ret)
+		goto cleanup;
+	ret = dxg_copy_from_user((uint8_t *) allocations + alloc_size,
+				 args->priorities, priority_size);
+	if (ret)
+		goto cleanup;
+
+	ret = dxgvmb_send_sync_msg_ntstatus(channel, command, cmd_size);
+
+cleanup:
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_get_allocation_priority(struct dxgprocess *process,
+					struct dxgvmbuschannel *channel,
+					struct d3dkmt_getallocationpriority
+					*args)
+{
+	uint cmd_size = sizeof(struct dxgkvmb_command_getallocationpriority);
+	uint result_size;
+	uint alloc_size = 0;
+	uint priority_size = 0;
+	struct dxgkvmb_command_getallocationpriority *command = NULL;
+	struct dxgkvmb_command_getallocationpriority_return *result;
+	int ret = 0;
+	d3dkmt_handle *allocations;
+
+	if (args->allocation_count > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	if (args->resource) {
+		priority_size = sizeof(uint);
+		if (args->allocation_count != 0) {
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+	} else {
+		if (args->allocation_count == 0) {
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		alloc_size = args->allocation_count * sizeof(d3dkmt_handle);
+		cmd_size += alloc_size;
+		priority_size = sizeof(uint) * args->allocation_count;
+	}
+	result_size =
+	    sizeof(struct dxgkvmb_command_getallocationpriority_return) +
+	    priority_size;
+	cmd_size += result_size;
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_GETALLOCATIONPRIORITY,
+				   process->host_handle);
+	command->device = args->device;
+	command->allocation_count = args->allocation_count;
+	command->resource = args->resource;
+	allocations = (d3dkmt_handle *) &command[1];
+	ret = dxg_copy_from_user(allocations, args->allocation_list,
+				 alloc_size);
+	if (ret)
+		goto cleanup;
+
+	result = (void *)((uint8_t *) &command[1] + alloc_size);
+
+	ret = dxgvmb_send_sync_msg(channel, command, cmd_size,
+				   result, result_size);
+	if (ret)
+		goto cleanup;
+	if (!NT_SUCCESS(result->status)) {
+		ret = result->status;
+		goto cleanup;
+	}
+
+	ret = dxg_copy_to_user(args->priorities,
+			       (uint8_t *) result + sizeof(*result),
+			       priority_size);
+
+cleanup:
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_set_context_scheduling_priority(struct dxgprocess *process,
+						struct dxgvmbuschannel *channel,
+						d3dkmt_handle context,
+						int priority, bool in_process)
+{
+	struct dxgkvmb_command_setcontextschedulingpriority2 command = { };
+	ntstatus status;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_SETCONTEXTSCHEDULINGPRIORITY,
+				   process->host_handle);
+	command.context = context;
+	command.priority = priority;
+	command.in_process = in_process;
+	status = dxgvmb_send_sync_msg_ntstatus(channel, &command,
+					       sizeof(command));
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
+int dxgvmb_send_get_context_scheduling_priority(struct dxgprocess *process,
+						struct dxgvmbuschannel *channel,
+						d3dkmt_handle context,
+						int *priority, bool in_process)
+{
+	struct dxgkvmb_command_getcontextschedulingpriority command = { };
+	struct dxgkvmb_command_getcontextschedulingpriority_return result = { };
+	int ret;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_GETCONTEXTSCHEDULINGPRIORITY,
+				   process->host_handle);
+	command.context = context;
+	command.in_process = in_process;
+	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command),
+				   &result, sizeof(result));
+	if (!ret) {
+		ret = result.status;
+		if (NT_SUCCESS(result.status))
+			*priority = result.priority;
+	}
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_offer_allocations(struct dxgprocess *process,
+				  struct dxgvmbuschannel *channel,
+				  struct d3dkmt_offerallocations *args)
+{
+	struct dxgkvmb_command_offerallocations *command;
+	int ret = 0;
+	uint alloc_size = sizeof(d3dkmt_handle) * args->allocation_count;
+	uint cmd_size = sizeof(struct dxgkvmb_command_offerallocations) +
+	    alloc_size - sizeof(d3dkmt_handle);
+
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_OFFERALLOCATIONS,
+				   process->host_handle);
+	command->flags = args->flags;
+	command->priority = args->priority;
+	command->device = args->device;
+	command->allocation_count = args->allocation_count;
+	if (args->resources) {
+		command->resources = true;
+		ret = dxg_copy_from_user(command->allocations, args->resources,
+					 alloc_size);
+	} else {
+		ret = dxg_copy_from_user(command->allocations,
+					 args->allocations, alloc_size);
+	}
+	if (ret)
+		goto cleanup;
+
+	ret = dxgvmb_send_sync_msg_ntstatus(channel, command, cmd_size);
+
+cleanup:
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
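+/*
+ * Reclaims previously offered allocations or resources. Per-allocation
+ * reclaim results are copied back only when the caller provides a
+ * results array.
+ */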
+int dxgvmb_send_reclaim_allocations(struct dxgprocess *process,
+				    struct dxgvmbuschannel *channel,
+				    d3dkmt_handle device,
+				    struct d3dkmt_reclaimallocations2 *args,
+				    uint64_t __user *paging_fence_value)
+{
+	struct dxgkvmb_command_reclaimallocations *command = NULL;
+	struct dxgkvmb_command_reclaimallocations_return *result = NULL;
+	int ret = 0;
+	uint alloc_size = sizeof(d3dkmt_handle) * args->allocation_count;
+	uint cmd_size = sizeof(struct dxgkvmb_command_reclaimallocations) +
+	    alloc_size - sizeof(d3dkmt_handle);
+	uint result_size = sizeof(*result);
+
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_RECLAIMALLOCATIONS,
+				   process->host_handle);
+	command->device = device;
+	command->paging_queue = args->paging_queue;
+	command->allocation_count = args->allocation_count;
+	command->write_results = args->results != NULL;
+	if (args->resources) {
+		command->resources = true;
+		ret = dxg_copy_from_user(command->allocations, args->resources,
+					 alloc_size);
+	} else {
+		ret = dxg_copy_from_user(command->allocations,
+					 args->allocations, alloc_size);
+	}
+	if (ret)
+		goto cleanup;
+
+	if (command->write_results)
+		result_size += (args->allocation_count - 1) *
+		    sizeof(enum d3dddi_reclaim_result);
+	result = dxgmem_alloc(process, DXGMEM_VMBUS, result_size);
+	if (result == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_sync_msg(channel, command, cmd_size,
+				   result, result_size);
+	if (ret)
+		goto cleanup;
+	ret = dxg_copy_to_user(paging_fence_value,
+			       &result->paging_fence_value, sizeof(uint64_t));
+	if (ret)
+		goto cleanup;
+
+	ret = result->status;
+	if (NT_SUCCESS(result->status) && args->results)
+		ret = dxg_copy_to_user(args->results, result->discarded,
+				       sizeof(result->discarded[0]) *
+				       args->allocation_count);
+
+cleanup:
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	if (result)
+		dxgmem_free(process, DXGMEM_VMBUS, result);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process,
+					  struct dxgvmbuschannel *channel,
+					  d3dkmt_handle other_process,
+					  struct
+					  d3dkmt_changevideomemoryreservation
+					  *args)
+{
+	struct dxgkvmb_command_changevideomemoryreservation command = { };
+	ntstatus status;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_CHANGEVIDEOMEMORYRESERVATION,
+				   process->host_handle);
+	command.args = *args;
+	command.args.process = other_process;
+
+	status = dxgvmb_send_sync_msg_ntstatus(channel, &command,
+					       sizeof(command));
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
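+/*
+ * Creates a hardware queue on the host, assigns a local handle to it and
+ * maps the queue progress fence into the process address space. The
+ * input command is also used as the output buffer.
+ */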
+int dxgvmb_send_create_hwqueue(struct dxgprocess *process,
+			       struct dxgvmbuschannel *channel,
+			       struct d3dkmt_createhwqueue *args,
+			       struct d3dkmt_createhwqueue __user *inargs,
+			       struct dxghwqueue *hwqueue)
+{
+	struct dxgkvmb_command_createhwqueue command_on_stack = { };
+	struct dxgkvmb_command_createhwqueue *command = &command_on_stack;
+	uint cmd_size = sizeof(struct dxgkvmb_command_createhwqueue);
+	int ret = 0;
+	bool command_allocated = false;
+
+	if (args->priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		pr_err("invalid private driver data size");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (args->priv_drv_data_size)
+		cmd_size += args->priv_drv_data_size - 1;
+
+	/* The input command buffer is also used to return the output */
+	cmd_size = DXGK_DECL_VMBUS_ALIGN_FOR_OUTPUT(cmd_size);
+
+	if (args->priv_drv_data_size) {
+		command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+		if (command == NULL) {
+			pr_err("failed to allocate memory");
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		command_allocated = true;
+	}
+
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_CREATEHWQUEUE,
+				   process->host_handle);
+	command->context = args->context;
+	command->flags = args->flags;
+	command->priv_drv_data_size = args->priv_drv_data_size;
+	if (args->priv_drv_data_size) {
+		ret = dxg_copy_from_user(command->priv_drv_data,
+					 args->priv_drv_data,
+					 args->priv_drv_data_size);
+		if (ret)
+			goto cleanup;
+	}
+
+	ret = dxgvmb_send_sync_msg(channel, command, cmd_size,
+				   command, cmd_size);
+	if (ret)
+		goto cleanup;
+
+	if (!NT_SUCCESS(command->status)) {
+		ret = command->status;
+		goto cleanup;
+	}
+
+	ret = hmgrtable_assign_handle_safe(&process->handle_table, hwqueue,
+					   HMGRENTRY_TYPE_DXGHWQUEUE,
+					   command->hwqueue);
+	if (ret)
+		goto cleanup;
+
+	hwqueue->handle = command->hwqueue;
+
+	hwqueue->progress_fence_mapped_address =
+	    dxg_map_iospace((unsigned long)
+			    command->hwqueue_progress_fence_cpuva, PAGE_SIZE,
+			    PROT_READ | PROT_WRITE, true);
+	if (hwqueue->progress_fence_mapped_address == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	hwqueue->progress_fence_sync_object = command->hwqueue_progress_fence;
+
+	ret = dxg_copy_to_user(&inargs->queue, &command->hwqueue,
+			       sizeof(d3dkmt_handle));
+	ret |= dxg_copy_to_user(&inargs->queue_progress_fence,
+				&command->hwqueue_progress_fence,
+				sizeof(d3dkmt_handle));
+	ret |= dxg_copy_to_user(&inargs->queue_progress_fence_cpu_va,
+				&hwqueue->progress_fence_mapped_address,
+				sizeof(inargs->queue_progress_fence_cpu_va));
+	ret |= dxg_copy_to_user(&inargs->queue_progress_fence_gpu_va,
+				&command->hwqueue_progress_fence_gpuva,
+				sizeof(uint64_t));
+	if (args->priv_drv_data_size)
+		ret |= dxg_copy_to_user(args->priv_drv_data,
+					command->priv_drv_data,
+					args->priv_drv_data_size);
+
+cleanup:
+	if (ret) {
+		pr_err("%s failed %x", __func__, ret);
+		if (hwqueue->handle) {
+			hmgrtable_free_handle_safe(&process->handle_table,
+						   HMGRENTRY_TYPE_DXGHWQUEUE,
+						   hwqueue->handle);
+			hwqueue->handle = 0;
+		}
+		if (command && command->hwqueue)
+			dxgvmb_send_destroy_hwqueue(process, channel,
+						    command->hwqueue);
+	}
+	if (command_allocated)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	return ret;
+}
+
+int dxgvmb_send_destroy_hwqueue(struct dxgprocess *process,
+				struct dxgvmbuschannel *channel,
+				d3dkmt_handle handle)
+{
+	ntstatus status;
+	struct dxgkvmb_command_destroyhwqueue command = { };
+
+	command_vgpu_to_host_init2(&command.hdr, DXGK_VMBCOMMAND_DESTROYHWQUEUE,
+				   process->host_handle);
+	command.hwqueue = handle;
+
+	status = dxgvmb_send_sync_msg_ntstatus(channel, &command,
+					       sizeof(command));
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
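+/*
+ * Forwards QUERYADAPTERINFO to the host. For adapter type queries the
+ * result is adjusted to report a paravirtualized adapter without display
+ * support.
+ */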
+int dxgvmb_send_query_adapter_info(struct dxgprocess *process,
+				   struct dxgvmbuschannel *channel,
+				   struct d3dkmt_queryadapterinfo *args)
+{
+	struct dxgkvmb_command_queryadapterinfo *command = NULL;
+	uint cmd_size;
+	int ret = 0;
+
+	cmd_size = sizeof(*command) + args->private_data_size - 1;
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	ret = dxg_copy_from_user(command->private_data,
+				 args->private_data, args->private_data_size);
+	if (ret)
+		goto cleanup;
+
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_QUERYADAPTERINFO,
+				   process->host_handle);
+	command->private_data_size = args->private_data_size;
+	command->query_type = args->type;
+
+	ret = dxgvmb_send_sync_msg(channel, command, cmd_size,
+				   command->private_data,
+				   command->private_data_size);
+	if (ret)
+		goto cleanup;
+	switch (args->type) {
+	case KMTQAITYPE_ADAPTERTYPE:
+	case KMTQAITYPE_ADAPTERTYPE_RENDER:
+		{
+			struct d3dkmt_adaptertype *adapter_type =
+			    (void *)command->private_data;
+			adapter_type->paravirtualized = 1;
+			adapter_type->display_supported = 0;
+			adapter_type->post_device = 0;
+			adapter_type->indirect_display_device = 0;
+			adapter_type->acg_supported = 0;
+			adapter_type->support_set_timings_from_vidpn = 0;
+			break;
+		}
+	default:
+		break;
+	}
+	ret = dxg_copy_to_user(args->private_data, command->private_data,
+			       args->private_data_size);
+
+cleanup:
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_submit_command_to_hwqueue(struct dxgprocess *process,
+					  struct dxgvmbuschannel *channel,
+					  struct d3dkmt_submitcommandtohwqueue
+					  *args)
+{
+	int ret = 0;
+	uint cmd_size;
+	struct dxgkvmb_command_submitcommandtohwqueue *command = NULL;
+	uint primaries_size = args->num_primaries * sizeof(d3dkmt_handle);
+
+	cmd_size = sizeof(*command) + args->priv_drv_data_size + primaries_size;
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	if (primaries_size) {
+		ret = dxg_copy_from_user(&command[1], args->written_primaries,
+					 primaries_size);
+		if (ret)
+			goto cleanup;
+	}
+	if (args->priv_drv_data_size) {
+		ret = dxg_copy_from_user((char *)&command[1] + primaries_size,
+					 args->priv_drv_data,
+					 args->priv_drv_data_size);
+		if (ret)
+			goto cleanup;
+	}
+
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_SUBMITCOMMANDTOHWQUEUE,
+				   process->host_handle);
+	command->args = *args;
+
+	ret = dxgvmb_send_sync_msg_ntstatus(channel, command, cmd_size);
+
+cleanup:
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_query_clock_calibration(struct dxgprocess *process,
+					struct dxgvmbuschannel *channel,
+					struct d3dkmt_queryclockcalibration
+					*args,
+					struct d3dkmt_queryclockcalibration
+					__user *inargs)
+{
+	struct dxgkvmb_command_queryclockcalibration command;
+	struct dxgkvmb_command_queryclockcalibration_return result;
+	int ret;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_QUERYCLOCKCALIBRATION,
+				   process->host_handle);
+	command.args = *args;
+
+	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command),
+				   &result, sizeof(result));
+	if (ret)
+		goto cleanup;
+	ret = dxg_copy_to_user(&inargs->clock_data, &result.clock_data,
+			       sizeof(result.clock_data));
+	if (ret)
+		goto cleanup;
+	ret = result.status;
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process,
+				       struct dxgvmbuschannel *channel,
+				       struct d3dkmt_flushheaptransitions *args)
+{
+	struct dxgkvmb_command_flushheaptransitions command;
+	ntstatus status;
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_FLUSHHEAPTRANSITIONS,
+				   process->host_handle);
+	status = dxgvmb_send_sync_msg_ntstatus(channel, &command,
+					       sizeof(command));
+	TRACE_FUNC_EXIT_ERR(__func__, status);
+	return status;
+}
+
+int dxgvmb_send_query_alloc_residency(struct dxgprocess *process,
+				      struct dxgvmbuschannel *channel,
+				      struct d3dkmt_queryallocationresidency
+				      *args)
+{
+	int ret = 0;
+	struct dxgkvmb_command_queryallocationresidency *command = NULL;
+	uint cmd_size = sizeof(*command);
+	uint alloc_size = 0;
+	uint result_allocation_size = 0;
+	struct dxgkvmb_command_queryallocationresidency_return *result = NULL;
+	uint result_size = sizeof(*result);
+
+	if (args->allocation_count > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (args->allocation_count) {
+		alloc_size = args->allocation_count * sizeof(d3dkmt_handle);
+		cmd_size += alloc_size;
+		result_allocation_size = args->allocation_count *
+		    sizeof(args->residency_status[0]);
+	} else {
+		result_allocation_size = sizeof(args->residency_status[0]);
+	}
+	result_size += result_allocation_size;
+
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_QUERYALLOCATIONRESIDENCY,
+				   process->host_handle);
+	command->args = *args;
+	if (alloc_size) {
+		ret = dxg_copy_from_user(&command[1], args->allocations,
+					 alloc_size);
+		if (ret)
+			goto cleanup;
+	}
+
+	result = dxgmem_alloc(process, DXGMEM_VMBUS, result_size);
+	if (result == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_sync_msg(channel, command, cmd_size,
+				   result, result_size);
+	if (ret)
+		goto cleanup;
+	if (!NT_SUCCESS(result->status)) {
+		ret = result->status;
+		goto cleanup;
+	}
+	ret = dxg_copy_to_user(args->residency_status, &result[1],
+			       result_allocation_size);
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	if (result)
+		dxgmem_free(process, DXGMEM_VMBUS, result);
+	return ret;
+}
+
+int dxgvmb_send_escape(struct dxgprocess *process,
+		       struct dxgvmbuschannel *channel,
+		       struct d3dkmt_escape *args)
+{
+	int ret = 0;
+	struct dxgkvmb_command_escape *command = NULL;
+	uint cmd_size = sizeof(*command);
+
+	if (args->priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	cmd_size = cmd_size - sizeof(args->priv_drv_data[0]) +
+	    args->priv_drv_data_size;
+
+	command = dxgmem_alloc(process, DXGMEM_VMBUS, cmd_size);
+	if (command == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	command_vgpu_to_host_init2(&command->hdr,
+				   DXGK_VMBCOMMAND_ESCAPE,
+				   process->host_handle);
+	command->adapter = args->adapter;
+	command->device = args->device;
+	command->type = args->type;
+	command->flags = args->flags;
+	command->priv_drv_data_size = args->priv_drv_data_size;
+	command->context = args->context;
+	if (args->priv_drv_data_size) {
+		ret = dxg_copy_from_user(command->priv_drv_data,
+					 args->priv_drv_data,
+					 args->priv_drv_data_size);
+		if (ret)
+			goto cleanup;
+	}
+
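+	/*
+	 * The command buffer doubles as the result buffer: the host
+	 * writes the updated private driver data back in place before
+	 * it is copied out to user mode.
+	 */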
+	ret = dxgvmb_send_sync_msg(channel, command, cmd_size,
+				   command->priv_drv_data,
+				   args->priv_drv_data_size);
+	if (ret)
+		goto cleanup;
+
+	if (args->priv_drv_data_size)
+		ret = dxg_copy_to_user(args->priv_drv_data,
+				       command->priv_drv_data,
+				       args->priv_drv_data_size);
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	if (command)
+		dxgmem_free(process, DXGMEM_VMBUS, command);
+	return ret;
+}
+
+int dxgvmb_send_query_vidmem_info(struct dxgprocess *process,
+				  struct dxgvmbuschannel *channel,
+				  struct d3dkmt_queryvideomemoryinfo *args,
+				  struct d3dkmt_queryvideomemoryinfo __user
+				  *output)
+{
+	int ret = 0;
+	struct dxgkvmb_command_queryvideomemoryinfo command = { };
+	struct dxgkvmb_command_queryvideomemoryinfo_return result = { };
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_QUERYVIDEOMEMORYINFO,
+				   process->host_handle);
+	command.adapter = args->adapter;
+	command.memory_segment_group = args->memory_segment_group;
+	command.physical_adapter_index = args->physical_adapter_index;
+
+	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command),
+				   &result, sizeof(result));
+	if (ret)
+		goto cleanup;
+
+	ret = dxg_copy_to_user(&output->budget, &result.budget,
+			       sizeof(output->budget));
+	if (ret)
+		goto cleanup;
+	ret = dxg_copy_to_user(&output->current_usage, &result.current_usage,
+			       sizeof(output->current_usage));
+	if (ret)
+		goto cleanup;
+	ret = dxg_copy_to_user(&output->current_reservation,
+			       &result.current_reservation,
+			       sizeof(output->current_reservation));
+	if (ret)
+		goto cleanup;
+	ret = dxg_copy_to_user(&output->available_for_reservation,
+			       &result.available_for_reservation,
+			       sizeof(output->available_for_reservation));
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_get_device_state(struct dxgprocess *process,
+				 struct dxgvmbuschannel *channel,
+				 struct d3dkmt_getdevicestate *args,
+				 struct d3dkmt_getdevicestate __user *output)
+{
+	int ret;
+	struct dxgkvmb_command_getdevicestate command = { };
+	struct dxgkvmb_command_getdevicestate_return result = { };
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_GETDEVICESTATE,
+				   process->host_handle);
+	command.args = *args;
+
+	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command),
+				   &result, sizeof(result));
+	if (ret)
+		goto cleanup;
+
+	if (!NT_SUCCESS(result.status)) {
+		ret = result.status;
+		goto cleanup;
+	}
+	ret = dxg_copy_to_user(output, &result.args, sizeof(result.args));
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_open_sync_object(struct dxgprocess *process,
+				 struct dxgvmbuschannel *channel,
+				 d3dkmt_handle shared_handle,
+				 d3dkmt_handle *host_handle)
+{
+	struct dxgkvmb_command_opensyncobject command = { };
+	struct dxgkvmb_command_opensyncobject_return result = { };
+	int ret = 0;
+
+	command_vm_to_host_init2(&command.hdr, DXGK_VMBCOMMAND_OPENSYNCOBJECT,
+				 process->host_handle);
+	command.global_sync_object = shared_handle;
+
+	ret = dxgglobal_acquire_channel_lock();
+	if (ret)
+		goto cleanup;
+
+	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command),
+				   &result, sizeof(result));
+
+	dxgglobal_release_channel_lock();
+
+	if (ret)
+		goto cleanup;
+
+	if (!NT_SUCCESS(result.status)) {
+		ret = result.status;
+		goto cleanup;
+	}
+
+	*host_handle = result.sync_object;
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process,
+				    struct dxgvmbuschannel *channel,
+				    struct d3dkmt_opensyncobjectfromnthandle2
+				    *args, struct dxgsyncobject *syncobj)
+{
+	struct dxgkvmb_command_opensyncobject command = { };
+	struct dxgkvmb_command_opensyncobject_return result = { };
+	int ret = 0;
+
+	command_vm_to_host_init2(&command.hdr, DXGK_VMBCOMMAND_OPENSYNCOBJECT,
+				 process->host_handle);
+	command.device = args->device;
+	command.global_sync_object = syncobj->shared_owner->host_shared_handle;
+	command.flags = args->flags;
+	if (syncobj->monitored_fence)
+		command.engine_affinity = args->monitored_fence.engine_affinity;
+
+	ret = dxgglobal_acquire_channel_lock();
+	if (ret)
+		goto cleanup;
+
+	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command),
+				   &result, sizeof(result));
+
+	dxgglobal_release_channel_lock();
+
+	if (ret)
+		goto cleanup;
+
+	if (!NT_SUCCESS(result.status)) {
+		ret = result.status;
+		goto cleanup;
+	}
+
+	args->sync_object = result.sync_object;
+	if (syncobj->monitored_fence) {
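+		/*
+		 * Map the fence page returned by the host into the guest
+		 * address space, so the fence value can be read by the CPU.
+		 */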
+		void *va = dxg_map_iospace(result.guest_cpu_physical_address,
+					   PAGE_SIZE, PROT_READ | PROT_WRITE,
+					   true);
+		if (va == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		args->monitored_fence.fence_value_cpu_va = va;
+		args->monitored_fence.fence_value_gpu_va =
+		    result.gpu_virtual_address;
+		syncobj->mapped_address = va;
+	}
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process,
+					d3dkmt_handle object,
+					d3dkmt_handle *shared_handle)
+{
+	struct dxgkvmb_command_createntsharedobject command = { };
+	int ret;
+
+	command_vm_to_host_init2(&command.hdr,
+				 DXGK_VMBCOMMAND_CREATENTSHAREDOBJECT,
+				 process->host_handle);
+	command.object = object;
+
+	ret = dxgglobal_acquire_channel_lock();
+	if (ret)
+		goto cleanup;
+
+	ret = dxgvmb_send_sync_msg(dxgglobal_get_dxgvmbuschannel(),
+				   &command, sizeof(command), shared_handle,
+				   sizeof(*shared_handle));
+
+	dxgglobal_release_channel_lock();
+
+	if (ret)
+		goto cleanup;
+	if (*shared_handle == 0) {
+		pr_err("failed to create NT shared object");
+		ret = STATUS_INTERNAL_ERROR;
+	}
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_destroy_nt_shared_object(d3dkmt_handle shared_handle)
+{
+	struct dxgkvmb_command_destroyntsharedobject command = { };
+	int ret;
+
+	command_vm_to_host_init1(&command.hdr,
+				 DXGK_VMBCOMMAND_DESTROYNTSHAREDOBJECT);
+	command.shared_handle = shared_handle;
+
+	ret = dxgglobal_acquire_channel_lock();
+	if (ret)
+		goto cleanup;
+
+	ret = dxgvmb_send_sync_msg_ntstatus(dxgglobal_get_dxgvmbuschannel(),
+					    &command, sizeof(command));
+
+	dxgglobal_release_channel_lock();
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+int dxgvmb_send_open_resource(struct dxgprocess *process,
+			      struct dxgvmbuschannel *channel,
+			      d3dkmt_handle device,
+			      bool nt_security_sharing,
+			      d3dkmt_handle global_share,
+			      uint allocation_count,
+			      uint total_priv_drv_data_size,
+			      d3dkmt_handle *resource_handle,
+			      d3dkmt_handle *alloc_handles)
+{
+	struct dxgkvmb_command_openresource command = { };
+	struct dxgkvmb_command_openresource_return *result = NULL;
+	d3dkmt_handle *handles;
+	int ret = 0;
+	int i;
+	uint result_size = allocation_count * sizeof(d3dkmt_handle) +
+	    sizeof(*result);
+
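+	/* The fixed size result is followed by one handle per allocation */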
+	result = dxgmem_alloc(process, DXGMEM_VMBUS, result_size);
+	if (result == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	command_vgpu_to_host_init2(&command.hdr, DXGK_VMBCOMMAND_OPENRESOURCE,
+				   process->host_handle);
+	command.device = device;
+	command.nt_security_sharing = nt_security_sharing;
+	command.global_share = global_share;
+	command.allocation_count = allocation_count;
+	command.total_priv_drv_data_size = total_priv_drv_data_size;
+
+	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command),
+				   result, result_size);
+	if (ret)
+		goto cleanup;
+	if (!NT_SUCCESS(result->status)) {
+		ret = result->status;
+		goto cleanup;
+	}
+
+	*resource_handle = result->resource;
+	handles = (d3dkmt_handle *) &result[1];
+	for (i = 0; i < allocation_count; i++)
+		alloc_handles[i] = handles[i];
+
+cleanup:
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	if (result)
+		dxgmem_free(process, DXGMEM_VMBUS, result);
+	return ret;
+}
+
+int dxgvmb_send_get_standard_alloc_priv_data(struct dxgdevice *device,
+					     enum d3dkmdt_standardallocationtype
+					     alloc_type,
+					     struct d3dkmdt_gdisurfacedata
+					     *alloc_data,
+					     uint physical_adapter_index,
+					     uint *alloc_priv_driver_size,
+					     void *priv_alloc_data)
+{
+	struct dxgkvmb_command_getstandardallocprivdata command = { };
+	struct dxgkvmb_command_getstandardallocprivdata_return *result = NULL;
+	uint result_size = sizeof(*result);
+	int ret = 0;
+
+	TRACE_DEBUG(1, "%s", __func__);
+
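+	/*
+	 * When priv_alloc_data is NULL, the call only queries the size of
+	 * the private driver data; a second call retrieves the data itself.
+	 */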
+	if (priv_alloc_data)
+		result_size = *alloc_priv_driver_size;
+	result = dxgmem_alloc(device->process, DXGMEM_VMBUS, result_size);
+	if (result == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	command_vgpu_to_host_init2(&command.hdr,
+				   DXGK_VMBCOMMAND_DDIGETSTANDARDALLOCATIONDRIVERDATA,
+				   device->process->host_handle);
+
+	command.alloc_type = alloc_type;
+	command.priv_driver_data_size = *alloc_priv_driver_size;
+	command.physical_adapter_index = physical_adapter_index;
+	switch (alloc_type) {
+	case D3DKMDT_STANDARDALLOCATION_GDISURFACE:
+		command.gdi_surface = *alloc_data;
+		break;
+	case D3DKMDT_STANDARDALLOCATION_SHAREDPRIMARYSURFACE:
+	case D3DKMDT_STANDARDALLOCATION_SHADOWSURFACE:
+	case D3DKMDT_STANDARDALLOCATION_STAGINGSURFACE:
+	default:
+		pr_err("Invalid standard alloc type");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_sync_msg(&device->adapter->channel,
+				   &command, sizeof(command), result,
+				   result_size);
+	if (ret)
+		goto cleanup;
+	if (!NT_SUCCESS(result->status)) {
+		ret = result->status;
+		goto cleanup;
+	}
+	if (*alloc_priv_driver_size &&
+	    result->priv_driver_data_size != *alloc_priv_driver_size) {
+		pr_err("Priv data size mismatch");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	*alloc_priv_driver_size = result->priv_driver_data_size;
+	if (priv_alloc_data) {
+		memcpy(priv_alloc_data, &result[1],
+		       result->priv_driver_data_size);
+	}
+
+cleanup:
+
+	if (result)
+		dxgmem_free(device->process, DXGMEM_VMBUS, result);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
diff --git a/drivers/gpu/dxgkrnl/dxgvmbus.h b/drivers/gpu/dxgkrnl/dxgvmbus.h
new file mode 100644
index 000000000000..70f7b36d3245
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/dxgvmbus.h
@@ -0,0 +1,859 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ *   Iouri Tarassov <iourit at microsoft.com>
+ *
+ * Dxgkrnl Graphics Port Driver
+ * VM bus interface with the host definitions
+ *
+ */
+
+#ifndef _DXGVMBUS_H
+#define _DXGVMBUS_H
+
+#include "d3dkmthk.h"
+
+struct dxgprocess;
+struct dxgadapter;
+
+#define DXG_MAX_VM_BUS_PACKET_SIZE   (1024 * 128)
+
+#define DXGK_DECL_VMBUS_OUTPUTSIZE(Type)\
+	((sizeof(Type) + 0x7) & ~(uint)0x7)
+#define DXGK_DECL_VMBUS_ALIGN_FOR_OUTPUT(Size) (((Size) + 0x7) & ~(uint)0x7)
+/*
+ * Declares a local buffer and a typed pointer to it. The buffer size
+ * is rounded up to a multiple of 8 bytes.
+ */
+#define DXGK_DECL_ALIGNED8_STRUCT(Type, Name, OutputSize)	\
+	const uint _Size	= DXGK_DECL_VMBUS_OUTPUTSIZE(Type);	\
+	uint8_t _AlignedStruct[_Size];				\
+	Type *Name		= (Type *)_AlignedStruct;	\
+	uint OutputSize	= _Size
+
+#define DXGK_BUFFER_VMBUS_ALIGNED(Buffer) (((Buffer) & 7) == 0)
+
+enum dxgkvmb_commandchanneltype {
+	DXGKVMB_VGPU_TO_HOST,
+	DXGKVMB_VM_TO_HOST,
+	DXGKVMB_HOST_TO_VM
+};
+
+/*
+ *
+ * Commands, sent to the host via the guest global VM bus channel
+ * DXG_GUEST_GLOBAL_VMBUS
+ *
+ */
+
+#define DXG_VM_PROCESS_NAME_LENGTH 260
+
+enum dxgkvmb_commandtype_global {
+	DXGK_VMBCOMMAND_VM_TO_HOST_FIRST	= 1000,
+	DXGK_VMBCOMMAND_CREATEPROCESS	= DXGK_VMBCOMMAND_VM_TO_HOST_FIRST,
+	DXGK_VMBCOMMAND_DESTROYPROCESS		= 1001,
+	DXGK_VMBCOMMAND_OPENSYNCOBJECT		= 1002,
+	DXGK_VMBCOMMAND_DESTROYSYNCOBJECT	= 1003,
+	DXGK_VMBCOMMAND_CREATENTSHAREDOBJECT	= 1004,
+	DXGK_VMBCOMMAND_DESTROYNTSHAREDOBJECT	= 1005,
+	DXGK_VMBCOMMAND_SIGNALFENCE		= 1006,
+	DXGK_VMBCOMMAND_NOTIFYPROCESSFREEZE	= 1007,
+	DXGK_VMBCOMMAND_NOTIFYPROCESSTHAW	= 1008,
+	DXGK_VMBCOMMAND_QUERYETWSESSION		= 1009,
+	DXGK_VMBCOMMAND_SETIOSPACEREGION	= 1010,
+	DXGK_VMBCOMMAND_COMPLETETRANSACTION	= 1011,
+	DXGK_VMBCOMMAND_INVALID_VM_TO_HOST
+};
+
+/*
+ *
+ * Commands, sent to the host via the per adapter VM bus channel
+ * DXG_GUEST_VGPU_VMBUS
+ *
+ */
+
+enum dxgkvmb_commandtype {
+	DXGK_VMBCOMMAND_CREATEDEVICE		= 0,
+	DXGK_VMBCOMMAND_DESTROYDEVICE		= 1,
+	DXGK_VMBCOMMAND_QUERYADAPTERINFO	= 2,
+	DXGK_VMBCOMMAND_DDIQUERYADAPTERINFO	= 3,
+	DXGK_VMBCOMMAND_CREATEALLOCATION	= 4,
+	DXGK_VMBCOMMAND_DESTROYALLOCATION	= 5,
+	DXGK_VMBCOMMAND_CREATECONTEXTVIRTUAL	= 6,
+	DXGK_VMBCOMMAND_DESTROYCONTEXT		= 7,
+	DXGK_VMBCOMMAND_CREATESYNCOBJECT	= 8,
+	DXGK_VMBCOMMAND_CREATEPAGINGQUEUE	= 9,
+	DXGK_VMBCOMMAND_DESTROYPAGINGQUEUE	= 10,
+	DXGK_VMBCOMMAND_MAKERESIDENT		= 11,
+	DXGK_VMBCOMMAND_EVICT			= 12,
+	DXGK_VMBCOMMAND_ESCAPE			= 13,
+	DXGK_VMBCOMMAND_OPENADAPTER		= 14,
+	DXGK_VMBCOMMAND_CLOSEADAPTER		= 15,
+	DXGK_VMBCOMMAND_FREEGPUVIRTUALADDRESS	= 16,
+	DXGK_VMBCOMMAND_MAPGPUVIRTUALADDRESS	= 17,
+	DXGK_VMBCOMMAND_RESERVEGPUVIRTUALADDRESS = 18,
+	DXGK_VMBCOMMAND_UPDATEGPUVIRTUALADDRESS	= 19,
+	DXGK_VMBCOMMAND_SUBMITCOMMAND		= 20,
+	DXGK_VMBCOMMAND_QUERYVIDEOMEMORYINFO	= 21,
+	DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMCPU = 22,
+	DXGK_VMBCOMMAND_LOCK2			= 23,
+	DXGK_VMBCOMMAND_UNLOCK2			= 24,
+	DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMGPU = 25,
+	DXGK_VMBCOMMAND_SIGNALSYNCOBJECT	= 26,
+	DXGK_VMBCOMMAND_SIGNALFENCENTSHAREDBYREF = 27,
+	DXGK_VMBCOMMAND_GETDEVICESTATE		= 28,
+	DXGK_VMBCOMMAND_MARKDEVICEASERROR	= 29,
+	DXGK_VMBCOMMAND_ADAPTERSTOP		= 30,
+	DXGK_VMBCOMMAND_SETQUEUEDLIMIT		= 31,
+	DXGK_VMBCOMMAND_OPENRESOURCE		= 32,
+	DXGK_VMBCOMMAND_SETCONTEXTSCHEDULINGPRIORITY = 33,
+	DXGK_VMBCOMMAND_PRESENTHISTORYTOKEN	= 34,
+	DXGK_VMBCOMMAND_SETREDIRECTEDFLIPFENCEVALUE = 35,
+	DXGK_VMBCOMMAND_GETINTERNALADAPTERINFO	= 36,
+	DXGK_VMBCOMMAND_FLUSHHEAPTRANSITIONS	= 37,
+	DXGK_VMBCOMMAND_BLT			= 38,
+	DXGK_VMBCOMMAND_DDIGETSTANDARDALLOCATIONDRIVERDATA = 39,
+	DXGK_VMBCOMMAND_CDDGDICOMMAND		= 40,
+	DXGK_VMBCOMMAND_QUERYALLOCATIONRESIDENCY = 41,
+	DXGK_VMBCOMMAND_FLUSHDEVICE		= 42,
+	DXGK_VMBCOMMAND_FLUSHADAPTER		= 43,
+	DXGK_VMBCOMMAND_DDIGETNODEMETADATA	= 44,
+	DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE	= 45,
+	DXGK_VMBCOMMAND_ISSYNCOBJECTSIGNALED	= 46,
+	DXGK_VMBCOMMAND_CDDSYNCGPUACCESS	= 47,
+	DXGK_VMBCOMMAND_QUERYSTATISTICS		= 48,
+	DXGK_VMBCOMMAND_CHANGEVIDEOMEMORYRESERVATION = 49,
+	DXGK_VMBCOMMAND_CREATEHWQUEUE		= 50,
+	DXGK_VMBCOMMAND_DESTROYHWQUEUE		= 51,
+	DXGK_VMBCOMMAND_SUBMITCOMMANDTOHWQUEUE	= 52,
+	DXGK_VMBCOMMAND_GETDRIVERSTOREFILE	= 53,
+	DXGK_VMBCOMMAND_READDRIVERSTOREFILE	= 54,
+	DXGK_VMBCOMMAND_GETNEXTHARDLINK		= 55,
+	DXGK_VMBCOMMAND_UPDATEALLOCATIONPROPERTY = 56,
+	DXGK_VMBCOMMAND_OFFERALLOCATIONS	= 57,
+	DXGK_VMBCOMMAND_RECLAIMALLOCATIONS	= 58,
+	DXGK_VMBCOMMAND_SETALLOCATIONPRIORITY	= 59,
+	DXGK_VMBCOMMAND_GETALLOCATIONPRIORITY	= 60,
+	DXGK_VMBCOMMAND_GETCONTEXTSCHEDULINGPRIORITY = 61,
+	DXGK_VMBCOMMAND_QUERYCLOCKCALIBRATION	= 62,
+	DXGK_VMBCOMMAND_QUERYRESOURCEINFO	= 64,
+	DXGK_VMBCOMMAND_INVALID
+};
+
+enum dxgkvmb_commandtype_host_to_vm {
+	DXGK_VMBCOMMAND_SIGNALGUESTEVENT,
+	DXGK_VMBCOMMAND_PROPAGATEPRESENTHISTORYTOKEN,
+	DXGK_VMBCOMMAND_SETGUESTDATA,
+	DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE,
+	DXGK_VMBCOMMAND_SENDWNFNOTIFICATION,
+	DXGK_VMBCOMMAND_INVALID_HOST_TO_VM
+};
+
+struct dxgkvmb_command_vm_to_host {
+	uint64_t			command_id;
+	d3dkmt_handle			process;
+	enum dxgkvmb_commandchanneltype	channel_type;
+	enum dxgkvmb_commandtype_global	command_type;
+};
+
+struct dxgkvmb_command_vgpu_to_host {
+	uint64_t			command_id;
+	d3dkmt_handle			process;
+	u32				channel_type;
+	enum dxgkvmb_commandtype	command_type;
+};
+
+struct dxgkvmb_command_host_to_vm {
+	uint64_t			command_id;
+	d3dkmt_handle			process;
+	enum dxgkvmb_commandchanneltype	channel_type;
+	enum dxgkvmb_commandtype_host_to_vm command_type;
+};
+
+struct dxgkvmb_command_signalguestevent {
+	struct dxgkvmb_command_host_to_vm hdr;
+	uint64_t			event;
+	winhandle			process_id;
+	bool				dereference_event;
+};
+
+struct dxgkvmb_command_opensyncobject {
+	struct dxgkvmb_command_vm_to_host hdr;
+	d3dkmt_handle			device;
+	d3dkmt_handle			global_sync_object;
+	uint				engine_affinity;
+	struct d3dddi_synchronizationobject_flags flags;
+};
+
+struct dxgkvmb_command_opensyncobject_return {
+	d3dkmt_handle			sync_object;
+	ntstatus			status;
+	d3dgpu_virtual_address		gpu_virtual_address;
+	uint64_t			guest_cpu_physical_address;
+};
+
+/*
+ * The command returns d3dkmt_handle of a shared object for the
+ * given pre-process object
+ */
+struct dxgkvmb_command_createntsharedobject {
+	struct dxgkvmb_command_vm_to_host hdr;
+	d3dkmt_handle			object;
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroyntsharedobject {
+	struct dxgkvmb_command_vm_to_host hdr;
+	d3dkmt_handle			shared_handle;
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_setiospaceregion {
+	struct dxgkvmb_command_vm_to_host hdr;
+	uint64_t			start;
+	uint64_t			length;
+	uint				shared_page_gpadl;
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_setexistingsysmemstore {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			device;
+	d3dkmt_handle			allocation;
+	uint				gpadl;
+};
+
+struct dxgkvmb_command_createprocess {
+	struct dxgkvmb_command_vm_to_host hdr;
+	void				*process;
+	winhandle			process_id;
+	winwchar			process_name[DXG_VM_PROCESS_NAME_LENGTH + 1];
+	bool				csrss_process:1;
+	bool				dwm_process:1;
+	bool				wow64_process:1;
+	bool				linux_process:1;
+};
+
+struct dxgkvmb_command_createprocess_return {
+	d3dkmt_handle			hprocess;
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroyprocess {
+	struct dxgkvmb_command_vm_to_host hdr;
+};
+
+struct dxgkvmb_command_openadapter {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	uint				vmbus_interface_version;
+	uint				vmbus_last_compatible_interface_version;
+	struct winluid			guest_adapter_luid;
+};
+
+struct dxgkvmb_command_openadapter_return {
+	d3dkmt_handle			host_adapter_handle;
+	ntstatus			status;
+	uint				vmbus_interface_version;
+	uint				vmbus_last_compatible_interface_version;
+};
+
+struct dxgkvmb_command_closeadapter {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			host_handle;
+};
+
+struct dxgkvmb_command_getinternaladapterinfo {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+};
+
+struct dxgkvmb_command_getinternaladapterinfo_return {
+	struct dxgk_device_types	device_types;
+	uint				driver_store_copy_mode;
+	uint				driver_ddi_version;
+	uint				secure_virtual_machine:1;
+	uint				virtual_machine_reset:1;
+	uint				is_vail_supported:1;
+	struct winluid			host_adapter_luid;
+	winwchar			device_description[80];
+	winwchar			device_instance_id[W_MAX_PATH];
+};
+
+struct dxgkvmb_command_queryadapterinfo {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	enum kmtqueryadapterinfotype	query_type;
+	uint				private_data_size;
+	uint8_t				private_data[1];
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_setallocationpriority {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			device;
+	d3dkmt_handle			resource;
+	uint				allocation_count;
+	/* d3dkmt_handle    allocations[allocation_count or 0]; */
+	/* uint priorities[allocation_count or 1]; */
+};
+
+struct dxgkvmb_command_getallocationpriority {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			device;
+	d3dkmt_handle			resource;
+	uint				allocation_count;
+	/* d3dkmt_handle allocations[allocation_count or 0]; */
+};
+
+struct dxgkvmb_command_getallocationpriority_return {
+	ntstatus			status;
+	/* uint priorities[allocation_count or 1]; */
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_setcontextschedulingpriority {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			context;
+	int				priority;
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_setcontextschedulingpriority2 {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			context;
+	int				priority;
+	bool				in_process;
+};
+
+struct dxgkvmb_command_getcontextschedulingpriority {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			context;
+	bool				in_process;
+};
+
+struct dxgkvmb_command_getcontextschedulingpriority_return {
+	ntstatus			status;
+	int				priority;
+};
+
+struct dxgkvmb_command_createdevice {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_createdeviceflags	flags;
+	bool				cdd_device;
+	void				*error_code;
+};
+
+struct dxgkvmb_command_createdevice_return {
+	d3dkmt_handle			device;
+};
+
+struct dxgkvmb_command_destroydevice {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			device;
+};
+
+struct dxgkvmb_command_makeresident {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			device;
+	d3dkmt_handle			paging_queue;
+	struct d3dddi_makeresident_flags flags;
+	uint				alloc_count;
+	d3dkmt_handle			allocations[1];
+};
+
+struct dxgkvmb_command_makeresident_return {
+	uint64_t			paging_fence_value;
+	uint64_t			num_bytes_to_trim;
+	ntstatus			status;
+};
+
+struct dxgkvmb_command_evict {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			device;
+	struct d3dddi_evict_flags	flags;
+	uint				alloc_count;
+	d3dkmt_handle			allocations[1];
+};
+
+struct dxgkvmb_command_evict_return {
+	uint64_t			num_bytes_to_trim;
+};
+
+struct dxgkvmb_command_submitcommand {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_submitcommand	args;
+	/* HistoryBufferHandles */
+	/* PrivateDriverData    */
+};
+
+struct dxgkvmb_command_submitcommandtohwqueue {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_submitcommandtohwqueue args;
+	/* Written primaries */
+	/* PrivateDriverData */
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_flushheaptransitions {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+};
+
+struct dxgkvmb_command_freegpuvirtualaddress {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_freegpuvirtualaddress args;
+};
+
+struct dxgkvmb_command_mapgpuvirtualaddress {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dddi_mapgpuvirtualaddress args;
+	d3dkmt_handle			device;
+};
+
+struct dxgkvmb_command_mapgpuvirtualaddress_return {
+	d3dgpu_virtual_address		virtual_address;
+	uint64_t			paging_fence_value;
+	ntstatus			status;
+};
+
+struct dxgkvmb_command_reservegpuvirtualaddress {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dddi_reservegpuvirtualaddress args;
+};
+
+struct dxgkvmb_command_reservegpuvirtualaddress_return {
+	d3dgpu_virtual_address		virtual_address;
+	uint64_t			paging_fence_value;
+};
+
+struct dxgkvmb_command_updategpuvirtualaddress {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	uint64_t			fence_value;
+	d3dkmt_handle			device;
+	d3dkmt_handle			context;
+	d3dkmt_handle			fence_object;
+	uint				num_operations;
+	uint				flags;
+	struct d3dddi_updategpuvirtualaddress_operation operations[1];
+};
+
+struct dxgkvmb_command_queryclockcalibration {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_queryclockcalibration args;
+};
+
+struct dxgkvmb_command_queryclockcalibration_return {
+	ntstatus			status;
+	struct dxgk_gpuclockdata	clock_data;
+};
+
+struct dxgkvmb_command_createallocation_allocinfo {
+	uint				flags;
+	uint				priv_drv_data_size;
+	uint				vidpn_source_id;
+};
+
+struct dxgkvmb_command_createallocation {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			device;
+	d3dkmt_handle			resource;
+	uint				private_runtime_data_size;
+	uint				priv_drv_data_size;
+	uint				alloc_count;
+	struct d3dkmt_createallocationflags flags;
+	winhandle			private_runtime_resource_handle;
+	bool				make_resident;
+/* dxgkvmb_command_createallocation_allocinfo alloc_info[alloc_count]; */
+/* uint8_t private_runtime_data[private_runtime_data_size] */
+/* uint8_t priv_drv_data[] for each alloc_info */
+};
+
+struct dxgkvmb_command_openresource {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			device;
+	bool				nt_security_sharing;
+	d3dkmt_handle			global_share;
+	uint				allocation_count;
+	uint				total_priv_drv_data_size;
+};
+
+struct dxgkvmb_command_openresource_return {
+	d3dkmt_handle			resource;
+	ntstatus			status;
+/* d3dkmt_handle   allocation[allocation_count]; */
+};
+
+struct dxgkvmb_command_getstandardallocprivdata {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	enum d3dkmdt_standardallocationtype alloc_type;
+	uint				priv_driver_data_size;
+	uint				priv_driver_resource_size;
+	uint				physical_adapter_index;
+	union {
+		struct d3dkmdt_sharedprimarysurfacedata	primary;
+		struct d3dkmdt_shadowsurfacedata	shadow;
+		struct d3dkmtd_stagingsurfacedata	staging;
+		struct d3dkmdt_gdisurfacedata		gdi_surface;
+	};
+};
+
+struct dxgkvmb_command_getstandardallocprivdata_return {
+	ntstatus			status;
+	uint				priv_driver_data_size;
+	uint				priv_driver_resource_size;
+	union {
+		struct d3dkmdt_sharedprimarysurfacedata	primary;
+		struct d3dkmdt_shadowsurfacedata	shadow;
+		struct d3dkmtd_stagingsurfacedata	staging;
+		struct d3dkmdt_gdisurfacedata		gdi_surface;
+	};
+/* char alloc_priv_data[priv_driver_data_size]; */
+/* char resource_priv_data[priv_driver_resource_size]; */
+};
+
+struct dxgkarg_describeallocation {
+	winhandle			allocation;
+	uint				width;
+	uint				height;
+	uint				format;
+	uint				multisample_method;
+	struct d3dddi_rational		refresh_rate;
+	uint				private_driver_attribute;
+	uint				flags;
+	uint				rotation;
+};
+
+struct dxgkvmb_allocflags {
+	union {
+		uint			flags;
+		struct {
+			uint		primary:1;
+			uint		cdd_primary:1;
+			uint		dod_primary:1;
+			uint		overlay:1;
+			uint		reserved6:1;
+			uint		capture:1;
+			uint		reserved0:4;
+			uint		reserved1:1;
+			uint		existing_sysmem:1;
+			uint		stereo:1;
+			uint		direct_flip:1;
+			uint		hardware_protected:1;
+			uint		reserved2:1;
+			uint		reserved3:1;
+			uint		reserved4:1;
+			uint		protected:1;
+			uint		cached:1;
+			uint		independent_primary:1;
+			uint		reserved:11;
+		};
+	};
+};
+
+struct dxgkvmb_command_allocinfo_return {
+	d3dkmt_handle			allocation;
+	uint				priv_drv_data_size;
+	struct dxgkvmb_allocflags	allocation_flags;
+	uint64_t			allocation_size;
+	struct dxgkarg_describeallocation driver_info;
+};
+
+struct dxgkvmb_command_createallocation_return {
+	struct d3dkmt_createallocationflags flags;
+	d3dkmt_handle			resource;
+	d3dkmt_handle			global_share;
+	uint				vgpu_flags;
+	struct dxgkvmb_command_allocinfo_return allocation_info[1];
+	/* Private driver data for allocations */
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroyallocation {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			device;
+	d3dkmt_handle			resource;
+	uint				alloc_count;
+	struct d3dddicb_destroyallocation2flags flags;
+	d3dkmt_handle			allocations[1];
+};
+
+struct dxgkvmb_command_createcontextvirtual {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			context;
+	d3dkmt_handle			device;
+	uint				node_ordinal;
+	uint				engine_affinity;
+	struct d3dddi_createcontextflags flags;
+	enum d3dkmt_clienthint		client_hint;
+	uint				priv_drv_data_size;
+	uint8_t				priv_drv_data[1];
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroycontext {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			context;
+};
+
+struct dxgkvmb_command_createpagingqueue {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_createpagingqueue	args;
+};
+
+struct dxgkvmb_command_createpagingqueue_return {
+	d3dkmt_handle			paging_queue;
+	d3dkmt_handle			sync_object;
+	uint64_t			fence_storage_physical_address;
+	uint64_t			fence_storage_offset;
+};
+
+struct dxgkvmb_command_destroypagingqueue {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			paging_queue;
+};
+
+struct dxgkvmb_command_createsyncobject {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_createsynchronizationobject2 args;
+	uint				client_hint;
+};
+
+struct dxgkvmb_command_createsyncobject_return {
+	d3dkmt_handle			sync_object;
+	d3dkmt_handle			global_sync_object;
+	d3dgpu_virtual_address		fence_gpu_va;
+	uint64_t			fence_storage_address;
+	uint				fence_storage_offset;
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroysyncobject {
+	struct dxgkvmb_command_vm_to_host hdr;
+	d3dkmt_handle			sync_object;
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_signalsyncobject {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	uint				object_count;
+	struct d3dddicb_signalflags	flags;
+	uint				context_count;
+	uint64_t			fence_value;
+	union {
+		/* Pointer to the guest event object */
+		winhandle		cpu_event_handle;
+		/* Non zero when signal from CPU is done */
+		d3dkmt_handle		device;
+	};
+	/* d3dkmt_handle ObjectHandleArray[object_count] */
+	/* d3dkmt_handle ContextArray[context_count]     */
+	/* uint64_t MonitoredFenceValueArray[object_count] */
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_waitforsyncobjectfromcpu {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			device;
+	uint				object_count;
+	struct d3dddi_waitforsynchronizationobjectfromcpu_flags flags;
+	uint64_t			guest_event_pointer;
+	bool				dereference_event;
+	/* d3dkmt_handle ObjectHandleArray[object_count] */
+	/* uint64_t FenceValueArray [object_count] */
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_waitforsyncobjectfromgpu {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			context;
+	/* Must be 1 when bLegacyFenceObject is TRUE */
+	uint				object_count;
+	bool				legacy_fence_object;
+	uint64_t			fence_values[1];
+	/* d3dkmt_handle ObjectHandles[object_count] */
+};
+
+struct dxgkvmb_command_lock2 {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_lock2		args;
+	bool				use_legacy_lock;
+	uint				flags;
+	uint				priv_drv_data;
+};
+
+struct dxgkvmb_command_lock2_return {
+	ntstatus			status;
+	void				*cpu_visible_buffer_offset;
+};
+
+struct dxgkvmb_command_unlock2 {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_unlock2		args;
+	bool				use_legacy_unlock;
+};
+
+struct dxgkvmb_command_updateallocationproperty {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dddi_updateallocproperty args;
+};
+
+struct dxgkvmb_command_updateallocationproperty_return {
+	uint64_t			paging_fence_value;
+	ntstatus			status;
+};
+
+struct dxgkvmb_command_markdeviceaserror {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_markdeviceaserror args;
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_offerallocations {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			device;
+	uint				allocation_count;
+	enum d3dkmt_offer_priority	priority;
+	struct d3dkmt_offer_flags	flags;
+	bool				resources;
+	d3dkmt_handle			allocations[1];
+};
+
+struct dxgkvmb_command_reclaimallocations {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			device;
+	d3dkmt_handle			paging_queue;
+	uint				allocation_count;
+	bool				resources;
+	bool				write_results;
+	d3dkmt_handle			allocations[1];
+};
+
+struct dxgkvmb_command_reclaimallocations_return {
+	uint64_t			paging_fence_value;
+	ntstatus			status;
+	enum d3dddi_reclaim_result	discarded[1];
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_changevideomemoryreservation {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_changevideomemoryreservation args;
+};
+
+/* Returns the same structure */
+struct dxgkvmb_command_createhwqueue {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	ntstatus			status;
+	d3dkmt_handle			hwqueue;
+	d3dkmt_handle			hwqueue_progress_fence;
+	void				*hwqueue_progress_fence_cpuva;
+	d3dgpu_virtual_address		hwqueue_progress_fence_gpuva;
+	d3dkmt_handle			context;
+	struct d3dddi_createhwqueueflags flags;
+	uint				priv_drv_data_size;
+	uint8_t				priv_drv_data[1];
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroyhwqueue {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			hwqueue;
+};
+
+struct dxgkvmb_command_queryallocationresidency {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_queryallocationresidency args;
+	/* d3dkmt_handle allocations[0 or number of allocations] */
+};
+
+struct dxgkvmb_command_queryallocationresidency_return {
+	ntstatus			status;
+	/* d3dkmt_allocationresidencystatus[NumAllocations] */
+};
+
+/* Returns only private data */
+struct dxgkvmb_command_escape {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			adapter;
+	d3dkmt_handle			device;
+	enum d3dkmt_escapetype		type;
+	struct d3dddi_escapeflags	flags;
+	uint				priv_drv_data_size;
+	d3dkmt_handle			context;
+	uint8_t				priv_drv_data[1];
+};
+
+struct dxgkvmb_command_queryvideomemoryinfo {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	d3dkmt_handle			adapter;
+	enum d3dkmt_memory_segment_group memory_segment_group;
+	uint				physical_adapter_index;
+};
+
+struct dxgkvmb_command_queryvideomemoryinfo_return {
+	uint64_t			budget;
+	uint64_t			current_usage;
+	uint64_t			current_reservation;
+	uint64_t			available_for_reservation;
+};
+
+struct dxgkvmb_command_getdevicestate {
+	struct dxgkvmb_command_vgpu_to_host hdr;
+	struct d3dkmt_getdevicestate	args;
+};
+
+struct dxgkvmb_command_getdevicestate_return {
+	struct d3dkmt_getdevicestate	args;
+	ntstatus			status;
+};
+
+/*
+ * Helpers to initialize the VM bus command headers. The "init1" helpers
+ * build headers for commands which are not tied to a process, while the
+ * "init2" helpers also record the host process handle.
+ */
+static inline void command_vm_to_host_init2(struct dxgkvmb_command_vm_to_host
+					    *command,
+					    enum dxgkvmb_commandtype_global
+					    type, d3dkmt_handle process)
+{
+	command->command_type	= type;
+	command->process	= process;
+	command->command_id	= 0;
+	command->channel_type	= DXGKVMB_VM_TO_HOST;
+}
+
+static inline void command_vgpu_to_host_init0(struct
+					      dxgkvmb_command_vgpu_to_host
+					      *command)
+{
+	command->command_type	= DXGK_VMBCOMMAND_INVALID;
+	command->process	= 0;
+	command->command_id	= 0;
+	command->channel_type	= DXGKVMB_VGPU_TO_HOST;
+}
+
+static inline void command_vgpu_to_host_init1(struct
+					      dxgkvmb_command_vgpu_to_host
+					      *command,
+					      enum dxgkvmb_commandtype type)
+{
+	command->command_type	= type;
+	command->process	= 0;
+	command->command_id	= 0;
+	command->channel_type	= DXGKVMB_VGPU_TO_HOST;
+}
+
+static inline void command_vgpu_to_host_init2(struct
+					      dxgkvmb_command_vgpu_to_host
+					      *command,
+					      enum dxgkvmb_commandtype type,
+					      d3dkmt_handle process_handle)
+{
+	command->command_type	= type;
+	command->process	= process_handle;
+	command->command_id	= 0;
+	command->channel_type	= DXGKVMB_VGPU_TO_HOST;
+}
+
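+/*
+ * Sends a command to the host over the VM bus channel and waits
+ * synchronously for a result of the given size.
+ */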
+int dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel,
+			 void *command,
+			 u32 command_size, void *result, u32 result_size);
+
+#endif /* _DXGVMBUS_H */
diff --git a/drivers/gpu/dxgkrnl/hmgr.c b/drivers/gpu/dxgkrnl/hmgr.c
new file mode 100644
index 000000000000..450fb67e703f
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/hmgr.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ *   Iouri Tarassov <iourit at microsoft.com>
+ */
+
+/*
+ * Dxgkrnl Graphics Port Driver handle manager
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+
+#include "misc.h"
+#include "dxgkrnl.h"
+#include "hmgr.h"
+
+/*
+ * Handle parameters
+ */
+#define HMGRHANDLE_INSTANCE_BITS	6
+#define HMGRHANDLE_INDEX_BITS		24
+#define HMGRHANDLE_UNIQUE_BITS		2
+
+#define HMGRHANDLE_INSTANCE_SHIFT	0
+#define HMGRHANDLE_INDEX_SHIFT	\
+	(HMGRHANDLE_INSTANCE_BITS + HMGRHANDLE_INSTANCE_SHIFT)
+#define HMGRHANDLE_UNIQUE_SHIFT	\
+	(HMGRHANDLE_INDEX_BITS + HMGRHANDLE_INDEX_SHIFT)
+
+#define HMGRHANDLE_INSTANCE_MASK \
+	(((1 << HMGRHANDLE_INSTANCE_BITS) - 1) << HMGRHANDLE_INSTANCE_SHIFT)
+#define HMGRHANDLE_INDEX_MASK      \
+	(((1 << HMGRHANDLE_INDEX_BITS)    - 1) << HMGRHANDLE_INDEX_SHIFT)
+#define HMGRHANDLE_UNIQUE_MASK     \
+	(((1 << HMGRHANDLE_UNIQUE_BITS)   - 1) << HMGRHANDLE_UNIQUE_SHIFT)
+
+#define HMGRHANDLE_INSTANCE_MAX	((1 << HMGRHANDLE_INSTANCE_BITS) - 1)
+#define HMGRHANDLE_INDEX_MAX	((1 << HMGRHANDLE_INDEX_BITS) - 1)
+#define HMGRHANDLE_UNIQUE_MAX	((1 << HMGRHANDLE_UNIQUE_BITS) - 1)
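+
+/*
+ * Handle bit layout (32 bits total):
+ *   bits  0..5  - instance
+ *   bits  6..29 - index of the entry in the handle table
+ *   bits 30..31 - unique (generation) counter, bumped when an entry is
+ *                 freed, so stale handles fail validation
+ */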
+
+/*
+ * Handle entry
+ */
+struct hmgrentry {
+	union {
+		void *object;
+		struct {
+			uint prev_free_index;
+			uint next_free_index;
+		};
+	};
+	uint type:HMGRENTRY_TYPE_BITS + 1;
+	uint unique:HMGRHANDLE_UNIQUE_BITS;
+	uint instance:HMGRHANDLE_INSTANCE_BITS;
+	uint destroyed:1;
+};
+
+#define HMGRTABLE_SIZE_INCREMENT	1024
+#define HMGRTABLE_MIN_FREE_ENTRIES 128
+#define HMGRTABLE_INVALID_INDEX (~((1 << HMGRHANDLE_INDEX_BITS) - 1))
+#define HMGRTABLE_SIZE_MAX		0xFFFFFFF
+
+static uint table_size_increment = HMGRTABLE_SIZE_INCREMENT;
+
+static inline uint get_unique(d3dkmt_handle h)
+{
+	return (h & HMGRHANDLE_UNIQUE_MASK) >> HMGRHANDLE_UNIQUE_SHIFT;
+}
+
+static uint get_index(d3dkmt_handle h)
+{
+	return (h & HMGRHANDLE_INDEX_MASK) >> HMGRHANDLE_INDEX_SHIFT;
+}
+
+uint get_instance(d3dkmt_handle h)
+{
+	return (h & HMGRHANDLE_INSTANCE_MASK) >> HMGRHANDLE_INSTANCE_SHIFT;
+}
+
+static bool is_handle_valid(struct hmgrtable *table, d3dkmt_handle h,
+			    bool ignore_destroyed, enum hmgrentry_type t)
+{
+	uint index = get_index(h);
+	uint unique = get_unique(h);
+	struct hmgrentry *entry;
+
+	if (index >= table->table_size) {
+		pr_err("%s Invalid index %x %d\n", __func__, h, index);
+		return false;
+	}
+
+	entry = &table->entry_table[index];
+	if (unique != entry->unique) {
+		pr_err("%s Invalid unique %x %d %d %d %p",
+			   __func__, h, unique, entry->unique,
+			   index, entry->object);
+		return false;
+	}
+
+	if (entry->destroyed && !ignore_destroyed) {
+		pr_err("%s Invalid destroyed", __func__);
+		return false;
+	}
+
+	if (entry->type == HMGRENTRY_TYPE_FREE) {
+		pr_err("%s Entry is freed %x %d", __func__, h, index);
+		return false;
+	}
+
+	if (t != HMGRENTRY_TYPE_FREE && t != entry->type) {
+		pr_err("%s type mismatch %x %d %d", __func__, h,
+			   t, entry->type);
+		return false;
+	}
+
+	return true;
+}
+
+static d3dkmt_handle build_handle(uint index, uint unique, uint instance)
+{
+	uint handle_bits;
+
+	handle_bits = (index << HMGRHANDLE_INDEX_SHIFT) & HMGRHANDLE_INDEX_MASK;
+	handle_bits |= (unique << HMGRHANDLE_UNIQUE_SHIFT) &
+	    HMGRHANDLE_UNIQUE_MASK;
+	handle_bits |= (instance << HMGRHANDLE_INSTANCE_SHIFT) &
+	    HMGRHANDLE_INSTANCE_MASK;
+
+	return (d3dkmt_handle) handle_bits;
+}
+
+inline uint hmgrtable_get_used_entry_count(struct hmgrtable *table)
+{
+	DXGKRNL_ASSERT(table->table_size >= table->free_count);
+	return (table->table_size - table->free_count);
+}
+
+bool hmgrtable_mark_destroyed(struct hmgrtable *table, d3dkmt_handle h)
+{
+	if (!is_handle_valid(table, h, false, HMGRENTRY_TYPE_FREE))
+		return false;
+
+	table->entry_table[get_index(h)].destroyed = true;
+	return true;
+}
+
+bool hmgrtable_unmark_destroyed(struct hmgrtable *table, d3dkmt_handle h)
+{
+	if (!is_handle_valid(table, h, true, HMGRENTRY_TYPE_FREE))
+		return true;
+
+	DXGKRNL_ASSERT(table->entry_table[get_index(h)].destroyed);
+	table->entry_table[get_index(h)].destroyed = 0;
+	return true;
+}
+
+static inline bool is_empty(struct hmgrtable *table)
+{
+	return (table->free_count == table->table_size);
+}
+
+void print_status(struct hmgrtable *table)
+{
+	int i;
+
+	TRACE_DEBUG(1, "hmgrtable head, tail %p %d %d\n",
+		    table, table->free_handle_list_head,
+		    table->free_handle_list_tail);
+	if (table->entry_table == NULL)
+		return;
+	for (i = 0; i < 3; i++) {
+		if (table->entry_table[i].type != HMGRENTRY_TYPE_FREE)
+			TRACE_DEBUG(1, "hmgrtable entry %p %d %p\n",
+				    table, i, table->entry_table[i].object);
+		else
+			TRACE_DEBUG(1, "hmgrtable entry %p %d %d %d\n",
+				    table, i,
+				    table->entry_table[i].next_free_index,
+				    table->entry_table[i].prev_free_index);
+	}
+}
+
+static bool expand_table(struct hmgrtable *table, uint num_entries)
+{
+	uint new_table_size;
+	struct hmgrentry *new_entry;
+	uint table_index;
+	uint new_free_count;
+	uint prev_free_index;
+	uint tail_index = table->free_handle_list_tail;
+
+	TRACE_DEBUG(1, "%s\n", __func__);
+
+	/* The tail should point to the last free element in the list */
+	if (!(table->free_count == 0 ||
+	      table->entry_table[tail_index].next_free_index ==
+	      HMGRTABLE_INVALID_INDEX)) {
+		pr_err("%s:corruption\n", __func__);
+		return false;
+	}
+
+	new_table_size = table->table_size + table_size_increment;
+	if (new_table_size < num_entries)
+		new_table_size = num_entries;
+
+	if (new_table_size > HMGRHANDLE_INDEX_MAX) {
+		pr_err("%s:corruption\n", __func__);
+		return false;
+	}
+
+	new_entry = (struct hmgrentry *)
+	    dxgmem_alloc(table->process, DXGMEM_HANDLE_TABLE,
+			 new_table_size * sizeof(struct hmgrentry));
+	if (new_entry == NULL) {
+		pr_err("%s:allocation failed\n", __func__);
+		return false;
+	}
+
+	if (table->entry_table) {
+		memcpy(new_entry, table->entry_table,
+		       table->table_size * sizeof(struct hmgrentry));
+		dxgmem_free(table->process, DXGMEM_HANDLE_TABLE,
+			    table->entry_table);
+	} else {
+		table->free_handle_list_head = 0;
+	}
+
+	table->entry_table = new_entry;
+
+	/* Initialize new table entries and add to the free list */
+	table_index = table->table_size;
+	new_free_count = table->free_count +
+	    (new_table_size - table->table_size);
+
+	prev_free_index = table->free_handle_list_tail;
+
+	while (table_index < new_table_size) {
+		struct hmgrentry *entry = &table->entry_table[table_index];
+
+		entry->prev_free_index = prev_free_index;
+		entry->next_free_index = table_index + 1;
+		entry->type = HMGRENTRY_TYPE_FREE;
+		entry->unique = 1;
+		entry->instance = 0;
+		prev_free_index = table_index;
+
+		table_index++;
+	}
+
+	table->entry_table[table_index - 1].next_free_index =
+	    (uint) HMGRTABLE_INVALID_INDEX;
+
+	if (table->free_count != 0) {
+		/* Link the current free list with the new entries */
+		struct hmgrentry *entry;
+
+		entry = &table->entry_table[table->free_handle_list_tail];
+		entry->next_free_index = table->table_size;
+	}
+	table->free_handle_list_tail = new_table_size - 1;
+	if (table->free_handle_list_head == HMGRTABLE_INVALID_INDEX)
+		table->free_handle_list_head = table->table_size;
+
+	table->table_size = new_table_size;
+	table->free_count = new_free_count;
+
+	TRACE_DEBUG(1, "%s end\n", __func__);
+	return true;
+}
+
+void hmgrtable_init(struct hmgrtable *table, struct dxgprocess *process)
+{
+	table->process = process;
+	table->entry_table = NULL;
+	table->table_size = 0;
+	table->free_handle_list_head = HMGRTABLE_INVALID_INDEX;
+	table->free_handle_list_tail = HMGRTABLE_INVALID_INDEX;
+	table->free_count = 0;
+	init_rwsem(&table->table_lock);
+}
+
+void hmgrtable_destroy(struct hmgrtable *table)
+{
+	if (table->entry_table) {
+		dxgmem_free(table->process, DXGMEM_HANDLE_TABLE,
+			    table->entry_table);
+		table->entry_table = NULL;
+	}
+}
+
+void hmgrtable_lock(struct hmgrtable *table, enum dxglockstate state)
+{
+	dxglockorder_acquire(DXGLOCK_HANDLETABLE);
+	if (state == DXGLOCK_EXCL)
+		down_write(&table->table_lock);
+	else
+		down_read(&table->table_lock);
+}
+
+void hmgrtable_unlock(struct hmgrtable *table, enum dxglockstate state)
+{
+	if (state == DXGLOCK_EXCL)
+		up_write(&table->table_lock);
+	else
+		up_read(&table->table_lock);
+	dxglockorder_release(DXGLOCK_HANDLETABLE);
+}
+
+d3dkmt_handle hmgrtable_alloc_handle(struct hmgrtable *table, void *object,
+				     enum hmgrentry_type type, bool make_valid)
+{
+	uint index;
+	struct hmgrentry *entry;
+	uint unique;
+
+	DXGKRNL_ASSERT(type <= HMGRENTRY_TYPE_LIMIT);
+	DXGKRNL_ASSERT(type > HMGRENTRY_TYPE_FREE);
+
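+	/*
+	 * Grow the table before the free list gets too short; keeping at
+	 * least HMGRTABLE_MIN_FREE_ENTRIES free entries delays the re-use
+	 * of recently freed handles.
+	 */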
+	if (table->free_count <= HMGRTABLE_MIN_FREE_ENTRIES) {
+		if (!expand_table(table, 0)) {
+			pr_err("hmgrtable expand_table failed\n");
+			return 0;
+		}
+	}
+
+	if (table->free_handle_list_head >= table->table_size) {
+		pr_err("hmgrtable corrupted handle table head\n");
+		return 0;
+	}
+
+	index = table->free_handle_list_head;
+	entry = &table->entry_table[index];
+
+	if (entry->type != HMGRENTRY_TYPE_FREE) {
+		pr_err("hmgrtable expected free handle\n");
+		return 0;
+	}
+
+	table->free_handle_list_head = entry->next_free_index;
+
+	if (entry->next_free_index != table->free_handle_list_tail) {
+		if (entry->next_free_index >= table->table_size) {
+			pr_err("hmgrtable invalid next free index\n");
+			return 0;
+		}
+		table->entry_table[entry->next_free_index].prev_free_index =
+		    HMGRTABLE_INVALID_INDEX;
+	}
+
+	unique = table->entry_table[index].unique;
+
+	table->entry_table[index].object = object;
+	table->entry_table[index].type = type;
+	table->entry_table[index].instance = 0;
+	table->entry_table[index].destroyed = !make_valid;
+	table->free_count--;
+
+	return build_handle(index, unique, table->entry_table[index].instance);
+}
+
+int hmgrtable_assign_handle_safe(struct hmgrtable *table, void *object,
+				 enum hmgrentry_type type, d3dkmt_handle h)
+{
+	int ret;
+
+	hmgrtable_lock(table, DXGLOCK_EXCL);
+	ret = hmgrtable_assign_handle(table, object, type, h);
+	hmgrtable_unlock(table, DXGLOCK_EXCL);
+	return ret;
+}
+
+int hmgrtable_assign_handle(struct hmgrtable *table, void *object,
+			    enum hmgrentry_type type, d3dkmt_handle h)
+{
+	uint index = get_index(h);
+	uint unique = get_unique(h);
+	struct hmgrentry *entry = NULL;
+
+	TRACE_DEBUG(1, "%s %x, %d %p, %p\n",
+		    __func__, h, index, object, table);
+
+	if (index >= HMGRHANDLE_INDEX_MAX) {
+		pr_err("handle index is too big: %x %d", h, index);
+		return STATUS_INVALID_PARAMETER;
+	}
+
+	if (index >= table->table_size) {
+		uint new_size = index + HMGRTABLE_SIZE_INCREMENT;
+
+		if (new_size > HMGRHANDLE_INDEX_MAX)
+			new_size = HMGRHANDLE_INDEX_MAX;
+		if (!expand_table(table, new_size)) {
+			pr_err("failed to expand table\n");
+			return STATUS_NO_MEMORY;
+		}
+	}
+
+	entry = &table->entry_table[index];
+
+	if (entry->type != HMGRENTRY_TYPE_FREE) {
+		pr_err("the entry is already busy: %d %x",
+			   entry->type,
+			   hmgrtable_build_entry_handle(table, index));
+		return STATUS_INVALID_PARAMETER;
+	}
+
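+	/* Unlink the entry from the doubly linked free list */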
+	if (index != table->free_handle_list_tail) {
+		if (entry->next_free_index >= table->table_size) {
+			pr_err("hmgr: invalid next free index %d\n",
+				   entry->next_free_index);
+			return STATUS_INVALID_PARAMETER;
+		}
+		table->entry_table[entry->next_free_index].prev_free_index =
+		    entry->prev_free_index;
+	} else {
+		table->free_handle_list_tail = entry->prev_free_index;
+	}
+
+	if (index != table->free_handle_list_head) {
+		if (entry->prev_free_index >= table->table_size) {
+			pr_err("hmgr: invalid prev free index %d\n",
+				   entry->prev_free_index);
+			return STATUS_INVALID_PARAMETER;
+		}
+		table->entry_table[entry->prev_free_index].next_free_index =
+		    entry->next_free_index;
+	} else {
+		table->free_handle_list_head = entry->next_free_index;
+	}
+
+	entry->prev_free_index = HMGRTABLE_INVALID_INDEX;
+	entry->next_free_index = HMGRTABLE_INVALID_INDEX;
+	entry->object = object;
+	entry->type = type;
+	entry->instance = 0;
+	entry->unique = unique;
+	entry->destroyed = false;
+
+	table->free_count--;
+	return 0;
+}
+
+d3dkmt_handle hmgrtable_alloc_handle_safe(struct hmgrtable *table, void *obj,
+					  enum hmgrentry_type type,
+					  bool make_valid)
+{
+	d3dkmt_handle h;
+
+	hmgrtable_lock(table, DXGLOCK_EXCL);
+	h = hmgrtable_alloc_handle(table, obj, type, make_valid);
+	hmgrtable_unlock(table, DXGLOCK_EXCL);
+	return h;
+}
+
+void hmgrtable_free_handle(struct hmgrtable *table, enum hmgrentry_type t,
+			   d3dkmt_handle h)
+{
+	struct hmgrentry *entry;
+	uint i = get_index(h);
+
+	DXGKRNL_ASSERT(table->free_count < table->table_size);
+	DXGKRNL_ASSERT(table->free_count >= HMGRTABLE_MIN_FREE_ENTRIES);
+
+	TRACE_DEBUG(1, "%s: %p %x\n", __func__, table, h);
+
+	/* Ignore the destroyed flag when checking the handle */
+	if (is_handle_valid(table, h, true, t)) {
+		entry = &table->entry_table[i];
+		entry->type = HMGRENTRY_TYPE_FREE;
+		entry->destroyed = 0;
+		/* Bump the unique counter, so stale handles fail validation */
+		if (entry->unique != HMGRHANDLE_UNIQUE_MAX)
+			entry->unique += 1;
+		else
+			entry->unique = 1;
+
+		table->free_count++;
+
+		/*
+		 * Insert the index to the free list at the tail.
+		 */
+		entry->next_free_index = HMGRTABLE_INVALID_INDEX;
+		entry->prev_free_index = table->free_handle_list_tail;
+		entry = &table->entry_table[table->free_handle_list_tail];
+		entry->next_free_index = i;
+		table->free_handle_list_tail = i;
+	} else {
+		pr_err("%s:error: %d %x\n", __func__, i, h);
+	}
+}
+
+void hmgrtable_free_handle_safe(struct hmgrtable *table, enum hmgrentry_type t,
+				d3dkmt_handle h)
+{
+	hmgrtable_lock(table, DXGLOCK_EXCL);
+	hmgrtable_free_handle(table, t, h);
+	hmgrtable_unlock(table, DXGLOCK_EXCL);
+}
+
+d3dkmt_handle hmgrtable_build_entry_handle(struct hmgrtable *table, uint index)
+{
+	DXGKRNL_ASSERT(index < table->table_size);
+
+	return build_handle(index, table->entry_table[index].unique,
+			    table->entry_table[index].instance);
+}
+
+void *hmgrtable_get_object(struct hmgrtable *table, d3dkmt_handle h)
+{
+	if (!is_handle_valid(table, h, false, HMGRENTRY_TYPE_FREE))
+		return NULL;
+
+	return table->entry_table[get_index(h)].object;
+}
+
+void *hmgrtable_get_object_by_type(struct hmgrtable *table,
+				   enum hmgrentry_type type, d3dkmt_handle h)
+{
+	if (!is_handle_valid(table, h, false, type)) {
+		pr_err("%s invalid handle %x\n", __func__, h);
+		return NULL;
+	}
+	return table->entry_table[get_index(h)].object;
+}
+
+void *hmgrtable_get_entry_object(struct hmgrtable *table, uint index)
+{
+	DXGKRNL_ASSERT(index < table->table_size);
+	DXGKRNL_ASSERT(table->entry_table[index].type != HMGRENTRY_TYPE_FREE);
+
+	return table->entry_table[index].object;
+}
+
+enum hmgrentry_type hmgrtable_get_entry_type(struct hmgrtable *table,
+					     uint index)
+{
+	DXGKRNL_ASSERT(index < table->table_size);
+	return (enum hmgrentry_type)table->entry_table[index].type;
+}
+
+enum hmgrentry_type hmgrtable_get_object_type(struct hmgrtable *table,
+					      d3dkmt_handle h)
+{
+	if (!is_handle_valid(table, h, false, HMGRENTRY_TYPE_FREE))
+		return HMGRENTRY_TYPE_FREE;
+
+	return hmgrtable_get_entry_type(table, get_index(h));
+}
+
+void *hmgrtable_get_object_ignore_destroyed(struct hmgrtable *table,
+					    d3dkmt_handle h,
+					    enum hmgrentry_type type)
+{
+	if (!is_handle_valid(table, h, true, type))
+		return NULL;
+	return table->entry_table[get_index(h)].object;
+}
+
+bool hmgrtable_next_entry(struct hmgrtable *tbl, uint *index,
+			  enum hmgrentry_type *type, d3dkmt_handle *handle,
+			  void **object)
+{
+	uint i;
+	struct hmgrentry *entry;
+
+	for (i = *index; i < tbl->table_size; i++) {
+		entry = &tbl->entry_table[i];
+		if (entry->type != HMGRENTRY_TYPE_FREE) {
+			*index = i + 1;
+			*object = entry->object;
+			*handle = build_handle(i, entry->unique,
+					       entry->instance);
+			*type = entry->type;
+			return true;
+		}
+	}
+	return false;
+}
diff --git a/drivers/gpu/dxgkrnl/hmgr.h b/drivers/gpu/dxgkrnl/hmgr.h
new file mode 100644
index 000000000000..2b508c2f7fcd
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/hmgr.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ *   Iouri Tarassov <iourit at microsoft.com>
+ *
+ * Dxgkrnl Graphics Port Driver
+ * Handle manager definitions
+ *
+ */
+
+#ifndef _HMGR_H_
+#define _HMGR_H_
+
+#include "misc.h"
+
+struct hmgrentry;
+
+/*
+ * Handle manager table.
+ *
+ * Implementation notes:
+ *   A list of free handles is built on top of the array of table entries.
+ *   free_handle_list_head is the index of the first entry in the list.
+ *   free_handle_list_tail is the index of the last entry in the list.
+ *   The list is kept at least HMGRTABLE_MIN_FREE_ENTRIES entries long,
+ *   so after a handle is freed it cannot be re-used until at least that
+ *   many handles have been allocated.
+ *   Handles are allocated from the start of the list and free handles are
+ *   inserted after the tail of the list.
+ *
+ */
+struct hmgrtable {
+	struct dxgprocess	*process;
+	struct hmgrentry	*entry_table;
+	uint			free_handle_list_head;
+	uint			free_handle_list_tail;
+	uint			table_size;
+	uint			free_count;
+	struct rw_semaphore	table_lock;
+};
+
+/*
+ * Handle entry data types.
+ */
+#define HMGRENTRY_TYPE_BITS 5
+
+enum hmgrentry_type {
+	HMGRENTRY_TYPE_FREE				= 0,
+	HMGRENTRY_TYPE_DXGADAPTER			= 1,
+	HMGRENTRY_TYPE_DXGSHAREDRESOURCE		= 2,
+	HMGRENTRY_TYPE_DXGDEVICE			= 3,
+	HMGRENTRY_TYPE_DXGRESOURCE			= 4,
+	HMGRENTRY_TYPE_DXGALLOCATION			= 5,
+	HMGRENTRY_TYPE_DXGOVERLAY			= 6,
+	HMGRENTRY_TYPE_DXGCONTEXT			= 7,
+	HMGRENTRY_TYPE_DXGSYNCOBJECT			= 8,
+	HMGRENTRY_TYPE_DXGKEYEDMUTEX			= 9,
+	HMGRENTRY_TYPE_DXGPAGINGQUEUE			= 10,
+	HMGRENTRY_TYPE_DXGDEVICESYNCOBJECT		= 11,
+	HMGRENTRY_TYPE_DXGPROCESS			= 12,
+	HMGRENTRY_TYPE_DXGSHAREDVMOBJECT		= 13,
+	HMGRENTRY_TYPE_DXGPROTECTEDSESSION		= 14,
+	HMGRENTRY_TYPE_DXGHWQUEUE			= 15,
+	HMGRENTRY_TYPE_DXGREMOTEBUNDLEOBJECT		= 16,
+	HMGRENTRY_TYPE_DXGCOMPOSITIONSURFACEOBJECT	= 17,
+	HMGRENTRY_TYPE_DXGCOMPOSITIONSURFACEPROXY	= 18,
+	HMGRENTRY_TYPE_DXGTRACKEDWORKLOAD		= 19,
+	HMGRENTRY_TYPE_LIMIT				= ((1 << HMGRENTRY_TYPE_BITS) - 1),
+	HMGRENTRY_TYPE_MONITOREDFENCE			= HMGRENTRY_TYPE_LIMIT + 1,
+};
+
+void hmgrtable_init(struct hmgrtable *tbl, struct dxgprocess *process);
+void hmgrtable_destroy(struct hmgrtable *tbl);
+void hmgrtable_lock(struct hmgrtable *tbl, enum dxglockstate state);
+void hmgrtable_unlock(struct hmgrtable *tbl, enum dxglockstate state);
+d3dkmt_handle hmgrtable_alloc_handle(struct hmgrtable *tbl, void *object,
+				     enum hmgrentry_type t, bool make_valid);
+d3dkmt_handle hmgrtable_alloc_handle_safe(struct hmgrtable *tbl, void *obj,
+					  enum hmgrentry_type t, bool reserve);
+int hmgrtable_assign_handle(struct hmgrtable *tbl, void *obj,
+			    enum hmgrentry_type t, d3dkmt_handle h);
+int hmgrtable_assign_handle_safe(struct hmgrtable *tbl, void *obj,
+				 enum hmgrentry_type t, d3dkmt_handle h);
+void hmgrtable_free_handle(struct hmgrtable *tbl, enum hmgrentry_type t,
+			   d3dkmt_handle h);
+void hmgrtable_free_handle_safe(struct hmgrtable *tbl, enum hmgrentry_type t,
+				d3dkmt_handle h);
+d3dkmt_handle hmgrtable_build_entry_handle(struct hmgrtable *tbl, uint index);
+enum hmgrentry_type hmgrtable_get_object_type(struct hmgrtable *tbl,
+					      d3dkmt_handle h);
+void *hmgrtable_get_object(struct hmgrtable *tbl, d3dkmt_handle h);
+void *hmgrtable_get_object_by_type(struct hmgrtable *tbl, enum hmgrentry_type t,
+				   d3dkmt_handle h);
+void *hmgrtable_get_object_ignore_destroyed(struct hmgrtable *tbl,
+					    d3dkmt_handle h,
+					    enum hmgrentry_type t);
+bool hmgrtable_mark_destroyed(struct hmgrtable *tbl, d3dkmt_handle h);
+bool hmgrtable_unmark_destroyed(struct hmgrtable *tbl, d3dkmt_handle h);
+void *hmgrtable_get_entry_object(struct hmgrtable *tbl, uint index);
+bool hmgrtable_next_entry(struct hmgrtable *tbl, uint *start_index,
+			  enum hmgrentry_type *type, d3dkmt_handle *handle,
+			  void **object);
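+
+/*
+ * Typical usage (a minimal sketch):
+ *
+ *	hmgrtable_lock(&table, DXGLOCK_EXCL);
+ *	handle = hmgrtable_alloc_handle(&table, object,
+ *					HMGRENTRY_TYPE_DXGDEVICE, true);
+ *	hmgrtable_unlock(&table, DXGLOCK_EXCL);
+ *	...
+ *	hmgrtable_free_handle_safe(&table, HMGRENTRY_TYPE_DXGDEVICE, handle);
+ */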
+
+#endif
diff --git a/drivers/gpu/dxgkrnl/ioctl.c b/drivers/gpu/dxgkrnl/ioctl.c
new file mode 100644
index 000000000000..7d585b2cece8
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/ioctl.c
@@ -0,0 +1,5269 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ *   Iouri Tarassov <iourit at microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Ioctl implementation
+ *
+ */
+
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/mman.h>
+
+#include "dxgkrnl.h"
+#include "dxgvmbus.h"
+
+struct ioctl_desc {
+	ntstatus (*ioctl_callback)(struct dxgprocess *p, void *__user arg);
+	u32 ioctl;
+	u32 arg_size;
+};
+static struct ioctl_desc ioctls[LX_IO_MAX + 1];
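+
+/*
+ * Sketch of how the table above is consumed (illustrative only: the
+ * actual dispatcher appears later in this file, and the variable names
+ * below are not from the driver):
+ *
+ *	int code = _IOC_NR(cmd);
+ *
+ *	if (code < 1 || code > LX_IO_MAX ||
+ *	    ioctls[code].ioctl_callback == NULL)
+ *		return STATUS_INVALID_PARAMETER;
+ *	return ioctls[code].ioctl_callback(process, (void *__user)arg);
+ */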
+
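+/*
+ * Release callback for the anonymous fd that represents a shared sync
+ * object. It drops one NT handle reference and destroys the host copy
+ * of the object when the last such reference is dropped.
+ */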
+static int dxgsyncobj_release(struct inode *inode, struct file *file)
+{
+	struct dxgsharedsyncobject *syncobj = file->private_data;
+	struct dxgthreadinfo *thread = dxglockorder_get_thread();
+
+	TRACE_DEBUG(1, "%s: %p", __func__, syncobj);
+	dxgmutex_lock(&syncobj->fd_mutex);
+	dxgsharedsyncobj_acquire_reference(syncobj);
+	syncobj->host_shared_handle_nt_reference--;
+	if (syncobj->host_shared_handle_nt_reference == 0) {
+		if (syncobj->host_shared_handle_nt) {
+			dxgvmb_send_destroy_nt_shared_object(
+					syncobj->host_shared_handle_nt);
+			TRACE_DEBUG(1, "Syncobj host_handle_nt destroyed: %x",
+				    syncobj->host_shared_handle_nt);
+			syncobj->host_shared_handle_nt = 0;
+		}
+		dxgsharedsyncobj_release_reference(syncobj);
+	}
+	dxgmutex_unlock(&syncobj->fd_mutex);
+	dxgsharedsyncobj_release_reference(syncobj);
+	dxglockorder_put_thread(thread);
+	return 0;
+}
+
+static const struct file_operations dxg_syncobj_fops = {
+	.release = dxgsyncobj_release,
+};
+
+static int dxgsharedresource_release(struct inode *inode, struct file *file)
+{
+	struct dxgsharedresource *resource = file->private_data;
+	struct dxgthreadinfo *thread = dxglockorder_get_thread();
+
+	TRACE_DEBUG(1, "%s: %p", __func__, resource);
+	dxgmutex_lock(&resource->fd_mutex);
+	dxgsharedresource_acquire_reference(resource);
+	resource->host_shared_handle_nt_reference--;
+	if (resource->host_shared_handle_nt_reference == 0) {
+		if (resource->host_shared_handle_nt) {
+			dxgvmb_send_destroy_nt_shared_object(
+					resource->host_shared_handle_nt);
+			TRACE_DEBUG(1, "Resource host_handle_nt destroyed: %x",
+				    resource->host_shared_handle_nt);
+			resource->host_shared_handle_nt = 0;
+		}
+		dxgsharedresource_release_reference(resource);
+	}
+	dxgmutex_unlock(&resource->fd_mutex);
+	dxgsharedresource_release_reference(resource);
+	dxglockorder_put_thread(thread);
+	return 0;
+}
+
+static const struct file_operations dxg_resource_fops = {
+	.release = dxgsharedresource_release,
+};
+
+static int dxgk_open_adapter_from_luid(struct dxgprocess *process,
+				       void *__user inargs)
+{
+	struct d3dkmt_openadapterfromluid args;
+	int ret = 0;
+	struct dxgadapter *entry;
+	struct dxgadapter *adapter = NULL;
+	struct d3dkmt_openadapterfromluid *__user result = inargs;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED);
+	dxgglobal_acquire_process_adapter_lock();
+
+	list_for_each_entry(entry, &dxgglobal->adapter_list_head,
+			    adapter_list_entry) {
+		if (dxgadapter_acquire_lock_shared(entry) == 0) {
+			TRACE_DEBUG(1, "Compare luids: %d:%d  %d:%d",
+				    entry->luid.b, entry->luid.a,
+				    args.adapter_luid.b, args.adapter_luid.a);
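+			/*
+			 * A LUID is two 32-bit values; compare both at once
+			 * by reading the structure as a single u64.
+			 */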
+			if (*(u64 *) &entry->luid ==
+			    *(u64 *) &args.adapter_luid) {
+				ret = dxgprocess_open_adapter(process, entry,
+						&args.adapter_handle);
+
+				if (NT_SUCCESS(ret)) {
+					ret = dxg_copy_to_user(
+						&result->adapter_handle,
+						&args.adapter_handle,
+						sizeof(d3dkmt_handle));
+				}
+				adapter = entry;
+			}
+			dxgadapter_release_lock_shared(entry);
+			if (adapter)
+				break;
+		}
+	}
+
+	dxgglobal_release_process_adapter_lock();
+	dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED);
+
+	if (adapter == NULL)
+		ret = STATUS_INVALID_PARAMETER;
+
+cleanup:
+
+	if (ret)
+		dxgprocess_close_adapter(process, args.adapter_handle);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
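+/*
+ * Opens every adapter on behalf of the process and fills the caller's
+ * buffer. If anything fails along the way, all handles opened so far are
+ * closed again, so the caller gets either the full set or nothing.
+ */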
+static int dxgkp_enum_adapters(struct dxgprocess *process,
+			       union d3dkmt_enumadapters_filter filter,
+			       uint adapter_count_max,
+			       struct d3dkmt_adapterinfo *__user info_out,
+			       uint * __user adapter_count_out)
+{
+	int ret = 0;
+	struct dxgadapter *entry;
+	struct d3dkmt_adapterinfo *info = NULL;
+	struct dxgadapter **adapters = NULL;
+	int adapter_count = 0;
+	int i;
+
+	TRACE_FUNC_ENTER(__func__);
+	if (info_out == NULL || adapter_count_max == 0) {
+		TRACE_DEBUG(1, "buffer is NULL");
+		ret = dxg_copy_to_user(adapter_count_out,
+				       &dxgglobal->num_adapters, sizeof(uint));
+		goto cleanup;
+	}
+
+	if (adapter_count_max > 0xFFFF) {
+		pr_err("too many adapters");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	info = dxgmem_alloc(process, DXGMEM_TMP,
+			    sizeof(struct d3dkmt_adapterinfo) *
+			    adapter_count_max);
+	if (info == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	adapters = dxgmem_alloc(process, DXGMEM_TMP,
+				sizeof(struct dxgadapter *) *
+				adapter_count_max);
+	if (adapters == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED);
+	dxgglobal_acquire_process_adapter_lock();
+
+	list_for_each_entry(entry, &dxgglobal->adapter_list_head,
+			    adapter_list_entry) {
+		if (adapter_count >= adapter_count_max) {
+			ret = STATUS_BUFFER_TOO_SMALL;
+			break;
+		}
+		if (dxgadapter_acquire_lock_shared(entry) == 0) {
+			struct d3dkmt_adapterinfo *inf = &info[adapter_count];
+
+			ret = dxgprocess_open_adapter(process, entry,
+						      &inf->adapter_handle);
+			if (NT_SUCCESS(ret)) {
+				inf->adapter_luid = entry->luid;
+				adapters[adapter_count] = entry;
+				TRACE_DEBUG(1, "adapter: %x %x:%x",
+					    inf->adapter_handle,
+					    inf->adapter_luid.b,
+					    inf->adapter_luid.a);
+				adapter_count++;
+			}
+			dxgadapter_release_lock_shared(entry);
+		}
+		if (ret)
+			break;
+	}
+
+	dxgglobal_release_process_adapter_lock();
+	dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED);
+
+	if (ret == STATUS_BUFFER_TOO_SMALL) {
+		TRACE_DEBUG(1, "Too many adapters");
+		dxg_copy_to_user(adapter_count_out,
+				 &dxgglobal->num_adapters, sizeof(uint));
+		goto cleanup;
+	}
+
+	ret = dxg_copy_to_user(adapter_count_out, &adapter_count,
+			       sizeof(adapter_count));
+	if (ret)
+		goto cleanup;
+	ret = dxg_copy_to_user(info_out, info, sizeof(info[0]) * adapter_count);
+
+cleanup:
+
+	if (NT_SUCCESS(ret)) {
+		TRACE_DEBUG(1, "found %d adapters", adapter_count);
+		goto success;
+	}
+	if (info) {
+		for (i = 0; i < adapter_count; i++)
+			dxgprocess_close_adapter(process,
+						 info[i].adapter_handle);
+	}
+success:
+	if (info)
+		dxgmem_free(process, DXGMEM_TMP, info);
+	if (adapters)
+		dxgmem_free(process, DXGMEM_TMP, adapters);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
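+/*
+ * Sealing a shared resource makes its allocation list immutable and
+ * consolidates the driver private data of all allocations into one
+ * contiguous buffer, so the data can be replayed when another process
+ * opens the resource.
+ */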
+static int dxgsharedresource_seal(struct dxgsharedresource *shared_resource)
+{
+	int ret = 0;
+	int i = 0;
+	uint8_t *private_data;
+	uint data_size;
+	struct dxgresource *resource;
+	struct dxgallocation *alloc;
+
+	TRACE_DEBUG(1, "Sealing resource: %p", shared_resource);
+
+	down_write(&shared_resource->adapter->shared_resource_list_lock);
+	if (shared_resource->sealed) {
+		TRACE_DEBUG(1, "Resource already sealed");
+		goto cleanup;
+	}
+	shared_resource->sealed = 1;
+	if (!list_empty(&shared_resource->resource_list_head)) {
+		resource =
+		    list_first_entry(&shared_resource->resource_list_head,
+				     struct dxgresource,
+				     shared_resource_list_entry);
+		TRACE_DEBUG(1, "First resource: %p", resource);
+		dxgmutex_lock(&resource->resource_mutex);
+		list_for_each_entry(alloc, &resource->alloc_list_head,
+				    alloc_list_entry) {
+			TRACE_DEBUG(1, "Resource alloc: %p %d", alloc,
+				    alloc->priv_drv_data->data_size);
+			shared_resource->allocation_count++;
+			shared_resource->alloc_private_data_size +=
+			    alloc->priv_drv_data->data_size;
+			if (shared_resource->alloc_private_data_size <
+			    alloc->priv_drv_data->data_size) {
+				pr_err("alloc private data overflow");
+				ret = STATUS_INVALID_PARAMETER;
+				goto cleanup1;
+			}
+		}
+		if (shared_resource->alloc_private_data_size == 0)
+			goto cleanup1;
+		shared_resource->alloc_private_data =
+			dxgmem_alloc(NULL, DXGMEM_ALLOCPRIVATE,
+				shared_resource->alloc_private_data_size);
+		if (shared_resource->alloc_private_data == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup1;
+		}
+		shared_resource->alloc_private_data_sizes =
+			dxgmem_alloc(NULL, DXGMEM_ALLOCPRIVATE,
+			sizeof(uint) * shared_resource->allocation_count);
+		if (shared_resource->alloc_private_data_sizes == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup1;
+		}
+		private_data = shared_resource->alloc_private_data;
+		data_size = shared_resource->alloc_private_data_size;
+		i = 0;
+		list_for_each_entry(alloc, &resource->alloc_list_head,
+				    alloc_list_entry) {
+			uint alloc_data_size = alloc->priv_drv_data->data_size;
+
+			if (alloc_data_size) {
+				if (data_size < alloc_data_size) {
+					pr_err("Invalid private data size");
+					ret = STATUS_INVALID_PARAMETER;
+					goto cleanup1;
+				}
+				shared_resource->alloc_private_data_sizes[i] =
+				    alloc_data_size;
+				memcpy(private_data,
+				       alloc->priv_drv_data->data,
+				       alloc_data_size);
+				dxgmem_free(alloc->process, DXGMEM_ALLOCPRIVATE,
+					    alloc->priv_drv_data);
+				alloc->priv_drv_data = NULL;
+				private_data += alloc_data_size;
+				data_size -= alloc_data_size;
+			}
+			i++;
+		}
+		if (data_size != 0) {
+			pr_err("Data size mismatch");
+			ret = STATUS_INVALID_PARAMETER;
+		}
+cleanup1:
+		dxgmutex_unlock(&resource->resource_mutex);
+	}
+cleanup:
+	up_write(&shared_resource->adapter->shared_resource_list_lock);
+	return ret;
+}
+
+static int dxgk_enum_adapters(struct dxgprocess *process, void *__user inargs)
+{
+	struct d3dkmt_enumadapters2 args;
+	int ret = 0;
+	struct dxgadapter *entry;
+	struct d3dkmt_adapterinfo *info = NULL;
+	struct dxgadapter **adapters = NULL;
+	int adapter_count = 0;
+	int i;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.adapters == NULL) {
+		TRACE_DEBUG(1, "buffer is NULL");
+		args.num_adapters = dxgglobal->num_adapters;
+		ret = dxg_copy_to_user(inargs, &args, sizeof(args));
+		goto cleanup;
+	}
+	if (args.num_adapters < dxgglobal->num_adapters) {
+		args.num_adapters = dxgglobal->num_adapters;
+		TRACE_DEBUG(1, "buffer is too small");
+		ret = STATUS_BUFFER_TOO_SMALL;
+		goto cleanup;
+	}
+
+	if (args.num_adapters > D3DKMT_ADAPTERS_MAX) {
+		TRACE_DEBUG(1, "too many adapters");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	info = dxgmem_alloc(process, DXGMEM_TMP,
+			    sizeof(struct d3dkmt_adapterinfo) *
+			    args.num_adapters);
+	if (info == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	adapters = dxgmem_alloc(process, DXGMEM_TMP,
+				sizeof(struct dxgadapter *) *
+				args.num_adapters);
+	if (adapters == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED);
+	dxgglobal_acquire_process_adapter_lock();
+
+	list_for_each_entry(entry, &dxgglobal->adapter_list_head,
+			    adapter_list_entry) {
+		if (dxgadapter_acquire_lock_shared(entry) == 0) {
+			struct d3dkmt_adapterinfo *inf = &info[adapter_count];
+
+			ret = dxgprocess_open_adapter(process, entry,
+						      &inf->adapter_handle);
+			if (NT_SUCCESS(ret)) {
+				inf->adapter_luid = entry->luid;
+				adapters[adapter_count] = entry;
+				TRACE_DEBUG(1, "adapter: %x %llx",
+					    inf->adapter_handle,
+					    *(u64 *) &inf->adapter_luid);
+				adapter_count++;
+			}
+			dxgadapter_release_lock_shared(entry);
+		}
+		if (ret)
+			break;
+	}
+
+	dxgglobal_release_process_adapter_lock();
+	dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED);
+
+	args.num_adapters = adapter_count;
+
+	ret = dxg_copy_to_user(inargs, &args, sizeof(args));
+	if (ret)
+		goto cleanup;
+	ret = dxg_copy_to_user(args.adapters, info,
+			       sizeof(info[0]) * args.num_adapters);
+	if (ret)
+		goto cleanup;
+
+cleanup:
+
+	if (ret) {
+		if (info) {
+			for (i = 0; i < args.num_adapters; i++) {
+				dxgprocess_close_adapter(process,
+						info[i].adapter_handle);
+			}
+		}
+	} else {
+		TRACE_DEBUG(1, "found %d adapters", args.num_adapters);
+	}
+
+	if (info)
+		dxgmem_free(process, DXGMEM_TMP, info);
+	if (adapters)
+		dxgmem_free(process, DXGMEM_TMP, adapters);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_enum_adapters3(struct dxgprocess *process, void *__user inargs)
+{
+	struct d3dkmt_enumadapters3 args;
+	int ret = 0;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	ret = dxgkp_enum_adapters(process, args.filter,
+				  args.adapter_count,
+				  args.adapters,
+				  &((struct d3dkmt_enumadapters3 *)
+				    inargs)->adapter_count);
+
+cleanup:
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_close_adapter(struct dxgprocess *process, void *__user inargs)
+{
+	d3dkmt_handle args;
+	int ret = 0;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	ret = dxgprocess_close_adapter(process, args);
+	if (ret)
+		pr_err("%s failed", __func__);
+
+cleanup:
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_query_adapter_info(struct dxgprocess *process,
+				   void *__user inargs)
+{
+	struct d3dkmt_queryadapterinfo args;
+	int ret = 0;
+	struct dxgadapter *adapter = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.private_data_size > DXG_MAX_VM_BUS_PACKET_SIZE ||
+	    args.private_data_size == 0) {
+		pr_err("invalid private data size");
+		goto cleanup;
+	}
+
+	TRACE_DEBUG(1, "Type: %d Size: %x", args.type, args.private_data_size);
+
+	adapter = dxgprocess_adapter_by_handle(process, args.adapter);
+	if (adapter == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret)
+		goto cleanup;
+
+	ret = dxgvmb_send_query_adapter_info(process, &adapter->channel, &args);
+
+	dxgadapter_release_lock_shared(adapter);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_reference(adapter);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_create_device(struct dxgprocess *process, void *__user inargs)
+{
+	struct d3dkmt_createdevice args;
+	int ret = 0;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+	d3dkmt_handle host_device_handle = 0;
+	bool adapter_locked = false;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	/* The call acquires a reference on the adapter. */
+	adapter = dxgprocess_adapter_by_handle(process, args.adapter);
+	if (adapter == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	device = dxgdevice_create(adapter, process);
+	if (device == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret)
+		goto cleanup;
+
+	adapter_locked = true;
+
+	host_device_handle = dxgvmb_send_create_device(adapter, process, &args);
+	if (host_device_handle) {
+		struct d3dkmt_createdevice *__user result = inargs;
+
+		ret = dxg_copy_to_user(&result->device, &host_device_handle,
+				       sizeof(d3dkmt_handle));
+		if (ret)
+			goto cleanup;
+
+		hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+		ret = hmgrtable_assign_handle(&process->handle_table, device,
+					      HMGRENTRY_TYPE_DXGDEVICE,
+					      host_device_handle);
+		if (!ret) {
+			device->handle = host_device_handle;
+			device->handle_valid = 1;
+			device->object_state = DXGOBJECTSTATE_ACTIVE;
+		}
+		hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+	}
+
+cleanup:
+
+	if (ret) {
+		if (host_device_handle)
+			dxgvmb_send_destroy_device(adapter, process,
+						   host_device_handle);
+		if (device)
+			dxgdevice_destroy(device);
+	}
+
+	if (adapter_locked)
+		dxgadapter_release_lock_shared(adapter);
+
+	if (adapter)
+		dxgadapter_release_reference(adapter);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_destroy_device(struct dxgprocess *process, void *__user inargs)
+{
+	struct d3dkmt_destroydevice args;
+	int ret = 0;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	device = hmgrtable_get_object_by_type(&process->handle_table,
+					      HMGRENTRY_TYPE_DXGDEVICE,
+					      args.device);
+	if (device) {
+		hmgrtable_free_handle(&process->handle_table,
+				      HMGRENTRY_TYPE_DXGDEVICE, args.device);
+		device->handle_valid = 0;
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+	if (device == NULL) {
+		pr_err("invalid device handle: %x", args.device);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+
+	dxgdevice_destroy(device);
+
+	if (dxgadapter_acquire_lock_shared(adapter) == 0) {
+		dxgvmb_send_destroy_device(adapter, process, args.device);
+		dxgadapter_release_lock_shared(adapter);
+	}
+
+cleanup:
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_create_context_virtual(struct dxgprocess *process,
+				       void *__user inargs)
+{
+	struct d3dkmt_createcontextvirtual args;
+	int ret = 0;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+	struct dxgcontext *context = NULL;
+	d3dkmt_handle host_context_handle = 0;
+	bool device_lock_acquired = false;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	/*
+	 * The call acquires a reference on the device. It is safe to access
+	 * the adapter because the device holds a reference on it.
+	 */
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgdevice_acquire_lock_shared(device);
+	if (ret)
+		goto cleanup;
+
+	device_lock_acquired = true;
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	context = dxgcontext_create(device);
+	if (context == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	host_context_handle = dxgvmb_send_create_context(adapter,
+							 process, &args);
+	if (host_context_handle) {
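+		/*
+		 * Mirror the host-assigned handle in the local handle table,
+		 * so lookups on either side agree on the handle value.
+		 */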
+		hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+		ret = hmgrtable_assign_handle(&process->handle_table, context,
+					      HMGRENTRY_TYPE_DXGCONTEXT,
+					      host_context_handle);
+		if (!ret)
+			context->handle = host_context_handle;
+		hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+		if (ret)
+			goto cleanup;
+		ret = dxg_copy_to_user(
+		    &((struct d3dkmt_createcontextvirtual *)inargs)->context,
+		    &host_context_handle, sizeof(d3dkmt_handle));
+	} else {
+		pr_err("invalid host handle");
+		ret = STATUS_INVALID_PARAMETER;
+	}
+
+cleanup:
+
+	if (ret) {
+		if (host_context_handle) {
+			dxgvmb_send_destroy_context(adapter, process,
+						    host_context_handle);
+		}
+		if (context)
+			dxgcontext_destroy_safe(process, context);
+	}
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+
+	if (device) {
+		if (device_lock_acquired)
+			dxgdevice_release_lock_shared(device);
+		dxgdevice_release_reference(device);
+	}
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_destroy_context(struct dxgprocess *process, void *__user inargs)
+{
+	struct d3dkmt_destroycontext args;
+	int ret = 0;
+	struct dxgadapter *adapter = NULL;
+	struct dxgcontext *context = NULL;
+	struct dxgdevice *device = NULL;
+	d3dkmt_handle device_handle = 0;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	context = hmgrtable_get_object_by_type(&process->handle_table,
+					       HMGRENTRY_TYPE_DXGCONTEXT,
+					       args.context);
+	if (context) {
+		hmgrtable_free_handle(&process->handle_table,
+				      HMGRENTRY_TYPE_DXGCONTEXT, args.context);
+		context->handle = 0;
+		device_handle = context->device_handle;
+		context->object_state = DXGOBJECTSTATE_DESTROYED;
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+	if (context == NULL) {
+		pr_err("invalid context handle: %x", args.context);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	/*
+	 * The call acquires a reference on the device. It is safe to access
+	 * the adapter because the device holds a reference on it.
+	 */
+	device = dxgprocess_device_by_handle(process, device_handle);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_destroy_context(adapter, process, args.context);
+
+	dxgcontext_destroy_safe(process, context);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_create_hwcontext(struct dxgprocess *process,
+				 void *__user inargs)
+{
+	/* This is an obsolete entry point. */
+	return STATUS_NOT_SUPPORTED;
+}
+
+static int dxgk_destroy_hwcontext(struct dxgprocess *process,
+				  void *__user inargs)
+{
+	/* This is an obsolete entry point. */
+	return STATUS_NOT_SUPPORTED;
+}
+
+static int dxgk_create_hwqueue(struct dxgprocess *process, void *__user inargs)
+{
+	struct d3dkmt_createhwqueue args;
+	struct dxgdevice *device = NULL;
+	struct dxgcontext *context = NULL;
+	struct dxgadapter *adapter = NULL;
+	struct dxghwqueue *hwqueue = NULL;
+	int ret = 0;
+	bool device_lock_acquired = false;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	/*
+	 * The call acquires a reference on the device. It is safe to access
+	 * the adapter because the device holds a reference on it.
+	 */
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGCONTEXT,
+						    args.context);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgdevice_acquire_lock_shared(device);
+	if (ret)
+		goto cleanup;
+
+	device_lock_acquired = true;
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+	context = hmgrtable_get_object_by_type(&process->handle_table,
+					       HMGRENTRY_TYPE_DXGCONTEXT,
+					       args.context);
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+
+	if (context == NULL) {
+		pr_err("Invalid context handle %x", args.context);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	hwqueue = dxghwqueue_create(context);
+	if (hwqueue == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_create_hwqueue(process, &adapter->channel, &args,
+					 inargs, hwqueue);
+
+cleanup:
+
+	if (ret && hwqueue)
+		dxghwqueue_destroy(process, hwqueue);
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+
+	if (device_lock_acquired)
+		dxgdevice_release_lock_shared(device);
+
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_destroy_hwqueue(struct dxgprocess *process, void *__user inargs)
+{
+	struct d3dkmt_destroyhwqueue args;
+	int ret = 0;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+	struct dxghwqueue *hwqueue = NULL;
+	d3dkmt_handle device_handle = 0;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	hwqueue = hmgrtable_get_object_by_type(&process->handle_table,
+					       HMGRENTRY_TYPE_DXGHWQUEUE,
+					       args.queue);
+	if (hwqueue) {
+		hmgrtable_free_handle(&process->handle_table,
+				      HMGRENTRY_TYPE_DXGHWQUEUE, args.queue);
+		hwqueue->handle = 0;
+		device_handle = hwqueue->device_handle;
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+	if (hwqueue == NULL) {
+		pr_err("invalid hwqueue handle: %x", args.queue);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	/*
+	 * The call acquires a reference on the device. It is safe to access
+	 * the adapter because the device holds a reference on it.
+	 */
+	device = dxgprocess_device_by_handle(process, device_handle);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_destroy_hwqueue(process, &adapter->channel,
+					  args.queue);
+
+	dxghwqueue_destroy(process, hwqueue);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_create_paging_queue(struct dxgprocess *process,
+				    void *__user inargs)
+{
+	struct d3dkmt_createpagingqueue args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	struct dxgpagingqueue *pqueue = NULL;
+	int ret = 0;
+	d3dkmt_handle host_handle = 0;
+	bool device_lock_acquired = false;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	/*
+	 * The call acquires a reference on the device. It is safe to access
+	 * the adapter because the device holds a reference on it.
+	 */
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgdevice_acquire_lock_shared(device);
+	if (ret)
+		goto cleanup;
+
+	device_lock_acquired = true;
+	adapter = device->adapter;
+
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	pqueue = dxgpagingqueue_create(device);
+	if (pqueue == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_create_paging_queue(process, &adapter->channel,
+					      device, &args, pqueue);
+	if (NT_SUCCESS(ret)) {
+		host_handle = args.paging_queue;
+
+		ret = dxg_copy_to_user(inargs, &args, sizeof(args));
+		if (ret)
+			goto cleanup;
+
+		hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+		ret = hmgrtable_assign_handle(&process->handle_table, pqueue,
+					      HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+					      host_handle);
+		if (NT_SUCCESS(ret)) {
+			pqueue->handle = host_handle;
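+			/*
+			 * Reserve the host-assigned monitored fence handle in
+			 * the local table (no local object is attached), so
+			 * it cannot be handed out to another object.
+			 */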
+			ret = hmgrtable_assign_handle(&process->handle_table,
+						      NULL,
+						      HMGRENTRY_TYPE_MONITOREDFENCE,
+						      args.sync_object);
+			if (NT_SUCCESS(ret))
+				pqueue->syncobj_handle = args.sync_object;
+		}
+		hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+		/* should not fail after this */
+	}
+
+cleanup:
+
+	if (ret) {
+		if (pqueue)
+			dxgpagingqueue_destroy(pqueue);
+		if (host_handle)
+			dxgvmb_send_destroy_paging_queue(process,
+							 &adapter->channel,
+							 host_handle);
+	}
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+
+	if (device) {
+		if (device_lock_acquired)
+			dxgdevice_release_lock_shared(device);
+		dxgdevice_release_reference(device);
+	}
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_destroy_paging_queue(struct dxgprocess *process,
+				     void *__user inargs)
+{
+	struct d3dddi_destroypagingqueue args;
+	struct dxgpagingqueue *paging_queue = NULL;
+	int ret = 0;
+	d3dkmt_handle device_handle = 0;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	paging_queue = hmgrtable_get_object_by_type(&process->handle_table,
+						    HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+						    args.paging_queue);
+	if (paging_queue) {
+		device_handle = paging_queue->device_handle;
+		hmgrtable_free_handle(&process->handle_table,
+				      HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+				      args.paging_queue);
+		hmgrtable_free_handle(&process->handle_table,
+				      HMGRENTRY_TYPE_MONITOREDFENCE,
+				      paging_queue->syncobj_handle);
+		paging_queue->syncobj_handle = 0;
+		paging_queue->handle = 0;
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+	/*
+	 * The call acquires a reference on the device. It is safe to access
+	 * the adapter because the device holds a reference on it.
+	 */
+	if (device_handle)
+		device = dxgprocess_device_by_handle(process, device_handle);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgdevice_acquire_lock_shared(device);
+	if (ret) {
+		dxgdevice_release_reference(device);
+		device = NULL;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_destroy_paging_queue(process, &adapter->channel,
+					       args.paging_queue);
+
+	dxgpagingqueue_destroy(paging_queue);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+
+	if (device) {
+		dxgdevice_release_lock_shared(device);
+		dxgdevice_release_reference(device);
+	}
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int get_standard_alloc_priv_data(struct dxgdevice *device,
+					struct d3dkmt_createstandardallocation
+					*alloc_info,
+					uint *standard_alloc_priv_data_size,
+					void **standard_alloc_priv_data)
+{
+	int ret = 0;
+	struct d3dkmdt_gdisurfacedata gdi_data = { };
+	uint priv_data_size = 0;
+	void *priv_data = NULL;
+
+	TRACE_DEBUG(1, "%s", __func__);
+
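+	/*
+	 * Two-pass query: first ask the host for the required private data
+	 * size with a NULL buffer, then allocate the buffer and fetch the
+	 * actual data.
+	 */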
+	gdi_data.type = D3DKMDT_GDISURFACE_TEXTURE_CROSSADAPTER;
+	gdi_data.width = alloc_info->existing_heap_data.size;
+	gdi_data.height = 1;
+	gdi_data.format = D3DDDIFMT_UNKNOWN;
+
+	*standard_alloc_priv_data_size = 0;
+	ret = dxgvmb_send_get_standard_alloc_priv_data(device,
+						       D3DKMDT_STANDARDALLOCATION_GDISURFACE,
+						       &gdi_data, 0,
+						       &priv_data_size, NULL);
+	if (ret)
+		goto cleanup;
+	TRACE_DEBUG(1, "Priv data size: %d", priv_data_size);
+	if (priv_data_size == 0)
+		goto cleanup;
+	priv_data = dxgmem_alloc(device->process, DXGMEM_TMP, priv_data_size);
+	if (priv_data == NULL) {
+		ret = STATUS_NO_MEMORY;
+		pr_err("failed to allocate memory for priv data: %d",
+			   priv_data_size);
+		goto cleanup;
+	}
+	ret = dxgvmb_send_get_standard_alloc_priv_data(device,
+						       D3DKMDT_STANDARDALLOCATION_GDISURFACE,
+						       &gdi_data, 0,
+						       &priv_data_size,
+						       priv_data);
+	if (ret)
+		goto cleanup;
+	*standard_alloc_priv_data_size = priv_data_size;
+	*standard_alloc_priv_data = priv_data;
+	priv_data = NULL;
+
+cleanup:
+	if (priv_data)
+		dxgmem_free(device->process, DXGMEM_TMP, priv_data);
+	TRACE_FUNC_EXIT_ERR(__func__, ret);
+	return ret;
+}
+
+static int dxgk_create_allocation(struct dxgprocess *process,
+				  void *__user inargs)
+{
+	struct d3dkmt_createallocation args;
+	int ret = 0;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+	struct d3dddi_allocationinfo2 *alloc_info = NULL;
+	struct d3dkmt_createstandardallocation standard_alloc;
+	uint alloc_info_size = 0;
+	struct dxgresource *resource = NULL;
+	struct dxgallocation **dxgalloc = NULL;
+	struct dxgsharedresource *shared_resource = NULL;
+	bool resource_mutex_acquired = false;
+	uint standard_alloc_priv_data_size = 0;
+	void *standard_alloc_priv_data = NULL;
+	int i;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.alloc_count > D3DKMT_CREATEALLOCATION_MAX ||
+	    args.alloc_count == 0) {
+		pr_err("invalid number of allocations to create");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	alloc_info_size = sizeof(struct d3dddi_allocationinfo2) *
+	    args.alloc_count;
+	alloc_info = dxgmem_alloc(process, DXGMEM_TMP, alloc_info_size);
+	if (alloc_info == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	ret = dxg_copy_from_user(alloc_info, args.allocation_info,
+				 alloc_info_size);
+	if (ret)
+		goto cleanup;
+
+	for (i = 0; i < args.alloc_count; i++) {
+		if (args.flags.standard_allocation) {
+			if (alloc_info[i].priv_drv_data_size != 0) {
+				pr_err("private data size is not zero");
+				ret = STATUS_INVALID_PARAMETER;
+				goto cleanup;
+			}
+		}
+		if (alloc_info[i].priv_drv_data_size >=
+		    DXG_MAX_VM_BUS_PACKET_SIZE) {
+			pr_err("private data size is too big: %d %d %ld",
+				   i, alloc_info[i].priv_drv_data_size,
+				   sizeof(alloc_info[0]));
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+	}
+
+	if (args.flags.existing_section || args.flags.create_protected) {
+		pr_err("invalid allocation flags");
+		goto cleanup;
+	}
+
+	if (args.flags.standard_allocation) {
+		if (args.standard_allocation == NULL) {
+			pr_err("invalid standard allocation");
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		ret = dxg_copy_from_user(&standard_alloc,
+					 args.standard_allocation,
+					 sizeof(standard_alloc));
+		if (ret)
+			goto cleanup;
+		if (alloc_info[0].sysmem == NULL ||
+		    args.priv_drv_data_size != 0 ||
+		    args.alloc_count != 1 ||
+		    standard_alloc.type !=
+		    D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP ||
+		    standard_alloc.existing_heap_data.size == 0 ||
+		    standard_alloc.existing_heap_data.size & (PAGE_SIZE - 1) ||
+		    (unsigned long)alloc_info[0].sysmem & (PAGE_SIZE - 1)) {
+			pr_err("invalid standard allocation");
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		args.priv_drv_data_size =
+		    sizeof(struct d3dkmt_createstandardallocation);
+	}
+
+	if (args.flags.create_shared && !args.flags.create_resource) {
+		pr_err("create_resource must be set for create_shared");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	/*
+	 * The call acquires a reference on the device. It is safe to access
+	 * the adapter because the device holds a reference on it.
+	 */
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgdevice_acquire_lock_shared(device);
+	if (ret) {
+		dxgdevice_release_reference(device);
+		device = NULL;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	if (args.flags.standard_allocation) {
+		ret = get_standard_alloc_priv_data(device,
+						   &standard_alloc,
+						   &standard_alloc_priv_data_size,
+						   &standard_alloc_priv_data);
+		if (ret)
+			goto cleanup;
+		TRACE_DEBUG(1, "Alloc private data: %d",
+			    standard_alloc_priv_data_size);
+	}
+
+	if (args.flags.create_resource) {
+		resource = dxgresource_create(device);
+		if (resource == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		resource->private_runtime_handle =
+		    args.private_runtime_resource_handle;
+		if (args.flags.create_shared) {
+			shared_resource = dxgsharedresource_create(adapter);
+			if (shared_resource == NULL) {
+				ret = STATUS_NO_MEMORY;
+				goto cleanup;
+			}
+			if (args.flags.nt_security_sharing)
+				shared_resource->nt_security = 1;
+
+			shared_resource->runtime_private_data_size =
+			    args.private_runtime_data_size;
+			shared_resource->resource_private_data_size =
+			    args.priv_drv_data_size;
+			dxgsharedresource_add_resource(shared_resource,
+						       resource);
+			if (args.private_runtime_data_size) {
+				shared_resource->runtime_private_data =
+					dxgmem_alloc(NULL,
+						DXGMEM_RUNTIMEPRIVATE,
+						args.private_runtime_data_size);
+				if (shared_resource->runtime_private_data ==
+				    NULL) {
+					ret = STATUS_NO_MEMORY;
+					goto cleanup;
+				}
+				ret = dxg_copy_from_user(
+					shared_resource->runtime_private_data,
+					args.private_runtime_data,
+					args.private_runtime_data_size);
+				if (ret)
+					goto cleanup;
+			}
+			if (args.priv_drv_data_size) {
+				shared_resource->resource_private_data =
+				    dxgmem_alloc(NULL,
+						 DXGMEM_RESOURCEPRIVATE,
+						 args.priv_drv_data_size);
+				if (shared_resource->resource_private_data ==
+				    NULL) {
+					ret = STATUS_NO_MEMORY;
+					goto cleanup;
+				}
+				ret = dxg_copy_from_user(
+					shared_resource->resource_private_data,
+					args.priv_drv_data,
+					args.priv_drv_data_size);
+				if (ret)
+					goto cleanup;
+			}
+		}
+	} else {
+		if (args.resource) {
+			/* Adding new allocations to the given resource */
+
+			dxgprocess_ht_lock_shared_down(process);
+			resource =
+			    hmgrtable_get_object_by_type(&process->handle_table,
+							 HMGRENTRY_TYPE_DXGRESOURCE,
+							 args.resource);
+			if (resource)
+				dxgresource_acquire_reference(resource);
+			dxgprocess_ht_lock_shared_up(process);
+
+			if (resource == NULL || resource->device != device) {
+				pr_err("invalid resource handle %x",
+					   args.resource);
+				ret = STATUS_INVALID_PARAMETER;
+				goto cleanup;
+			}
+			if (resource->shared_owner &&
+			    resource->shared_owner->sealed) {
+				pr_err("Resource is sealed");
+				ret = STATUS_INVALID_PARAMETER;
+				goto cleanup;
+			}
+			/* Synchronize with resource destruction */
+			dxgmutex_lock(&resource->resource_mutex);
+			if (!dxgresource_is_active(resource)) {
+				dxgmutex_unlock(&resource->resource_mutex);
+				ret = STATUS_INVALID_PARAMETER;
+				goto cleanup;
+			}
+			resource_mutex_acquired = true;
+		}
+	}
+
+	dxgalloc = dxgmem_alloc(process, DXGMEM_TMP,
+				sizeof(struct dxgallocation *) *
+				args.alloc_count);
+	if (dxgalloc == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	for (i = 0; i < args.alloc_count; i++) {
+		struct dxgallocation *alloc;
+		uint priv_data_size = alloc_info[i].priv_drv_data_size;
+
+		if (alloc_info[i].sysmem && !args.flags.standard_allocation) {
+			if ((unsigned long)
+			    alloc_info[i].sysmem & (PAGE_SIZE - 1)) {
+				pr_err("invalid sysmem alloc %d, %p",
+					   i, alloc_info[i].sysmem);
+				ret = STATUS_INVALID_PARAMETER;
+				goto cleanup;
+			}
+		}
+		if ((alloc_info[0].sysmem == NULL) !=
+		    (alloc_info[i].sysmem == NULL)) {
+			pr_err("All allocations must have sysmem pointer");
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+
+		dxgalloc[i] = dxgallocation_create(process);
+		if (dxgalloc[i] == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		alloc = dxgalloc[i];
+
+		if (resource)
+			dxgresource_add_alloc(resource, alloc);
+		else
+			dxgdevice_add_alloc(device, alloc);
+		if (args.flags.create_shared) {
+			/* Remember alloc private data to use it during open */
+			alloc->priv_drv_data =
+				dxgmem_alloc(process, DXGMEM_ALLOCPRIVATE,
+					priv_data_size +
+					offsetof(struct privdata, data) - 1);
+			if (alloc->priv_drv_data == NULL) {
+				ret = STATUS_NO_MEMORY;
+				goto cleanup;
+			}
+			if (args.flags.standard_allocation) {
+				memcpy(alloc->priv_drv_data->data,
+				       standard_alloc_priv_data,
+				       standard_alloc_priv_data_size);
+				alloc->priv_drv_data->data_size =
+				    standard_alloc_priv_data_size;
+			} else {
+				ret = dxg_copy_from_user(
+					alloc->priv_drv_data->data,
+					alloc_info[i].priv_drv_data,
+					priv_data_size);
+				if (ret)
+					goto cleanup;
+				alloc->priv_drv_data->data_size =
+				    priv_data_size;
+			}
+		}
+	}
+
+	ret = dxgvmb_send_create_allocation(process, device, &args, inargs,
+					    resource, dxgalloc, alloc_info,
+					    &standard_alloc);
+	if (ret)
+		goto cleanup;
+
+cleanup:
+
+	if (resource_mutex_acquired) {
+		dxgmutex_unlock(&resource->resource_mutex);
+		dxgresource_release_reference(resource);
+	}
+	if (ret) {
+		if (dxgalloc) {
+			for (i = 0; i < args.alloc_count; i++) {
+				if (dxgalloc[i])
+					dxgallocation_destroy(dxgalloc[i]);
+			}
+		}
+		if (resource && args.flags.create_resource) {
+			if (shared_resource) {
+				dxgsharedresource_remove_resource(
+					shared_resource, resource);
+			}
+			dxgresource_destroy(resource);
+		}
+	}
+	if (shared_resource)
+		dxgsharedresource_release_reference(shared_resource);
+	if (dxgalloc)
+		dxgmem_free(process, DXGMEM_TMP, dxgalloc);
+	if (standard_alloc_priv_data)
+		dxgmem_free(process, DXGMEM_TMP, standard_alloc_priv_data);
+	if (alloc_info)
+		dxgmem_free(process, DXGMEM_TMP, alloc_info);
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+
+	if (device) {
+		dxgdevice_release_lock_shared(device);
+		dxgdevice_release_reference(device);
+	}
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
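+/*
+ * Checks that an allocation from a batch has the same ownership as the
+ * first allocation in the batch: the same kind of owner (resource or
+ * device), the same resource, a resource owned by the given device and
+ * not part of a shared resource. The numeric fail_reason values only
+ * identify which check failed in the error message.
+ */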
+int validate_alloc(struct dxgallocation *alloc0,
+		   struct dxgallocation *alloc,
+		   struct dxgdevice *device, d3dkmt_handle alloc_handle)
+{
+	uint fail_reason;
+
+	if (alloc == NULL) {
+		fail_reason = 1;
+		goto cleanup;
+	}
+	if (alloc->resource_owner != alloc0->resource_owner) {
+		fail_reason = 2;
+		goto cleanup;
+	}
+	if (alloc->resource_owner) {
+		if (alloc->owner.resource != alloc0->owner.resource) {
+			fail_reason = 3;
+			goto cleanup;
+		}
+		if (alloc->owner.resource->device != device) {
+			fail_reason = 4;
+			goto cleanup;
+		}
+		if (alloc->owner.resource->shared_owner) {
+			fail_reason = 5;
+			goto cleanup;
+		}
+	} else {
+		if (alloc->owner.device != device) {
+			fail_reason = 6;
+			goto cleanup;
+		}
+	}
+	return 0;
+cleanup:
+	pr_err("Alloc validation failed: reason: %d %x",
+		   fail_reason, alloc_handle);
+	return STATUS_INVALID_PARAMETER;
+}
+
+static int dxgk_destroy_allocation(struct dxgprocess *process,
+				   void *__user inargs)
+{
+	struct d3dkmt_destroyallocation2 args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	int ret = 0;
+	d3dkmt_handle *alloc_handles = NULL;
+	struct dxgallocation **allocs = NULL;
+	struct dxgresource *resource = NULL;
+	int i;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
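+	/*
+	 * The caller must pass either a list of allocations or a resource
+	 * handle, but not both and not neither.
+	 */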
+	if (args.alloc_count > D3DKMT_CREATEALLOCATION_MAX ||
+	    ((args.alloc_count == 0) == (args.resource == 0))) {
+		pr_err("invalid number of allocations");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (args.alloc_count) {
+		uint handle_size = sizeof(d3dkmt_handle) * args.alloc_count;
+
+		alloc_handles = dxgmem_alloc(process, DXGMEM_TMP, handle_size);
+		if (alloc_handles == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		allocs = dxgmem_alloc(process, DXGMEM_TMP,
+				      sizeof(struct dxgallocation *) *
+				      args.alloc_count);
+		if (allocs == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		ret = dxg_copy_from_user(alloc_handles, args.allocations,
+					 handle_size);
+		if (ret)
+			goto cleanup;
+	}
+
+	/*
+	 * The call acquires a reference on the device. It is safe to access
+	 * the adapter because the device holds a reference on it.
+	 */
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	/* Acquire the device lock to synchronize with device destruction */
+	ret = dxgdevice_acquire_lock_shared(device);
+	if (ret) {
+		dxgdevice_release_reference(device);
+		device = NULL;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	/*
+	 * Destroy the local allocation handles first. If the host handle
+	 * were destroyed first, another object could be assigned to the same
+	 * slot in the process handle table, and operations on the stale
+	 * allocation handle would fail.
+	 */
+	if (args.alloc_count) {
+		dxgprocess_ht_lock_exclusive_down(process);
+		for (i = 0; i < args.alloc_count; i++) {
+			allocs[i] =
+			    hmgrtable_get_object_by_type(&process->handle_table,
+							 HMGRENTRY_TYPE_DXGALLOCATION,
+							 alloc_handles[i]);
+			ret =
+			    validate_alloc(allocs[0], allocs[i], device,
+					   alloc_handles[i]);
+			if (ret) {
+				dxgprocess_ht_lock_exclusive_up(process);
+				goto cleanup;
+			}
+		}
+		dxgprocess_ht_lock_exclusive_up(process);
+		for (i = 0; i < args.alloc_count; i++)
+			dxgallocation_free_handle(allocs[i]);
+	} else {
+		struct dxgallocation *alloc;
+
+		dxgprocess_ht_lock_exclusive_down(process);
+		resource = hmgrtable_get_object_by_type(&process->handle_table,
+							HMGRENTRY_TYPE_DXGRESOURCE,
+							args.resource);
+		if (resource == NULL) {
+			pr_err("Invalid resource handle: %x",
+				   args.resource);
+			ret = STATUS_INVALID_PARAMETER;
+		} else if (resource->device != device) {
+			pr_err("Resource belongs to wrong device: %x",
+				   args.resource);
+			ret = STATUS_INVALID_PARAMETER;
+		} else {
+			hmgrtable_free_handle(&process->handle_table,
+					      HMGRENTRY_TYPE_DXGRESOURCE,
+					      args.resource);
+			resource->object_state = DXGOBJECTSTATE_DESTROYED;
+			resource->handle = 0;
+			resource->handle_valid = 0;
+		}
+		dxgprocess_ht_lock_exclusive_up(process);
+
+		if (ret)
+			goto cleanup;
+
+		dxgdevice_acquire_alloc_list_lock_shared(device);
+		list_for_each_entry(alloc, &resource->alloc_list_head,
+				    alloc_list_entry) {
+			dxgallocation_free_handle(alloc);
+		}
+		dxgdevice_release_alloc_list_lock_shared(device);
+	}
+
+	if (args.alloc_count && allocs[0]->resource_owner)
+		resource = allocs[0]->owner.resource;
+
+	if (resource) {
+		dxgresource_acquire_reference(resource);
+		dxgmutex_lock(&resource->resource_mutex);
+	}
+
+	ret = dxgvmb_send_destroy_allocation(process, device, &adapter->channel,
+					     &args, alloc_handles);
+
+	/*
+	 * Destroy the allocations after the host has destroyed them.
+	 * The allocation gpadl teardown will wait until the host unmaps its
+	 * gpadl.
+	 */
+	dxgdevice_acquire_alloc_list_lock(device);
+	if (args.alloc_count) {
+		for (i = 0; i < args.alloc_count; i++) {
+			if (allocs[i]) {
+				allocs[i]->alloc_handle = 0;
+				dxgallocation_destroy(allocs[i]);
+			}
+		}
+	} else {
+		dxgresource_destroy(resource);
+	}
+	dxgdevice_release_alloc_list_lock(device);
+
+	if (resource) {
+		dxgmutex_unlock(&resource->resource_mutex);
+		dxgresource_release_reference(resource);
+	}
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+
+	if (device) {
+		dxgdevice_release_lock_shared(device);
+		dxgdevice_release_reference(device);
+	}
+
+	if (alloc_handles)
+		dxgmem_free(process, DXGMEM_TMP, alloc_handles);
+
+	if (allocs)
+		dxgmem_free(process, DXGMEM_TMP, allocs);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_make_resident(struct dxgprocess *process, void *__user inargs)
+{
+	int ret = 0, ret2;
+	struct d3dddi_makeresident args;
+	struct d3dddi_makeresident *input = inargs;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.alloc_count > D3DKMT_CREATEALLOCATION_MAX ||
+	    args.alloc_count == 0) {
+		pr_err("invalid number of allocations");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	if (args.paging_queue == 0) {
+		pr_err("paging queue is missing");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+						    args.paging_queue);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_make_resident(process, NULL, &adapter->channel,
+					&args);
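+	/*
+	 * STATUS_PENDING is not a failure: the host accepted the request,
+	 * and the paging fence value returned below tells the caller when
+	 * the operation completes.
+	 */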
+	if (ret && ret != STATUS_PENDING)
+		goto cleanup;
+
+	ret2 = dxg_copy_to_user(&input->paging_fence_value,
+				&args.paging_fence_value, sizeof(uint64_t));
+	if (ret2) {
+		ret = ret2;
+		goto cleanup;
+	}
+
+	ret2 = dxg_copy_to_user(&input->num_bytes_to_trim,
+				&args.num_bytes_to_trim, sizeof(uint64_t));
+	if (ret2) {
+		ret = ret2;
+		goto cleanup;
+	}
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+
+	return ret;
+}
+
+static int dxgk_evict(struct dxgprocess *process, void *__user inargs)
+{
+	int ret = 0;
+	struct d3dkmt_evict args;
+	struct d3dkmt_evict *input = inargs;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.alloc_count > D3DKMT_CREATEALLOCATION_MAX ||
+	    args.alloc_count == 0) {
+		pr_err("invalid number of allocations");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_evict(process, &adapter->channel, &args);
+	if (ret)
+		goto cleanup;
+
+	ret = dxg_copy_to_user(&input->num_bytes_to_trim,
+			       &args.num_bytes_to_trim, sizeof(uint64_t));
+	if (ret)
+		goto cleanup;
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_offer_allocations(struct dxgprocess *process,
+				  void *__user inargs)
+{
+	int ret = 0;
+	struct d3dkmt_offerallocations args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.allocation_count > D3DKMT_CREATEALLOCATION_MAX ||
+	    args.allocation_count == 0) {
+		pr_err("invalid number of allocations");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if ((args.resources == NULL) == (args.allocations == NULL)) {
+		pr_err("invalid pointer to resources/allocations");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_offer_allocations(process, &adapter->channel, &args);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_reclaim_allocations(struct dxgprocess *process,
+				    void *__user inargs)
+{
+	int ret = 0;
+	struct d3dkmt_reclaimallocations2 args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.allocation_count > D3DKMT_CREATEALLOCATION_MAX ||
+	    args.allocation_count == 0) {
+		pr_err("invalid number of allocations");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if ((args.resources == NULL) == (args.allocations == NULL)) {
+		pr_err("invalid pointer to resources/allocations");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+						    args.paging_queue);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_reclaim_allocations(process, &adapter->channel,
+					      device->handle, &args,
+					      &((struct d3dkmt_reclaimallocations2 *)
+						inargs)->paging_fence_value);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_submit_command(struct dxgprocess *process, void *__user inargs)
+{
+	int ret = 0;
+	struct d3dkmt_submitcommand args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.broadcast_context_count > D3DDDI_MAX_BROADCAST_CONTEXT ||
+	    args.broadcast_context_count == 0) {
+		pr_err("invalid number of contexts");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (args.priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		pr_err("invalid private data size");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (args.num_history_buffers > 1024) {
+		pr_err("invalid number of history buffers");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (args.num_primaries > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		pr_err("invalid number of primaries");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGCONTEXT,
+						    args.broadcast_context[0]);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_submit_command(process, &adapter->channel, &args);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_submit_command_to_hwqueue(struct dxgprocess *process,
+					  void *__user inargs)
+{
+	int ret = 0;
+	struct d3dkmt_submitcommandtohwqueue args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		pr_err("invalid private data size");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (args.num_primaries > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		pr_err("invalid number of primaries");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGHWQUEUE,
+						    args.hwqueue);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_submit_command_to_hwqueue(process, &adapter->channel,
+						    &args);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_submit_signal_to_hwqueue(struct dxgprocess *process,
+					 void *__user inargs)
+{
+	int ret = 0;
+	struct d3dkmt_submitsignalsyncobjectstohwqueue args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	d3dkmt_handle hwqueue;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.hwqueue_count > D3DDDI_MAX_BROADCAST_CONTEXT ||
+	    args.hwqueue_count == 0) {
+		pr_err("invalid hwqueue count");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (args.object_count > D3DDDI_MAX_OBJECT_SIGNALED ||
+	    args.object_count == 0) {

+		pr_err("invalid number of syn cobject");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxg_copy_from_user(&hwqueue, args.hwqueues,
+				 sizeof(d3dkmt_handle));
+	if (ret)
+		goto cleanup;
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGHWQUEUE,
+						    hwqueue);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_signal_sync_object(process, &adapter->channel,
+					     args.flags, 0, 0,
+					     args.object_count, args.objects,
+					     args.hwqueue_count, args.hwqueues,
+					     args.object_count,
+					     args.fence_values, NULL, 0);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
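+/*
+ * Queue a wait for sync objects on a hardware queue
+ * (struct d3dkmt_submitwaitforsyncobjectstohwqueue). Object handles and
+ * fence values are copied to temporary kernel buffers before the request is
+ * sent to the host.
+ */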
+static int dxgk_submit_wait_to_hwqueue(struct dxgprocess *process,
+				       void *__user inargs)
+{
+	struct d3dkmt_submitwaitforsyncobjectstohwqueue args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	int ret = 0;
+	d3dkmt_handle *objects = NULL;
+	uint object_size;
+	uint64_t *fences = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.object_count > D3DDDI_MAX_OBJECT_WAITED_ON ||
+	    args.object_count == 0) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	object_size = sizeof(d3dkmt_handle) * args.object_count;
+	objects = dxgmem_alloc(process, DXGMEM_TMP, object_size);
+	if (objects == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	ret = dxg_copy_from_user(objects, args.objects, object_size);
+	if (ret)
+		goto cleanup;
+
+	object_size = sizeof(uint64_t) * args.object_count;
+	fences = dxgmem_alloc(process, DXGMEM_TMP, object_size);
+	if (fences == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	ret = dxg_copy_from_user(fences, args.fence_values, object_size);
+	if (ret)
+		goto cleanup;
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGHWQUEUE,
+						    args.hwqueue);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_wait_sync_object_gpu(process, &adapter->channel,
+					       args.hwqueue, args.object_count,
+					       objects, fences, false);
+
+cleanup:
+
+	if (objects)
+		dxgmem_free(process, DXGMEM_TMP, objects);
+	if (fences)
+		dxgmem_free(process, DXGMEM_TMP, fences);
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
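+/*
+ * Map a GPU virtual address (struct d3dddi_mapgpuvirtualaddress).
+ * STATUS_PENDING from the host is a success case: the paging fence value
+ * and the virtual address are still copied back to user mode.
+ */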
+static int dxgk_map_gpu_va(struct dxgprocess *process, void *__user inargs)
+{
+	int ret = 0, ret2;
+	struct d3dddi_mapgpuvirtualaddress args;
+	struct d3dddi_mapgpuvirtualaddress *__user input = inargs;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+						    args.paging_queue);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_map_gpu_va(process, 0, &adapter->channel, &args);
+	if (ret && ret != (int)STATUS_PENDING)
+		goto cleanup;
+
+	ret2 = dxg_copy_to_user(&input->paging_fence_value,
+				&args.paging_fence_value, sizeof(uint64_t));
+	if (ret2) {
+		ret = ret2;
+		goto cleanup;
+	}
+
+	ret2 = dxg_copy_to_user(&input->virtual_address, &args.virtual_address,
+				sizeof(args.virtual_address));
+	if (ret2) {
+		ret = ret2;
+		goto cleanup;
+	}
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
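+/*
+ * Reserve a GPU virtual address range
+ * (struct d3dddi_reservegpuvirtualaddress). args.adapter may hold either an
+ * adapter handle or a paging queue handle; in the latter case the adapter
+ * is found through the owning device.
+ */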
+static int dxgk_reserve_gpu_va(struct dxgprocess *process, void *__user inargs)
+{
+	int ret = 0;
+	struct d3dddi_reservegpuvirtualaddress args;
+	struct d3dddi_reservegpuvirtualaddress *__user input = inargs;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	adapter = dxgprocess_adapter_by_handle(process, args.adapter);
+	if (adapter == NULL) {
+		device = dxgprocess_device_by_object_handle(process,
+							    HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+							    args.adapter);
+		if (device == NULL) {
+			pr_err("invalid adapter or paging queue: 0x%x",
+				   args.adapter);
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		adapter = device->adapter;
+		dxgadapter_acquire_reference(adapter);
+		dxgdevice_release_reference(device);
+	} else {
+		args.adapter = adapter->host_handle;
+	}
+
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		dxgadapter_release_reference(adapter);
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_reserve_gpu_va(process, &adapter->channel, &args);
+	if (ret)
+		goto cleanup;
+
+	ret = dxg_copy_to_user(&input->virtual_address, &args.virtual_address,
+			       sizeof(args.virtual_address));
+	if (ret)
+		goto cleanup;
+
+cleanup:
+
+	if (adapter) {
+		dxgadapter_release_lock_shared(adapter);
+		dxgadapter_release_reference(adapter);
+	}
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_free_gpu_va(struct dxgprocess *process, void *__user inargs)
+{
+	int ret = 0;
+	struct d3dkmt_freegpuvirtualaddress args;
+	struct dxgadapter *adapter = NULL;
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	adapter = dxgprocess_adapter_by_handle(process, args.adapter);
+	if (adapter == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		dxgadapter_release_reference(adapter);
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	args.adapter = adapter->host_handle;
+	ret = dxgvmb_send_free_gpu_va(process, &adapter->channel, &args);
+
+cleanup:
+
+	if (adapter) {
+		dxgadapter_release_lock_shared(adapter);
+		dxgadapter_release_reference(adapter);
+	}
+
+	return ret;
+}
+
+static int dxgk_update_gpu_va(struct dxgprocess *process, void *__user inargs)
+{
+	int ret = 0;
+	struct d3dkmt_updategpuvirtualaddress args;
+	struct d3dkmt_updategpuvirtualaddress *__user input = inargs;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_update_gpu_va(process, &adapter->channel, &args);
+	if (ret)
+		goto cleanup;
+
+	ret = dxg_copy_to_user(&input->fence_value, &args.fence_value,
+			       sizeof(args.fence_value));
+	if (ret)
+		goto cleanup;
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	return ret;
+}
+
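+/*
+ * Create a sync object (struct d3dkmt_createsynchronizationobject2). For
+ * CPU notification objects an eventfd is referenced and registered as a
+ * host event. Shared objects additionally get a global or NT-security
+ * shared handle.
+ */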
+static int dxgk_create_sync_object(struct dxgprocess *process,
+				   void *__user inargs)
+{
+	int ret = 0;
+	struct d3dkmt_createsynchronizationobject2 args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	struct eventfd_ctx *event = NULL;
+	struct dxgsyncobject *syncobj = NULL;
+	bool host_event_added = false;
+	bool device_lock_acquired = false;
+	struct dxgsharedsyncobject *syncobjgbl = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgdevice_acquire_lock_shared(device);
+	if (ret)
+		goto cleanup;
+
+	device_lock_acquired = true;
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	syncobj = dxgsyncobject_create(process, device, adapter, args.info.type,
+				       args.info.flags);
+	if (syncobj == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (args.info.flags.shared && syncobj->monitored_fence &&
+	    !args.info.flags.nt_security_sharing) {
+		pr_err("monitored fence requires nt_security_sharing");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (args.info.type == D3DDDI_CPU_NOTIFICATION) {
+		event = eventfd_ctx_fdget((int)
+					  args.info.cpu_notification.event);
+		if (IS_ERR(event)) {
+			pr_err("failed to reference the event");
+			event = NULL;
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		syncobj->host_event->event_id = dxgglobal_new_host_event_id();
+		syncobj->host_event->cpu_event = event;
+		syncobj->host_event->remove_from_list = false;
+		syncobj->host_event->destroy_after_signal = false;
+		dxgglobal_add_host_event(syncobj->host_event);
+		host_event_added = true;
+		args.info.cpu_notification.event =
+		    syncobj->host_event->event_id;
+		TRACE_DEBUG(1, "creating CPU notification event: %lld",
+			    args.info.cpu_notification.event);
+	}
+
+	ret = dxgvmb_send_create_sync_object(process, &adapter->channel, &args,
+					     syncobj);
+	if (ret)
+		goto cleanup;
+
+	if (args.info.flags.shared) {
+		if (args.info.shared_handle == 0) {
+			pr_err("shared handle should not be 0");
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		syncobjgbl = dxgsharedsyncobj_create(device->adapter, syncobj);
+		if (syncobjgbl == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		dxgsharedsyncobj_add_syncobj(syncobjgbl, syncobj);
+
+		syncobjgbl->host_shared_handle = args.info.shared_handle;
+		if (!args.info.flags.nt_security_sharing) {
+			hmgrtable_lock(&dxgglobal->handle_table, DXGLOCK_EXCL);
+			syncobjgbl->global_shared_handle =
+			    hmgrtable_alloc_handle(&dxgglobal->handle_table,
+						   syncobjgbl,
+						   HMGRENTRY_TYPE_DXGSYNCOBJECT,
+						   true);
+			if (syncobjgbl->global_shared_handle) {
+				args.info.shared_handle =
+				    syncobjgbl->global_shared_handle;
+			} else {
+				ret = STATUS_NO_MEMORY;
+			}
+			hmgrtable_unlock(&dxgglobal->handle_table,
+					 DXGLOCK_EXCL);
+			if (ret)
+				goto cleanup;
+		}
+	}
+
+	ret = dxg_copy_to_user(inargs, &args, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	ret = hmgrtable_assign_handle(&process->handle_table, syncobj,
+				      HMGRENTRY_TYPE_DXGSYNCOBJECT,
+				      args.sync_object);
+	if (!ret)
+		syncobj->handle = args.sync_object;
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+cleanup:
+
+	if (ret) {
+		if (syncobj) {
+			dxgsyncobject_destroy(process, syncobj);
+			if (args.sync_object)
+				dxgvmb_send_destroy_sync_object(process,
+							args.sync_object);
+			event = NULL;
+		}
+		if (event)
+			eventfd_ctx_put(event);
+	}
+	if (syncobjgbl)
+		dxgsharedsyncobj_release_reference(syncobjgbl);
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device_lock_acquired)
+		dxgdevice_release_lock_shared(device);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_destroy_sync_object(struct dxgprocess *process,
+				    void *__user inargs)
+{
+	struct d3dkmt_destroysynchronizationobject args;
+	struct dxgsyncobject *syncobj = NULL;
+	int ret = 0;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	TRACE_DEBUG(1, "handle 0x%x", args.sync_object);
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	syncobj = hmgrtable_get_object_by_type(&process->handle_table,
+					       HMGRENTRY_TYPE_DXGSYNCOBJECT,
+					       args.sync_object);
+	if (syncobj) {
+		TRACE_DEBUG(1, "syncobj 0x%p", syncobj);
+		syncobj->handle = 0;
+		hmgrtable_free_handle(&process->handle_table,
+				      HMGRENTRY_TYPE_DXGSYNCOBJECT,
+				      args.sync_object);
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+	if (syncobj == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	dxgsyncobject_destroy(process, syncobj);
+
+	ret = dxgvmb_send_destroy_sync_object(process, args.sync_object);
+
+cleanup:
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
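+/*
+ * Open a shared sync object by NT handle
+ * (struct d3dkmt_opensyncobjectfromnthandle2). The NT handle is a file
+ * descriptor created by dxgk_share_objects and must reference a file with
+ * dxg_syncobj_fops.
+ */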
+static int dxgk_open_sync_object_nt(struct dxgprocess *process,
+				    void *__user inargs)
+{
+	struct d3dkmt_opensyncobjectfromnthandle2 args;
+	struct dxgsyncobject *syncobj = NULL;
+	struct dxgsharedsyncobject *syncobj_fd = NULL;
+	struct file *file = NULL;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	struct d3dddi_synchronizationobject_flags flags = { };
+	int ret;
+	bool device_lock_acquired = false;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	args.sync_object = 0;
+
+	if (args.device) {
+		device = dxgprocess_device_by_handle(process, args.device);
+		if (device == NULL) {
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+	} else {
+		pr_err("device handle is missing");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgdevice_acquire_lock_shared(device);
+	if (ret)
+		goto cleanup;
+
+	device_lock_acquired = true;
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	file = fget(args.nt_handle);
+	if (!file) {
+		pr_err("failed to get file from handle: %llx",
+			   args.nt_handle);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (file->f_op != &dxg_syncobj_fops) {
+		pr_err("invalid fd: %llx", args.nt_handle);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	syncobj_fd = file->private_data;
+	if (syncobj_fd == NULL) {
+		pr_err("invalid private data: %llx", args.nt_handle);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	flags.shared = 1;
+	flags.nt_security_sharing = 1;
+	syncobj = dxgsyncobject_create(process, device, adapter,
+				       syncobj_fd->type, flags);
+	if (syncobj == NULL) {
+		pr_err("failed to create sync object");
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	dxgsharedsyncobj_add_syncobj(syncobj_fd, syncobj);
+
+	ret = dxgvmb_send_open_sync_object_nt(process, &dxgglobal->channel,
+					      &args, syncobj);
+	if (ret) {
+		pr_err("failed to open sync object on host: %x",
+			   syncobj_fd->host_shared_handle);
+		goto cleanup;
+	}
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	ret = hmgrtable_assign_handle(&process->handle_table, syncobj,
+				      HMGRENTRY_TYPE_DXGSYNCOBJECT,
+				      args.sync_object);
+	if (!ret) {
+		syncobj->handle = args.sync_object;
+		dxgsyncobject_acquire_reference(syncobj);
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+	if (ret)
+		goto cleanup;
+
+	ret = dxg_copy_to_user(inargs, &args, sizeof(args));
+	if (!ret)
+		goto success;
+
+cleanup:
+
+	if (syncobj) {
+		dxgsyncobject_destroy(process, syncobj);
+		syncobj = NULL;
+	}
+
+	if (args.sync_object)
+		dxgvmb_send_destroy_sync_object(process, args.sync_object);
+
+success:
+
+	if (file)
+		fput(file);
+	if (syncobj)
+		dxgsyncobject_release_reference(syncobj);
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device_lock_acquired)
+		dxgdevice_release_lock_shared(device);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
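+/*
+ * Open a shared sync object by global shared handle
+ * (struct d3dkmt_opensynchronizationobject). Monitored fences cannot be
+ * opened this way; they require NT security sharing.
+ */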
+static int dxgk_open_sync_object(struct dxgprocess *process,
+				 void *__user inargs)
+{
+	d3dkmt_handle shared_handle = 0;
+	d3dkmt_handle new_handle = 0;
+	struct d3dkmt_opensynchronizationobject *__user inp = inargs;
+	struct dxgsyncobject *syncobj = NULL;
+	struct dxgsharedsyncobject *syncobjgbl = NULL;
+	struct d3dddi_synchronizationobject_flags flags = { };
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&shared_handle, &inp->shared_handle,
+				 sizeof(shared_handle));
+	if (ret)
+		goto cleanup;
+
+	hmgrtable_lock(&dxgglobal->handle_table, DXGLOCK_SHARED);
+	syncobjgbl = hmgrtable_get_object_by_type(&dxgglobal->handle_table,
+						  HMGRENTRY_TYPE_DXGSYNCOBJECT,
+						  shared_handle);
+	if (syncobjgbl)
+		dxgsharedsyncobj_acquire_reference(syncobjgbl);
+	hmgrtable_unlock(&dxgglobal->handle_table, DXGLOCK_SHARED);
+
+	if (syncobjgbl == NULL) {
+		pr_err("invalid sync object shared handle: %x",
+			   shared_handle);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (syncobjgbl->monitored_fence) {
+		pr_err("Open monitored fence using global handle");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	flags.shared = 1;
+	syncobj = dxgsyncobject_create(process, NULL, syncobjgbl->adapter,
+				       syncobjgbl->type, flags);
+	if (syncobj == NULL) {
+		pr_err("failed to create sync object");
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	dxgsharedsyncobj_add_syncobj(syncobjgbl, syncobj);
+
+	ret = dxgvmb_send_open_sync_object(process, &dxgglobal->channel,
+					   syncobjgbl->host_shared_handle,
+					   &new_handle);
+	if (ret) {
+		pr_err("failed to open sync object on host: %x",
+			   syncobjgbl->host_shared_handle);
+		goto cleanup;
+	}
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	ret = hmgrtable_assign_handle(&process->handle_table, syncobj,
+				      HMGRENTRY_TYPE_DXGSYNCOBJECT, new_handle);
+	if (!ret) {
+		syncobj->handle = new_handle;
+		dxgsyncobject_acquire_reference(syncobj);
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+	if (ret)
+		goto cleanup;
+
+	ret = dxg_copy_to_user(&inp->sync_object, &new_handle,
+			       sizeof(new_handle));
+	if (!ret)
+		goto success;
+
+cleanup:
+
+	if (syncobj) {
+		dxgsyncobject_destroy(process, syncobj);
+		syncobj = NULL;
+	}
+
+	if (new_handle)
+		dxgvmb_send_destroy_sync_object(process, new_handle);
+
+success:
+
+	if (syncobj)
+		dxgsyncobject_release_reference(syncobj);
+	if (syncobjgbl)
+		dxgsharedsyncobj_release_reference(syncobjgbl);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
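+/*
+ * Signal a sync object (struct d3dkmt_signalsynchronizationobject2). When
+ * flags.enqueue_cpu_event is set, an eventfd is registered as a host event;
+ * on success the host owns and destroys it after signaling.
+ */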
+static int dxgk_signal_sync_object(struct dxgprocess *process,
+				   void *__user inargs)
+{
+	struct d3dkmt_signalsynchronizationobject2 args;
+	struct d3dkmt_signalsynchronizationobject2 *__user in_args = inargs;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	int ret = 0;
+	uint fence_count = 1;
+	struct eventfd_ctx *event = NULL;
+	struct dxghostevent *host_event = NULL;
+	bool host_event_added = false;
+	u64 host_event_id = 0;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.context_count >= D3DDDI_MAX_BROADCAST_CONTEXT ||
+	    args.object_count > D3DDDI_MAX_OBJECT_SIGNALED) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (args.flags.enqueue_cpu_event) {
+		host_event = dxgmem_alloc(process, DXGMEM_EVENT,
+					  sizeof(*host_event));
+		if (host_event == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		host_event->process = process;
+		event = eventfd_ctx_fdget((int)args.cpu_event_handle);
+		if (IS_ERR(event)) {
+			pr_err("failed to reference the event");
+			event = NULL;
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		fence_count = 0;
+		host_event->cpu_event = event;
+		host_event_id = dxgglobal_new_host_event_id();
+		host_event->event_id = host_event_id;
+		host_event->remove_from_list = true;
+		host_event->destroy_after_signal = true;
+		dxgglobal_add_host_event(host_event);
+		host_event_added = true;
+	}
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGCONTEXT,
+						    args.context);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_signal_sync_object(process, &adapter->channel,
+					     args.flags, args.fence.fence_value,
+					     args.context, args.object_count,
+					     in_args->object_array,
+					     args.context_count,
+					     in_args->contexts, fence_count,
+					     NULL, (void *)host_event_id, 0);
+
+	/*
+	 * When the send operation succeeds, the host event will be destroyed
+	 * after it is signaled by the host.
+	 */
+
+cleanup:
+
+	if (ret) {
+		if (host_event_added) {
+			/* The event might be signaled and destroyed by host */
+			host_event = dxgglobal_get_host_event(host_event_id);
+			if (host_event) {
+				eventfd_ctx_put(event);
+				event = NULL;
+				dxgmem_free(process, DXGMEM_EVENT, host_event);
+				host_event = NULL;
+			}
+		}
+		if (event)
+			eventfd_ctx_put(event);
+		if (host_event)
+			dxgmem_free(process, DXGMEM_EVENT, host_event);
+	}
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_signal_sync_object_cpu(struct dxgprocess *process,
+				       void *__user inargs)
+{
+	struct d3dkmt_signalsynchronizationobjectfromcpu args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	int ret = 0;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_signal_sync_object(process, &adapter->channel,
+					     args.flags, 0, 0,
+					     args.object_count, args.objects, 0,
+					     NULL, args.object_count,
+					     args.fence_values, NULL,
+					     args.device);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_signal_sync_object_gpu(struct dxgprocess *process,
+				       void *__user inargs)
+{
+	struct d3dkmt_signalsynchronizationobjectfromgpu args;
+	struct d3dkmt_signalsynchronizationobjectfromgpu *__user user_args =
+	    inargs;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	struct d3dddicb_signalflags flags = { };
+	int ret = 0;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.object_count == 0 ||
+	    args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGCONTEXT,
+						    args.context);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_signal_sync_object(process, &adapter->channel,
+					     flags, 0, 0, args.object_count,
+					     args.objects, 1,
+					     &user_args->context,
+					     args.object_count,
+					     args.monitored_fence_values, NULL,
+					     0);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_signal_sync_object_gpu2(struct dxgprocess *process,
+					void *__user inargs)
+{
+	struct d3dkmt_signalsynchronizationobjectfromgpu2 args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	d3dkmt_handle context_handle;
+	struct eventfd_ctx *event = NULL;
+	uint64_t *fences = NULL;
+	uint fence_count = 0;
+	int ret = 0;
+	struct dxghostevent *host_event = NULL;
+	bool host_event_added = false;
+	u64 host_event_id = 0;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.flags.enqueue_cpu_event) {
+		if (args.object_count != 0 || args.cpu_event_handle == 0) {
+			pr_err("Bad input for EnqueueCpuEvent: %d %lld",
+			       args.object_count, args.cpu_event_handle);
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+	} else if (args.object_count == 0 ||
+		   args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE ||
+		   args.context_count == 0 ||
+		   args.context_count > DXG_MAX_VM_BUS_PACKET_SIZE) {
+		pr_err("Invalid input: %d %d",
+			   args.object_count, args.context_count);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxg_copy_from_user(&context_handle, args.contexts,
+				 sizeof(d3dkmt_handle));
+	if (ret)
+		goto cleanup;
+
+	if (args.flags.enqueue_cpu_event) {
+		host_event = dxgmem_alloc(process, DXGMEM_EVENT,
+					  sizeof(*host_event));
+		if (host_event == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		host_event->process = process;
+		event = eventfd_ctx_fdget((int)args.cpu_event_handle);
+		if (IS_ERR(event)) {
+			pr_err("failed to reference the event");
+			event = NULL;
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		fence_count = 0;
+		host_event->cpu_event = event;
+		host_event_id = dxgglobal_new_host_event_id();
+		host_event->event_id = host_event_id;
+		host_event->remove_from_list = true;
+		host_event->destroy_after_signal = true;
+		dxgglobal_add_host_event(host_event);
+		host_event_added = true;
+	} else {
+		fences = args.monitored_fence_values;
+		fence_count = args.object_count;
+	}
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGCONTEXT,
+						    context_handle);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_signal_sync_object(process, &adapter->channel,
+					     args.flags, 0, 0,
+					     args.object_count, args.objects,
+					     args.context_count, args.contexts,
+					     fence_count, fences,
+					     (void *)host_event_id, 0);
+
+cleanup:
+
+	if (ret) {
+		if (host_event_added) {
+			/* The event might be signaled and destroyed by host */
+			host_event = dxgglobal_get_host_event(host_event_id);
+			if (host_event) {
+				eventfd_ctx_put(event);
+				event = NULL;
+				dxgmem_free(process, DXGMEM_EVENT, host_event);
+				host_event = NULL;
+			}
+		}
+		if (event)
+			eventfd_ctx_put(event);
+		if (host_event)
+			dxgmem_free(process, DXGMEM_EVENT, host_event);
+	}
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_wait_sync_object(struct dxgprocess *process,
+				 void *__user inargs)
+{
+	struct d3dkmt_waitforsynchronizationobject2 args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	int ret = 0;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.object_count > D3DDDI_MAX_OBJECT_WAITED_ON ||
+	    args.object_count == 0) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGCONTEXT,
+						    args.context);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	TRACE_DEBUG(1, "Fence value: %lld", args.fence.fence_value);
+	ret = dxgvmb_send_wait_sync_object_gpu(process, &adapter->channel,
+					       args.context, args.object_count,
+					       args.object_array,
+					       &args.fence.fence_value, true);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
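+/*
+ * Wait for sync objects on the CPU
+ * (struct d3dkmt_waitforsynchronizationobjectfromcpu). A synchronous wait
+ * blocks on a completion, with a 10 second timeout, until the host signals
+ * the event; an asynchronous wait registers an eventfd instead.
+ */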
+static int dxgk_wait_sync_object_cpu(struct dxgprocess *process,
+				     void *__user inargs)
+{
+	struct d3dkmt_waitforsynchronizationobjectfromcpu args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	struct eventfd_ctx *event = NULL;
+	struct dxghostevent host_event = { };
+	struct dxghostevent *async_host_event = NULL;
+	struct completion local_event = { };
+	u64 event_id = 0;
+	int ret = 0;
+	unsigned long t;
+	bool host_event_added = false;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE ||
+	    args.object_count == 0) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (args.async_event) {
+		async_host_event = dxgmem_alloc(process, DXGMEM_EVENT,
+						sizeof(*async_host_event));
+		if (async_host_event == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		async_host_event->process = process;
+		event = eventfd_ctx_fdget((int)args.async_event);
+		if (IS_ERR(event)) {
+			pr_err("failed to reference the event");
+			event = NULL;
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		async_host_event->cpu_event = event;
+		async_host_event->event_id = dxgglobal_new_host_event_id();
+		async_host_event->destroy_after_signal = true;
+		dxgglobal_add_host_event(async_host_event);
+		event_id = async_host_event->event_id;
+		host_event_added = true;
+	} else {
+		init_completion(&local_event);
+		host_event.completion_event = &local_event;
+		host_event.event_id = dxgglobal_new_host_event_id();
+		dxgglobal_add_host_event(&host_event);
+		event_id = host_event.event_id;
+	}
+
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_wait_sync_object_cpu(process, &adapter->channel,
+					       &args, event_id);
+	if (ret)
+		goto cleanup;
+
+	if (args.async_event == 0) {
+		t = wait_for_completion_timeout(&local_event, (10 * HZ));
+		if (!t) {
+			TRACE_DEBUG(1, "timeout waiting for completion");
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+	}
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+	if (host_event.event_id)
+		dxgglobal_remove_host_event(&host_event);
+	if (ret) {
+		if (host_event_added) {
+			async_host_event = dxgglobal_get_host_event(event_id);
+			if (async_host_event) {
+				eventfd_ctx_put(event);
+				event = NULL;
+				dxgmem_free(process, DXGMEM_EVENT,
+					    async_host_event);
+				async_host_event = NULL;
+			}
+		}
+		if (event)
+			eventfd_ctx_put(event);
+		if (async_host_event)
+			dxgmem_free(process, DXGMEM_EVENT, async_host_event);
+	}
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_wait_sync_object_gpu(struct dxgprocess *process,
+				     void *__user inargs)
+{
+	struct d3dkmt_waitforsynchronizationobjectfromgpu args;
+	struct dxgcontext *context = NULL;
+	d3dkmt_handle device_handle = 0;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	struct dxgsyncobject *syncobj = NULL;
+	d3dkmt_handle *objects = NULL;
+	uint object_size;
+	uint64_t *fences = NULL;
+	int ret = 0;
+	enum hmgrentry_type syncobj_type = HMGRENTRY_TYPE_FREE;
+	bool monitored_fence = false;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE ||
+	    args.object_count == 0) {
+		pr_err("Invalid object count: %d", args.object_count);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	object_size = sizeof(d3dkmt_handle) * args.object_count;
+	objects = dxgmem_alloc(process, DXGMEM_TMP, object_size);
+	if (objects == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	ret = dxg_copy_from_user(objects, args.objects, object_size);
+	if (ret)
+		goto cleanup;
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+	context = hmgrtable_get_object_by_type(&process->handle_table,
+					       HMGRENTRY_TYPE_DXGCONTEXT,
+					       args.context);
+	if (context) {
+		device_handle = context->device_handle;
+		syncobj_type =
+		    hmgrtable_get_object_type(&process->handle_table,
+					      objects[0]);
+	}
+	if (device_handle == 0) {
+		pr_err("Invalid context handle: %x", args.context);
+		ret = STATUS_INVALID_PARAMETER;
+	} else {
+		if (syncobj_type == HMGRENTRY_TYPE_MONITOREDFENCE) {
+			monitored_fence = true;
+		} else if (syncobj_type == HMGRENTRY_TYPE_DXGSYNCOBJECT) {
+			syncobj =
+			    hmgrtable_get_object_by_type(&process->handle_table,
+							 HMGRENTRY_TYPE_DXGSYNCOBJECT,
+							 objects[0]);
+			if (syncobj == NULL) {
+				pr_err("Invalid syncobj: %x", objects[0]);
+				ret = STATUS_INVALID_PARAMETER;
+			} else {
+				monitored_fence = syncobj->monitored_fence;
+			}
+		} else {
+			pr_err("Invalid syncobj type: %x", objects[0]);
+			ret = STATUS_INVALID_PARAMETER;
+		}
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+
+	if (ret)
+		goto cleanup;
+
+	if (monitored_fence) {
+		object_size = sizeof(uint64_t) * args.object_count;
+		fences = dxgmem_alloc(process, DXGMEM_TMP, object_size);
+		if (fences == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		ret = dxg_copy_from_user(fences, args.monitored_fence_values,
+					 object_size);
+		if (ret)
+			goto cleanup;
+	} else {
+		fences = &args.fence_value;
+	}
+
+	device = dxgprocess_device_by_handle(process, device_handle);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_wait_sync_object_gpu(process, &adapter->channel,
+					       args.context, args.object_count,
+					       objects, fences,
+					       !monitored_fence);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+	if (objects)
+		dxgmem_free(process, DXGMEM_TMP, objects);
+	if (fences && fences != &args.fence_value)
+		dxgmem_free(process, DXGMEM_TMP, fences);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
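+/*
+ * Lock an allocation for CPU access (struct d3dkmt_lock2). If the
+ * allocation already has a CPU address, it is returned and the mapping
+ * refcount is bumped; otherwise the lock request is forwarded to the host.
+ */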
+static int dxgk_lock2(struct dxgprocess *process, void *__user inargs)
+{
+	struct d3dkmt_lock2 args;
+	struct d3dkmt_lock2 *__user result = inargs;
+	int ret = 0;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+	struct dxgallocation *alloc = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	args.data = NULL;
+	hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+	alloc = hmgrtable_get_object_by_type(&process->handle_table,
+					     HMGRENTRY_TYPE_DXGALLOCATION,
+					     args.allocation);
+	if (alloc == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+	} else {
+		if (alloc->cpu_address) {
+			ret = dxg_copy_to_user(&result->data,
+					       &alloc->cpu_address,
+					       sizeof(args.data));
+			if (NT_SUCCESS(ret)) {
+				args.data = alloc->cpu_address;
+				if (alloc->cpu_address_mapped)
+					alloc->cpu_address_refcount++;
+			}
+		}
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+	if (ret)
+		goto cleanup;
+	if (args.data)
+		goto success;
+
+	/*
+	 * The call acquires reference on the device. It is safe to access the
+	 * adapter, because the device holds reference on it.
+	 */
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_lock2(process, &adapter->channel, &args, result);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+
+	if (device)
+		dxgdevice_release_reference(device);
+
+success:
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
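+/*
+ * Unlock an allocation (struct d3dkmt_unlock2). For mapped allocations the
+ * CPU mapping refcount is dropped, the IO space mapping is released when it
+ * reaches zero, and the host is notified only on the last unlock.
+ */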
+static int dxgk_unlock2(struct dxgprocess *process, void *__user inargs)
+{
+	struct d3dkmt_unlock2 args;
+	int ret = 0;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+	struct dxgallocation *alloc = NULL;
+	bool done = false;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+	alloc = hmgrtable_get_object_by_type(&process->handle_table,
+					     HMGRENTRY_TYPE_DXGALLOCATION,
+					     args.allocation);
+	if (alloc == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+	} else {
+		if (alloc->cpu_address == NULL) {
+			pr_err("Allocation is not locked: %p", alloc);
+			ret = STATUS_INVALID_PARAMETER;
+		} else if (alloc->cpu_address_mapped) {
+			if (alloc->cpu_address_refcount > 0) {
+				alloc->cpu_address_refcount--;
+				if (alloc->cpu_address_refcount != 0) {
+					done = true;
+				} else {
+					dxg_unmap_iospace(alloc->cpu_address,
+							  alloc->
+							  num_pages <<
+							  PAGE_SHIFT);
+					alloc->cpu_address_mapped = false;
+					alloc->cpu_address = NULL;
+				}
+			} else {
+				pr_err("Bad cpu access refcount");
+				done = true;
+			}
+		}
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+	if (done)
+		goto success;
+	if (ret)
+		goto cleanup;
+
+	/*
+	 * The call acquires reference on the device. It is safe to access the
+	 * adapter, because the device holds reference on it.
+	 */
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_unlock2(process, &adapter->channel, &args);
+	if (ret)
+		goto cleanup;
+
+cleanup:
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+
+	if (device)
+		dxgdevice_release_reference(device);
+
+success:
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_update_alloc_property(struct dxgprocess *process,
+				      void *__user inargs)
+{
+	struct d3dddi_updateallocproperty args;
+	int ret = 0;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+						    args.paging_queue);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_update_alloc_property(process, &adapter->channel,
+						&args, inargs);
+
+cleanup:
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_mark_device_as_error(struct dxgprocess *process,
+				     void *__user inargs)
+{
+	struct d3dkmt_markdeviceaserror args;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+	ret = dxgvmb_send_mark_device_as_error(process, &adapter->channel,
+					       &args);
+cleanup:
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_query_alloc_residency(struct dxgprocess *process,
+				      void *__user inargs)
+{
+	struct d3dkmt_queryallocationresidency args;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
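+	/* Exactly one of allocation_count and resource must be set */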
+	if ((args.allocation_count == 0) == (args.resource == 0)) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+	ret = dxgvmb_send_query_alloc_residency(process, &adapter->channel,
+						&args);
+cleanup:
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_set_allocation_priority(struct dxgprocess *process,
+					void *__user inargs)
+{
+	struct d3dkmt_setallocationpriority args;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+	ret = dxgvmb_send_set_allocation_priority(process, &adapter->channel,
+						  &args);
+cleanup:
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_get_allocation_priority(struct dxgprocess *process,
+					void *__user inargs)
+{
+	struct d3dkmt_getallocationpriority args;
+	struct dxgadapter *adapter = NULL;
+	struct dxgdevice *device = NULL;
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+	ret = dxgvmb_send_get_allocation_priority(process, &adapter->channel,
+						  &args);
+cleanup:
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int set_context_scheduling_priority(struct dxgprocess *process,
+					   d3dkmt_handle hcontext,
+					   int priority, bool in_process)
+{
+	int ret = 0;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGCONTEXT,
+						    hcontext);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+	ret = dxgvmb_send_set_context_scheduling_priority(process,
+							  &adapter->channel,
+							  hcontext, priority,
+							  in_process);
+	if (ret)
+		pr_err("send_set_context_scheduling_priority failed");
+cleanup:
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	return ret;
+}
+
+static int dxgk_set_context_scheduling_priority(struct dxgprocess *process,
+						void *__user inargs)
+{
+	struct d3dkmt_setcontextschedulingpriority args;
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	ret = set_context_scheduling_priority(process, args.context,
+					      args.priority, false);
+cleanup:
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int get_context_scheduling_priority(struct dxgprocess *process,
+					   d3dkmt_handle hcontext,
+					   int __user *priority,
+					   bool in_process)
+{
+	int ret = 0;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	int pri = 0;
+
+	device = dxgprocess_device_by_object_handle(process,
+						    HMGRENTRY_TYPE_DXGCONTEXT,
+						    hcontext);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+	ret = dxgvmb_send_get_context_scheduling_priority(process,
+							  &adapter->channel,
+							  hcontext, &pri,
+							  in_process);
+	if (ret)
+		goto cleanup;
+	ret = dxg_copy_to_user(priority, &pri, sizeof(pri));
+
+cleanup:
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	return ret;
+}
+
+static int dxgk_get_context_scheduling_priority(struct dxgprocess *process,
+						void *__user inargs)
+{
+	struct d3dkmt_getcontextschedulingpriority args;
+	struct d3dkmt_getcontextschedulingpriority *__user input = inargs;
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	ret = get_context_scheduling_priority(process, args.context,
+					      &input->priority, false);
+cleanup:
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_set_context_process_scheduling_priority(struct dxgprocess
+							*process,
+							void *__user inargs)
+{
+	struct d3dkmt_setcontextinprocessschedulingpriority args;
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	ret = set_context_scheduling_priority(process, args.context,
+					      args.priority, true);
+cleanup:
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_get_context_process_scheduling_priority(struct dxgprocess
+							*process,
+							void *__user inargs)
+{
+	struct d3dkmt_getcontextinprocessschedulingpriority args;
+	struct d3dkmt_getcontextinprocessschedulingpriority *__user input =
+	    inargs;
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	ret = get_context_scheduling_priority(process, args.context,
+					      &input->priority, true);
+cleanup:
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_change_vidmem_reservation(struct dxgprocess *process,
+					  void *__user inargs)
+{
+	struct d3dkmt_changevideomemoryreservation args;
+	int ret;
+	struct dxgadapter *adapter = NULL;
+	bool adapter_locked = false;
+
+	TRACE_FUNC_ENTER(__func__);
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.process != 0) {
+		pr_err("setting memory reservation for other process");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = dxgprocess_adapter_by_handle(process, args.adapter);
+	if (adapter == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+	adapter_locked = true;
+	args.adapter = 0;
+	ret = dxgvmb_send_change_vidmem_reservation(process, &adapter->channel,
+						    0, &args);
+
+cleanup:
+
+	if (adapter_locked)
+		dxgadapter_release_lock_shared(adapter);
+	if (adapter)
+		dxgadapter_release_reference(adapter);
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_query_clock_calibration(struct dxgprocess *process,
+					void *__user inargs)
+{
+	struct d3dkmt_queryclockcalibration args;
+	int ret;
+	struct dxgadapter *adapter = NULL;
+	bool adapter_locked = false;
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	adapter = dxgprocess_adapter_by_handle(process, args.adapter);
+	if (adapter == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+	adapter_locked = true;
+
+	args.adapter = adapter->host_handle;
+	ret = dxgvmb_send_query_clock_calibration(process, &adapter->channel,
+						  &args, inargs);
+	if (ret)
+		goto cleanup;
+	ret = dxg_copy_to_user(inargs, &args, sizeof(args));
+
+cleanup:
+
+	if (adapter_locked)
+		dxgadapter_release_lock_shared(adapter);
+	if (adapter)
+		dxgadapter_release_reference(adapter);
+	return ret;
+}
+
+static int dxgk_flush_heap_transitions(struct dxgprocess *process,
+				       void *__user inargs)
+{
+	struct d3dkmt_flushheaptransitions args;
+	int ret;
+	struct dxgadapter *adapter = NULL;
+	bool adapter_locked = false;
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	adapter = dxgprocess_adapter_by_handle(process, args.adapter);
+	if (adapter == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+	adapter_locked = true;
+
+	args.adapter = adapter->host_handle;
+	ret = dxgvmb_send_flush_heap_transitions(process, &adapter->channel,
+						 &args);
+	if (ret)
+		goto cleanup;
+	ret = dxg_copy_to_user(inargs, &args, sizeof(args));
+
+cleanup:
+
+	if (adapter_locked)
+		dxgadapter_release_lock_shared(adapter);
+	if (adapter)
+		dxgadapter_release_reference(adapter);
+	return ret;
+}
+
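+/*
+ * Handle table test escape (D3DKMT_ESCAPE_DRT_TEST). Commands operate on
+ * per-process test handle tables, which are created on first use.
+ */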
+static int handle_table_escape(struct dxgprocess *process,
+			       struct d3dkmt_escape *args,
+			       struct d3dkmt_ht_desc *cmdin)
+{
+	int ret = 0;
+	struct d3dkmt_ht_desc cmd;
+	struct hmgrtable *table;
+
+	dxgmutex_lock(&process->process_mutex);
+	cmd = *cmdin;
+	if (cmd.index >= 2) {
+		pr_err("invalid table index");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	table = process->test_handle_table[cmd.index];
+	if (table == NULL) {
+		table = dxgmem_alloc(process, DXGMEM_HANDLE_TABLE,
+				     sizeof(*table));
+		if (table == NULL) {
+			pr_err("failed to allocate handle table");
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		hmgrtable_init(table, process);
+		process->test_handle_table[cmd.index] = table;
+	}
+	switch (cmd.command) {
+	case D3DKMT_HT_COMMAND_ALLOC:
+		cmd.handle = hmgrtable_alloc_handle_safe(table, cmd.object,
+							 (enum hmgrentry_type)
+							 cmd.object_type, true);
+		ret = dxg_copy_to_user(args->priv_drv_data, &cmd, sizeof(cmd));
+		break;
+	case D3DKMT_HT_COMMAND_FREE:
+		hmgrtable_free_handle_safe(table,
+					   (enum hmgrentry_type)cmd.object_type,
+					   cmd.handle);
+		break;
+	case D3DKMT_HT_COMMAND_ASSIGN:
+		ret = hmgrtable_assign_handle_safe(table, cmd.object,
+						   (enum hmgrentry_type)
+						   cmd.object_type, cmd.handle);
+		break;
+	case D3DKMT_HT_COMMAND_GET:
+		hmgrtable_lock(table, DXGLOCK_SHARED);
+		cmd.object = hmgrtable_get_object_by_type(table,
+							  (enum hmgrentry_type)
+							  cmd.object_type,
+							  cmd.handle);
+		hmgrtable_unlock(table, DXGLOCK_SHARED);
+		ret = dxg_copy_to_user(args->priv_drv_data, &cmd, sizeof(cmd));
+		break;
+	case D3DKMT_HT_COMMAND_DESTROY:
+		if (table) {
+			hmgrtable_destroy(table);
+			dxgmem_free(process, DXGMEM_HANDLE_TABLE, table);
+		}
+		process->test_handle_table[cmd.index] = NULL;
+		break;
+	default:
+		ret = STATUS_INVALID_PARAMETER;
+		pr_err("unknoen handle table command");
+		break;
+	}
+
+cleanup:
+	dxgmutex_unlock(&process->process_mutex);
+	return ret;
+}
+
+static int dxgk_escape(struct dxgprocess *process, void *__user inargs)
+{
+	struct d3dkmt_escape args;
+	int ret;
+	struct dxgadapter *adapter = NULL;
+	bool adapter_locked = false;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	adapter = dxgprocess_adapter_by_handle(process, args.adapter);
+	if (adapter == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+	adapter_locked = true;
+
+	if (args.type == D3DKMT_ESCAPE_DRT_TEST) {
+		struct d3dkmt_ht_desc drtcmd;
+
+		if (args.priv_drv_data_size >= sizeof(drtcmd)) {
+			ret = dxg_copy_from_user(&drtcmd,
+						 args.priv_drv_data,
+						 sizeof(drtcmd));
+			if (ret)
+				goto cleanup;
+			if (drtcmd.head.command ==
+			    D3DKMT_DRT_TEST_COMMAND_HANDLETABLE) {
+				dxgadapter_release_lock_shared(adapter);
+				adapter_locked = false;
+				ret = handle_table_escape(process, &args,
+							  &drtcmd);
+				goto cleanup;
+			}
+		}
+	}
+
+	args.adapter = adapter->host_handle;
+	ret = dxgvmb_send_escape(process, &adapter->channel, &args);
+	if (ret)
+		goto cleanup;
+
+cleanup:
+
+	if (adapter_locked)
+		dxgadapter_release_lock_shared(adapter);
+	if (adapter)
+		dxgadapter_release_reference(adapter);
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_query_vidmem_info(struct dxgprocess *process,
+				  void *__user inargs)
+{
+	struct d3dkmt_queryvideomemoryinfo args;
+	int ret;
+	struct dxgadapter *adapter = NULL;
+	bool adapter_locked = false;
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	if (args.process != 0) {
+		pr_err("query vidmem info from another process");
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = dxgprocess_adapter_by_handle(process, args.adapter);
+	if (adapter == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+	adapter_locked = true;
+
+	args.adapter = adapter->host_handle;
+	ret = dxgvmb_send_query_vidmem_info(process, &adapter->channel,
+					    &args, inargs);
+
+cleanup:
+
+	if (adapter_locked)
+		dxgadapter_release_lock_shared(adapter);
+	if (adapter)
+		dxgadapter_release_reference(adapter);
+	if (ret)
+		pr_err("%s failed: %x", __func__, ret);
+	return ret;
+}
+
+static int dxgk_get_device_state(struct dxgprocess *process,
+				 void *__user inargs)
+{
+	int ret = 0;
+	struct d3dkmt_getdevicestate args;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgvmb_send_get_device_state(process, &adapter->channel,
+					   &args, inargs);
+
+cleanup:
+
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_reference(device);
+	if (ret)
+		pr_err("%s failed %x", __func__, ret);
+
+	return ret;
+}
+
+static int dxgsharedsyncobj_get_host_nt_handle(struct dxgsharedsyncobject
+					       *syncobj,
+					       struct dxgprocess *process,
+					       d3dkmt_handle object_handle)
+{
+	int ret = 0;
+
+	dxgmutex_lock(&syncobj->fd_mutex);
+	if (syncobj->host_shared_handle_nt_reference == 0) {
+		ret = dxgvmb_send_create_nt_shared_object(process,
+					object_handle,
+					&syncobj->host_shared_handle_nt);
+		if (ret)
+			goto cleanup;
+		TRACE_DEBUG(1, "Host_shared_handle_ht: %x",
+			    syncobj->host_shared_handle_nt);
+		dxgsharedsyncobj_acquire_reference(syncobj);
+	}
+	syncobj->host_shared_handle_nt_reference++;
+cleanup:
+	dxgmutex_unlock(&syncobj->fd_mutex);
+	return ret;
+}
+
+static int dxgsharedresource_get_host_nt_handle(struct dxgsharedresource
+						*resource,
+						struct dxgprocess *process,
+						d3dkmt_handle object_handle)
+{
+	int ret = 0;
+
+	dxgmutex_lock(&resource->fd_mutex);
+	if (resource->host_shared_handle_nt_reference == 0) {
+		ret = dxgvmb_send_create_nt_shared_object(process,
+					object_handle,
+					&resource->host_shared_handle_nt);
+		if (ret)
+			goto cleanup;
+		TRACE_DEBUG(1, "Resource host_shared_handle_ht: %x",
+			    resource->host_shared_handle_nt);
+		dxgsharedresource_acquire_reference(resource);
+	}
+	resource->host_shared_handle_nt_reference++;
+cleanup:
+	dxgmutex_unlock(&resource->fd_mutex);
+	return ret;
+}
+
+enum dxg_sharedobject_type {
+	DXG_SHARED_SYNCOBJECT,
+	DXG_SHARED_RESOURCE
+};
+
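+/*
+ * Reserve a file descriptor and wrap a shared object into an anonymous
+ * inode file.  The descriptor is not published here: the caller calls
+ * fd_install() only after every other step of the share operation has
+ * succeeded, because an installed descriptor cannot be taken back.
+ */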
+static int get_object_fd(enum dxg_sharedobject_type type,
+			 void *object, int *fdout, struct file **fileout)
+{
+	struct file *file;
+	int fd;
+
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0) {
+		pr_err("get_unused_fd_flags failed: %d", fd);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	switch (type) {
+	case DXG_SHARED_SYNCOBJECT:
+		file = anon_inode_getfile("dxgsyncobj",
+					  &dxg_syncobj_fops, object, 0);
+		break;
+	case DXG_SHARED_RESOURCE:
+		file = anon_inode_getfile("dxgresource",
+					  &dxg_resource_fops, object, 0);
+		break;
+	default:
+		put_unused_fd(fd);
+		return STATUS_INVALID_PARAMETER;
+	}
+	if (IS_ERR(file)) {
+		pr_err("anon_inode_getfile failed: %ld", PTR_ERR(file));
+		put_unused_fd(fd);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	*fdout = fd;
+	*fileout = file;
+	return 0;
+}
+
+static int dxgk_share_objects(struct dxgprocess *process, void *__user inargs)
+{
+	struct d3dkmt_shareobjects args;
+	enum hmgrentry_type object_type;
+	struct dxgsyncobject *syncobj = NULL;
+	struct dxgresource *resource = NULL;
+	struct dxgsharedsyncobject *shared_syncobj = NULL;
+	struct dxgsharedresource *shared_resource = NULL;
+	d3dkmt_handle *handles = NULL;
+	int object_fd = -1;
+	struct file *object_file = NULL;
+	void *obj = NULL;
+	uint handle_size;
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	/* Only sharing a single object at a time is supported */
+	if (args.object_count != 1) {
+		pr_err("invalid object count %d", args.object_count);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	handle_size = args.object_count * sizeof(d3dkmt_handle);
+
+	handles = dxgmem_alloc(process, DXGMEM_TMP, handle_size);
+	if (handles == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	ret = dxg_copy_from_user(handles, args.objects, handle_size);
+	if (ret)
+		goto cleanup;
+
+	TRACE_DEBUG(1, "Sharing handle: %x", handles[0]);
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+	object_type = hmgrtable_get_object_type(&process->handle_table,
+						handles[0]);
+	obj = hmgrtable_get_object(&process->handle_table, handles[0]);
+	if (obj == NULL) {
+		pr_err("invalid object handle %x", handles[0]);
+		ret = STATUS_INVALID_PARAMETER;
+	} else {
+		switch (object_type) {
+		case HMGRENTRY_TYPE_DXGSYNCOBJECT:
+			syncobj = obj;
+			if (syncobj->shared) {
+				dxgsyncobject_acquire_reference(syncobj);
+				shared_syncobj = syncobj->shared_owner;
+			} else {
+				pr_err("sync object is not shared");
+				syncobj = NULL;
+				ret = STATUS_INVALID_PARAMETER;
+			}
+			break;
+		case HMGRENTRY_TYPE_DXGRESOURCE:
+			resource = obj;
+			if (resource->shared_owner) {
+				dxgresource_acquire_reference(resource);
+				shared_resource = resource->shared_owner;
+			} else {
+				resource = NULL;
+				pr_err("resource object is not shared");
+				ret = STATUS_INVALID_PARAMETER;
+			}
+			break;
+		default:
+			pr_err("invalid object type %d", object_type);
+			ret = STATUS_INVALID_PARAMETER;
+			break;
+		}
+	}
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+
+	if (ret)
+		goto cleanup;
+
+	switch (object_type) {
+	case HMGRENTRY_TYPE_DXGSYNCOBJECT:
+		ret = get_object_fd(DXG_SHARED_SYNCOBJECT, shared_syncobj,
+				    &object_fd, &object_file);
+		if (!ret)
+			ret = dxgsharedsyncobj_get_host_nt_handle(
+					shared_syncobj, process, handles[0]);
+		break;
+	case HMGRENTRY_TYPE_DXGRESOURCE:
+		ret = get_object_fd(DXG_SHARED_RESOURCE, shared_resource,
+				    &object_fd, &object_file);
+		if (!ret)
+			ret = dxgsharedresource_get_host_nt_handle(
+					shared_resource, process, handles[0]);
+		break;
+	default:
+		ret = STATUS_INVALID_PARAMETER;
+		break;
+	}
+
+	if (ret)
+		goto cleanup;
+
+	TRACE_DEBUG(1, "Object FD: %x", object_fd);
+
+	{
+		winhandle tmp = (winhandle) object_fd;
+
+		ret = dxg_copy_to_user(args.shared_handle, &tmp,
+				       sizeof(winhandle));
+	}
+
+cleanup:
+	if (ret) {
+		if (object_file)
+			fput(object_file);
+		if (object_fd >= 0)
+			put_unused_fd(object_fd);
+	}
+
+	if (handles)
+		dxgmem_free(process, DXGMEM_TMP, handles);
+
+	if (syncobj)
+		dxgsyncobject_release_reference(syncobj);
+
+	if (resource)
+		dxgresource_release_reference(resource);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_invalidate_cache(struct dxgprocess *process,
+				 void *__user inargs)
+{
+	pr_err("%s is not implemented", __func__);
+	return STATUS_NOT_IMPLEMENTED;
+}
+
+static int dxgk_query_resource_info(struct dxgprocess *process,
+				    void *__user inargs)
+{
+	struct d3dkmt_queryresourceinfo args;
+	struct dxgdevice *device = NULL;
+	struct dxgsharedresource *shared_resource = NULL;
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	hmgrtable_lock(&dxgglobal->handle_table, DXGLOCK_SHARED);
+	shared_resource = hmgrtable_get_object_by_type(&dxgglobal->handle_table,
+						       HMGRENTRY_TYPE_DXGSHAREDRESOURCE,
+						       args.global_share);
+	if (shared_resource) {
+		if (!dxgsharedresource_acquire_reference(shared_resource))
+			shared_resource = NULL;
+	}
+	hmgrtable_unlock(&dxgglobal->handle_table, DXGLOCK_SHARED);
+
+	if (shared_resource == NULL) {
+		pr_err("Invalid shared resource handle: %x",
+			   args.global_share);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgdevice_acquire_lock_shared(device);
+	if (ret) {
+		dxgdevice_release_reference(device);
+		device = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgsharedresource_seal(shared_resource);
+	if (ret)
+		goto cleanup;
+
+	args.private_runtime_data_size =
+	    shared_resource->runtime_private_data_size;
+	args.resource_priv_drv_data_size =
+	    shared_resource->resource_private_data_size;
+	args.allocation_count = shared_resource->allocation_count;
+	args.total_priv_drv_data_size =
+	    shared_resource->alloc_private_data_size;
+
+	ret = dxg_copy_to_user(inargs, &args, sizeof(args));
+
+cleanup:
+
+	if (shared_resource)
+		dxgsharedresource_release_reference(shared_resource);
+	if (device)
+		dxgdevice_release_lock_shared(device);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_query_resource_info_nt(struct dxgprocess *process,
+				       void *__user inargs)
+{
+	struct d3dkmt_queryresourceinfofromnthandle args;
+	int ret;
+	struct dxgdevice *device = NULL;
+	struct dxgsharedresource *shared_resource = NULL;
+	struct file *file = NULL;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	file = fget(args.nt_handle);
+	if (!file) {
+		pr_err("failed to get file from handle: %llx",
+			   args.nt_handle);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	if (file->f_op != &dxg_resource_fops) {
+		pr_err("invalid fd: %llx", args.nt_handle);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	shared_resource = file->private_data;
+	if (shared_resource == NULL) {
+		pr_err("invalid private data: %llx", args.nt_handle);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	device = dxgprocess_device_by_handle(process, args.device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgdevice_acquire_lock_shared(device);
+	if (ret) {
+		dxgdevice_release_reference(device);
+		device = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgsharedresource_seal(shared_resource);
+	if (ret)
+		goto cleanup;
+
+	args.private_runtime_data_size =
+	    shared_resource->runtime_private_data_size;
+	args.resource_priv_drv_data_size =
+	    shared_resource->resource_private_data_size;
+	args.allocation_count = shared_resource->allocation_count;
+	args.total_priv_drv_data_size =
+	    shared_resource->alloc_private_data_size;
+
+	ret = dxg_copy_to_user(inargs, &args, sizeof(args));
+
+cleanup:
+
+	if (file)
+		fput(file);
+	if (device)
+		dxgdevice_release_lock_shared(device);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
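+/*
+ * Publish the opened resource and its allocations in the process handle
+ * table and copy each allocation's open info (its new handle plus a
+ * slice of the shared private driver data) back to the caller's
+ * open_alloc_info array.
+ */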
+int assign_resource_handles(struct dxgprocess *process,
+			    struct dxgsharedresource *shared_resource,
+			    struct d3dkmt_openresourcefromnthandle *args,
+			    d3dkmt_handle resource_handle,
+			    struct dxgresource *resource,
+			    struct dxgallocation **allocs,
+			    d3dkmt_handle *handles)
+{
+	int ret = 0;
+	int i;
+	uint8_t *cur_priv_data;
+	struct d3dddi_openallocationinfo2 open_alloc_info = { };
+
+	TRACE_DEBUG(1, "%s", __func__);
+
+	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+	ret = hmgrtable_assign_handle(&process->handle_table, resource,
+				      HMGRENTRY_TYPE_DXGRESOURCE,
+				      resource_handle);
+	if (ret)
+		goto cleanup;
+	resource->handle = resource_handle;
+	resource->handle_valid = 1;
+	cur_priv_data = shared_resource->alloc_private_data;
+	for (i = 0; i < args->allocation_count; i++) {
+		ret = hmgrtable_assign_handle(&process->handle_table, allocs[i],
+					      HMGRENTRY_TYPE_DXGALLOCATION,
+					      handles[i]);
+		if (ret)
+			goto cleanup;
+		allocs[i]->alloc_handle = handles[i];
+		allocs[i]->handle_valid = 1;
+		open_alloc_info.allocation = handles[i];
+		if (shared_resource->alloc_private_data_sizes)
+			open_alloc_info.priv_drv_data_size =
+			    shared_resource->alloc_private_data_sizes[i];
+		else
+			open_alloc_info.priv_drv_data_size = 0;
+
+		open_alloc_info.priv_drv_data = cur_priv_data;
+		cur_priv_data += open_alloc_info.priv_drv_data_size;
+
+		ret = dxg_copy_to_user(&args->open_alloc_info[i],
+				       &open_alloc_info,
+				       sizeof(open_alloc_info));
+		if (ret)
+			goto cleanup;
+	}
+cleanup:
+	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+	if (ret) {
+		for (i = 0; i < args->allocation_count; i++)
+			dxgallocation_free_handle(allocs[i]);
+		dxgresource_free_handle(resource);
+	}
+	TRACE_DEBUG(1, "%s end %x", __func__, ret);
+	return ret;
+}
+
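+/*
+ * Open a shared resource for the calling process.  When nt_handle is
+ * true, args->nt_handle is a file descriptor created by
+ * dxgk_share_objects(); otherwise it is the global handle of the shared
+ * resource.  On success the new local resource handle is written to
+ * res_out.
+ */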
+int open_resource(struct dxgprocess *process,
+		  struct d3dkmt_openresourcefromnthandle *args,
+		  bool nt_handle, __user d3dkmt_handle *res_out)
+{
+	int ret = 0;
+	int i;
+	d3dkmt_handle *alloc_handles = NULL;
+	int alloc_handles_size = sizeof(d3dkmt_handle) * args->allocation_count;
+	struct dxgsharedresource *shared_resource = NULL;
+	struct dxgresource *resource = NULL;
+	struct dxgallocation **allocs = NULL;
+	d3dkmt_handle global_share;
+	struct dxgdevice *device = NULL;
+	struct dxgadapter *adapter = NULL;
+	d3dkmt_handle resource_handle = 0;
+	struct file *file = NULL;
+
+	TRACE_DEBUG(1, "Opening resource handle: %llx", args->nt_handle);
+
+	if (nt_handle) {
+		file = fget(args->nt_handle);
+		if (!file) {
+			pr_err("failed to get file from handle: %llx",
+				   args->nt_handle);
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		if (file->f_op != &dxg_resource_fops) {
+			pr_err("invalid fd type: %llx", args->nt_handle);
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		shared_resource = file->private_data;
+		if (shared_resource == NULL) {
+			pr_err("invalid private data: %llx",
+				   args->nt_handle);
+			ret = STATUS_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		if (!dxgsharedresource_acquire_reference(shared_resource))
+			shared_resource = NULL;
+		else
+			global_share = shared_resource->host_shared_handle_nt;
+	} else {
+		hmgrtable_lock(&dxgglobal->handle_table, DXGLOCK_SHARED);
+		shared_resource =
+		    hmgrtable_get_object_by_type(&dxgglobal->handle_table,
+						 HMGRENTRY_TYPE_DXGSHAREDRESOURCE,
+						 (d3dkmt_handle) args->
+						 nt_handle);
+		if (shared_resource) {
+			if (!dxgsharedresource_acquire_reference
+			    (shared_resource))
+				shared_resource = NULL;
+			else
+				global_share =
+				    shared_resource->host_shared_handle;
+		}
+		hmgrtable_unlock(&dxgglobal->handle_table, DXGLOCK_SHARED);
+	}
+
+	if (shared_resource == NULL) {
+		pr_err("Invalid shared resource handle: %x",
+			   (d3dkmt_handle) args->nt_handle);
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	TRACE_DEBUG(1, "Shared resource: %p %x", shared_resource,
+		    global_share);
+
+	device = dxgprocess_device_by_handle(process, args->device);
+	if (device == NULL) {
+		ret = STATUS_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	ret = dxgdevice_acquire_lock_shared(device);
+	if (ret) {
+		dxgdevice_release_reference(device);
+		device = NULL;
+		goto cleanup;
+	}
+
+	adapter = device->adapter;
+	ret = dxgadapter_acquire_lock_shared(adapter);
+	if (ret) {
+		adapter = NULL;
+		goto cleanup;
+	}
+
+	ret = dxgsharedresource_seal(shared_resource);
+	if (ret)
+		goto cleanup;
+
+	if (args->allocation_count != shared_resource->allocation_count ||
+	    args->private_runtime_data_size <
+	    shared_resource->runtime_private_data_size ||
+	    args->resource_priv_drv_data_size <
+	    shared_resource->resource_private_data_size ||
+	    args->total_priv_drv_data_size <
+	    shared_resource->alloc_private_data_size) {
+		ret = STATUS_INVALID_PARAMETER;
+		pr_err("Invalid data sizes");
+		goto cleanup;
+	}
+
+	alloc_handles = dxgmem_alloc(process, DXGMEM_TMP, alloc_handles_size);
+	if (alloc_handles == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	allocs = dxgmem_alloc(process, DXGMEM_TMP,
+			      sizeof(void *) * args->allocation_count);
+	if (allocs == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+
+	resource = dxgresource_create(device);
+	if (resource == NULL) {
+		ret = STATUS_NO_MEMORY;
+		goto cleanup;
+	}
+	dxgsharedresource_add_resource(shared_resource, resource);
+
+	for (i = 0; i < args->allocation_count; i++) {
+		allocs[i] = dxgallocation_create(process);
+		if (allocs[i] == NULL) {
+			ret = STATUS_NO_MEMORY;
+			goto cleanup;
+		}
+		dxgresource_add_alloc(resource, allocs[i]);
+	}
+
+	ret = dxgvmb_send_open_resource(process, &adapter->channel,
+					device->handle, nt_handle, global_share,
+					args->allocation_count,
+					args->total_priv_drv_data_size,
+					&resource_handle, alloc_handles);
+	if (ret) {
+		pr_err("dxgvmb_send_open_resource failed");
+		goto cleanup;
+	}
+
+	if (shared_resource->runtime_private_data_size) {
+		ret = dxg_copy_to_user(args->private_runtime_data,
+				       shared_resource->runtime_private_data,
+				       shared_resource->
+				       runtime_private_data_size);
+		if (ret)
+			goto cleanup;
+	}
+
+	if (shared_resource->resource_private_data_size) {
+		ret = dxg_copy_to_user(args->resource_priv_drv_data,
+				       shared_resource->resource_private_data,
+				       shared_resource->
+				       resource_private_data_size);
+		if (ret)
+			goto cleanup;
+	}
+
+	if (shared_resource->alloc_private_data_size) {
+		ret = dxg_copy_to_user(args->total_priv_drv_data,
+				       shared_resource->alloc_private_data,
+				       shared_resource->
+				       alloc_private_data_size);
+		if (ret)
+			goto cleanup;
+	}
+
+	ret = assign_resource_handles(process, shared_resource, args,
+				      resource_handle, resource, allocs,
+				      alloc_handles);
+	if (ret)
+		goto cleanup;
+
+	ret = dxg_copy_to_user(res_out, &resource_handle,
+			       sizeof(d3dkmt_handle));
+
+cleanup:
+
+	if (ret) {
+		if (resource_handle) {
+			struct d3dkmt_destroyallocation2 tmp = { };
+
+			tmp.flags.assume_not_in_use = 1;
+			tmp.device = args->device;
+			tmp.resource = resource_handle;
+			/* Best effort cleanup; preserve the original error */
+			dxgvmb_send_destroy_allocation(process, device,
+						       &adapter->channel,
+						       &tmp, NULL);
+		}
+		if (resource)
+			dxgresource_destroy(resource);
+	}
+
+	if (file)
+		fput(file);
+	if (allocs)
+		dxgmem_free(process, DXGMEM_TMP, allocs);
+	if (shared_resource)
+		dxgsharedresource_release_reference(shared_resource);
+	if (alloc_handles)
+		dxgmem_free(process, DXGMEM_TMP, alloc_handles);
+	if (adapter)
+		dxgadapter_release_lock_shared(adapter);
+	if (device)
+		dxgdevice_release_lock_shared(device);
+	if (device)
+		dxgdevice_release_reference(device);
+
+	return ret;
+}
+
+static int dxgk_open_resource(struct dxgprocess *process, void *__user inargs)
+{
+	struct d3dkmt_openresource args;
+	struct d3dkmt_openresourcefromnthandle args_nt = { };
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	args_nt.device = args.device;
+	args_nt.nt_handle = (winhandle) args.global_share;
+	args_nt.allocation_count = args.allocation_count;
+	args_nt.open_alloc_info = args.open_alloc_info;
+	args_nt.private_runtime_data_size = args.private_runtime_data_size;
+	args_nt.private_runtime_data = args.private_runtime_data;
+	args_nt.resource_priv_drv_data_size = args.resource_priv_drv_data_size;
+	args_nt.resource_priv_drv_data = args.resource_priv_drv_data;
+	args_nt.total_priv_drv_data_size = args.total_priv_drv_data_size;
+	args_nt.total_priv_drv_data = args.total_priv_drv_data;
+
+	ret = open_resource(process, &args_nt, false,
+			    &((struct d3dkmt_openresource *)inargs)->resource);
+
+cleanup:
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static ntstatus dxgk_open_resource_nt(struct dxgprocess *process,
+				      void *__user inargs)
+{
+	struct d3dkmt_openresourcefromnthandle args;
+	int ret;
+
+	TRACE_FUNC_ENTER(__func__);
+
+	ret = dxg_copy_from_user(&args, inargs, sizeof(args));
+	if (ret)
+		goto cleanup;
+
+	ret = open_resource(process, &args, true,
+			    &((struct d3dkmt_openresourcefromnthandle *)
+			      inargs)->resource);
+
+cleanup:
+
+	TRACE_FUNC_EXIT(__func__, ret);
+	return ret;
+}
+
+static int dxgk_render(struct dxgprocess *process, void *__user inargs)
+{
+	pr_err("%s is not implemented", __func__);
+	return STATUS_NOT_IMPLEMENTED;
+}
+
+static int dxgk_create_context(struct dxgprocess *process, void *__user inargs)
+{
+	pr_err("%s is not implemented", __func__);
+	return STATUS_NOT_IMPLEMENTED;
+}
+
+static int dxgk_get_shared_resource_adapter_luid(struct dxgprocess *process,
+						 void *__user inargs)
+{
+	pr_err("shared_resource_adapter_luid is not implemented");
+	return STATUS_NOT_IMPLEMENTED;
+}
+
+/*
+ * IOCTL processing.
+ * Calls are dispatched through the ioctls[] table, indexed by the
+ * _IOC_NR() bits of the ioctl code; ioctl_desc_init() fills the table.
+ */
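+
+/*
+ * A sketch of how userspace reaches these entry points, assuming the
+ * device node created by this driver is /dev/dxg:
+ *
+ *	int fd = open("/dev/dxg", O_RDWR | O_CLOEXEC);
+ *	struct d3dkmt_queryadapterinfo args = { ... };
+ *	int ret = ioctl(fd, LX_DXQUERYADAPTERINFO, &args);
+ *
+ * _IOC_NR(LX_DXQUERYADAPTERINFO) is 0x9, so dxgk_ioctl() routes the
+ * call to dxgk_query_adapter_info() through the table below.
+ */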
+static int dxgk_ioctl(struct file *f, unsigned int p1, unsigned long p2)
+{
+	struct dxgthreadinfo *thread;
+	int code = _IOC_NR(p1);
+	ntstatus status;
+	struct dxgprocess *process;
+
+	if (code < 1 || code > LX_IO_MAX) {
+		pr_err("bad ioctl %x %x %x %x",
+			   code, _IOC_TYPE(p1), _IOC_SIZE(p1), _IOC_DIR(p1));
+		return STATUS_INVALID_PARAMETER;
+	}
+	if (ioctls[code].ioctl_callback == NULL) {
+		pr_err("ioctl callback is NULL %x", code);
+		return STATUS_INTERNAL_ERROR;
+	}
+	if (ioctls[code].ioctl != p1) {
+		pr_err("ioctl mismatch. Code: %x User: %x Kernel: %x",
+			   code, p1, ioctls[code].ioctl);
+		return STATUS_INTERNAL_ERROR;
+	}
+	process = (struct dxgprocess *)f->private_data;
+	if (process->tgid != current->tgid) {
+		pr_err("Call from a wrong process: %d %d",
+			   process->tgid, current->tgid);
+		return STATUS_INVALID_PARAMETER;
+	}
+	thread = dxglockorder_get_thread();
+	status = ioctls[code].ioctl_callback(process, (void *__user)p2);
+	dxglockorder_check_empty(thread);
+	dxglockorder_put_thread(thread);
+	return status;
+}
+
+long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2)
+{
+	TRACE_DEBUG(2, "compat ioctl %x", p1);
+	return dxgk_ioctl(f, p1, p2);
+}
+
+long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2)
+{
+	TRACE_DEBUG(2, "unlocked ioctl %x Code:%d", p1, _IOC_NR(p1));
+	return dxgk_ioctl(f, p1, p2);
+}
+
+#define SET_IOCTL(callback, v)					\
+do {								\
+	ioctls[_IOC_NR(v)].ioctl_callback = callback;		\
+	ioctls[_IOC_NR(v)].ioctl = v;				\
+} while (0)
+
+void ioctl_desc_init(void)
+{
+	memset(ioctls, 0, sizeof(ioctls));
+	SET_IOCTL(/*0x1 */ dxgk_open_adapter_from_luid,
+		  LX_DXOPENADAPTERFROMLUID);
+	SET_IOCTL(/*0x2 */ dxgk_create_device,
+		  LX_DXCREATEDEVICE);
+	SET_IOCTL(/*0x3 */ dxgk_create_context,
+		  LX_DXCREATECONTEXT);
+	SET_IOCTL(/*0x4 */ dxgk_create_context_virtual,
+		  LX_DXCREATECONTEXTVIRTUAL);
+	SET_IOCTL(/*0x5 */ dxgk_destroy_context,
+		  LX_DXDESTROYCONTEXT);
+	SET_IOCTL(/*0x6 */ dxgk_create_allocation,
+		  LX_DXCREATEALLOCATION);
+	SET_IOCTL(/*0x7 */ dxgk_create_paging_queue,
+		  LX_DXCREATEPAGINGQUEUE);
+	SET_IOCTL(/*0x8 */ dxgk_reserve_gpu_va,
+		  LX_DXRESERVEGPUVIRTUALADDRESS);
+	SET_IOCTL(/*0x9 */ dxgk_query_adapter_info,
+		  LX_DXQUERYADAPTERINFO);
+	SET_IOCTL(/*0xa */ dxgk_query_vidmem_info,
+		  LX_DXQUERYVIDEOMEMORYINFO);
+	SET_IOCTL(/*0xb */ dxgk_make_resident,
+		  LX_DXMAKERESIDENT);
+	SET_IOCTL(/*0xc */ dxgk_map_gpu_va,
+		  LX_DXMAPGPUVIRTUALADDRESS);
+	SET_IOCTL(/*0xd */ dxgk_escape,
+		  LX_DXESCAPE);
+	SET_IOCTL(/*0xe */ dxgk_get_device_state,
+		  LX_DXGETDEVICESTATE);
+	SET_IOCTL(/*0xf */ dxgk_submit_command,
+		  LX_DXSUBMITCOMMAND);
+	SET_IOCTL(/*0x10 */ dxgk_create_sync_object,
+		  LX_DXCREATESYNCHRONIZATIONOBJECT);
+	SET_IOCTL(/*0x11 */ dxgk_signal_sync_object,
+		  LX_DXSIGNALSYNCHRONIZATIONOBJECT);
+	SET_IOCTL(/*0x12 */ dxgk_wait_sync_object,
+		  LX_DXWAITFORSYNCHRONIZATIONOBJECT);
+	SET_IOCTL(/*0x13 */ dxgk_destroy_allocation,
+		  LX_DXDESTROYALLOCATION2);
+	SET_IOCTL(/*0x14 */ dxgk_enum_adapters,
+		  LX_DXENUMADAPTERS2);
+	SET_IOCTL(/*0x15 */ dxgk_close_adapter,
+		  LX_DXCLOSEADAPTER);
+	SET_IOCTL(/*0x16 */ dxgk_change_vidmem_reservation,
+		  LX_DXCHANGEVIDEOMEMORYRESERVATION);
+	SET_IOCTL(/*0x17 */ dxgk_create_hwcontext,
+		  LX_DXCREATEHWCONTEXT);
+	SET_IOCTL(/*0x18 */ dxgk_create_hwqueue,
+		  LX_DXCREATEHWQUEUE);
+	SET_IOCTL(/*0x19 */ dxgk_destroy_device,
+		  LX_DXDESTROYDEVICE);
+	SET_IOCTL(/*0x1a */ dxgk_destroy_hwcontext,
+		  LX_DXDESTROYHWCONTEXT);
+	SET_IOCTL(/*0x1b */ dxgk_destroy_hwqueue,
+		  LX_DXDESTROYHWQUEUE);
+	SET_IOCTL(/*0x1c */ dxgk_destroy_paging_queue,
+		  LX_DXDESTROYPAGINGQUEUE);
+	SET_IOCTL(/*0x1d */ dxgk_destroy_sync_object,
+		  LX_DXDESTROYSYNCHRONIZATIONOBJECT);
+	SET_IOCTL(/*0x1e */ dxgk_evict,
+		  LX_DXEVICT);
+	SET_IOCTL(/*0x1f */ dxgk_flush_heap_transitions,
+		  LX_DXFLUSHHEAPTRANSITIONS);
+	SET_IOCTL(/*0x20 */ dxgk_free_gpu_va,
+		  LX_DXFREEGPUVIRTUALADDRESS);
+	SET_IOCTL(/*0x21 */ dxgk_get_context_process_scheduling_priority,
+		  LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY);
+	SET_IOCTL(/*0x22 */ dxgk_get_context_scheduling_priority,
+		  LX_DXGETCONTEXTSCHEDULINGPRIORITY);
+	SET_IOCTL(/*0x23 */ dxgk_get_shared_resource_adapter_luid,
+		  LX_DXGETSHAREDRESOURCEADAPTERLUID);
+	SET_IOCTL(/*0x24 */ dxgk_invalidate_cache,
+		  LX_DXINVALIDATECACHE);
+	SET_IOCTL(/*0x25 */ dxgk_lock2,
+		  LX_DXLOCK2);
+	SET_IOCTL(/*0x26 */ dxgk_mark_device_as_error,
+		  LX_DXMARKDEVICEASERROR);
+	SET_IOCTL(/*0x27 */ dxgk_offer_allocations,
+		  LX_DXOFFERALLOCATIONS);
+	SET_IOCTL(/*0x28 */ dxgk_open_resource,
+		  LX_DXOPENRESOURCE);
+	SET_IOCTL(/*0x29 */ dxgk_open_sync_object,
+		  LX_DXOPENSYNCHRONIZATIONOBJECT);
+	SET_IOCTL(/*0x2a */ dxgk_query_alloc_residency,
+		  LX_DXQUERYALLOCATIONRESIDENCY);
+	SET_IOCTL(/*0x2b */ dxgk_query_resource_info,
+		  LX_DXQUERYRESOURCEINFO);
+	SET_IOCTL(/*0x2c */ dxgk_reclaim_allocations,
+		  LX_DXRECLAIMALLOCATIONS2);
+	SET_IOCTL(/*0x2d */ dxgk_render,
+		  LX_DXRENDER);
+	SET_IOCTL(/*0x2e */ dxgk_set_allocation_priority,
+		  LX_DXSETALLOCATIONPRIORITY);
+	SET_IOCTL(/*0x2f */ dxgk_set_context_process_scheduling_priority,
+		  LX_DXSETCONTEXTINPROCESSSCHEDULINGPRIORITY);
+	SET_IOCTL(/*0x30 */ dxgk_set_context_scheduling_priority,
+		  LX_DXSETCONTEXTSCHEDULINGPRIORITY);
+	SET_IOCTL(/*0x31 */ dxgk_signal_sync_object_cpu,
+		  LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU);
+	SET_IOCTL(/*0x32 */ dxgk_signal_sync_object_gpu,
+		  LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU);
+	SET_IOCTL(/*0x33 */ dxgk_signal_sync_object_gpu2,
+		  LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2);
+	SET_IOCTL(/*0x34 */ dxgk_submit_command_to_hwqueue,
+		  LX_DXSUBMITCOMMANDTOHWQUEUE);
+	SET_IOCTL(/*0x35 */ dxgk_submit_wait_to_hwqueue,
+		  LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE);
+	SET_IOCTL(/*0x36 */ dxgk_submit_signal_to_hwqueue,
+		  LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE);
+	SET_IOCTL(/*0x37 */ dxgk_unlock2,
+		  LX_DXUNLOCK2);
+	SET_IOCTL(/*0x38 */ dxgk_update_alloc_property,
+		  LX_DXUPDATEALLOCPROPERTY);
+	SET_IOCTL(/*0x39 */ dxgk_update_gpu_va,
+		  LX_DXUPDATEGPUVIRTUALADDRESS);
+	SET_IOCTL(/*0x3a */ dxgk_wait_sync_object_cpu,
+		  LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU);
+	SET_IOCTL(/*0x3b */ dxgk_wait_sync_object_gpu,
+		  LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU);
+	SET_IOCTL(/*0x3c */ dxgk_get_allocation_priority,
+		  LX_DXGETALLOCATIONPRIORITY);
+	SET_IOCTL(/*0x3d */ dxgk_query_clock_calibration,
+		  LX_DXQUERYCLOCKCALIBRATION);
+	SET_IOCTL(/*0x3e */ dxgk_enum_adapters3,
+		  LX_DXENUMADAPTERS3);
+	SET_IOCTL(/*0x3f */ dxgk_share_objects,
+		  LX_DXSHAREOBJECTS);
+	SET_IOCTL(/*0x40 */ dxgk_open_sync_object_nt,
+		  LX_DXOPENSYNCOBJECTFROMNTHANDLE2);
+	SET_IOCTL(/*0x41 */ dxgk_query_resource_info_nt,
+		  LX_DXQUERYRESOURCEINFOFROMNTHANDLE);
+	SET_IOCTL(/*0x42 */ dxgk_open_resource_nt,
+		  LX_DXOPENRESOURCEFROMNTHANDLE);
+}
diff --git a/drivers/gpu/dxgkrnl/misc.c b/drivers/gpu/dxgkrnl/misc.c
new file mode 100644
index 000000000000..6f88e0108612
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/misc.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ *   Iouri Tarassov <iourit at microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Helper functions
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+
+#include "dxgkrnl.h"
+#include "misc.h"
+
+static atomic_t dxg_memory[DXGMEM_LAST];
+
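+/*
+ * Copy a UTF-16 string and always NUL-terminate the destination within
+ * n elements.  Unlike the C library wcsncpy(), the destination is not
+ * zero padded, and the final element is overwritten with 0 when src
+ * does not fit.
+ */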
+winwchar *wcsncpy(winwchar *dest, const winwchar *src, size_t n)
+{
+	size_t i;
+
+	if (n == 0)
+		return dest;
+
+	for (i = 0; i < n; i++) {
+		dest[i] = src[i];
+		if (src[i] == 0) {
+			i++;
+			break;
+		}
+	}
+	dest[i - 1] = 0;
+	return dest;
+}
+
+int dxg_copy_from_user(void *to, const void __user *from, unsigned long len)
+{
+	int ret = copy_from_user(to, from, len);
+
+	if (ret) {
+		pr_err("copy_from_user failed: %p %p %lx\n",
+			   to, from, len);
+		ret = STATUS_INVALID_PARAMETER;
+	}
+	return ret;
+}
+
+int dxg_copy_to_user(void __user *to, const void *from, unsigned long len)
+{
+	int ret = copy_to_user(to, from, len);
+
+	if (ret) {
+		pr_err("copy_to_user failed: %p %p %lx\n", to, from, len);
+		ret = STATUS_INVALID_PARAMETER;
+	}
+	return ret;
+}
+
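+/*
+ * Tag-tracked allocation helpers: each allocation increments a per-tag
+ * counter (per process when one is given) so dxgmem_check() can report
+ * leaks at teardown.  A sketch of the expected pairing:
+ *
+ *	buf = dxgmem_alloc(process, DXGMEM_TMP, size);
+ *	if (buf == NULL)
+ *		return STATUS_NO_MEMORY;
+ *	...
+ *	dxgmem_free(process, DXGMEM_TMP, buf);
+ */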
+void *dxgmem_alloc(struct dxgprocess *process, enum dxgk_memory_tag tag,
+		   size_t size)
+{
+	void *address = vzalloc(size);
+
+	if (address) {
+		if (process)
+			atomic_inc(&process->dxg_memory[tag]);
+		else
+			atomic_inc(&dxg_memory[tag]);
+	}
+	return address;
+}
+
+void dxgmem_free(struct dxgprocess *process, enum dxgk_memory_tag tag,
+		 void *address)
+{
+	vfree(address);
+	if (process) {
+		if (atomic_read(&process->dxg_memory[tag]) == 0) {
+			pr_err("%s process error: %d\n",
+					__func__, tag);
+			dump_stack();
+		} else {
+			atomic_dec(&process->dxg_memory[tag]);
+		}
+	} else {
+		if (atomic_read(&dxg_memory[tag]) == 0) {
+			pr_err("%s error: %d\n", __func__, tag);
+			dump_stack();
+		} else {
+			atomic_dec(&dxg_memory[tag]);
+		}
+	}
+}
+
+void *dxgmem_kalloc(enum dxgk_memory_tag tag, size_t size, gfp_t flags)
+{
+	void *address = kzalloc(size, flags);
+
+	if (address)
+		atomic_inc(&dxg_memory[tag]);
+	return address;
+}
+
+void dxgmem_kfree(enum dxgk_memory_tag tag, void *address)
+{
+	kfree(address);
+	if (atomic_read(&dxg_memory[tag]) == 0) {
+		pr_err("%s error: %d\n", __func__, tag);
+		dump_stack();
+	} else {
+		atomic_dec(&dxg_memory[tag]);
+	}
+}
+
+void dxgmem_check(struct dxgprocess *process, enum dxgk_memory_tag ignore_tag)
+{
+	int i;
+	int v;
+
+	for (i = 0; i < DXGMEM_LAST; i++) {
+		if (process)
+			v = atomic_read(&process->dxg_memory[i]);
+		else
+			v = atomic_read(&dxg_memory[i]);
+		if (i != ignore_tag && v != 0)
+			pr_err("memory leak: %2d %3d\n", i, v);
+	}
+}
+
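+/*
+ * Mutex wrapper that records its place in the lock hierarchy, so every
+ * acquire and release is validated by the dxglockorder_* helpers.
+ */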
+void dxgmutex_init(struct dxgmutex *m, enum dxgk_lockorder order)
+{
+	mutex_init(&m->mutex);
+	m->lock_order = order;
+}
+
+void dxgmutex_lock(struct dxgmutex *m)
+{
+	dxglockorder_acquire(m->lock_order);
+	mutex_lock(&m->mutex);
+}
+
+void dxgmutex_unlock(struct dxgmutex *m)
+{
+	mutex_unlock(&m->mutex);
+	dxglockorder_release(m->lock_order);
+}
+
+void dxglockorder_acquire(enum dxgk_lockorder order)
+{
+	struct dxgthreadinfo *info = dxglockorder_get_thread();
+	int index = info->current_lock_index;
+
+	TRACE_LOCK_ORDER(1, "%s %p %d %d",
+			 __func__, info->thread, index, order);
+	if (index == DXGK_MAX_LOCK_DEPTH) {
+		pr_err("Lock depth exceeded");
+		dxg_panic();
+	}
+	if (index != 0) {
+		struct dxgk_lockinfo *lock_info = &info->lock_info[index - 1];
+
+		if (lock_info->lock_order <= order) {
+			pr_err("Invalid lock order: %d %d %d", index,
+				   lock_info->lock_order, order);
+			dxg_panic();
+		}
+	}
+	info->lock_info[index].lock_order = order;
+	info->current_lock_index++;
+	dxglockorder_put_thread(info);
+}
+
+void dxglockorder_release(enum dxgk_lockorder order)
+{
+	struct dxgthreadinfo *info = dxglockorder_get_thread();
+	int index;
+	int i;
+
+	info->current_lock_index--;
+	index = info->current_lock_index;
+	TRACE_LOCK_ORDER(1, "dxglockorder release %p %d %d",
+			 info->thread, index, order);
+	if (index < 0) {
+		pr_err("Lock depth underflow");
+		dxg_panic();
+	}
+	for (i = index; i >= 0; i--) {
+		if (info->lock_info[i].lock_order == order) {
+			/* Shift the entries above the hole down by one */
+			memmove(&info->lock_info[i], &info->lock_info[i + 1],
+				(index - i) * sizeof(info->lock_info[0]));
+			break;
+		}
+	}
+	if (i < 0) {
+		pr_err("Failed to find lock order to release");
+		dxg_panic();
+	}
+	memset(&info->lock_info[index], 0, sizeof(info->lock_info[0]));
+	dxglockorder_put_thread(info);
+}
+
+struct dxgthreadinfo *dxglockorder_get_thread(void)
+{
+	struct dxgthreadinfo *info = NULL;
+	struct dxgthreadinfo *entry = NULL;
+	struct task_struct *thread = current;
+
+	mutex_lock(&dxgglobal->thread_info_mutex);
+	list_for_each_entry(entry, &dxgglobal->thread_info_list_head,
+			    thread_info_list_entry) {
+		if (entry->thread == thread) {
+			info = entry;
+			TRACE_LOCK_ORDER(1, "dxglockorder found thread %p %d",
+					 thread, info->refcount + 1);
+			break;
+		}
+	}
+	if (info == NULL) {
+		info = dxgmem_kalloc(DXGMEM_THREADINFO,
+				     sizeof(struct dxgthreadinfo), GFP_ATOMIC);
+		if (info) {
+			TRACE_LOCK_ORDER(1, "dxglockorder new thread %p",
+					 thread);
+			info->thread = thread;
+			list_add(&info->thread_info_list_entry,
+				 &dxgglobal->thread_info_list_head);
+		}
+	}
+	if (info)
+		info->refcount++;
+	mutex_unlock(&dxgglobal->thread_info_mutex);
+	return info;
+}
+
+void dxglockorder_put_thread(struct dxgthreadinfo *info)
+{
+	if (info) {
+		TRACE_LOCK_ORDER(1, "dxglockorder remove thread %p %d",
+				 info->thread, info->refcount);
+		if (info->refcount <= 0) {
+			pr_err("Invalid refcount for thread info: %p %d",
+				   info, info->refcount);
+			dxg_panic();
+			return;
+		}
+		info->refcount--;
+		if (info->refcount == 0) {
+			TRACE_LOCK_ORDER(1, "dxglockorder remove thread %p",
+					 info->thread);
+			if (info->current_lock_index != 0) {
+				pr_err("A lock is not released: %d %d",
+				       info->current_lock_index,
+				       info->lock_info[
+				       info->current_lock_index - 1].lock_order);
+				dxg_panic();
+			}
+
+			if (!info->lock_held) {
+				mutex_lock(&dxgglobal->thread_info_mutex);
+				list_del(&info->thread_info_list_entry);
+				mutex_unlock(&dxgglobal->thread_info_mutex);
+			}
+
+			dxgmem_kfree(DXGMEM_THREADINFO, info);
+		}
+	}
+}
+
+void dxglockorder_check_empty(struct dxgthreadinfo *info)
+{
+	if (info->refcount != 1) {
+		pr_err("A lock is not released");
+		dxg_panic();
+	}
+}
+
+void dxg_panic(void)
+{
+	dump_stack();
+	BUG();
+}
diff --git a/drivers/gpu/dxgkrnl/misc.h b/drivers/gpu/dxgkrnl/misc.h
new file mode 100644
index 000000000000..b5130cce767f
--- /dev/null
+++ b/drivers/gpu/dxgkrnl/misc.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ *   Iouri Tarassov <iourit at microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Misc definitions
+ *
+ */
+
+#ifndef _MISC_H_
+#define _MISC_H_
+
+struct dxgprocess;
+
+enum dxgk_memory_tag {
+	DXGMEM_GLOBAL		= 0,
+	DXGMEM_ADAPTER		= 1,
+	DXGMEM_THREADINFO	= 2,
+	DXGMEM_PROCESS		= 3,
+	DXGMEM_VMBUS		= 4,
+	DXGMEM_DEVICE		= 5,
+	DXGMEM_RESOURCE		= 6,
+	DXGMEM_CONTEXT		= 7,
+	DXGMEM_PQUEUE		= 8,
+	DXGMEM_SYNCOBJ		= 9,
+	DXGMEM_PROCESS_ADAPTER	= 10,
+	DXGMEM_HWQUEUE		= 11,
+	DXGMEM_HANDLE_TABLE	= 12,
+	DXGMEM_TMP		= 13,
+	DXGMEM_ALLOCATION	= 14,
+	DXGMEM_EVENT		= 15,
+	DXGMEM_HOSTEVENT	= 16,
+	DXGMEM_SHAREDSYNCOBJ	= 17,
+	DXGMEM_SHAREDRESOURCE	= 18,
+	DXGMEM_ALLOCPRIVATE	= 19,
+	DXGMEM_RUNTIMEPRIVATE	= 20,
+	DXGMEM_RESOURCEPRIVATE	= 21,
+	DXGMEM_LAST
+};
+
+void dxgmem_check(struct dxgprocess *process, enum dxgk_memory_tag ignore_tag);
+void *dxgmem_alloc(struct dxgprocess *process, enum dxgk_memory_tag tag,
+		   size_t size);
+void dxgmem_free(struct dxgprocess *process, enum dxgk_memory_tag tag,
+		 void *address);
+void *dxgmem_kalloc(enum dxgk_memory_tag tag, size_t size, gfp_t flags);
+void dxgmem_kfree(enum dxgk_memory_tag tag, void *address);
+
+#define DXGK_MAX_LOCK_DEPTH	64
+#define W_MAX_PATH		260
+
+#define d3dkmt_handle		u32
+#define d3dgpu_virtual_address	u64
+#define winwchar		u16
+#define winhandle		u64
+#define ntstatus		int
+#define winbool			u32
+#define d3dgpu_size_t		u64
+
+struct winluid {
+	uint a;
+	uint b;
+};
+
+/*
+ * Synchronization lock hierarchy.
+ *
+ * The higher the enum value, the higher the lock order.
+ * While a lower-order lock is held, a higher-order lock must not be
+ * acquired, so locks are always taken in order of decreasing value.
+ */
+enum dxgk_lockorder {
+	DXGLOCK_INVALID = 0,
+	DXGLOCK_PROCESSADAPTERDEVICELIST,	/* device_list_mutex */
+	DXGLOCK_GLOBAL_HOSTEVENTLIST,		/* host_event_list_mutex */
+	DXGLOCK_GLOBAL_CHANNEL,			/* channel_lock */
+	DXGLOCK_FDMUTEX,			/* fd_mutex */
+	DXGLOCK_PROCESSLIST,			/* plistmutex */
+	DXGLOCK_HANDLETABLE,			/* table_lock */
+	DXGLOCK_DEVICE_CONTEXTLIST,		/* context_list_lock */
+	DXGLOCK_DEVICE_ALLOCLIST,		/* alloc_list_lock */
+	DXGLOCK_RESOURCE,			/* resource_mutex */
+	DXGLOCK_SHAREDRESOURCELIST,		/* shared_resource_list_lock */
+	DXGLOCK_ADAPTER,			/* core_lock */
+	DXGLOCK_DEVICE,				/* device_lock */
+	DXGLOCK_PROCESSMUTEX,			/* process->process_mutex */
+	DXGLOCK_PROCESSADAPTER,			/* process_adapter_mutex */
+	DXGLOCK_GLOBAL_ADAPTERLIST,		/* adapter_list_lock */
+	DXGLOCK_GLOBAL_DEVICE,			/* device_mutex */
+};
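+
+/*
+ * A sketch of a nesting the order checker accepts: each newly acquired
+ * lock must have a strictly lower order than the one currently held,
+ * e.g. DXGLOCK_HANDLETABLE (table_lock) may be nested inside
+ * DXGLOCK_DEVICE (device_lock), but not the other way around:
+ *
+ *	dxgdevice_acquire_lock_shared(device);		// DXGLOCK_DEVICE
+ *	hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ *	...
+ *	hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+ *	dxgdevice_release_lock_shared(device);
+ */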
+
+struct dxgk_lockinfo {
+	enum dxgk_lockorder lock_order;
+};
+
+struct dxgthreadinfo {
+	struct list_head	thread_info_list_entry;
+	struct task_struct	*thread;
+	int			refcount;
+	int			current_lock_index;
+	struct dxgk_lockinfo	lock_info[DXGK_MAX_LOCK_DEPTH];
+	bool			lock_held;
+};
+
+void dxglockorder_acquire(enum dxgk_lockorder order);
+void dxglockorder_release(enum dxgk_lockorder order);
+void dxglockorder_check_empty(struct dxgthreadinfo *info);
+struct dxgthreadinfo *dxglockorder_get_thread(void);
+void dxglockorder_put_thread(struct dxgthreadinfo *info);
+void dxg_panic(void);
+
+struct dxgmutex {
+	struct mutex		mutex;
+	enum dxgk_lockorder	lock_order;
+};
+void dxgmutex_init(struct dxgmutex *m, enum dxgk_lockorder order);
+void dxgmutex_lock(struct dxgmutex *m);
+void dxgmutex_unlock(struct dxgmutex *m);
+
+winwchar *wcsncpy(winwchar *dest, const winwchar *src, size_t n);
+
+enum dxglockstate {
+	DXGLOCK_SHARED,
+	DXGLOCK_EXCL
+};
+
+#define STATUS_SUCCESS					((ntstatus)0)
+#define	STATUS_GRAPHICS_DRIVER_MISMATCH			((ntstatus)0xC01E0009L)
+#define	STATUS_OBJECT_NAME_INVALID			((ntstatus)0xC0000033L)
+#define	STATUS_OBJECT_PATH_INVALID			((ntstatus)0xC0000039L)
+#define	STATUS_DEVICE_REMOVED				((ntstatus)0xC00002B6L)
+#define	STATUS_DISK_FULL				((ntstatus)0xC000007FL)
+#define	STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE		((ntstatus)0xC01E0200L)
+#define	STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST		((ntstatus)0xC01E0116L)
+#define	STATUS_GRAPHICS_ALLOCATION_CLOSED		((ntstatus)0xC01E0112L)
+#define	STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE	((ntstatus)0xC01E0113L)
+#define	STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE	((ntstatus)0xC01E0114L)
+#define	STATUS_ILLEGAL_CHARACTER			((ntstatus)0xC0000161L)
+#define	STATUS_INVALID_HANDLE				((ntstatus)0xC0000008L)
+#define	STATUS_ILLEGAL_INSTRUCTION			((ntstatus)0xC000001DL)
+#define	STATUS_INVALID_PARAMETER_1			((ntstatus)0xC00000EFL)
+#define	STATUS_INVALID_PARAMETER_2			((ntstatus)0xC00000F0L)
+#define	STATUS_INVALID_PARAMETER_3			((ntstatus)0xC00000F1L)
+#define	STATUS_INVALID_PARAMETER_4			((ntstatus)0xC00000F2L)
+#define	STATUS_INVALID_PARAMETER_5			((ntstatus)0xC00000F3L)
+#define	STATUS_INVALID_PARAMETER_6			((ntstatus)0xC00000F4L)
+#define	STATUS_INVALID_PARAMETER_7			((ntstatus)0xC00000F5L)
+#define	STATUS_INVALID_PARAMETER_8			((ntstatus)0xC00000F6L)
+#define	STATUS_INVALID_PARAMETER_9			((ntstatus)0xC00000F7L)
+#define	STATUS_INVALID_PARAMETER_10			((ntstatus)0xC00000F8L)
+#define	STATUS_INVALID_PARAMETER_11			((ntstatus)0xC00000F9L)
+#define	STATUS_INVALID_PARAMETER_12			((ntstatus)0xC00000FAL)
+#define	STATUS_IN_PAGE_ERROR				((ntstatus)0xC0000006L)
+#define	STATUS_NOT_IMPLEMENTED				((ntstatus)0xC0000002L)
+#define	STATUS_PENDING					((ntstatus)0x00000103L)
+#define	STATUS_ACCESS_DENIED				((ntstatus)0xC0000022L)
+#define	STATUS_BUFFER_TOO_SMALL				((ntstatus)0xC0000023L)
+#define	STATUS_OBJECT_PATH_SYNTAX_BAD			((ntstatus)0xC000003BL)
+#define	STATUS_OBJECT_TYPE_MISMATCH			((ntstatus)0xC0000024L)
+#define	STATUS_GRAPHICS_ALLOCATION_BUSY			((ntstatus)0xC01E0102L)
+#define	STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE		((ntstatus)0xC01E0115L)
+#define	STATUS_PRIVILEGED_INSTRUCTION			((ntstatus)0xC0000096L)
+#define	STATUS_SHARING_VIOLATION			((ntstatus)0xC0000043L)
+#define	STATUS_BUFFER_OVERFLOW				((ntstatus)0x80000005L)
+#define	STATUS_MEDIA_WRITE_PROTECTED			((ntstatus)0xC00000A2L)
+#define	STATUS_INTEGER_OVERFLOW				((ntstatus)0xC0000095L)
+#define	STATUS_PRIVILEGE_NOT_HELD			((ntstatus)0xC0000061L)
+#define	STATUS_NOT_SUPPORTED				((ntstatus)0xC00000BBL)
+#define	STATUS_HOST_UNREACHABLE				((ntstatus)0xC000023DL)
+#define	STATUS_NETWORK_UNREACHABLE			((ntstatus)0xC000023CL)
+#define	STATUS_CONNECTION_REFUSED			((ntstatus)0xC0000236L)
+#define	STATUS_TIMEOUT					((ntstatus)0x00000102L)
+#define	STATUS_WRONG_VOLUME				((ntstatus)0xC0000012L)
+#define	STATUS_IO_TIMEOUT				((ntstatus)0xC00000B5L)
+#define	STATUS_RETRY					((ntstatus)0xC000022DL)
+#define	STATUS_CANCELLED				((ntstatus)0xC0000120L)
+#define	STATUS_CONNECTION_DISCONNECTED			((ntstatus)0xC000020CL)
+#define	STATUS_CONNECTION_RESET				((ntstatus)0xC000020DL)
+#define	STATUS_CONNECTION_ABORTED			((ntstatus)0xC0000241L)
+#define	STATUS_INVALID_PARAMETER			((ntstatus)0xC000000DL)
+#define	STATUS_INVALID_DEVICE_REQUEST			((ntstatus)0xC0000010L)
+#define	STATUS_OBJECT_NAME_NOT_FOUND			((ntstatus)0xC0000034L)
+#define	STATUS_OBJECT_PATH_NOT_FOUND			((ntstatus)0xC000003AL)
+#define	STATUS_NOT_FOUND				((ntstatus)0xC0000225L)
+#define	STATUS_DELETE_PENDING				((ntstatus)0xC0000056L)
+#define	STATUS_BAD_NETWORK_NAME				((ntstatus)0xC00000CCL)
+#define	STATUS_CANNOT_DELETE				((ntstatus)0xC0000121L)
+#define	STATUS_INTERNAL_ERROR				((ntstatus)0xC00000E5L)
+#define	STATUS_OBJECTID_EXISTS				((ntstatus)0xC000022BL)
+#define	STATUS_DUPLICATE_OBJECTID			((ntstatus)0xC000022AL)
+#define	STATUS_ADDRESS_ALREADY_EXISTS			((ntstatus)0xC000020AL)
+#define	STATUS_ACCESS_VIOLATION				((ntstatus)0xC0000005L)
+#define	STATUS_INSUFFICIENT_RESOURCES			((ntstatus)0xC000009AL)
+#define	STATUS_NO_MEMORY				((ntstatus)0xC0000017L)
+#define	STATUS_COMMITMENT_LIMIT				((ntstatus)0xC000012DL)
+#define	STATUS_GRAPHICS_NO_VIDEO_MEMORY			((ntstatus)0xC01E0100L)
+#define	STATUS_OBJECTID_NOT_FOUND			((ntstatus)0xC00002F0L)
+#define	STATUS_DIRECTORY_NOT_EMPTY			((ntstatus)0xC0000101L)
+#define	STATUS_OBJECT_NAME_EXISTS			((ntstatus)0x40000000L)
+#define	STATUS_OBJECT_NAME_COLLISION			((ntstatus)0xC0000035L)
+#define	STATUS_UNSUCCESSFUL				((ntstatus)0xC0000001L)
+#define NT_SUCCESS(status)	((int)(status) >= 0)
+
+#define DXGKRNL_ASSERT(exp)
+#define UNUSED(x) (void)(x)
+
+#undef pr_fmt
+#define pr_fmt(fmt)	"dxgk:err: " fmt
+#define pr_fmt2(fmt)	"dxgk: " fmt
+
+#define DXGKDEBUG 1
+/* #define USEPRINTK 1 */
+
+#ifndef DXGKDEBUG
+#define TRACE_DEBUG(...)
+#define TRACE_DEFINE(...)
+#define TRACE_FUNC_ENTER(...)
+#define TRACE_FUNC_EXIT(...)
+#else
+#ifdef USEPRINTK
+#define TRACE_DEBUG(level, fmt, ...)\
+	printk(KERN_DEBUG pr_fmt2(fmt), ##__VA_ARGS__)
+
+#define TRACE_DEBUG2(level, offset, fmt, ...)				\
+do {									\
+	if (offset == 0)						\
+		printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__);	\
+	else								\
+		printk(KERN_DEBUG pr_fmt2(fmt), ##__VA_ARGS__);	\
+} while (false)
+
+#define TRACE_FUNC_ENTER(msg) \
+	printk(KERN_DEBUG "dxgk: %s", msg)
+#define TRACE_FUNC_EXIT(msg, ret)				\
+do {								\
+	if (!NT_SUCCESS(ret))					\
+		dxg_pr_err("%s %x %d", msg, ret, ret);		\
+	else							\
+		printk(KERN_DEBUG "dxgk: %s end", msg);		\
+} while (false)
+#define TRACE_FUNC_EXIT_ERR(msg, ret)				\
+do {								\
+	if (!NT_SUCCESS(ret))					\
+		dxg_pr_err("%s %x", msg, ret);			\
+} while (false)
+#else
+#define TRACE_DEBUG(level, fmt, ...)\
+	dev_dbg(dxgglobaldev, pr_fmt2(fmt), ##__VA_ARGS__)
+
+#define TRACE_DEBUG2(level, offset, fmt, ...)			\
+do {								\
+	if (offset == 0)					\
+		dev_dbg(dxgglobaldev, pr_fmt(fmt), ##__VA_ARGS__); \
+	else							\
+		dev_dbg(dxgglobaldev, pr_fmt2(fmt), ##__VA_ARGS__);\
+} while (false)
+
+#define TRACE_FUNC_ENTER(msg)				\
+	dev_dbg(dxgglobaldev, "dxgk: %s", msg)
+#define TRACE_FUNC_EXIT(msg, ret)			\
+do {							\
+	if (!NT_SUCCESS(ret))				\
+		dev_dbg(dxgglobaldev, "dxgk:err: %s %x", msg, ret); \
+	else						\
+		dev_dbg(dxgglobaldev, "dxgk: %s end", msg);	\
+} while (false)
+#define TRACE_FUNC_EXIT_ERR(msg, ret)			\
+do {							\
+	if (!NT_SUCCESS(ret))				\
+		dev_dbg(dxgglobaldev, "dxgk:err: %s %x", msg, ret); \
+} while (false)
+#endif /* USEPRINTK */
+#define TRACE_DEFINE(arg) arg
+#endif
+
+#ifdef DXGKDEBUGLOCKORDER
+#define TRACE_LOCK_ORDER(...)  TRACE_DEBUG(__VA_ARGS__)
+#else
+#define TRACE_LOCK_ORDER(...)
+#endif
+
+#endif
-- 
2.25.1


