[PATCH] drm/radeon: add more sanity checks (usRecordOffset) to obj info record parsing

Amol Surati suratiamol at gmail.com
Fri Nov 19 09:45:58 UTC 2021


When parsing Encoder, Connector, or Router records, if the
usRecordOffset field is 0, the driver ends up dereferencing the
ATOM_COMMON_TABLE_HEADER of the Object Table as an
ATOM_COMMON_RECORD_HEADER.

A BIOS that triggers such a dereference when parsing the Encoder
records was found on a Cedar-based Radeon HD 7350/8350 GPU.

Allow record dereferences only if usRecordOffset is non-zero.

Signed-off-by: Amol Surati <suratiamol at gmail.com>
---
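Not part of the patch, just an illustrative, minimal userspace sketch of
the aliasing described above. The struct definitions below are simplified
stand-ins for the ATOM_COMMON_TABLE_HEADER / ATOM_COMMON_RECORD_HEADER
layouts in atombios.h, not the real ones.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for ATOM_COMMON_TABLE_HEADER */
struct table_header {
	uint16_t structure_size;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

/* simplified stand-in for ATOM_COMMON_RECORD_HEADER */
struct record_header {
	uint8_t record_type;
	uint8_t record_size;
};

int main(void)
{
	uint8_t bios[64] = { 0 };	/* pretend BIOS image */
	uint16_t data_offset = 0;	/* offset of the object info table */
	uint16_t rec_offset = 0;	/* a buggy BIOS leaves usRecordOffset at 0 */

	struct table_header *table =
		(struct table_header *)(bios + data_offset);
	struct record_header *record =
		(struct record_header *)(bios + data_offset + rec_offset);

	/*
	 * With rec_offset == 0 both pointers are identical, so the record
	 * walker would interpret the table header bytes as a record header.
	 */
	printf("table=%p record=%p alias=%s\n",
	       (void *)table, (void *)record,
	       ((void *)table == (void *)record) ? "yes" : "no");
	return 0;
}

When usRecordOffset is left at 0 by such a BIOS, the record pointer and
the table header pointer compare equal; the added rec_offset > 0 checks
below reject exactly that situation before any record fields are read.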
 drivers/gpu/drm/radeon/radeon_atombios.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 28c4413f4..bab0e1cc2 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -646,14 +646,15 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
 					for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
 						u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
+						u16 rec_offset = le16_to_cpu(enc_obj->asObjects[k].usRecordOffset);
 						if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
 							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
-								(ctx->bios + data_offset +
-								 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
+								(ctx->bios + data_offset + rec_offset);
 							ATOM_ENCODER_CAP_RECORD *cap_record;
 							u16 caps = 0;
 
-							while (record->ucRecordSize > 0 &&
+							while (rec_offset > 0 &&
+							       record->ucRecordSize > 0 &&
 							       record->ucRecordType > 0 &&
 							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
 								switch (record->ucRecordType) {
@@ -677,10 +678,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
 					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
 						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
+						u16 rec_offset = le16_to_cpu(router_obj->asObjects[k].usRecordOffset);
 						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
 							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
-								(ctx->bios + data_offset +
-								 le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
+								(ctx->bios + data_offset + rec_offset);
 							ATOM_I2C_RECORD *i2c_record;
 							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
 							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
@@ -702,7 +703,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 									break;
 							}
 
-							while (record->ucRecordSize > 0 &&
+							while (rec_offset > 0 &&
+							       record->ucRecordSize > 0 &&
 							       record->ucRecordType > 0 &&
 							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
 								switch (record->ucRecordType) {
@@ -753,19 +755,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 					if (le16_to_cpu(path->usConnObjectId) ==
 					    le16_to_cpu(con_obj->asObjects[j].
 							usObjectID)) {
+						u16 rec_offset = le16_to_cpu(con_obj->asObjects[j].usRecordOffset);
 						ATOM_COMMON_RECORD_HEADER
 						    *record =
 						    (ATOM_COMMON_RECORD_HEADER
 						     *)
-						    (ctx->bios + data_offset +
-						     le16_to_cpu(con_obj->
-								 asObjects[j].
-								 usRecordOffset));
+						    (ctx->bios + data_offset + rec_offset);
 						ATOM_I2C_RECORD *i2c_record;
 						ATOM_HPD_INT_RECORD *hpd_record;
 						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
 
-						while (record->ucRecordSize > 0 &&
+						while (rec_offset > 0 &&
+						       record->ucRecordSize > 0 &&
 						       record->ucRecordType > 0 &&
 						       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
 							switch (record->ucRecordType) {
-- 
2.33.1


