mirror of https://github.com/Telecominfraproject/wlan-ap.git (synced 2025-12-20 10:51:27 +00:00)
From c48cc020346e00fc8d911feecbb030d505d79a7b Mon Sep 17 00:00:00 2001
From: Saahil Tomar <quic_saahtoma@quicinc.com>
Date: Tue, 20 Feb 2024 11:21:01 +0530
Subject: [PATCH] soc: qcom: smem: Add boundary checks for partitions

Add condition checks to make sure that the end address
of a private entry does not go out of its partition.

Change-Id: I88b3c69d86d90905b214c13a8c632b134b487a49
Signed-off-by: Sarannya S <quic_sarannya@quicinc.com>
Signed-off-by: Pranav Mahesh Phansalkar <quic_pphansal@quicinc.com>
Signed-off-by: Saahil Tomar <quic_saahtoma@quicinc.com>
---
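Note: below is a minimal standalone sketch (not part of the change) of
the wraparound that the new IN_PARTITION_RANGE() check guards against.
On a 32-bit system a naive "ptr + size <= end" test can pass when
ptr + size overflows; all values here are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t start = 0xffff0000u; /* partition base (illustrative) */
            uint32_t end   = 0xffffff00u; /* partition end */
            uint32_t ptr   = 0xfffffe00u; /* entry start, inside the range */
            uint32_t size  = 0x00010000u; /* big enough to wrap past 2^32 */

            /* Naive check: ptr + size wraps to 0x0000fe00 and "passes". */
            printf("naive:   %s\n",
                   ptr + size <= end ? "pass (wrong)" : "fail");

            /* Overflow-aware check, mirroring the macro's middle clause. */
            printf("guarded: %s\n",
                   ptr >= start && ptr + size >= ptr && ptr + size <= end ?
                   "pass" : "fail (correct)");
            return 0;
    }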
drivers/soc/qcom/smem.c | 103 ++++++++++++++++++++++++++++------------
1 file changed, 72 insertions(+), 31 deletions(-)
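For reference, a self-contained sketch exercising a copy of the
IN_PARTITION_RANGE() macro introduced below, with a plain buffer
standing in for an SMEM partition. Like the kernel, it relies on the
GCC extension that arithmetic on void * behaves like arithmetic on
char *; the buffer bounds are made up for illustration.

    #include <stdio.h>

    #define IN_PARTITION_RANGE(ptr, size, start, end) \
            (((void *)(ptr) >= (void *)(start)) && \
             (((void *)(ptr) + (size)) >= (void *)(ptr)) && \
             (((void *)(ptr) + (size)) <= (void *)(end)))

    int main(void)
    {
            char buf[256];
            char *start = buf + 16, *end = buf + 240; /* "partition" is buf[16..240) */

            /* Entry fully inside the partition: accepted (prints 1). */
            printf("inside:  %d\n", IN_PARTITION_RANGE(start, 32, start, end));
            /* Entry whose end crosses the partition end: rejected (prints 0). */
            printf("overrun: %d\n", IN_PARTITION_RANGE(end - 8, 16, start, end));
            /* Entry starting before the partition: rejected (prints 0). */
            printf("before:  %d\n", IN_PARTITION_RANGE(buf, 8, start, end));
            return 0;
    }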
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index d28ad6523934..fa8ce95c3b14 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -2,6 +2,7 @@
 /*
  * Copyright (c) 2015, Sony Mobile Communications AB.
  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/hwspinlock.h>
@@ -88,6 +89,17 @@
 /* Max number of processors/hosts in a system */
 #define SMEM_HOST_COUNT 15
 
+/* Entry range check
+ * ptr >= start : Checks that ptr is not below the start of the access region
+ * ptr + size >= ptr: Check for integer overflow (on a 32-bit system, where ptr
+ * and size are 32 bits, ptr + size can wrap around to be a small integer)
+ * ptr + size <= end: Checks that ptr + size does not pass the end of the access region
+ */
+#define IN_PARTITION_RANGE(ptr, size, start, end) \
+	(((void *)(ptr) >= (void *)(start)) && \
+	 (((void *)(ptr) + (size)) >= (void *)(ptr)) && \
+	 (((void *)(ptr) + (size)) <= (void *)(end)))
+
 /**
  * struct smem_proc_comm - proc_comm communication struct (legacy)
  * @command: current command to be executed
@@ -365,6 +377,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
 				   size_t size)
 {
 	struct smem_private_entry *hdr, *end;
+	struct smem_private_entry *next_hdr;
 	struct smem_partition_header *phdr;
 	size_t alloc_size;
 	void *cached;
@@ -377,19 +390,25 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
 	end = phdr_to_last_uncached_entry(phdr);
 	cached = phdr_to_last_cached_entry(phdr);
 
-	if (WARN_ON((void *)end > p_end || cached > p_end))
+	if (WARN_ON(!IN_PARTITION_RANGE(end, 0, phdr, cached) ||
+			cached > p_end))
 		return -EINVAL;
 
-	while (hdr < end) {
+	while ((hdr < end) && ((hdr + 1) < end)) {
 		if (hdr->canary != SMEM_PRIVATE_CANARY)
 			goto bad_canary;
 		if (le16_to_cpu(hdr->item) == item)
 			return -EEXIST;
 
-		hdr = uncached_entry_next(hdr);
+		next_hdr = uncached_entry_next(hdr);
+
+		if (WARN_ON(next_hdr <= hdr))
+			return -EINVAL;
+
+		hdr = next_hdr;
 	}
 
-	if (WARN_ON((void *)hdr > p_end))
+	if (WARN_ON((void *)hdr > (void *)end))
 		return -EINVAL;
 
 	/* Check that we don't grow into the cached region */
@@ -547,9 +566,11 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
 				   unsigned item,
 				   size_t *size)
 {
-	struct smem_private_entry *e, *end;
+	struct smem_private_entry *e, *uncached_end, *cached_end;
+	struct smem_private_entry *next_e;
 	struct smem_partition_header *phdr;
 	void *item_ptr, *p_end;
+	size_t entry_size = 0;
 	u32 padding_data;
 	u32 e_size;
 
@@ -557,67 +578,87 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
 	p_end = (void *)phdr + part->size;
 
 	e = phdr_to_first_uncached_entry(phdr);
-	end = phdr_to_last_uncached_entry(phdr);
+	uncached_end = phdr_to_last_uncached_entry(phdr);
+	cached_end = phdr_to_last_cached_entry(phdr);
+
+	if (WARN_ON(!IN_PARTITION_RANGE(uncached_end, 0, phdr, cached_end)
+			|| (void *)cached_end > p_end))
+		return ERR_PTR(-EINVAL);
 
-	while (e < end) {
+	while ((e < uncached_end) && ((e + 1) < uncached_end)) {
 		if (e->canary != SMEM_PRIVATE_CANARY)
 			goto invalid_canary;
 
 		if (le16_to_cpu(e->item) == item) {
-			if (size != NULL) {
-				e_size = le32_to_cpu(e->size);
-				padding_data = le16_to_cpu(e->padding_data);
+			e_size = le32_to_cpu(e->size);
+			padding_data = le16_to_cpu(e->padding_data);
 
-				if (WARN_ON(e_size > part->size || padding_data > e_size))
-					return ERR_PTR(-EINVAL);
+			if (e_size < part->size && padding_data < e_size)
+				entry_size = e_size - padding_data;
+			else
+				return ERR_PTR(-EINVAL);
 
-				*size = e_size - padding_data;
-			}
 
 			item_ptr = uncached_entry_to_item(e);
-			if (WARN_ON(item_ptr > p_end))
+
+			if (WARN_ON(!IN_PARTITION_RANGE(item_ptr, entry_size, e, uncached_end)))
 				return ERR_PTR(-EINVAL);
 
+			if (size != NULL)
+				*size = entry_size;
+
 			return item_ptr;
 		}
 
-		e = uncached_entry_next(e);
+		next_e = uncached_entry_next(e);
+		if (WARN_ON(next_e <= e))
+			return ERR_PTR(-EINVAL);
+
+		e = next_e;
 	}
 
-	if (WARN_ON((void *)e > p_end))
+	if (WARN_ON((void *)e > (void *)uncached_end))
 		return ERR_PTR(-EINVAL);
 
 	/* Item was not found in the uncached list, search the cached list */
 
+	if (cached_end == p_end)
+		return ERR_PTR(-ENOENT);
+
 	e = phdr_to_first_cached_entry(phdr, part->cacheline);
-	end = phdr_to_last_cached_entry(phdr);
 
-	if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
+	if (WARN_ON(!IN_PARTITION_RANGE(cached_end, 0, uncached_end, p_end) ||
+			!IN_PARTITION_RANGE(e, sizeof(*e), cached_end, p_end)))
 		return ERR_PTR(-EINVAL);
 
-	while (e > end) {
+	while (e > cached_end) {
 		if (e->canary != SMEM_PRIVATE_CANARY)
 			goto invalid_canary;
 
 		if (le16_to_cpu(e->item) == item) {
-			if (size != NULL) {
-				e_size = le32_to_cpu(e->size);
-				padding_data = le16_to_cpu(e->padding_data);
+			e_size = le32_to_cpu(e->size);
+			padding_data = le16_to_cpu(e->padding_data);
 
-				if (WARN_ON(e_size > part->size || padding_data > e_size))
-					return ERR_PTR(-EINVAL);
-
-				*size = e_size - padding_data;
-			}
+			if (e_size < part->size && padding_data < e_size)
+				entry_size = e_size - padding_data;
+			else
+				return ERR_PTR(-EINVAL);
 
-			item_ptr = cached_entry_to_item(e);
-			if (WARN_ON(item_ptr < (void *)phdr))
+			item_ptr = cached_entry_to_item(e);
+			if (WARN_ON(!IN_PARTITION_RANGE(item_ptr, entry_size, cached_end, e)))
 				return ERR_PTR(-EINVAL);
 
+			if (size != NULL)
+				*size = entry_size;
+
 			return item_ptr;
 		}
 
-		e = cached_entry_next(e, part->cacheline);
+		next_e = cached_entry_next(e, part->cacheline);
+		if (WARN_ON(next_e >= e))
+			return ERR_PTR(-EINVAL);
+
+		e = next_e;
 	}
 
 	if (WARN_ON((void *)e < (void *)phdr))
--
2.34.1
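Postscript, not part of the patch: the loops above also verify forward
progress (next_hdr <= hdr, and next_e >= e for the downward cached
walk), because a corrupted size field can make the "next entry"
computation wrap and the walk spin forever. Below is a standalone
sketch of that failure mode, using uint32_t offsets to mirror 32-bit
pointer arithmetic; the sizes and layout are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for reading a (corrupted) entry size at a given offset. */
    static uint32_t entry_size_at(uint32_t off)
    {
            (void)off;            /* a real reader would parse the entry */
            return 0xfffffff8u;   /* corrupted: wraps the next-offset sum */
    }

    int main(void)
    {
            uint32_t off = 0, end = 64; /* 64-byte "partition" */

            while (off < end) {
                    /* 8-byte header + payload; wraps modulo 2^32. */
                    uint32_t next = off + 8 + entry_size_at(off);

                    if (next <= off) { /* the guard added by the patch */
                            puts("corruption detected, bailing out");
                            return 1;
                    }
                    off = next;
            }
            puts("reached partition end");
            return 0;
    }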