Commit d52e4048 authored by Chris Lew, committed by Andy Gross

soc: qcom: smem: Support global partition

SMEM V12 creates a global partition to allocate global smem items from
instead of a global heap. The global partition has the same structure as
a private partition.

Signed-off-by: Chris Lew <clew@codeaurora.org>
Acked-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Signed-off-by: Andy Gross <andy.gross@linaro.org>
parent dcc0967d
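
As context for the diff below: the public entry points whose dispatch changes here are qcom_smem_alloc() and qcom_smem_get(), declared in include/linux/soc/qcom/smem.h. The following minimal client-side sketch is not part of the patch; SMEM_EXAMPLE_ITEM, the smem_example() function, and the 64-byte size are hypothetical and chosen only for illustration. With QCOM_SMEM_HOST_ANY (no host-specific partition matches), this patch routes both calls to the new global partition on SMEM v12, and to the legacy global heap on v11:

        #include <linux/err.h>
        #include <linux/types.h>
        #include <linux/soc/qcom/smem.h>

        /* Hypothetical item number, for illustration only. */
        #define SMEM_EXAMPLE_ITEM       86

        static int smem_example(void)
        {
                size_t size;
                void *ptr;
                int ret;

                /*
                 * Allocate a 64-byte item owned by no specific host pair.
                 * On an SMEM v12 system this lands in the global partition;
                 * on v11 it still lands in the legacy global heap.
                 */
                ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_EXAMPLE_ITEM, 64);
                if (ret < 0 && ret != -EEXIST)
                        return ret;

                /* Look the item up again; size is filled in with its actual size. */
                ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_EXAMPLE_ITEM, &size);
                if (IS_ERR(ptr))
                        return PTR_ERR(ptr);

                return 0;
        }

Either way the caller sees the same API; only the internal lookup order changes, as the diff below shows.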
@@ -55,6 +55,10 @@
  * is hence the region between the cached and non-cached offsets. The header of
  * cached items comes after the data.
  *
+ * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
+ * for the global heap. A new global partition is created from the global heap
+ * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
+ * set by the bootloader.
  *
  * To synchronize allocations in the shared memory heaps a remote spinlock must
  * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
@@ -68,7 +72,8 @@
  * version is a valid version as a sanity check.
  */
 #define SMEM_MASTER_SBL_VERSION_INDEX   7
-#define SMEM_EXPECTED_VERSION           11
+#define SMEM_GLOBAL_HEAP_VERSION        11
+#define SMEM_GLOBAL_PART_VERSION        12
 
 /*
  * The first 8 items are only to be allocated by the boot loader while
@@ -82,6 +87,9 @@
 /* Processor/host identifier for the application processor */
 #define SMEM_HOST_APPS          0
 
+/* Processor/host identifier for the global partition */
+#define SMEM_GLOBAL_HOST        0xfffe
+
 /* Max number of processors/hosts in a system */
 #define SMEM_HOST_COUNT         9
 
@@ -230,6 +238,8 @@ struct smem_region {
  * struct qcom_smem - device data for the smem device
  * @dev:        device pointer
  * @hwlock:     reference to a hwspinlock
+ * @global_partition:   pointer to global partition when in use
+ * @global_cacheline:   cacheline size for global partition
  * @partitions: list of pointers to partitions affecting the current
  *              processor/host
  * @cacheline:  list of cacheline sizes for each host
@@ -241,6 +251,8 @@ struct qcom_smem {
        struct hwspinlock *hwlock;
 
+       struct smem_partition_header *global_partition;
+       size_t global_cacheline;
        struct smem_partition_header *partitions[SMEM_HOST_COUNT];
        size_t cacheline[SMEM_HOST_COUNT];
 
@@ -317,16 +329,14 @@ static struct qcom_smem *__smem;
 #define HWSPINLOCK_TIMEOUT      1000
 
 static int qcom_smem_alloc_private(struct qcom_smem *smem,
-                                  unsigned host,
+                                  struct smem_partition_header *phdr,
                                   unsigned item,
                                   size_t size)
 {
-       struct smem_partition_header *phdr;
        struct smem_private_entry *hdr, *end;
        size_t alloc_size;
        void *cached;
 
-       phdr = smem->partitions[host];
        hdr = phdr_to_first_uncached_entry(phdr);
        end = phdr_to_last_uncached_entry(phdr);
        cached = phdr_to_last_cached_entry(phdr);
@@ -334,8 +344,8 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
        while (hdr < end) {
                if (hdr->canary != SMEM_PRIVATE_CANARY) {
                        dev_err(smem->dev,
-                               "Found invalid canary in host %d partition\n",
-                               host);
+                               "Found invalid canary in hosts %d:%d partition\n",
+                               phdr->host0, phdr->host1);
                        return -EINVAL;
                }
@@ -373,8 +383,8 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
                                  unsigned item,
                                  size_t size)
 {
-       struct smem_header *header;
        struct smem_global_entry *entry;
+       struct smem_header *header;
 
        if (WARN_ON(item >= SMEM_ITEM_COUNT))
                return -EINVAL;
@@ -416,6 +426,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
  */
 int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
 {
+       struct smem_partition_header *phdr;
        unsigned long flags;
        int ret;
 
@@ -434,10 +445,15 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
        if (ret)
                return ret;
 
-       if (host < SMEM_HOST_COUNT && __smem->partitions[host])
-               ret = qcom_smem_alloc_private(__smem, host, item, size);
-       else
+       if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+               phdr = __smem->partitions[host];
+               ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+       } else if (__smem->global_partition) {
+               phdr = __smem->global_partition;
+               ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+       } else {
                ret = qcom_smem_alloc_global(__smem, item, size);
+       }
 
        hwspin_unlock_irqrestore(__smem->hwlock, &flags);
@@ -479,16 +495,12 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
 }
 
 static void *qcom_smem_get_private(struct qcom_smem *smem,
-                                  unsigned host,
+                                  struct smem_partition_header *phdr,
+                                  size_t cacheline,
                                   unsigned item,
                                   size_t *size)
 {
-       struct smem_partition_header *phdr;
        struct smem_private_entry *e, *end;
-       size_t cacheline;
-
-       phdr = smem->partitions[host];
-       cacheline = smem->cacheline[host];
 
        e = phdr_to_first_uncached_entry(phdr);
        end = phdr_to_last_uncached_entry(phdr);
@@ -531,7 +543,8 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
        return ERR_PTR(-ENOENT);
 
 invalid_canary:
-       dev_err(smem->dev, "Found invalid canary in host %d partition\n", host);
+       dev_err(smem->dev, "Found invalid canary in hosts %d:%d partition\n",
+                       phdr->host0, phdr->host1);
 
        return ERR_PTR(-EINVAL);
 }
@@ -547,7 +560,9 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
  */
 void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
 {
+       struct smem_partition_header *phdr;
        unsigned long flags;
+       size_t cacheln;
        int ret;
        void *ptr = ERR_PTR(-EPROBE_DEFER);
 
@@ -560,10 +575,17 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
        if (ret)
                return ERR_PTR(ret);
 
-       if (host < SMEM_HOST_COUNT && __smem->partitions[host])
-               ptr = qcom_smem_get_private(__smem, host, item, size);
-       else
+       if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+               phdr = __smem->partitions[host];
+               cacheln = __smem->cacheline[host];
+               ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+       } else if (__smem->global_partition) {
+               phdr = __smem->global_partition;
+               cacheln = __smem->global_cacheline;
+               ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+       } else {
                ptr = qcom_smem_get_global(__smem, item, size);
+       }
 
        hwspin_unlock_irqrestore(__smem->hwlock, &flags);
@@ -592,6 +614,10 @@ int qcom_smem_get_free_space(unsigned host)
                phdr = __smem->partitions[host];
                ret = le32_to_cpu(phdr->offset_free_cached) -
                      le32_to_cpu(phdr->offset_free_uncached);
+       } else if (__smem->global_partition) {
+               phdr = __smem->global_partition;
+               ret = le32_to_cpu(phdr->offset_free_cached) -
+                     le32_to_cpu(phdr->offset_free_uncached);
        } else {
                header = __smem->regions[0].virt_base;
                ret = le32_to_cpu(header->available);
@@ -612,27 +638,106 @@ static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
        return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
 }
 
-static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
-                                         unsigned local_host)
+static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
 {
-       struct smem_partition_header *header;
-       struct smem_ptable_entry *entry;
        struct smem_ptable *ptable;
-       unsigned remote_host;
-       u32 version, host0, host1;
-       int i;
+       u32 version;
 
        ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
        if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
-               return 0;
+               return ERR_PTR(-ENOENT);
 
        version = le32_to_cpu(ptable->version);
        if (version != 1) {
                dev_err(smem->dev,
                        "Unsupported partition header version %d\n", version);
+               return ERR_PTR(-EINVAL);
+       }
+       return ptable;
+}
+
+static int qcom_smem_set_global_partition(struct qcom_smem *smem)
+{
+       struct smem_partition_header *header;
+       struct smem_ptable_entry *entry = NULL;
+       struct smem_ptable *ptable;
+       u32 host0, host1, size;
+       int i;
+
+       ptable = qcom_smem_get_ptable(smem);
+       if (IS_ERR(ptable))
+               return PTR_ERR(ptable);
+
+       for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+               entry = &ptable->entry[i];
+               host0 = le16_to_cpu(entry->host0);
+               host1 = le16_to_cpu(entry->host1);
+
+               if (host0 == SMEM_GLOBAL_HOST && host0 == host1)
+                       break;
+       }
+
+       if (!entry) {
+               dev_err(smem->dev, "Missing entry for global partition\n");
+               return -EINVAL;
+       }
+
+       if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
+               dev_err(smem->dev, "Invalid entry for global partition\n");
+               return -EINVAL;
+       }
+
+       if (smem->global_partition) {
+               dev_err(smem->dev, "Already found the global partition\n");
+               return -EINVAL;
+       }
+
+       header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+       host0 = le16_to_cpu(header->host0);
+       host1 = le16_to_cpu(header->host1);
+
+       if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
+               dev_err(smem->dev, "Global partition has invalid magic\n");
+               return -EINVAL;
+       }
+
+       if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) {
+               dev_err(smem->dev, "Global partition hosts are invalid\n");
+               return -EINVAL;
+       }
+
+       if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
+               dev_err(smem->dev, "Global partition has invalid size\n");
                return -EINVAL;
        }
 
+       size = le32_to_cpu(header->offset_free_uncached);
+       if (size > le32_to_cpu(header->size)) {
+               dev_err(smem->dev,
+                       "Global partition has invalid free pointer\n");
+               return -EINVAL;
+       }
+
+       smem->global_partition = header;
+       smem->global_cacheline = le32_to_cpu(entry->cacheline);
+
+       return 0;
+}
+
+static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
+                                         unsigned int local_host)
+{
+       struct smem_partition_header *header;
+       struct smem_ptable_entry *entry;
+       struct smem_ptable *ptable;
+       unsigned int remote_host;
+       u32 host0, host1;
+       int i;
+
+       ptable = qcom_smem_get_ptable(smem);
+       if (IS_ERR(ptable))
+               return PTR_ERR(ptable);
+
        for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
                entry = &ptable->entry[i];
                host0 = le16_to_cpu(entry->host0);
@@ -773,13 +878,20 @@ static int qcom_smem_probe(struct platform_device *pdev)
        }
 
        version = qcom_smem_get_sbl_version(smem);
-       if (version >> 16 != SMEM_EXPECTED_VERSION) {
+       switch (version >> 16) {
+       case SMEM_GLOBAL_PART_VERSION:
+               ret = qcom_smem_set_global_partition(smem);
+               if (ret < 0)
+                       return ret;
+       case SMEM_GLOBAL_HEAP_VERSION:
+               break;
+       default:
                dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
                return -EINVAL;
        }
 
        ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
-       if (ret < 0)
+       if (ret < 0 && ret != -ENOENT)
                return ret;
 
        hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
...
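
For reference when reading the large hunk above, these are the partition-table and partition-header layouts that qcom_smem_set_global_partition() walks. This is a sketch, not part of the patch: the field names match what the diff accesses (offset, size, host0/host1, cacheline, magic, offset_free_uncached/cached), but the flags field and the reserved-field widths are reconstructed from memory and should be verified against drivers/soc/qcom/smem.c:

        /* Layout sketch; widths of the reserved arrays are assumptions. */
        struct smem_ptable_entry {
                __le32 offset;          /* partition offset from region base */
                __le32 size;            /* partition size in bytes */
                __le32 flags;
                __le16 host0;           /* SMEM_GLOBAL_HOST for the global partition */
                __le16 host1;           /* must equal host0 for the global partition */
                __le32 cacheline;       /* cacheline size used for cached items */
                __le32 reserved[7];
        };

        struct smem_ptable {
                u8 magic[4];            /* SMEM_PTABLE_MAGIC */
                __le32 version;         /* only version 1 is supported */
                __le32 num_entries;
                __le32 reserved[5];
                struct smem_ptable_entry entry[];
        };

        struct smem_partition_header {
                u8 magic[4];            /* SMEM_PART_MAGIC */
                __le16 host0;
                __le16 host1;
                __le32 size;            /* must match the ptable entry's size */
                __le32 offset_free_uncached;    /* free pointer, grows upward */
                __le32 offset_free_cached;      /* free pointer, grows downward */
                __le32 reserved[3];
        };

Note the lookup order the patch establishes in qcom_smem_alloc() and qcom_smem_get(): a matching host-specific partition wins, then the v12 global partition, and only then the legacy global heap. The deliberate fall-through from SMEM_GLOBAL_PART_VERSION to SMEM_GLOBAL_HEAP_VERSION in the probe switch, together with tolerating -ENOENT from qcom_smem_enumerate_partitions(), lets a v12 system run the same partition-enumeration path as v11.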