Commit 6081089f authored by Tejun Heo

percpu: reorganize chunk creation and destruction

Reorganize alloc/free_pcpu_chunk() such that chunk struct alloc/free
live in pcpu_alloc/free_chunk() and the rest in
pcpu_create/destroy_chunk().  While at it, add missing error handling
for chunk->map allocation failure.

This is to allow an alternate chunk management implementation for
percpu nommu support.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: David Howells <dhowells@redhat.com>
Cc: Graff Yang <graff.yang@gmail.com>
Cc: Sonic Zhang <sonic.adi@gmail.com>
parent 020ec653
@@ -636,6 +636,38 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
 	pcpu_chunk_relocate(chunk, oslot);
 }
 
+static struct pcpu_chunk *pcpu_alloc_chunk(void)
+{
+	struct pcpu_chunk *chunk;
+
+	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
+	if (!chunk)
+		return NULL;
+
+	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
+	if (!chunk->map) {
+		kfree(chunk);
+		return NULL;
+	}
+
+	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
+	chunk->map[chunk->map_used++] = pcpu_unit_size;
+
+	INIT_LIST_HEAD(&chunk->list);
+	chunk->free_size = pcpu_unit_size;
+	chunk->contig_hint = pcpu_unit_size;
+
+	return chunk;
+}
+
+static void pcpu_free_chunk(struct pcpu_chunk *chunk)
+{
+	if (!chunk)
+		return;
+	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
+	kfree(chunk);
+}
+
 /**
  * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
  * @chunk: chunk of interest
@@ -1028,41 +1060,31 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	return rc;
 }
 
-static void free_pcpu_chunk(struct pcpu_chunk *chunk)
+static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
 {
-	if (!chunk)
-		return;
-
-	if (chunk->vms)
+	if (chunk && chunk->vms)
 		pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
-	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
-	kfree(chunk);
+	pcpu_free_chunk(chunk);
 }
 
-static struct pcpu_chunk *alloc_pcpu_chunk(void)
+static struct pcpu_chunk *pcpu_create_chunk(void)
 {
 	struct pcpu_chunk *chunk;
+	struct vm_struct **vms;
 
-	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
+	chunk = pcpu_alloc_chunk();
 	if (!chunk)
 		return NULL;
 
-	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
-	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
-	chunk->map[chunk->map_used++] = pcpu_unit_size;
-
-	chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
-				       pcpu_nr_groups, pcpu_atom_size,
-				       GFP_KERNEL);
-	if (!chunk->vms) {
-		free_pcpu_chunk(chunk);
+	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
+				pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL);
+	if (!vms) {
+		pcpu_free_chunk(chunk);
 		return NULL;
 	}
 
-	INIT_LIST_HEAD(&chunk->list);
-	chunk->free_size = pcpu_unit_size;
-	chunk->contig_hint = pcpu_unit_size;
-	chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0];
+	chunk->vms = vms;
+	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
 	return chunk;
 }
 
@@ -1155,7 +1177,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 	/* hmmm... no space left, create a new chunk */
 	spin_unlock_irqrestore(&pcpu_lock, flags);
 
-	chunk = alloc_pcpu_chunk();
+	chunk = pcpu_create_chunk();
 	if (!chunk) {
 		err = "failed to allocate new chunk";
 		goto fail_unlock_mutex;
@@ -1267,7 +1289,7 @@ static void pcpu_reclaim(struct work_struct *work)
 	list_for_each_entry_safe(chunk, next, &todo, list) {
 		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
-		free_pcpu_chunk(chunk);
+		pcpu_destroy_chunk(chunk);
 	}
 
 	mutex_unlock(&pcpu_alloc_mutex);
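For context, here is a minimal sketch of the kind of alternate backend this split enables: a hypothetical nommu-style pcpu_create_chunk()/pcpu_destroy_chunk() pair, modeled loosely on what later became mm/percpu-km.c. It is a sketch only; the chunk->data field and the pcpu_set_page_chunk() helper are assumptions, not part of this patch. The point is that a backend now supplies only the backing memory, while the chunk struct lifecycle stays in the shared pcpu_alloc_chunk()/pcpu_free_chunk():

static struct pcpu_chunk *pcpu_create_chunk(void)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	int i;

	/* shared chunk struct setup introduced by this patch */
	chunk = pcpu_alloc_chunk();
	if (!chunk)
		return NULL;

	/* back the chunk with physically contiguous pages instead of vmalloc space */
	pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);	/* assumed helper */

	chunk->data = pages;	/* assumed backend-private field */
	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
	return chunk;
}

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (chunk && chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	/* shared chunk struct teardown introduced by this patch */
	pcpu_free_chunk(chunk);
}

With the vmalloc-specific state confined to the create/destroy pair, callers such as pcpu_alloc() and pcpu_reclaim() in the hunks above would need no changes to work with a backend like this.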