Commit 1d3da3fe authored by Alan Cox, committed by Greg Kroah-Hartman

atomisp: USE_KMEM_CACHE is always defined so remove the dead code

Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 2267a750
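
Since USE_KMEM_CACHE was always defined by the Makefile, the #else branches in the hunks below were dead code; the commit deletes them and makes the slab-cache paths unconditional. For orientation, here is a minimal sketch of the kmem_cache lifecycle those paths rely on. The struct layout, cache name, and flags mirror the driver, but the helper functions are illustrative, not the driver's own:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative page descriptor; the real struct hmm_page lives in the driver. */
struct hmm_page {
        struct page *page;
        struct list_head list;
};

static struct kmem_cache *pgptr_cache;

/* Create the cache once, at pool-init time. */
static int pool_init_sketch(void)
{
        pgptr_cache = kmem_cache_create("pgptr_cache",
                                        sizeof(struct hmm_page), 0,
                                        SLAB_HWCACHE_ALIGN, NULL);
        return pgptr_cache ? 0 : -ENOMEM;
}

/* Per-object allocation and free against the cache. */
static struct hmm_page *page_desc_alloc(void)
{
        return kmem_cache_zalloc(pgptr_cache, GFP_KERNEL); /* zeroed; may sleep */
}

static void page_desc_free(struct hmm_page *hmm_page)
{
        kmem_cache_free(pgptr_cache, hmm_page);
}

/* Destroy the cache at pool-exit time; every object must be freed first. */
static void pool_exit_sketch(void)
{
        kmem_cache_destroy(pgptr_cache);
}

This ordering matches hmm_dynamic_pool_exit() below, which returns every queued hmm_page with kmem_cache_free() before calling kmem_cache_destroy(), since destroying a cache that still holds live objects is a bug.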
@@ -371,7 +371,7 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
 #DEFINES += -DUSE_INTERRUPTS
 #DEFINES += -DUSE_SSSE3
 #DEFINES += -DPUNIT_CAMERA_BUSY
-DEFINES += -DUSE_KMEM_CACHE
+#DEFINES += -DUSE_KMEM_CACHE
 DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0
 DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400

@@ -60,11 +60,7 @@ static unsigned int get_pages_from_dynamic_pool(void *pool,
                 page_obj[i].page = hmm_page->page;
                 page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC;
-#ifdef USE_KMEM_CACHE
                 kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
-#else
-                atomisp_kernel_free(hmm_page);
-#endif
                 if (i == size)
                         return i;
@@ -117,12 +113,8 @@ static void free_pages_to_dynamic_pool(void *pool,
                 }
                 return;
         }
-#ifdef USE_KMEM_CACHE
         hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
                                      GFP_KERNEL);
-#else
-        hmm_page = atomisp_kernel_malloc(sizeof(struct hmm_page));
-#endif
         if (!hmm_page) {
                 dev_err(atomisp_dev, "out of memory for hmm_page.\n");
@@ -164,7 +156,6 @@ static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
                 return -ENOMEM;
         }
-#ifdef USE_KMEM_CACHE
         dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
                                         sizeof(struct hmm_page), 0,
                                         SLAB_HWCACHE_ALIGN, NULL);

@@ -172,7 +163,6 @@ static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
                 atomisp_kernel_free(dypool_info);
                 return -ENOMEM;
         }
-#endif
         INIT_LIST_HEAD(&dypool_info->pages_list);
         spin_lock_init(&dypool_info->list_lock);
@@ -219,19 +209,13 @@ static void hmm_dynamic_pool_exit(void **pool)
                         hmm_mem_stat.dyc_size--;
                         hmm_mem_stat.sys_size--;
                 }
-#ifdef USE_KMEM_CACHE
                 kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
-#else
-                atomisp_kernel_free(hmm_page);
-#endif
                 spin_lock_irqsave(&dypool_info->list_lock, flags);
         }
         spin_unlock_irqrestore(&dypool_info->list_lock, flags);
-#ifdef USE_KMEM_CACHE
         kmem_cache_destroy(dypool_info->pgptr_cache);
-#endif
         atomisp_kernel_free(dypool_info);

@@ -101,9 +101,7 @@ struct hmm_dynamic_pool_info {
         /* list lock is used to protect the free pages block lists */
         spinlock_t list_lock;
-#ifdef USE_KMEM_CACHE
         struct kmem_cache *pgptr_cache;
-#endif
         bool initialized;
         unsigned int pool_size;

@@ -119,9 +119,7 @@ struct isp_mmu {
         phys_addr_t base_address;
         struct mutex pt_mutex;
-#ifdef USE_KMEM_CACHE
         struct kmem_cache *tbl_cache;
-#endif
 };

 /* flags for PDE and PTE */

@@ -103,14 +103,10 @@ static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
          * The slab allocator(kmem_cache and kmalloc family) doesn't handle
          * GFP_DMA32 flag, so we have to use buddy allocator.
          */
-#ifdef USE_KMEM_CACHE
         if (totalram_pages > (unsigned long)NR_PAGES_2GB)
                 virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
         else
                 virt = kmem_cache_zalloc(mmu->tbl_cache, GFP_KERNEL);
-#else
-        virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
-#endif
         if (!virt)
                 return (phys_addr_t)NULL_PAGE;
@@ -144,11 +140,7 @@ static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
         set_memory_wb((unsigned long)virt, 1);
 #endif
-#ifdef USE_KMEM_CACHE
         kmem_cache_free(mmu->tbl_cache, virt);
-#else
-        free_page((unsigned long)virt);
-#endif
 }

 static void mmu_remap_error(struct isp_mmu *mmu,
@@ -559,13 +551,11 @@ int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
         mutex_init(&mmu->pt_mutex);
-#ifdef USE_KMEM_CACHE
         mmu->tbl_cache = kmem_cache_create("iopte_cache", ISP_PAGE_SIZE,
                                            ISP_PAGE_SIZE, SLAB_HWCACHE_ALIGN,
                                            NULL);
         if (!mmu->tbl_cache)
                 return -ENOMEM;
-#endif

         return 0;
 }
@@ -600,7 +590,5 @@ void isp_mmu_exit(struct isp_mmu *mmu)
         free_page_table(mmu, l1_pt);
-#ifdef USE_KMEM_CACHE
         kmem_cache_destroy(mmu->tbl_cache);
-#endif
 }
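
One hunk above deserves a note: alloc_page_table() keeps a buddy-allocator path even after the cleanup, because the slab allocator ignores GFP_DMA32 and the ISP's page tables must stay 32-bit addressable on machines whose RAM extends past the low 4 GB. The sketch below restates that split with an explicit origin flag so the matching free is visible in isolation; the flag, the helper names, and the NR_PAGES_2GB definition are illustrative assumptions, not the driver's code (whose free_page_table(), as the diff shows, calls kmem_cache_free() unconditionally):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Assumed to match the driver's threshold: the page count of 2 GB of RAM. */
#define NR_PAGES_2GB (SZ_2G >> PAGE_SHIFT)

/*
 * Sketch of the split in alloc_page_table(): slab ignores GFP_DMA32, so
 * when RAM may extend past 4 GB the page-table page must come straight
 * from the buddy allocator to be guaranteed 32-bit addressable.
 * totalram_pages is the kernel's global page count, as used in the diff.
 */
static void *pt_alloc_sketch(struct kmem_cache *tbl_cache, bool *from_buddy)
{
        if (totalram_pages > (unsigned long)NR_PAGES_2GB) {
                *from_buddy = true;
                return (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
        }
        *from_buddy = false;
        return kmem_cache_zalloc(tbl_cache, GFP_KERNEL);
}

/* A free must use the same allocator that produced the page. */
static void pt_free_sketch(struct kmem_cache *tbl_cache, void *virt,
                           bool from_buddy)
{
        if (from_buddy)
                free_page((unsigned long)virt);
        else
                kmem_cache_free(tbl_cache, virt);
}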