Commit aa2e7100 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew Morton)

Merge misc fixes from Andrew Morton:
 "A few hotfixes and various leftovers which were awaiting other merges.

  Mainly movement of zram into mm/"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (25 commits)
  memcg: fix mutex not unlocked on memcg_create_kmem_cache fail path
  Documentation/filesystems/vfs.txt: update file_operations documentation
  mm, oom: base root bonus on current usage
  mm: don't lose the SOFT_DIRTY flag on mprotect
  mm/slub.c: fix page->_count corruption (again)
  mm/mempolicy.c: fix mempolicy printing in numa_maps
  zram: remove zram->lock in read path and change it with mutex
  zram: remove workqueue for freeing removed pending slot
  zram: introduce zram->tb_lock
  zram: use atomic operation for stat
  zram: remove unnecessary free
  zram: delay pending free request in read path
  zram: fix race between reset and flushing pending work
  zsmalloc: add maintainers
  zram: add zram maintainers
  zsmalloc: add copyright
  zram: add copyright
  zram: remove old private project comment
  zram: promote zram from staging
  zsmalloc: move it under mm
  ...
parents 2def2ef2 7c094fd6
 zram: Compressed RAM based block devices
 ----------------------------------------
-Project home: http://compcache.googlecode.com/
 * Introduction
 The zram module creates RAM based block devices named /dev/zram<id>
@@ -69,9 +67,5 @@ Following shows a typical sequence of steps for using zram.
 resets the disksize to zero. You must set the disksize again
 before reusing the device.
-Please report any problems at:
-- Mailing list: linux-mm-cc at laptop dot org
-- Issue tracker: http://code.google.com/p/compcache/issues/list
 Nitin Gupta
 ngupta@vflare.org
@@ -1386,8 +1386,8 @@ may allocate from based on an estimation of its current memory and swap use.
 For example, if a task is using all allowed memory, its badness score will be
 1000. If it is using half of its allowed memory, its score will be 500.
-There is an additional factor included in the badness score: root
-processes are given 3% extra memory over other tasks.
+There is an additional factor included in the badness score: the current memory
+and swap usage is discounted by 3% for root processes.
 The amount of "allowed" memory depends on the context in which the oom killer
 was called. If it is due to the memory assigned to the allocating task's cpuset
......
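A minimal sketch of the scoring rule described in the documentation hunk above. The function and variable names are illustrative only, not the kernel's oom_badness() internals; the point is how a 3% discount on current usage differs from the old flat bonus:

    /*
     * Illustrative sketch only (not kernel code): "used" and "allowed" are
     * page counts, "privileged" stands for CAP_SYS_ADMIN.
     */
    static long example_badness(unsigned long used, unsigned long allowed,
                                int privileged, long oom_score_adj)
    {
            long points = used;

            if (!allowed)
                    return 0;

            /* New behaviour: discount 3% of *current* usage for root. */
            if (privileged)
                    points -= (points * 3) / 100;

            /* Normalize to the 0..1000 range and apply oom_score_adj. */
            return points * 1000 / allowed + oom_score_adj;
    }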
@@ -782,7 +782,7 @@ struct file_operations
 ----------------------
 This describes how the VFS can manipulate an open file. As of kernel
-3.5, the following members are defined:
+3.12, the following members are defined:
 struct file_operations {
 struct module *owner;
@@ -803,9 +803,6 @@ struct file_operations {
 int (*aio_fsync) (struct kiocb *, int datasync);
 int (*fasync) (int, struct file *, int);
 int (*lock) (struct file *, int, struct file_lock *);
-ssize_t (*readv) (struct file *, const struct iovec *, unsigned long, loff_t *);
-ssize_t (*writev) (struct file *, const struct iovec *, unsigned long, loff_t *);
-ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t, void *);
 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
 unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 int (*check_flags)(int);
@@ -814,6 +811,7 @@ struct file_operations {
 ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int);
 int (*setlease)(struct file *, long arg, struct file_lock **);
 long (*fallocate)(struct file *, int mode, loff_t offset, loff_t len);
+int (*show_fdinfo)(struct seq_file *m, struct file *f);
 };
 Again, all methods are called without any locks being held, unless
@@ -864,12 +862,6 @@ otherwise noted.
 lock: called by the fcntl(2) system call for F_GETLK, F_SETLK, and F_SETLKW
 commands
-readv: called by the readv(2) system call
-writev: called by the writev(2) system call
-sendfile: called by the sendfile(2) system call
 get_unmapped_area: called by the mmap(2) system call
 check_flags: called by the fcntl(2) system call for F_SETFL command
......
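For orientation on the vfs.txt hunk above, a minimal sketch of how a driver typically fills in struct file_operations today. The demo_* names are hypothetical; the removed readv/writev/sendfile hooks have no counterpart to implement, since the VFS routes those system calls through the remaining read/write (or aio/splice) methods:

    #include <linux/fs.h>
    #include <linux/module.h>

    /* Hypothetical example methods; not part of any real driver. */
    static ssize_t demo_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
    {
            return 0;                       /* always report EOF */
    }

    static ssize_t demo_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos)
    {
            return count;                   /* pretend all data was consumed */
    }

    static const struct file_operations demo_fops = {
            .owner  = THIS_MODULE,
            .read   = demo_read,
            .write  = demo_write,
            .llseek = no_llseek,
    };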
@@ -9740,11 +9740,27 @@ T: Mercurial http://linuxtv.org/hg/v4l-dvb
 S: Odd Fixes
 F: drivers/media/pci/zoran/
+ZRAM COMPRESSED RAM BLOCK DEVICE DRIVER
+M: Minchan Kim <minchan@kernel.org>
+M: Nitin Gupta <ngupta@vflare.org>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/block/zram/
+F: Documentation/blockdev/zram.txt
 ZS DECSTATION Z85C30 SERIAL DRIVER
 M: "Maciej W. Rozycki" <macro@linux-mips.org>
 S: Maintained
 F: drivers/tty/serial/zs.*
+ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
+M: Minchan Kim <minchan@kernel.org>
+M: Nitin Gupta <ngupta@vflare.org>
+L: linux-mm@kvack.org
+S: Maintained
+F: mm/zsmalloc.c
+F: include/linux/zsmalloc.h
 ZSWAP COMPRESSED SWAP CACHING
 M: Seth Jennings <sjenning@linux.vnet.ibm.com>
 L: linux-mm@kvack.org
......
@@ -121,7 +121,8 @@
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
-_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
+_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
+_PAGE_SOFT_DIRTY)
 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
 #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
......
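To see why the extra bit matters, here is a simplified, hedged sketch of the pte_modify() pattern this mask feeds into (abbreviated from the x86 code, not the exact implementation): any pte bit not listed in _PAGE_CHG_MASK is dropped when mprotect() rebuilds the pte, which is exactly how the soft-dirty bit was being lost.

    /* Simplified sketch of the idea behind pte_modify(); not the exact x86 code. */
    static inline pte_t pte_modify_sketch(pte_t pte, pgprot_t newprot)
    {
            pteval_t val = pte_val(pte);

            /* Keep only the bits the mask says must survive a protection change... */
            val &= _PAGE_CHG_MASK;
            /* ...and take everything else from the new protection bits. */
            val |= pgprot_val(newprot) & ~(pteval_t)_PAGE_CHG_MASK;

            return __pte(val);
    }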
@@ -108,6 +108,8 @@ source "drivers/block/paride/Kconfig"
 source "drivers/block/mtip32xx/Kconfig"
+source "drivers/block/zram/Kconfig"
 config BLK_CPQ_DA
 tristate "Compaq SMART2 support"
 depends on PCI && VIRT_TO_BUS && 0
......
@@ -42,6 +42,7 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
 obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
 obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
+obj-$(CONFIG_ZRAM) += zram/
 nvme-y := nvme-core.o nvme-scsi.o
 skd-y := skd_main.o
......
@@ -14,7 +14,6 @@ config ZRAM
 disks and maybe many more.
 See zram.txt for more information.
-Project home: <https://compcache.googlecode.com/>
 config ZRAM_DEBUG
 bool "Compressed RAM block device debug support"
......
@@ -2,6 +2,7 @@
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
+* 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
-* Project home: http://compcache.googlecode.com
 */
 #define KMSG_COMPONENT "zram"
@@ -104,7 +104,7 @@ static ssize_t zero_pages_show(struct device *dev,
 {
 struct zram *zram = dev_to_zram(dev);
-return sprintf(buf, "%u\n", zram->stats.pages_zero);
+return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
 }
 static ssize_t orig_data_size_show(struct device *dev,
@@ -113,7 +113,7 @@ static ssize_t orig_data_size_show(struct device *dev,
 struct zram *zram = dev_to_zram(dev);
 return sprintf(buf, "%llu\n",
-(u64)(zram->stats.pages_stored) << PAGE_SHIFT);
+(u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
 }
 static ssize_t compr_data_size_show(struct device *dev,
@@ -140,6 +140,7 @@ static ssize_t mem_used_total_show(struct device *dev,
 return sprintf(buf, "%llu\n", val);
 }
+/* flag operations needs meta->tb_lock */
 static int zram_test_flag(struct zram_meta *meta, u32 index,
 enum zram_pageflags flag)
 {
@@ -228,6 +229,8 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
 goto free_table;
 }
+rwlock_init(&meta->tb_lock);
+mutex_init(&meta->buffer_lock);
 return meta;
 free_table:
@@ -280,6 +283,7 @@ static void handle_zero_page(struct bio_vec *bvec)
 flush_dcache_page(page);
 }
+/* NOTE: caller should hold meta->tb_lock with write-side */
 static void zram_free_page(struct zram *zram, size_t index)
 {
 struct zram_meta *meta = zram->meta;
@@ -293,21 +297,21 @@ static void zram_free_page(struct zram *zram, size_t index)
 */
 if (zram_test_flag(meta, index, ZRAM_ZERO)) {
 zram_clear_flag(meta, index, ZRAM_ZERO);
-zram->stats.pages_zero--;
+atomic_dec(&zram->stats.pages_zero);
 }
 return;
 }
 if (unlikely(size > max_zpage_size))
-zram->stats.bad_compress--;
+atomic_dec(&zram->stats.bad_compress);
 zs_free(meta->mem_pool, handle);
 if (size <= PAGE_SIZE / 2)
-zram->stats.good_compress--;
+atomic_dec(&zram->stats.good_compress);
 atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
-zram->stats.pages_stored--;
+atomic_dec(&zram->stats.pages_stored);
 meta->table[index].handle = 0;
 meta->table[index].size = 0;
@@ -319,20 +323,26 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 size_t clen = PAGE_SIZE;
 unsigned char *cmem;
 struct zram_meta *meta = zram->meta;
-unsigned long handle = meta->table[index].handle;
+unsigned long handle;
+u16 size;
+read_lock(&meta->tb_lock);
+handle = meta->table[index].handle;
+size = meta->table[index].size;
 if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+read_unlock(&meta->tb_lock);
 clear_page(mem);
 return 0;
 }
 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
-if (meta->table[index].size == PAGE_SIZE)
+if (size == PAGE_SIZE)
 copy_page(mem, cmem);
 else
-ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
-mem, &clen);
+ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
 zs_unmap_object(meta->mem_pool, handle);
+read_unlock(&meta->tb_lock);
 /* Should NEVER happen. Return bio error if it does. */
 if (unlikely(ret != LZO_E_OK)) {
@@ -353,11 +363,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 struct zram_meta *meta = zram->meta;
 page = bvec->bv_page;
+read_lock(&meta->tb_lock);
 if (unlikely(!meta->table[index].handle) ||
 zram_test_flag(meta, index, ZRAM_ZERO)) {
+read_unlock(&meta->tb_lock);
 handle_zero_page(bvec);
 return 0;
 }
+read_unlock(&meta->tb_lock);
 if (is_partial_io(bvec))
 /* Use a temporary buffer to decompress the page */
@@ -400,6 +413,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 struct page *page;
 unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
 struct zram_meta *meta = zram->meta;
+bool locked = false;
 page = bvec->bv_page;
 src = meta->compress_buffer;
@@ -419,6 +433,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 goto out;
 }
+mutex_lock(&meta->buffer_lock);
+locked = true;
 user_mem = kmap_atomic(page);
 if (is_partial_io(bvec)) {
@@ -433,25 +449,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 if (page_zero_filled(uncmem)) {
 kunmap_atomic(user_mem);
 /* Free memory associated with this sector now. */
+write_lock(&zram->meta->tb_lock);
 zram_free_page(zram, index);
-zram->stats.pages_zero++;
 zram_set_flag(meta, index, ZRAM_ZERO);
+write_unlock(&zram->meta->tb_lock);
+atomic_inc(&zram->stats.pages_zero);
 ret = 0;
 goto out;
 }
-/*
- * zram_slot_free_notify could miss free so that let's
- * double check.
- */
-if (unlikely(meta->table[index].handle ||
-zram_test_flag(meta, index, ZRAM_ZERO)))
-zram_free_page(zram, index);
 ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 meta->compress_workmem);
 if (!is_partial_io(bvec)) {
 kunmap_atomic(user_mem);
 user_mem = NULL;
@@ -464,7 +473,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 }
 if (unlikely(clen > max_zpage_size)) {
-zram->stats.bad_compress++;
+atomic_inc(&zram->stats.bad_compress);
 clen = PAGE_SIZE;
 src = NULL;
 if (is_partial_io(bvec))
@@ -494,18 +503,22 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 * Free memory associated with this sector
 * before overwriting unused sectors.
 */
+write_lock(&zram->meta->tb_lock);
 zram_free_page(zram, index);
 meta->table[index].handle = handle;
 meta->table[index].size = clen;
+write_unlock(&zram->meta->tb_lock);
 /* Update stats */
 atomic64_add(clen, &zram->stats.compr_size);
-zram->stats.pages_stored++;
+atomic_inc(&zram->stats.pages_stored);
 if (clen <= PAGE_SIZE / 2)
-zram->stats.good_compress++;
+atomic_inc(&zram->stats.good_compress);
 out:
+if (locked)
+mutex_unlock(&meta->buffer_lock);
 if (is_partial_io(bvec))
 kfree(uncmem);
@@ -514,36 +527,15 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 return ret;
 }
-static void handle_pending_slot_free(struct zram *zram)
-{
-struct zram_slot_free *free_rq;
-spin_lock(&zram->slot_free_lock);
-while (zram->slot_free_rq) {
-free_rq = zram->slot_free_rq;
-zram->slot_free_rq = free_rq->next;
-zram_free_page(zram, free_rq->index);
-kfree(free_rq);
-}
-spin_unlock(&zram->slot_free_lock);
-}
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 int offset, struct bio *bio, int rw)
 {
 int ret;
-if (rw == READ) {
-down_read(&zram->lock);
-handle_pending_slot_free(zram);
+if (rw == READ)
 ret = zram_bvec_read(zram, bvec, index, offset, bio);
-up_read(&zram->lock);
-} else {
-down_write(&zram->lock);
-handle_pending_slot_free(zram);
+else
 ret = zram_bvec_write(zram, bvec, index, offset);
-up_write(&zram->lock);
-}
 return ret;
 }
@@ -553,8 +545,6 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
 size_t index;
 struct zram_meta *meta;
-flush_work(&zram->free_work);
 down_write(&zram->init_lock);
 if (!zram->init_done) {
 up_write(&zram->init_lock);
@@ -762,40 +752,19 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
 bio_io_error(bio);
 }
-static void zram_slot_free(struct work_struct *work)
-{
-struct zram *zram;
-zram = container_of(work, struct zram, free_work);
-down_write(&zram->lock);
-handle_pending_slot_free(zram);
-up_write(&zram->lock);
-}
-static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
-{
-spin_lock(&zram->slot_free_lock);
-free_rq->next = zram->slot_free_rq;
-zram->slot_free_rq = free_rq;
-spin_unlock(&zram->slot_free_lock);
-}
 static void zram_slot_free_notify(struct block_device *bdev,
 unsigned long index)
 {
 struct zram *zram;
-struct zram_slot_free *free_rq;
+struct zram_meta *meta;
 zram = bdev->bd_disk->private_data;
-atomic64_inc(&zram->stats.notify_free);
-free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
-if (!free_rq)
-return;
-free_rq->index = index;
-add_slot_free(zram, free_rq);
-schedule_work(&zram->free_work);
+meta = zram->meta;
+write_lock(&meta->tb_lock);
+zram_free_page(zram, index);
+write_unlock(&meta->tb_lock);
+atomic64_inc(&zram->stats.notify_free);
 }
 static const struct block_device_operations zram_devops = {
@@ -839,13 +808,8 @@ static int create_device(struct zram *zram, int device_id)
 {
 int ret = -ENOMEM;
-init_rwsem(&zram->lock);
 init_rwsem(&zram->init_lock);
-INIT_WORK(&zram->free_work, zram_slot_free);
-spin_lock_init(&zram->slot_free_lock);
-zram->slot_free_rq = NULL;
 zram->queue = blk_alloc_queue(GFP_KERNEL);
 if (!zram->queue) {
 pr_err("Error allocating disk queue for device %d\n",
......
@@ -2,6 +2,7 @@
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
+* 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
-* Project home: http://compcache.googlecode.com
 */
 #ifndef _ZRAM_DRV_H_
@@ -17,8 +17,7 @@
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
-#include "../zsmalloc/zsmalloc.h"
+#include <linux/zsmalloc.h>
 /*
 * Some arbitrary value. This is just to catch
@@ -69,10 +68,6 @@ struct table {
 u8 flags;
 } __aligned(4);
-/*
- * All 64bit fields should only be manipulated by 64bit atomic accessors.
- * All modifications to 32bit counter should be protected by zram->lock.
- */
 struct zram_stats {
 atomic64_t compr_size; /* compressed size of pages stored */
 atomic64_t num_reads; /* failed + successful */
@@ -81,33 +76,23 @@ struct zram_stats {
 atomic64_t failed_writes; /* can happen when memory is too low */
 atomic64_t invalid_io; /* non-page-aligned I/O requests */
 atomic64_t notify_free; /* no. of swap slot free notifications */
-u32 pages_zero; /* no. of zero filled pages */
-u32 pages_stored; /* no. of pages currently stored */
-u32 good_compress; /* % of pages with compression ratio<=50% */
-u32 bad_compress; /* % of pages with compression ratio>=75% */
+atomic_t pages_zero; /* no. of zero filled pages */
+atomic_t pages_stored; /* no. of pages currently stored */
+atomic_t good_compress; /* % of pages with compression ratio<=50% */
+atomic_t bad_compress; /* % of pages with compression ratio>=75% */
 };
 struct zram_meta {
+rwlock_t tb_lock; /* protect table */
 void *compress_workmem;
 void *compress_buffer;
 struct table *table;
 struct zs_pool *mem_pool;
-};
-struct zram_slot_free {
-unsigned long index;
-struct zram_slot_free *next;
+struct mutex buffer_lock; /* protect compress buffers */
 };
 struct zram {
 struct zram_meta *meta;
-struct rw_semaphore lock; /* protect compression buffers, table,
- * 32bit stat counters against concurrent
- * notifications, reads and writes */
-struct work_struct free_work; /* handle pending free request */
-struct zram_slot_free *slot_free_rq; /* list head of free request */
 struct request_queue *queue;
 struct gendisk *disk;
 int init_done;
@@ -118,7 +103,6 @@ struct zram {
 * we can store in a disk.
 */
 u64 disksize; /* bytes */
-spinlock_t slot_free_lock;
 struct zram_stats stats;
 };
......
@@ -150,6 +150,7 @@ int mdiobus_register(struct mii_bus *bus)
 err = device_register(&bus->dev);
 if (err) {
 pr_err("mii_bus %s failed to register\n", bus->id);
+put_device(&bus->dev);
 return -EINVAL;
 }
......
@@ -76,10 +76,6 @@ source "drivers/staging/sep/Kconfig"
 source "drivers/staging/iio/Kconfig"
-source "drivers/staging/zsmalloc/Kconfig"
-source "drivers/staging/zram/Kconfig"
 source "drivers/staging/wlags49_h2/Kconfig"
 source "drivers/staging/wlags49_h25/Kconfig"
......
@@ -32,8 +32,6 @@ obj-$(CONFIG_VT6656) += vt6656/
 obj-$(CONFIG_VME_BUS) += vme/
 obj-$(CONFIG_DX_SEP) += sep/
 obj-$(CONFIG_IIO) += iio/
-obj-$(CONFIG_ZRAM) += zram/
-obj-$(CONFIG_ZSMALLOC) += zsmalloc/
 obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
 obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
 obj-$(CONFIG_FB_SM7XX) += sm7xxfb/
......
-config ZSMALLOC
-bool "Memory allocator for compressed pages"
-depends on MMU
-default n
-help
-zsmalloc is a slab-based memory allocator designed to store
-compressed RAM pages. zsmalloc uses virtual memory mapping
-in order to reduce fragmentation. However, this results in a
-non-standard allocator interface where a handle, not a pointer, is
-returned by an alloc(). This handle must be mapped in order to
-access the allocated space.
-config PGTABLE_MAPPING
-bool "Use page table mapping to access object in zsmalloc"
-depends on ZSMALLOC
-help
-By default, zsmalloc uses a copy-based object mapping method to
-access allocations that span two pages. However, if a particular
-architecture (ex, ARM) performs VM mapping faster than copying,
-then you should select this. This causes zsmalloc to use page table
-mapping rather than copying for object mapping.
-You can check speed with zsmalloc benchmark[1].
-[1] https://github.com/spartacus06/zsmalloc
-zsmalloc-y := zsmalloc-main.o
-obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
@@ -228,7 +228,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
 rc = device_register(&new_ld->dev);
 if (rc) {
-kfree(new_ld);
+put_device(&new_ld->dev);
 return ERR_PTR(rc);
 }
......
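Both driver fixes above (mdiobus_register and lcd_device_register) apply the same documented rule: once device_register() has been called, the embedded struct device owns a reference, so an error path must drop it with put_device() instead of kfree()ing the containing object. A hedged sketch of the pattern, with hypothetical foo_* names:

    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/err.h>

    /* Hypothetical container; foo_device and foo_release are illustrative names. */
    struct foo_device {
            struct device dev;
    };

    static void foo_release(struct device *dev)
    {
            kfree(container_of(dev, struct foo_device, dev));
    }

    static struct foo_device *foo_register(void)
    {
            struct foo_device *fd = kzalloc(sizeof(*fd), GFP_KERNEL);
            int err;

            if (!fd)
                    return ERR_PTR(-ENOMEM);

            fd->dev.release = foo_release;
            dev_set_name(&fd->dev, "foo0");

            err = device_register(&fd->dev);
            if (err) {
                    /* NOT kfree(fd): device_register() took a reference. */
                    put_device(&fd->dev);
                    return ERR_PTR(err);
            }
            return fd;
    }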
@@ -95,10 +95,7 @@ enum rq_cmd_type_bits {
 * as well!
 */
 struct request {
-union {
-struct list_head queuelist;
-struct llist_node ll_list;
-};
+struct list_head queuelist;
 union {
 struct call_single_data csd;
 struct work_struct mq_flush_data;
......
@@ -264,7 +264,7 @@ static inline void * __init memblock_virt_alloc_low(
 {
 if (!align)
 align = SMP_CACHE_BYTES;
-return __alloc_bootmem_low(size, align, BOOTMEM_LOW_LIMIT);
+return __alloc_bootmem_low(size, align, 0);
 }
 static inline void * __init memblock_virt_alloc_low_nopanic(
@@ -272,7 +272,7 @@ static inline void * __init memblock_virt_alloc_low_nopanic(
 {
 if (!align)
 align = SMP_CACHE_BYTES;
-return __alloc_bootmem_low_nopanic(size, align, BOOTMEM_LOW_LIMIT);
+return __alloc_bootmem_low_nopanic(size, align, 0);
 }
 static inline void * __init memblock_virt_alloc_from_nopanic(
......
@@ -11,12 +11,16 @@
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
+#include <linux/llist.h>
 extern void cpu_idle(void);
 typedef void (*smp_call_func_t)(void *info);
 struct call_single_data {
-struct list_head list;
+union {
+struct list_head list;
+struct llist_node llist;
+};
 smp_call_func_t func;
 void *info;
 u16 flags;
......
@@ -2,6 +2,7 @@
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
+* Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
......
@@ -23,17 +23,11 @@ enum {
 struct call_function_data {
 struct call_single_data __percpu *csd;
 cpumask_var_t cpumask;
-cpumask_var_t cpumask_ipi;
 };
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
-struct call_single_queue {
-struct list_head list;
-raw_spinlock_t lock;
-};
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -47,14 +41,8 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 cpu_to_node(cpu)))
 return notifier_from_errno(-ENOMEM);
-if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
-cpu_to_node(cpu))) {
-free_cpumask_var(cfd->cpumask);
-return notifier_from_errno(-ENOMEM);
-}
 cfd->csd = alloc_percpu(struct call_single_data);
 if (!cfd->csd) {
-free_cpumask_var(cfd->cpumask_ipi);
 free_cpumask_var(cfd->cpumask);
 return notifier_from_errno(-ENOMEM);
 }
@@ -67,7 +55,6 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 case CPU_DEAD:
 case CPU_DEAD_FROZEN:
 free_cpumask_var(cfd->cpumask);
-free_cpumask_var(cfd->cpumask_ipi);
 free_percpu(cfd->csd);
 break;
 #endif
@@ -85,12 +72,8 @@ void __init call_function_init(void)
 void *cpu = (void *)(long)smp_processor_id();
 int i;
-for_each_possible_cpu(i) {
-struct call_single_queue *q = &per_cpu(call_single_queue, i);
-raw_spin_lock_init(&q->lock);
-INIT_LIST_HEAD(&q->list);
-}
+for_each_possible_cpu(i)
+init_llist_head(&per_cpu(call_single_queue, i));
 hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
 register_cpu_notifier(&hotplug_cfd_notifier);
@@ -141,18 +124,9 @@ static void csd_unlock(struct call_single_data *csd)
 */
 static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
-struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-unsigned long flags;
-int ipi;
 if (wait)
 csd->flags |= CSD_FLAG_WAIT;
-raw_spin_lock_irqsave(&dst->lock, flags);
-ipi = list_empty(&dst->list);
-list_add_tail(&csd->list, &dst->list);
-raw_spin_unlock_irqrestore(&dst->lock, flags);
 /*
 * The list addition should be visible before sending the IPI
 * handler locks the list to pull the entry off it because of
@@ -164,7 +138,7 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 * locking and barrier primitives. Generic code isn't really
 * equipped to do the right thing...
 */
-if (ipi)
+if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
 arch_send_call_function_single_ipi(cpu);
 if (wait)
@@ -177,27 +151,26 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 */
 void generic_smp_call_function_single_interrupt(void)
 {
-struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-LIST_HEAD(list);
+struct llist_node *entry, *next;
 /*
 * Shouldn't receive this interrupt on a cpu that is not yet online.
 */
 WARN_ON_ONCE(!cpu_online(smp_processor_id()));
-raw_spin_lock(&q->lock);
-list_replace_init(&q->list, &list);
-raw_spin_unlock(&q->lock);
+entry = llist_del_all(&__get_cpu_var(call_single_queue));
+entry = llist_reverse_order(entry);
-while (!list_empty(&list)) {
+while (entry) {
 struct call_single_data *csd;
-csd = list_entry(list.next, struct call_single_data, list);
-list_del(&csd->list);
+next = entry->next;
+csd = llist_entry(entry, struct call_single_data, llist);
 csd->func(csd->info);
 csd_unlock(csd);
+entry = next;
 }
 }
@@ -402,30 +375,17 @@ void smp_call_function_many(const struct cpumask *mask,
 if (unlikely(!cpumask_weight(cfd->cpumask)))
 return;
-/*
- * After we put an entry into the list, cfd->cpumask may be cleared
- * again when another CPU sends another IPI for a SMP function call, so
- * cfd->cpumask will be zero.
- */
-cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
 for_each_cpu(cpu, cfd->cpumask) {
 struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
-struct call_single_queue *dst =
-&per_cpu(call_single_queue, cpu);
-unsigned long flags;
 csd_lock(csd);
 csd->func = func;
 csd->info = info;
-raw_spin_lock_irqsave(&dst->lock, flags);
-list_add_tail(&csd->list, &dst->list);
-raw_spin_unlock_irqrestore(&dst->lock, flags);
+llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
 }
 /* Send a message to all CPUs in the map */
-arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
+arch_send_call_function_ipi_mask(cfd->cpumask);
 if (wait) {
 for_each_cpu(cpu, cfd->cpumask) {
......
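The kernel/smp.c conversion above replaces a spinlock-protected list with a lock-free llist: producers push with llist_add(), whose return value says whether the list was previously empty (so only the first producer raises the IPI), and the consumer drains the whole batch with llist_del_all() and restores FIFO order with llist_reverse_order(). A hedged, self-contained sketch of that pattern outside the IPI path, using hypothetical work_item/queue names:

    #include <linux/llist.h>
    #include <linux/slab.h>

    /* Hypothetical type; only the llist calls mirror the pattern above. */
    struct work_item {
            struct llist_node llnode;
            int payload;
    };

    static LLIST_HEAD(pending);

    /* Producer: returns true when the consumer needs a kick (list was empty). */
    static bool queue_item(struct work_item *item)
    {
            return llist_add(&item->llnode, &pending);
    }

    /* Consumer: drain everything in one shot, oldest entry first. */
    static void drain_items(void)
    {
            struct llist_node *node = llist_del_all(&pending);

            node = llist_reverse_order(node);       /* LIFO -> FIFO */
            while (node) {
                    struct work_item *item =
                            llist_entry(node, struct work_item, llnode);

                    node = node->next;              /* read before item is freed */
                    /* process item->payload here */
                    kfree(item);
            }
    }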
@@ -552,3 +552,28 @@ config MEM_SOFT_DIRTY
 it can be cleared by hands.
 See Documentation/vm/soft-dirty.txt for more details.
+config ZSMALLOC
+bool "Memory allocator for compressed pages"
+depends on MMU
+default n
+help
+zsmalloc is a slab-based memory allocator designed to store
+compressed RAM pages. zsmalloc uses virtual memory mapping
+in order to reduce fragmentation. However, this results in a
+non-standard allocator interface where a handle, not a pointer, is
+returned by an alloc(). This handle must be mapped in order to
+access the allocated space.
+config PGTABLE_MAPPING
+bool "Use page table mapping to access object in zsmalloc"
+depends on ZSMALLOC
+help
+By default, zsmalloc uses a copy-based object mapping method to
+access allocations that span two pages. However, if a particular
+architecture (ex, ARM) performs VM mapping faster than copying,
+then you should select this. This causes zsmalloc to use page table
+mapping rather than copying for object mapping.
+You can check speed with zsmalloc benchmark[1].
+[1] https://github.com/spartacus06/zsmalloc
@@ -60,3 +60,4 @@ obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
 obj-$(CONFIG_CLEANCACHE) += cleancache.o
 obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
 obj-$(CONFIG_ZBUD) += zbud.o
+obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
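The ZSMALLOC help text above describes the allocator's one unusual property: zs_malloc() hands back an opaque handle rather than a pointer, and the object must be mapped before it can be touched. A minimal usage sketch, assuming the zsmalloc API as it exists at this point in the tree (zs_create_pool() taking only a gfp mask); error handling is trimmed and the buffer contents are illustrative:

    #include <linux/zsmalloc.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    static int zsmalloc_demo(void)
    {
            struct zs_pool *pool;
            unsigned long handle;
            void *obj;

            pool = zs_create_pool(GFP_KERNEL);
            if (!pool)
                    return -ENOMEM;

            /* The return value is a handle, not a dereferenceable pointer. */
            handle = zs_malloc(pool, 128);
            if (!handle) {
                    zs_destroy_pool(pool);
                    return -ENOMEM;
            }

            /* Map the handle to get a usable kernel virtual address... */
            obj = zs_map_object(pool, handle, ZS_MM_WO);
            memset(obj, 0xaa, 128);
            /* ...and unmap again promptly; the mapping is meant to be short-lived. */
            zs_unmap_object(pool, handle);

            zs_free(pool, handle);
            zs_destroy_pool(pool);
            return 0;
    }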
@@ -3400,7 +3400,7 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
 static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 struct kmem_cache *s)
 {
-struct kmem_cache *new;
+struct kmem_cache *new = NULL;
 static char *tmp_name = NULL;
 static DEFINE_MUTEX(mutex); /* protects tmp_name */
@@ -3416,7 +3416,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 if (!tmp_name) {
 tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
 if (!tmp_name)
-return NULL;
+goto out;
 }
 rcu_read_lock();
@@ -3426,12 +3426,11 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
 (s->flags & ~SLAB_PANIC), s->ctor, s);
 if (new)
 new->allocflags |= __GFP_KMEMCG;
 else
 new = s;
+out:
 mutex_unlock(&mutex);
 return new;
 }
......
@@ -2930,7 +2930,7 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 unsigned short mode = MPOL_DEFAULT;
 unsigned short flags = 0;
-if (pol && pol != &default_policy) {
+if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
 mode = pol->mode;
 flags = pol->flags;
 }
......
@@ -178,7 +178,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 * implementation used by LSMs.
 */
 if (has_capability_noaudit(p, CAP_SYS_ADMIN))
-adj -= 30;
+points -= (points * 3) / 100;
 /* Normalize to oom_score_adj units */
 adj *= totalpages / 1000;
......
@@ -355,6 +355,21 @@ static __always_inline void slab_unlock(struct page *page)
 __bit_spin_unlock(PG_locked, &page->flags);
 }
+static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
+{
+struct page tmp;
+tmp.counters = counters_new;
+/*
+ * page->counters can cover frozen/inuse/objects as well
+ * as page->_count. If we assign to ->counters directly
+ * we run the risk of losing updates to page->_count, so
+ * be careful and only assign to the fields we need.
+ */
+page->frozen = tmp.frozen;
+page->inuse = tmp.inuse;
+page->objects = tmp.objects;
+}
 /* Interrupts must be disabled (for the fallback code to work right) */
 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 void *freelist_old, unsigned long counters_old,
@@ -376,7 +391,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 if (page->freelist == freelist_old &&
 page->counters == counters_old) {
 page->freelist = freelist_new;
-page->counters = counters_new;
+set_page_slub_counters(page, counters_new);
 slab_unlock(page);
 return 1;
 }
@@ -415,7 +430,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 if (page->freelist == freelist_old &&
 page->counters == counters_old) {
 page->freelist = freelist_new;
-page->counters = counters_new;
+set_page_slub_counters(page, counters_new);
 slab_unlock(page);
 local_irq_restore(flags);
 return 1;
......
@@ -2,6 +2,7 @@
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
+* Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
@@ -90,8 +91,7 @@
 #include <linux/hardirq.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
-#include "zsmalloc.h"
+#include <linux/zsmalloc.h>
 /*
 * This must be power of 2 and greater than of equal to sizeof(link_free).
......