Commit f7c8ce44 authored by David Hildenbrand, committed by Linus Torvalds

mm/vmalloc: remove vwrite()

The last user (/dev/kmem) is gone. Let's drop it.

Link: https://lkml.kernel.org/r/20210324102351.6932-4-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: huang ying <huang.ying.caritas@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f2e762ba
...@@ -229,7 +229,6 @@ static inline void set_vm_flush_reset_perms(void *addr) ...@@ -229,7 +229,6 @@ static inline void set_vm_flush_reset_perms(void *addr)
/* for /proc/kcore */ /* for /proc/kcore */
extern long vread(char *buf, char *addr, unsigned long count); extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
/* /*
* Internals. Dont't use.. * Internals. Dont't use..
......
...@@ -210,16 +210,6 @@ long vread(char *buf, char *addr, unsigned long count) ...@@ -210,16 +210,6 @@ long vread(char *buf, char *addr, unsigned long count)
return count; return count;
} }
/*
 * Minimal vwrite() for the !MMU case: the vmalloc region is directly
 * mapped, so writing is a plain memcpy().  @count is clamped so that
 * @addr + @count cannot wrap around the top of the address space.
 * Returns the number of bytes copied.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	unsigned long dst = (unsigned long) addr;

	/* Don't allow overflow: clamp the range at the top of memory. */
	if (dst + count < count)
		count = -dst;

	memcpy(addr, buf, count);
	return count;
}
/* /*
* vmalloc - allocate virtually contiguous memory * vmalloc - allocate virtually contiguous memory
* *
......
...@@ -3146,10 +3146,7 @@ static int aligned_vread(char *buf, char *addr, unsigned long count) ...@@ -3146,10 +3146,7 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
* kmap() and get small overhead in this access function. * kmap() and get small overhead in this access function.
*/ */
if (p) { if (p) {
/* /* We can expect USER0 is not used -- see vread() */
* we can expect USER0 is not used (see vread/vwrite's
* function description)
*/
void *map = kmap_atomic(p); void *map = kmap_atomic(p);
memcpy(buf, map + offset, length); memcpy(buf, map + offset, length);
kunmap_atomic(map); kunmap_atomic(map);
...@@ -3164,43 +3161,6 @@ static int aligned_vread(char *buf, char *addr, unsigned long count) ...@@ -3164,43 +3161,6 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
return copied; return copied;
} }
/*
 * Copy @count bytes from @buf into the vmalloc'ed region starting at
 * @addr, one page at a time.  Pages that vmalloc_to_page() cannot
 * resolve are skipped, but the skipped bytes are still counted.
 *
 * To do safe access to this _mapped_ area, we would need a lock, but
 * taking one here would add overhead to vmalloc()/vfree() just for
 * this rarely used _debug_ interface.  Instead we use kmap_atomic()
 * and accept a small overhead in this access function.
 *
 * Returns the number of bytes stepped over (always @count).
 */
static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	int copied = 0;

	while (count) {
		unsigned long off = offset_in_page(addr);
		unsigned long chunk = PAGE_SIZE - off;
		struct page *page;

		if (chunk > count)
			chunk = count;

		page = vmalloc_to_page(addr);
		if (page) {
			/* We can expect USER0 is not used -- see vread() */
			void *kaddr = kmap_atomic(page);

			memcpy(kaddr + off, buf, chunk);
			kunmap_atomic(kaddr);
		}

		buf += chunk;
		addr += chunk;
		copied += chunk;
		count -= chunk;
	}

	return copied;
}
/** /**
* vread() - read vmalloc area in a safe way. * vread() - read vmalloc area in a safe way.
* @buf: buffer for reading data * @buf: buffer for reading data
...@@ -3283,80 +3243,6 @@ long vread(char *buf, char *addr, unsigned long count) ...@@ -3283,80 +3243,6 @@ long vread(char *buf, char *addr, unsigned long count)
return buflen; return buflen;
} }
/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf: buffer holding the source data (must be a kernel buffer)
 * @addr: start address inside the vmalloc area to write to
 * @count: number of bytes to be written
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * copies data from @buf to the given @addr.  If the specified range of
 * [addr...addr+count) includes some valid address, data is copied to
 * the proper area.  If there are memory holes, no copy is done into
 * the holes.  An IOREMAP area is treated as a memory hole and no copy
 * is done.
 *
 * If [addr...addr+count) doesn't include any intersection with an
 * alive vm_struct area, returns 0.
 *
 * Note: In usual ops, vwrite() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 *
 * Return: number of bytes for which addr and buf should be
 * increased (same number as @count) or %0 if [addr...addr+count)
 * doesn't include any intersection with a valid vmalloc area
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow: clamp so addr + count cannot wrap. */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		/* Skip areas that have no vm_struct attached. */
		if (!va->vm)
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		/* Area lies entirely below the remaining range: keep walking. */
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;

		/* Hole before this area: advance without copying anything. */
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}

		/* Number of bytes of this area that overlap the request. */
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;

		/* IOREMAP areas are treated as holes: nothing is written. */
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;	/* counts areas written into, not bytes */
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	/* Nothing intersected a writable area. */
	if (!copied)
		return 0;
	return buflen;
}
/** /**
* remap_vmalloc_range_partial - map vmalloc pages to userspace * remap_vmalloc_range_partial - map vmalloc pages to userspace
* @vma: vma to cover * @vma: vma to cover
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment