Commit 9b04c5fe
Authored Nov 25, 2011 by Cong Wang
Committed by Cong Wang, Mar 20, 2012
mm: remove the second argument of k[un]map_atomic()
Signed-off-by: Cong Wang <amwang@redhat.com>
parent c3eede8e
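For orientation before the per-file hunks: this commit drops the explicit KM_* slot argument from kmap_atomic()/kunmap_atomic(), leaving slot management to the implementation. Below is a minimal sketch of the conversion pattern; the helper names and variables are hypothetical, not taken from this diff.

#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic() */
#include <linux/string.h>	/* memcpy() */

/* Old calling convention: the caller names a kmap slot explicitly.
 * (Pre-commit form; it no longer compiles after this series.) */
static void copy_from_page_old(struct page *page, void *buf)
{
	void *vaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}

/* New calling convention: no slot argument. Mappings still nest, so
 * when two pages are mapped at once (see memcmp_pages() in mm/ksm.c
 * below) they must be unmapped in reverse order. */
static void copy_from_page_new(struct page *page, void *buf)
{
	void *vaddr = kmap_atomic(page);
	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);
}

On !CONFIG_HIGHMEM builds both forms reduce to page_address(), so there the change is purely an API simplification.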
Showing 7 changed files with 35 additions and 35 deletions (+35 -35):
 mm/bounce.c   | +2  -2
 mm/filemap.c  | +4  -4
 mm/ksm.c      | +6  -6
 mm/memory.c   | +2  -2
 mm/shmem.c    | +2  -2
 mm/swapfile.c | +15 -15
 mm/vmalloc.c  | +4  -4
mm/bounce.c
...
@@ -50,9 +50,9 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
 	unsigned char *vto;

 	local_irq_save(flags);
-	vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
+	vto = kmap_atomic(to->bv_page);
 	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
-	kunmap_atomic(vto, KM_BOUNCE_READ);
+	kunmap_atomic(vto);
 	local_irq_restore(flags);
 }
...
mm/filemap.c
...
@@ -1318,10 +1318,10 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
 	 * taking the kmap.
 	 */
 	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		left = __copy_to_user_inatomic(desc->arg.buf,
 						kaddr + offset, size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		if (left == 0)
 			goto success;
 	}
...
@@ -2045,7 +2045,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 	size_t copied;

 	BUG_ON(!in_atomic());
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
...
@@ -2055,7 +2055,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
 						i->iov, i->iov_offset, bytes);
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	return copied;
 }
...
mm/ksm.c
...
@@ -672,9 +672,9 @@ static int unmerge_and_remove_all_rmap_items(void)
 static u32 calc_checksum(struct page *page)
 {
 	u32 checksum;
-	void *addr = kmap_atomic(page, KM_USER0);
+	void *addr = kmap_atomic(page);
 	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
-	kunmap_atomic(addr, KM_USER0);
+	kunmap_atomic(addr);
 	return checksum;
 }
...
@@ -683,11 +683,11 @@ static int memcmp_pages(struct page *page1, struct page *page2)
 	char *addr1, *addr2;
 	int ret;

-	addr1 = kmap_atomic(page1, KM_USER0);
-	addr2 = kmap_atomic(page2, KM_USER1);
+	addr1 = kmap_atomic(page1);
+	addr2 = kmap_atomic(page2);
 	ret = memcmp(addr1, addr2, PAGE_SIZE);
-	kunmap_atomic(addr2, KM_USER1);
-	kunmap_atomic(addr1, KM_USER0);
+	kunmap_atomic(addr2);
+	kunmap_atomic(addr1);
 	return ret;
 }
...
mm/memory.c
...
@@ -2447,7 +2447,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 	 * fails, we just zero-fill it. Live with it.
 	 */
 	if (unlikely(!src)) {
-		void *kaddr = kmap_atomic(dst, KM_USER0);
+		void *kaddr = kmap_atomic(dst);
 		void __user *uaddr = (void __user *)(va & PAGE_MASK);

 		/*
...
@@ -2458,7 +2458,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 		 */
 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
 			clear_page(kaddr);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		flush_dcache_page(dst);
 	} else
 		copy_user_highpage(dst, src, va, vma);
...
mm/shmem.c
...
@@ -1656,9 +1656,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 		}
 		inode->i_mapping->a_ops = &shmem_aops;
 		inode->i_op = &shmem_symlink_inode_operations;
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memcpy(kaddr, symname, len);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		set_page_dirty(page);
 		unlock_page(page);
 		page_cache_release(page);
...
mm/swapfile.c
...
@@ -2427,9 +2427,9 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
 	if (!(count & COUNT_CONTINUED))
 		goto out;

-	map = kmap_atomic(list_page, KM_USER0) + offset;
+	map = kmap_atomic(list_page) + offset;
 	count = *map;
-	kunmap_atomic(map, KM_USER0);
+	kunmap_atomic(map);

 	/*
 	 * If this continuation count now has some space in it,
...
@@ -2472,7 +2472,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
 	offset &= ~PAGE_MASK;
 	page = list_entry(head->lru.next, struct page, lru);
-	map = kmap_atomic(page, KM_USER0) + offset;
+	map = kmap_atomic(page) + offset;

 	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
 		goto init_map;	/* jump over SWAP_CONT_MAX checks */
...
@@ -2482,26 +2482,26 @@ static bool swap_count_continued(struct swap_info_struct *si,
 		 * Think of how you add 1 to 999
 		 */
 		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.next, struct page, lru);
 			BUG_ON(page == head);
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 		}
 		if (*map == SWAP_CONT_MAX) {
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.next, struct page, lru);
 			if (page == head)
 				return false;	/* add count continuation */
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 init_map:		*map = 0;		/* we didn't zero the page */
 		}
 		*map += 1;
-		kunmap_atomic(map, KM_USER0);
+		kunmap_atomic(map);
 		page = list_entry(page->lru.prev, struct page, lru);
 		while (page != head) {
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 			*map = COUNT_CONTINUED;
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.prev, struct page, lru);
 		}
 		return true;			/* incremented */
...
@@ -2512,22 +2512,22 @@ init_map: *map = 0; /* we didn't zero the page */
 		 */
 		BUG_ON(count != COUNT_CONTINUED);
 		while (*map == COUNT_CONTINUED) {
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.next, struct page, lru);
 			BUG_ON(page == head);
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 		}
 		BUG_ON(*map == 0);
 		*map -= 1;
 		if (*map == 0)
 			count = 0;
-		kunmap_atomic(map, KM_USER0);
+		kunmap_atomic(map);
 		page = list_entry(page->lru.prev, struct page, lru);
 		while (page != head) {
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 			*map = SWAP_CONT_MAX | count;
 			count = COUNT_CONTINUED;
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.prev, struct page, lru);
 		}
 		return count == COUNT_CONTINUED;
...
mm/vmalloc.c
...
@@ -1906,9 +1906,9 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
 			 * we can expect USER0 is not used (see vread/vwrite's
 			 * function description)
 			 */
-			void *map = kmap_atomic(p, KM_USER0);
+			void *map = kmap_atomic(p);
 			memcpy(buf, map + offset, length);
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 		} else
 			memset(buf, 0, length);
...
@@ -1945,9 +1945,9 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
 			 * we can expect USER0 is not used (see vread/vwrite's
 			 * function description)
 			 */
-			void *map = kmap_atomic(p, KM_USER0);
+			void *map = kmap_atomic(p);
 			memcpy(map + offset, buf, length);
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 		}
 		addr += length;
 		buf += length;
...