nexedi / linux · Commits

Commit 31151ba2
Authored Aug 28, 2005 by James Bottomley

    fix mismerge in ll_rw_blk.c

Parents: 3d52acb3, 73747aed

Showing 7 changed files with 404 additions and 108 deletions (+404, -108)
drivers/block/ll_rw_blk.c     +163  -29
drivers/block/scsi_ioctl.c     +36  -24
drivers/cdrom/cdrom.c          +10   -5
drivers/ide/ide-disk.c          +1   -1
fs/bio.c                      +182  -45
include/linux/bio.h             +6   -0
include/linux/blkdev.h          +6   -4
drivers/block/ll_rw_blk.c
@@ -284,6 +284,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
 	rq->special = NULL;
 	rq->data_len = 0;
 	rq->data = NULL;
 	rq->nr_phys_segments = 0;
 	rq->sense = NULL;
 	rq->end_io = NULL;
+	rq->end_io_data = NULL;
@@ -2115,7 +2116,7 @@ EXPORT_SYMBOL(blk_insert_request);
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q:		request queue where request should be inserted
- * @rw:		READ or WRITE data
+ * @rq:		request structure to fill
  * @ubuf:	the user buffer
  * @len:	length of user data
  *
@@ -2132,21 +2133,19 @@ EXPORT_SYMBOL(blk_insert_request);
  *    original bio must be passed back in to blk_rq_unmap_user() for proper
  *    unmapping.
  */
-struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
-				unsigned int len)
+int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
+		    unsigned int len)
 {
 	unsigned long uaddr;
-	struct request *rq;
 	struct bio *bio;
+	int reading;
 
 	if (len > (q->max_sectors << 9))
-		return ERR_PTR(-EINVAL);
-	if ((!len && ubuf) || (len && !ubuf))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
+	if (!len || !ubuf)
+		return -EINVAL;
 
-	rq = blk_get_request(q, rw, __GFP_WAIT);
-	if (!rq)
-		return ERR_PTR(-ENOMEM);
+	reading = rq_data_dir(rq) == READ;
 
 	/*
 	 * if alignment requirement is satisfied, map in user pages for
@@ -2154,9 +2153,9 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
 	 */
 	uaddr = (unsigned long) ubuf;
 	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-		bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
-		bio = bio_copy_user(q, uaddr, len, rw == READ);
+		bio = bio_copy_user(q, uaddr, len, reading);
 
 	if (!IS_ERR(bio)) {
 		rq->bio = rq->biotail = bio;
@@ -2164,28 +2163,70 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
 		rq->buffer = rq->data = NULL;
 		rq->data_len = len;
-		return rq;
+		return 0;
 	}
 
 	/*
 	 * bio is the err-ptr
 	 */
-	blk_put_request(rq);
-	return (struct request *) bio;
+	return PTR_ERR(bio);
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
 
+/**
+ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * @q:		request queue where request should be inserted
+ * @rq:		request to map data to
+ * @iov:	pointer to the iovec
+ * @iov_count:	number of elements in the iovec
+ *
+ * Description:
+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    a kernel bounce buffer is used.
+ *
+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    still in process context.
+ *
+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ *    before being submitted to the device, as pages mapped may be out of
+ *    reach. It's the callers responsibility to make sure this happens. The
+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
+ *    unmapping.
+ */
+int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+			struct sg_iovec *iov, int iov_count)
+{
+	struct bio *bio;
+
+	if (!iov || iov_count <= 0)
+		return -EINVAL;
+
+	/* we don't allow misaligned data like bio_map_user() does. If the
+	 * user is using sg, they're expected to know the alignment constraints
+	 * and respect them accordingly */
+	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	rq->bio = rq->biotail = bio;
+	blk_rq_bio_prep(q, rq, bio);
+	rq->buffer = rq->data = NULL;
+	rq->data_len = bio->bi_size;
+	return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_user_iov);
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq:		request to be unmapped
- * @bio:	bio for the request
+ * @bio:	bio to be unmapped
  * @ulen:	length of user buffer
  *
  * Description:
- *    Unmap a request previously mapped by blk_rq_map_user().
+ *    Unmap a bio previously mapped by blk_rq_map_user().
  */
-int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
 {
 	int ret = 0;
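The old API allocated and returned the request itself; the new one takes a caller-owned request, which is exactly how sg_io() is reworked later in this diff. A minimal sketch of the new calling convention (not part of the commit; the function name and command setup are hypothetical, and it assumes a process-context caller):

static int example_pc_read(request_queue_t *q, struct gendisk *disk,
                           void __user *ubuf, unsigned int len)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, READ, GFP_KERNEL);      /* caller allocates */
        if (!rq)
                return -ENOMEM;

        ret = blk_rq_map_user(q, rq, ubuf, len);        /* then maps */
        if (ret)
                goto out;
        bio = rq->bio;                                  /* keep for unmapping */

        rq->flags |= REQ_BLOCK_PC;
        /* ... fill in rq->cmd[], rq->cmd_len, rq->timeout here ... */

        blk_execute_rq(q, disk, rq, 0);                 /* run and wait;
                                                           errors end up in rq->errors */

        if (blk_rq_unmap_user(bio, len))                /* copy back / unpin */
                ret = -EFAULT;
out:
        blk_put_request(rq);                            /* caller frees */
        return ret;
}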
@@ -2196,31 +2237,89 @@ int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
 		ret = bio_uncopy_user(bio);
 	}
 
-	blk_put_request(rq);
-	return ret;
+	return 0;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
+/**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * @q:		request queue where request should be inserted
+ * @rq:		request to fill
+ * @kbuf:	the kernel buffer
+ * @len:	length of user data
+ * @gfp_mask:	memory allocation flags
+ */
+int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+		    unsigned int len, unsigned int gfp_mask)
+{
+	struct bio *bio;
+
+	if (len > (q->max_sectors << 9))
+		return -EINVAL;
+	if (!len || !kbuf)
+		return -EINVAL;
+
+	bio = bio_map_kern(q, kbuf, len, gfp_mask);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	if (rq_data_dir(rq) == WRITE)
+		bio->bi_rw |= (1 << BIO_RW);
+
+	rq->bio = rq->biotail = bio;
+	blk_rq_bio_prep(q, rq, bio);
+
+	rq->buffer = rq->data = NULL;
+	rq->data_len = len;
+	return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_kern);
+
+/**
+ * blk_execute_rq_nowait - insert a request into queue for execution
+ * @q:		queue to insert the request in
+ * @bd_disk:	matching gendisk
+ * @rq:		request to insert
+ * @at_head:	insert request at head or tail of queue
+ * @done:	I/O completion handler
+ *
+ * Description:
+ *    Insert a fully prepared request at the back of the io scheduler queue
+ *    for execution. Don't wait for completion.
+ */
+void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+			   struct request *rq, int at_head,
+			   void (*done)(struct request *))
+{
+	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+	rq->rq_disk = bd_disk;
+	rq->flags |= REQ_NOMERGE;
+	rq->end_io = done;
+	elv_add_request(q, rq, where, 1);
+	generic_unplug_device(q);
+}
+
 /**
  * blk_execute_rq - insert a request into queue for execution
  * @q:		queue to insert the request in
  * @bd_disk:	matching gendisk
  * @rq:		request to insert
+ * @at_head:	insert request at head or tail of queue
  *
  * Description:
  *    Insert a fully prepared request at the back of the io scheduler queue
- *    for execution.
+ *    for execution and wait for completion.
  */
 int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
-		   struct request *rq)
+		   struct request *rq, int at_head)
 {
 	DECLARE_COMPLETION(wait);
 	char sense[SCSI_SENSE_BUFFERSIZE];
 	int err = 0;
 
-	rq->rq_disk = bd_disk;
-
 	/*
 	 * we need an extra reference to the request, so we can look at
 	 * it after io completion
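blk_execute_rq() is now a thin synchronous wrapper around blk_execute_rq_nowait() plus blk_end_sync_rq. A hedged sketch of direct asynchronous use, assuming a driver-side caller; the handler and the use of rq->end_io_data (which rq_init() now clears, see the first hunk) are illustrative, not from this commit:

static void example_done(struct request *rq)
{
        /* may run from completion context: no sleeping, just signal */
        complete(rq->end_io_data);
}

static void example_submit_async(request_queue_t *q, struct gendisk *disk,
                                 struct request *rq, struct completion *done)
{
        rq->end_io_data = done;
        /* at_head = 0: ELEVATOR_INSERT_BACK, i.e. queue at the tail */
        blk_execute_rq_nowait(q, disk, rq, 0, example_done);
}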
@@ -2233,11 +2332,8 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
 		rq->sense_len = 0;
 	}
 
-	rq->flags |= REQ_NOMERGE;
 	rq->waiting = &wait;
-	rq->end_io = blk_end_sync_rq;
-	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
-	generic_unplug_device(q);
+	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
 	wait_for_completion(&wait);
 	rq->waiting = NULL;
@@ -2277,6 +2373,44 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 
 EXPORT_SYMBOL(blkdev_issue_flush);
 
+/**
+ * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
+ * @q:		device queue
+ * @disk:	gendisk
+ * @error_sector: error offset
+ *
+ * Description:
+ *    Devices understanding the SCSI command set, can use this function as
+ *    a helper for issuing a cache flush. Note: driver is required to store
+ *    the error offset (in case of error flushing) in ->sector of struct
+ *    request.
+ */
+int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+			       sector_t *error_sector)
+{
+	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
+	int ret;
+
+	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+	rq->sector = 0;
+	memset(rq->cmd, 0, sizeof(rq->cmd));
+	rq->cmd[0] = 0x35;
+	rq->cmd_len = 12;
+	rq->data = NULL;
+	rq->data_len = 0;
+	rq->timeout = 60 * HZ;
+
+	ret = blk_execute_rq(q, disk, rq, 0);
+
+	if (ret && error_sector)
+		*error_sector = rq->sector;
+
+	blk_put_request(rq);
+	return ret;
+}
+
+EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
+
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 {
 	int rw = rq_data_dir(rq);
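blkdev_scsi_issue_flush_fn() matches the issue_flush_fn signature, so a SCSI-command-set driver would presumably register it on its queue. A hedged illustration; the registration helper blk_queue_issue_flush_fn() already exists in the block layer but is not part of this diff:

static void example_setup_flush(request_queue_t *q)
{
        /* command 0x35 built by blkdev_scsi_issue_flush_fn() is SYNCHRONIZE CACHE (10) */
        blk_queue_issue_flush_fn(q, blkdev_scsi_issue_flush_fn);
}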
drivers/block/scsi_ioctl.c
@@ -216,7 +216,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 			struct gendisk *bd_disk, struct sg_io_hdr *hdr)
 {
 	unsigned long start_time;
-	int reading, writing;
+	int writing = 0, ret = 0;
 	struct request *rq;
 	struct bio *bio;
 	char sense[SCSI_SENSE_BUFFERSIZE];
@@ -231,38 +231,48 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (verify_command(file, cmd))
 		return -EPERM;
 
-	/*
-	 * we'll do that later
-	 */
-	if (hdr->iovec_count)
-		return -EOPNOTSUPP;
-
 	if (hdr->dxfer_len > (q->max_sectors << 9))
 		return -EIO;
 
-	reading = writing = 0;
-	if (hdr->dxfer_len) {
+	if (hdr->dxfer_len)
 		switch (hdr->dxfer_direction) {
 		default:
 			return -EINVAL;
 		case SG_DXFER_TO_FROM_DEV:
-			reading = 1;
-			/* fall through */
 		case SG_DXFER_TO_DEV:
 			writing = 1;
 			break;
 		case SG_DXFER_FROM_DEV:
-			reading = 1;
 			break;
 		}
 
-		rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp,
-				     hdr->dxfer_len);
+	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+	if (!rq)
+		return -ENOMEM;
+
+	if (hdr->iovec_count) {
+		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+		struct sg_iovec *iov;
+
+		iov = kmalloc(size, GFP_KERNEL);
+		if (!iov) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (copy_from_user(iov, hdr->dxferp, size)) {
+			kfree(iov);
+			ret = -EFAULT;
+			goto out;
+		}
+
+		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
+		kfree(iov);
+	} else if (hdr->dxfer_len)
+		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
 
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
-	} else
-		rq = blk_get_request(q, READ, __GFP_WAIT);
+	if (ret)
+		goto out;
 
 	/*
 	 * fill in request structure
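sg_io() now copies the user-supplied iovec into kernel memory and hands it to blk_rq_map_user_iov(). A minimal sketch of a kernel-side caller building its own table (names hypothetical); the buffers must already satisfy queue_dma_alignment(), since the iovec path does not fall back to a bounce buffer:

static int example_map_two_segments(request_queue_t *q, struct request *rq,
                                    void __user *buf0, unsigned int len0,
                                    void __user *buf1, unsigned int len1)
{
        struct sg_iovec iov[2];

        iov[0].iov_base = buf0;
        iov[0].iov_len  = len0;
        iov[1].iov_base = buf1;
        iov[1].iov_len  = len1;

        /* direction is taken from rq_data_dir(rq) inside the helper */
        return blk_rq_map_user_iov(q, rq, iov, 2);
}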
@@ -298,7 +308,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 	 * (if he doesn't check that is his problem).
 	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
 	 */
-	blk_execute_rq(q, bd_disk, rq);
+	blk_execute_rq(q, bd_disk, rq, 0);
 
 	/* write to all output members */
 	hdr->status = 0xff & rq->errors;
@@ -320,12 +330,14 @@ static int sg_io(struct file *file, request_queue_t *q,
 			hdr->sb_len_wr = len;
 	}
 
-	if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
-		return -EFAULT;
+	if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control
 	 * structure (struct sg_io_hdr). */
-	return 0;
+out:
+	blk_put_request(rq);
+	return ret;
 }
 
 #define OMAX_SB_LEN 16          /* For backward compatibility */
@@ -408,7 +420,7 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
 	rq->data_len = bytes;
 	rq->flags |= REQ_BLOCK_PC;
 
-	blk_execute_rq(q, bd_disk, rq);
+	blk_execute_rq(q, bd_disk, rq, 0);
 	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
 	if (err) {
 		if (rq->sense_len && rq->sense) {
@@ -561,7 +573,7 @@ int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd,
 		rq->cmd[0] = GPCMD_START_STOP_UNIT;
 		rq->cmd[4] = 0x02 + (close != 0);
 		rq->cmd_len = 6;
-		err = blk_execute_rq(q, bd_disk, rq);
+		err = blk_execute_rq(q, bd_disk, rq, 0);
 		blk_put_request(rq);
 		break;
 	default:
drivers/cdrom/cdrom.c
@@ -2097,6 +2097,10 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 	if (!q)
 		return -ENXIO;
 
+	rq = blk_get_request(q, READ, GFP_KERNEL);
+	if (!rq)
+		return -ENOMEM;
+
 	cdi->last_sense = 0;
 
 	while (nframes) {
@@ -2108,9 +2112,9 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 
 		len = nr * CD_FRAMESIZE_RAW;
 
-		rq = blk_rq_map_user(q, READ, ubuf, len);
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
+		ret = blk_rq_map_user(q, rq, ubuf, len);
+		if (ret)
+			break;
 
 		memset(rq->cmd, 0, sizeof(rq->cmd));
 		rq->cmd[0] = GPCMD_READ_CD;
@@ -2132,13 +2136,13 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		if (rq->bio)
 			blk_queue_bounce(q, &rq->bio);
 
-		if (blk_execute_rq(q, cdi->disk, rq)) {
+		if (blk_execute_rq(q, cdi->disk, rq, 0)) {
 			struct request_sense *s = rq->sense;
 			ret = -EIO;
 			cdi->last_sense = s->sense_key;
 		}
 
-		if (blk_rq_unmap_user(rq, bio, len))
+		if (blk_rq_unmap_user(bio, len))
 			ret = -EFAULT;
 
 		if (ret)
@@ -2149,6 +2153,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		ubuf += len;
 	}
 
+	blk_put_request(rq);
 	return ret;
 }
drivers/ide/ide-disk.c
@@ -754,7 +754,7 @@ static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
 	idedisk_prepare_flush(q, rq);
 
-	ret = blk_execute_rq(q, disk, rq);
+	ret = blk_execute_rq(q, disk, rq, 0);
 
 	/*
 	 * if we failed and caller wants error offset, get it
fs/bio.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <scsi/sg.h>		/* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
@@ -546,22 +547,34 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 	return ERR_PTR(ret);
 }
 
-static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
-				  unsigned long uaddr, unsigned int len,
-				  int write_to_vm)
+static struct bio *__bio_map_user_iov(request_queue_t *q,
+				      struct block_device *bdev,
+				      struct sg_iovec *iov, int iov_count,
+				      int write_to_vm)
 {
-	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned long start = uaddr >> PAGE_SHIFT;
-	const int nr_pages = end - start;
-	int ret, offset, i;
+	int i, j;
+	int nr_pages = 0;
 	struct page **pages;
 	struct bio *bio;
+	int cur_page = 0;
+	int ret, offset;
 
-	/*
-	 * transfer and buffer must be aligned to at least hardsector
-	 * size for now, in the future we can relax this restriction
-	 */
-	if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
-		return ERR_PTR(-EINVAL);
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+
+		nr_pages += end - start;
+		/*
+		 * transfer and buffer must be aligned to at least hardsector
+		 * size for now, in the future we can relax this restriction
+		 */
+		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+			return ERR_PTR(-EINVAL);
+	}
+
+	if (!nr_pages)
+		return ERR_PTR(-EINVAL);
 
 	bio = bio_alloc(GFP_KERNEL, nr_pages);
@@ -573,42 +586,54 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
 	if (!pages)
 		goto out;
 
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, uaddr, nr_pages,
-						write_to_vm, 0, pages, NULL);
-	up_read(&current->mm->mmap_sem);
-
-	if (ret < nr_pages)
-		goto out;
-
-	bio->bi_bdev = bdev;
-
-	offset = uaddr & ~PAGE_MASK;
-	for (i = 0; i < nr_pages; i++) {
-		unsigned int bytes = PAGE_SIZE - offset;
-
-		if (len <= 0)
-			break;
-
-		if (bytes > len)
-			bytes = len;
-
-		/*
-		 * sorry...
-		 */
-		if (__bio_add_page(q, bio, pages[i], bytes, offset) < bytes)
-			break;
-
-		len -= bytes;
-		offset = 0;
-	}
-
-	/*
-	 * release the pages we didn't map into the bio, if any
-	 */
-	while (i < nr_pages)
-		page_cache_release(pages[i++]);
+	memset(pages, 0, nr_pages * sizeof(struct page *));
+
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+		const int local_nr_pages = end - start;
+		const int page_limit = cur_page + local_nr_pages;
+
+		down_read(&current->mm->mmap_sem);
+		ret = get_user_pages(current, current->mm, uaddr,
+				     local_nr_pages,
+				     write_to_vm, 0, &pages[cur_page], NULL);
+		up_read(&current->mm->mmap_sem);
+
+		if (ret < local_nr_pages)
+			goto out_unmap;
+
+		offset = uaddr & ~PAGE_MASK;
+		for (j = cur_page; j < page_limit; j++) {
+			unsigned int bytes = PAGE_SIZE - offset;
+
+			if (len <= 0)
+				break;
+
+			if (bytes > len)
+				bytes = len;
+
+			/*
+			 * sorry...
+			 */
+			if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+				break;
+
+			len -= bytes;
+			offset = 0;
+		}
+
+		cur_page = j;
+
+		/*
+		 * release the pages we didn't map into the bio, if any
+		 */
+		while (j < page_limit)
+			page_cache_release(pages[j++]);
+	}
 
 	kfree(pages);
 
 	/*
@@ -617,9 +642,17 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
 	if (!write_to_vm)
 		bio->bi_rw |= (1 << BIO_RW);
 
+	bio->bi_bdev = bdev;
 	bio->bi_flags |= (1 << BIO_USER_MAPPED);
 	return bio;
-out:
+
+ out_unmap:
+	for (i = 0; i < nr_pages; i++) {
+		if (!pages[i])
+			break;
+		page_cache_release(pages[i]);
+	}
+ out:
 	kfree(pages);
 	bio_put(bio);
 	return ERR_PTR(ret);
@@ -638,10 +671,34 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
  */
 struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 			 unsigned long uaddr, unsigned int len, int write_to_vm)
 {
+	struct sg_iovec iov;
+
+	iov.iov_base = (__user void *)uaddr;
+	iov.iov_len = len;
+
+	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+}
+
+/**
+ * bio_map_user_iov - map user sg_iovec table into bio
+ * @q: the request_queue_t for the bio
+ * @bdev: destination block device
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Map the user space address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+			     struct sg_iovec *iov, int iov_count,
+			     int write_to_vm)
+{
 	struct bio *bio;
+	int len = 0, i;
 
-	bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
+	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
 	if (IS_ERR(bio))
 		return bio;
@@ -654,6 +711,9 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 	 */
 	bio_get(bio);
 
+	for (i = 0; i < iov_count; i++)
+		len += iov[i].iov_len;
+
 	if (bio->bi_size == len)
 		return bio;
@@ -698,6 +758,82 @@ void bio_unmap_user(struct bio *bio)
 	bio_put(bio);
 }
 
+static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+{
+	if (bio->bi_size)
+		return 1;
+
+	bio_put(bio);
+	return 0;
+}
+
+static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+				  unsigned int len, unsigned int gfp_mask)
+{
+	unsigned long kaddr = (unsigned long)data;
+	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = kaddr >> PAGE_SHIFT;
+	const int nr_pages = end - start;
+	int offset, i;
+	struct bio *bio;
+
+	bio = bio_alloc(gfp_mask, nr_pages);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+
+	offset = offset_in_page(kaddr);
+	for (i = 0; i < nr_pages; i++) {
+		unsigned int bytes = PAGE_SIZE - offset;
+
+		if (len <= 0)
+			break;
+
+		if (bytes > len)
+			bytes = len;
+
+		if (__bio_add_page(q, bio, virt_to_page(data), bytes,
+				   offset) < bytes)
+			break;
+
+		data += bytes;
+		len -= bytes;
+		offset = 0;
+	}
+
+	bio->bi_end_io = bio_map_kern_endio;
+	return bio;
+}
+
+/**
+ * bio_map_kern - map kernel address into bio
+ * @q: the request_queue_t for the bio
+ * @data: pointer to buffer to map
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+			 unsigned int gfp_mask)
+{
+	struct bio *bio;
+
+	bio = __bio_map_kern(q, data, len, gfp_mask);
+	if (IS_ERR(bio))
+		return bio;
+
+	if (bio->bi_size == len)
+		return bio;
+
+	/*
+	 * Don't support partial mappings.
+	 */
+	bio_put(bio);
+	return ERR_PTR(-EINVAL);
+}
+
 /*
  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
  * for performing direct-IO in BIOs.
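bio_map_kern() (and blk_rq_map_kern() above, which wraps it) maps a contiguous kernel buffer; __bio_map_kern() uses virt_to_page(), so the buffer must be kmalloc/lowmem memory rather than vmalloc. A hedged usage sketch, with a hypothetical caller and command setup left to the reader:

static int example_kernel_buffer_cmd(request_queue_t *q, struct gendisk *disk,
                                     void *kbuf, unsigned int len)
{
        struct request *rq;
        int ret;

        rq = blk_get_request(q, READ, __GFP_WAIT);
        if (!rq)
                return -ENOMEM;

        ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
        if (ret)
                goto out;

        rq->flags |= REQ_BLOCK_PC;
        /* ... rq->cmd[], rq->cmd_len, rq->timeout filled in by the caller ... */

        ret = blk_execute_rq(q, disk, rq, 0);
out:
        blk_put_request(rq);
        return ret;
}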
@@ -1085,6 +1221,7 @@ EXPORT_SYMBOL(bio_add_page);
 EXPORT_SYMBOL(bio_get_nr_vecs);
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);
+EXPORT_SYMBOL(bio_map_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
 EXPORT_SYMBOL(bio_split_pool);
include/linux/bio.h
@@ -295,7 +295,13 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
 				unsigned long, unsigned int, int);
+struct sg_iovec;
+extern struct bio *bio_map_user_iov(struct request_queue *,
+				    struct block_device *,
+				    struct sg_iovec *, int, int);
 extern void bio_unmap_user(struct bio *);
+extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
+				unsigned int);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 extern struct bio *bio_copy_user(struct request_queue *, unsigned long,
 				 unsigned int, int);
include/linux/blkdev.h
@@ -563,10 +563,12 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
 extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
-extern struct request *blk_rq_map_user(request_queue_t *, int, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct request *, struct bio *, unsigned int);
-extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *);
+extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
+extern int blk_rq_unmap_user(struct bio *, unsigned int);
+extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, unsigned int);
+extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
+extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *, int);
 
 static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue;