Commit 0eb0b63c authored by Christoph Hellwig, committed by Jens Axboe

block: consistently use GFP_NOIO instead of __GFP_RECLAIM

Same numerical value (for now at least), but a much better documentation
of intent.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c3036021
...@@ -499,7 +499,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, ...@@ -499,7 +499,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
break; break;
} }
if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_RECLAIM)) { if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO)) {
err = DRIVER_ERROR << 24; err = DRIVER_ERROR << 24;
goto error; goto error;
} }
......
...@@ -1014,7 +1014,8 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho ...@@ -1014,7 +1014,8 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_set_page_unchanged(b->bm_pages[page_nr]); bm_set_page_unchanged(b->bm_pages[page_nr]);
if (ctx->flags & BM_AIO_COPY_PAGES) { if (ctx->flags & BM_AIO_COPY_PAGES) {
page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM); page = mempool_alloc(drbd_md_io_page_pool,
GFP_NOIO | __GFP_HIGHMEM);
copy_highpage(page, b->bm_pages[page_nr]); copy_highpage(page, b->bm_pages[page_nr]);
bm_store_page_idx(page, page_nr); bm_store_page_idx(page, page_nr);
} else } else
......
...@@ -710,7 +710,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command * ...@@ -710,7 +710,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
if (cgc->buflen) { if (cgc->buflen) {
ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
__GFP_RECLAIM); GFP_NOIO);
if (ret) if (ret)
goto out; goto out;
} }
......
...@@ -862,7 +862,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size) ...@@ -862,7 +862,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
if (size) { if (size) {
ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size, ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
__GFP_RECLAIM); GFP_NOIO);
if (ret) if (ret)
goto out_put; goto out_put;
} }
......
...@@ -442,7 +442,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf, ...@@ -442,7 +442,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
*/ */
if (nsect) { if (nsect) {
error = blk_rq_map_kern(drive->queue, rq, buf, error = blk_rq_map_kern(drive->queue, rq, buf,
nsect * SECTOR_SIZE, __GFP_RECLAIM); nsect * SECTOR_SIZE, GFP_NOIO);
if (error) if (error)
goto put_req; goto put_req;
} }
......
...@@ -273,7 +273,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, ...@@ -273,7 +273,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
rq = scsi_req(req); rq = scsi_req(req);
if (bufflen && blk_rq_map_kern(sdev->request_queue, req, if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
buffer, bufflen, __GFP_RECLAIM)) buffer, bufflen, GFP_NOIO))
goto out; goto out;
rq->cmd_len = COMMAND_SIZE(cmd[0]); rq->cmd_len = COMMAND_SIZE(cmd[0]);
......
...@@ -432,8 +432,8 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, ...@@ -432,8 +432,8 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
struct bio *bio; struct bio *bio;
/* /*
* bio_alloc() is guaranteed to return a bio when called with * bio_alloc() is guaranteed to return a bio when allowed to sleep and
* __GFP_RECLAIM and we request a valid number of vectors. * we request a valid number of vectors.
*/ */
bio = bio_alloc(GFP_KERNEL, nr_vecs); bio = bio_alloc(GFP_KERNEL, nr_vecs);
......
...@@ -269,7 +269,7 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr, ...@@ -269,7 +269,7 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
struct bio *bio; struct bio *bio;
int error = 0; int error = 0;
bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1); bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
bio_set_dev(bio, hib_resume_bdev); bio_set_dev(bio, hib_resume_bdev);
bio_set_op_attrs(bio, op, op_flags); bio_set_op_attrs(bio, op, op_flags);
...@@ -376,7 +376,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb) ...@@ -376,7 +376,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
return -ENOSPC; return -ENOSPC;
if (hb) { if (hb) {
src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN | src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
__GFP_NORETRY); __GFP_NORETRY);
if (src) { if (src) {
copy_page(src, buf); copy_page(src, buf);
...@@ -384,7 +384,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb) ...@@ -384,7 +384,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
ret = hib_wait_io(hb); /* Free pages */ ret = hib_wait_io(hb); /* Free pages */
if (ret) if (ret)
return ret; return ret;
src = (void *)__get_free_page(__GFP_RECLAIM | src = (void *)__get_free_page(GFP_NOIO |
__GFP_NOWARN | __GFP_NOWARN |
__GFP_NORETRY); __GFP_NORETRY);
if (src) { if (src) {
...@@ -691,7 +691,7 @@ static int save_image_lzo(struct swap_map_handle *handle, ...@@ -691,7 +691,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
nr_threads = num_online_cpus() - 1; nr_threads = num_online_cpus() - 1;
nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH); page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
if (!page) { if (!page) {
pr_err("Failed to allocate LZO page\n"); pr_err("Failed to allocate LZO page\n");
ret = -ENOMEM; ret = -ENOMEM;
...@@ -989,7 +989,7 @@ static int get_swap_reader(struct swap_map_handle *handle, ...@@ -989,7 +989,7 @@ static int get_swap_reader(struct swap_map_handle *handle,
last = tmp; last = tmp;
tmp->map = (struct swap_map_page *) tmp->map = (struct swap_map_page *)
__get_free_page(__GFP_RECLAIM | __GFP_HIGH); __get_free_page(GFP_NOIO | __GFP_HIGH);
if (!tmp->map) { if (!tmp->map) {
release_swap_reader(handle); release_swap_reader(handle);
return -ENOMEM; return -ENOMEM;
...@@ -1261,8 +1261,8 @@ static int load_image_lzo(struct swap_map_handle *handle, ...@@ -1261,8 +1261,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
for (i = 0; i < read_pages; i++) { for (i = 0; i < read_pages; i++) {
page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ? page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
__GFP_RECLAIM | __GFP_HIGH : GFP_NOIO | __GFP_HIGH :
__GFP_RECLAIM | __GFP_NOWARN | GFP_NOIO | __GFP_NOWARN |
__GFP_NORETRY); __GFP_NORETRY);
if (!page[i]) { if (!page[i]) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment