/* bounce buffer handling for block devices
*
* - Split from highmem.c
*/
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <asm/tlbflush.h>
#include <trace/events/block.h>
#define POOL_SIZE 64
#define ISA_POOL_SIZE 16
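/*
 * Two emergency pools: page_pool backs highmem bouncing and is sized
 * POOL_SIZE at boot, while isa_page_pool backs ISA-style DMA bouncing
 * (GFP_DMA pages) and is created on demand by init_emergency_isa_pool().
 */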
static mempool_t *page_pool, *isa_page_pool;
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
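/*
 * Create the boot-time bounce pool. Without memory hotplug, this can be
 * skipped when no page lies above the direct mapping (max_pfn <=
 * max_low_pfn); with hotplug, highmem may appear later, so the pool is
 * created unconditionally.
 */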
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
if (max_pfn <= max_low_pfn)
return 0;
#endif
page_pool = mempool_create_page_pool(POOL_SIZE, 0);
BUG_ON(!page_pool);
printk("bounce pool size: %d pages\n", POOL_SIZE);
return 0;
}
__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
* highmem version, map in to vec
*/
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
unsigned long flags;
unsigned char *vto;
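/*
 * This runs from read-completion context, possibly in hard interrupt:
 * keep local interrupts disabled around the atomic kmap of the
 * (potentially highmem) destination page.
 */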
local_irq_save(flags);
vto = kmap_atomic(to->bv_page);
memcpy(vto + to->bv_offset, vfrom, to->bv_len);
kunmap_atomic(vto);
local_irq_restore(flags);
}
#else /* CONFIG_HIGHMEM */
#define bounce_copy_vec(to, vfrom) \
memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
#endif /* CONFIG_HIGHMEM */
/*
* allocate pages in the DMA region for the ISA pool
*/
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}
/*
* gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
* as the max address, so check if the pool has already been created.
*/
int init_emergency_isa_pool(void)
{
if (isa_page_pool)
return 0;
isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
mempool_free_pages, (void *) 0);
BUG_ON(!isa_page_pool);
printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
return 0;
}
/*
* Simple bounce buffer support for highmem pages. Depending on the
* queue gfp mask set, *to may or may not be a highmem page. kmap it
* always, it will do the Right Thing
*/
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
unsigned char *vfrom;
struct bio_vec *tovec, *fromvec;
int i;
bio_for_each_segment(tovec, to, i) {
fromvec = from->bi_io_vec + i;
/*
* not bounced
*/
if (tovec->bv_page == fromvec->bv_page)
continue;
/*
* fromvec->bv_offset and fromvec->bv_len might have been
* modified by the block layer, so use the original copy,
* bounce_copy_vec already uses tovec->bv_len
*/
vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
bounce_copy_vec(tovec, vfrom);
flush_dcache_page(tovec->bv_page);
}
}
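/*
 * Common completion path: return every page that was actually bounced
 * to its mempool, complete the original bio with the same error, and
 * drop the bounce clone.
 */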
static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
struct bio *bio_orig = bio->bi_private;
struct bio_vec *bvec, *org_vec;
int i;
if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
/*
* free up bounce indirect pages used
*/
bio_for_each_segment_all(bvec, bio, i) {
org_vec = bio_orig->bi_io_vec + i;
if (bvec->bv_page == org_vec->bv_page)
continue;
dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
mempool_free(bvec->bv_page, pool);
}
bio_endio(bio_orig, err);
bio_put(bio);
}
static void bounce_end_io_write(struct bio *bio, int err)
{
bounce_end_io(bio, page_pool, err);
}
static void bounce_end_io_write_isa(struct bio *bio, int err)
{
bounce_end_io(bio, isa_page_pool, err);
}
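/*
 * Read completion: if the transfer succeeded, copy the data from the
 * bounce pages back into the original (highmem) bio before the common
 * teardown runs.
 */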
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
struct bio *bio_orig = bio->bi_private;
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
copy_to_high_bio_irq(bio_orig, bio);
bounce_end_io(bio, pool, err);
}
static void bounce_end_io_read(struct bio *bio, int err)
{
__bounce_end_io_read(bio, page_pool, err);
}
static void bounce_end_io_read_isa(struct bio *bio, int err)
{
__bounce_end_io_read(bio, isa_page_pool, err);
}
#ifdef CONFIG_NEED_BOUNCE_POOL
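/*
 * A write is bounced even from lowmem when the backing device requires
 * stable pages and the bio is flagged BIO_SNAP_STABLE: the snapshot
 * keeps the in-flight data unchanged even if the page is redirtied.
 */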
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
if (bio_data_dir(bio) != WRITE)
return 0;
if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
return 0;
return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
}
#else
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
return 0;
}
#endif /* CONFIG_NEED_BOUNCE_POOL */
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
mempool_t *pool, int force)
{
struct bio *bio;
int rw = bio_data_dir(*bio_orig);
struct bio_vec *to, *from;
unsigned i;
if (force)
goto bounce;
bio_for_each_segment(from, *bio_orig, i)
if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
goto bounce;
return;
bounce:
bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
bio_for_each_segment_all(to, bio, i) {
struct page *page = to->bv_page;
if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
continue;
inc_zone_page_state(to->bv_page, NR_BOUNCE);
to->bv_page = mempool_alloc(pool, q->bounce_gfp);
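/*
 * For a write, fill the bounce page now; for a read, the copy back to
 * the original page happens at completion time in __bounce_end_io_read().
 */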
if (rw == WRITE) {
char *vto, *vfrom;
flush_dcache_page(page);
vto = page_address(to->bv_page) + to->bv_offset;
vfrom = kmap_atomic(page) + to->bv_offset;
memcpy(vto, vfrom, to->bv_len);
kunmap_atomic(vfrom);
}
}
trace_block_bio_bounce(q, *bio_orig);
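/*
 * Mark the clone as bounced and pick the completion handler matching
 * the pool and the transfer direction.
 */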
bio->bi_flags |= (1 << BIO_BOUNCED);
if (pool == page_pool) {
bio->bi_end_io = bounce_end_io_write;
if (rw == READ)
bio->bi_end_io = bounce_end_io_read;
} else {
bio->bi_end_io = bounce_end_io_write_isa;
if (rw == READ)
bio->bi_end_io = bounce_end_io_read_isa;
}
bio->bi_private = *bio_orig;
*bio_orig = bio;
}
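/*
 * Entry point from the block layer: decide whether bouncing is needed
 * and with which pool, then hand off to __blk_queue_bounce(). On
 * return, *bio_orig points at the bio to submit (the clone if bounced).
 */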
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
int must_bounce;
mempool_t *pool;
/*
* Data-less bio, nothing to bounce
*/
if (!bio_has_data(*bio_orig))
return;
must_bounce = must_snapshot_stable_pages(q, *bio_orig);
/*
* for non-isa bounce case, just check if the bounce pfn is equal
* to or bigger than the highest pfn in the system -- in that case,
* don't waste time iterating over bio segments
*/
if (!(q->bounce_gfp & GFP_DMA)) {
if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
return;
pool = page_pool;
} else {
BUG_ON(!isa_page_pool);
pool = isa_page_pool;
}
/*
* slow path
*/
__blk_queue_bounce(q, bio_orig, pool, must_bounce);
}
EXPORT_SYMBOL(blk_queue_bounce);