/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
#include <linux/highmem.h>
#include <linux/mempool.h>
/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#include <asm/io.h>

#ifndef BIO_VMERGE_BOUNDARY
#define BIO_VMERGE_BOUNDARY	0
#endif
#define BIO_BUG_ON	BUG_ON
#define BIO_MAX_PAGES		(256)
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)
/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};
struct bio;
typedef int (bio_end_io_t) (struct bio *, unsigned int, int);
typedef void (bio_destructor_t) (struct bio *);
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	sector_t		bi_sector;
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned long		bi_flags;	/* status, command, etc */
	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
						 * top bits priority
						 */

	unsigned short		bi_vcnt;	/* how many bio_vec's */
	unsigned short		bi_idx;		/* current index into bvl_vec */

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned short		bi_phys_segments;

	/* Number of segments after physical and DMA remapping
	 * hardware coalescing is performed.
	 */
	unsigned short		bi_hw_segments;

	unsigned int		bi_size;	/* residual I/O count */
	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	bio_end_io_t		*bi_end_io;
	atomic_t		bi_cnt;		/* pin count */

	void			*bi_private;

	bio_destructor_t	*bi_destructor;	/* destructor */
};
/*
 * bio flags
 */
#define BIO_UPTODATE	0	/* ok after I/O completion */
#define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
#define BIO_EOF		2	/* out-of-bounds error */
#define BIO_SEG_VALID	3	/* nr_hw_seg valid */
#define BIO_CLONED	4	/* doesn't own data */
#define BIO_BOUNCED	5	/* bio is a bounce bio */
#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
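
/*
 * Example (sketch, not part of the original header): a minimal bi_end_io
 * style callback that uses bio_flagged() to check BIO_UPTODATE.  The
 * function name is hypothetical; real drivers typically also look at
 * bytes_done for partial completions.
 */
static inline int example_end_io(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;	/* partial completion, more I/O still pending */

	if (!bio_flagged(bio, BIO_UPTODATE) && !err)
		err = -EIO;	/* finished, but not marked up to date */

	/* signal the waiter here, e.g. via something stored in bi_private */
	return 0;
}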
/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS		(4)
#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)
/*
 * bio bi_rw flags
 *
 * bit 0 -- read (not set) or write (set)
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 * bit 3 -- fail fast, don't want low level driver retries
 * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
 */
#define BIO_RW		0
#define BIO_RW_AHEAD	1
#define BIO_RW_BARRIER	2
#define BIO_RW_FAILFAST	3
#define BIO_RW_SYNC	4
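
/*
 * Example (sketch, not part of the original header): marking a bio as a
 * synchronous write before submission.  BIO_RW is bit 0 (write), as
 * described in the comment above.
 */
static inline void example_mark_sync_write(struct bio *bio)
{
	bio->bi_rw |= (1 << BIO_RW) | (1 << BIO_RW_SYNC);
}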
/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_cur_sectors(bio)	(bio_iovec(bio)->bv_len >> 9)
#define bio_data(bio)		(page_address(bio_page((bio))) + bio_offset((bio)))
#define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))

#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
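
/*
 * Example (sketch, not part of the original header): copy the current
 * segment of a bio into a driver buffer using the atomic kmap helpers.
 * KM_USER0 is just one possible kmap slot, memcpy() is assumed to come
 * from <linux/string.h>, and 'buf' must be large enough for the segment.
 */
static inline void example_copy_cur_segment(struct bio *bio, void *buf)
{
	char *addr = __bio_kmap_atomic(bio, bio->bi_idx, KM_USER0);

	memcpy(buf, addr, bio_iovec(bio)->bv_len);
	__bio_kunmap_atomic((void *) ((unsigned long) addr & PAGE_MASK), KM_USER0);
}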
#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
#define BIOVEC_VIRT_MERGEABLE(vec1, vec2)	\
	((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
#define bio_io_error(bio, bytes) bio_endio((bio), (bytes), -EIO)
/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)				\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
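
/*
 * Example (sketch, not part of the original header): walking the pending
 * segments of a bio with bio_for_each_segment().  The byte count computed
 * here normally matches bi_size; it only illustrates the iterator.
 */
static inline unsigned int example_bio_pending_bytes(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned int bytes = 0;
	int i;

	bio_for_each_segment(bvec, bio, i)
		bytes += bvec->bv_len;

	return bytes;
}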
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(rw, bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * without the bio_get(), it could potentially complete I/O before submit_bio
 * returns, and the bio would already be freed memory by the time the
 * if (bio->bi_flags ...) check runs.
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)
/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
	struct bio		bio1, bio2;
	struct bio_vec		bv1, bv2;
	atomic_t		cnt;
	int			error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
				  int first_sectors);
extern mempool_t *bio_split_pool;
extern void bio_pair_release(struct bio_pair *dbio);
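
/*
 * Example (sketch, not part of the original header): splitting a
 * single-page bio at 'first_sectors' using the global bio_split_pool,
 * as a stacking driver would when a request crosses a chunk boundary.
 * Submission of the two halves is left to the caller.
 */
static inline int example_split_bio(struct bio *bio, int first_sectors)
{
	struct bio_pair *bp = bio_split(bio, bio_split_pool, first_sectors);

	if (!bp)
		return -ENOMEM;

	/* queue bp->bio1 and bp->bio2 here, then drop the pair's reference */
	bio_pair_release(bp);
	return 0;
}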
extern struct bio *bio_alloc(int, int);
extern void bio_put(struct bio *);

extern void bio_endio(struct bio *, unsigned int, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int bio_hw_segments(struct request_queue *, struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, int);

extern void bio_init(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int);
extern void bio_unmap_user(struct bio *, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
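
/*
 * Example (sketch, not part of the original header): building a
 * single-page bio.  GFP_KERNEL and PAGE_SIZE come from the usual mm
 * headers; the completion callback and the eventual submit_bio() call
 * (declared elsewhere) are left to the caller.
 */
static inline struct bio *example_alloc_page_bio(struct block_device *bdev,
						 struct page *page,
						 bio_end_io_t *end_io)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);

	if (!bio)
		return NULL;

	bio->bi_bdev = bdev;
	bio->bi_end_io = end_io;

	if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
		bio_put(bio);
		return NULL;
	}

	return bio;
}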
#ifdef CONFIG_HIGHMEM
/*
 * remember to add offset! and never ever reenable interrupts between a
 * bvec_kmap_irq and bvec_kunmap_irq!!
 *
 * This function MUST be inlined - it plays with the CPU interrupt flags.
 * Hence the `extern inline'.
 */
extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}

#else
#define bvec_kmap_irq(bvec, flags)	(page_address((bvec)->bv_page) + (bvec)->bv_offset)
#define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
#endif
extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}

#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)
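
/*
 * Example (sketch, not part of the original header): PIO-style copy of
 * the current bio segment using the irq-safe mapping helpers.  'dst' is
 * assumed to be large enough, and interrupts stay disabled between the
 * map and unmap as required above.
 */
static inline void example_pio_copy_cur(struct bio *bio, void *dst)
{
	unsigned long flags;
	char *src = bio_kmap_irq(bio, &flags);

	memcpy(dst, src, bio_cur_sectors(bio) << 9);
	bio_kunmap_irq(src, &flags);
}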
#endif /* __LINUX_BIO_H */