struct request_queue;
typedef struct request_queue request_queue_t;
-struct elevator_s;
-typedef struct elevator_s elevator_t;
+struct elevator_queue;
+typedef struct elevator_queue elevator_t;
struct request_pm_state;
#define BLKDEV_MIN_RQ 4
sector_t seek_mean;
};
+struct cfq_queue;
+struct cfq_io_context {
+	void (*dtor)(struct cfq_io_context *);	/* destructor, run on final put */
+	void (*exit)(struct cfq_io_context *);	/* run when the task exits */
+
+	struct io_context *ioc;			/* the owning process io context */
+
+ /*
+ * circular list of cfq_io_contexts belonging to a process io context
+ */
+ struct list_head list;
+ struct cfq_queue *cfqq;
+};
+
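/*
 * A minimal sketch, not part of this diff, of how a second and later
 * cfq_io_context would join the per-process list described above: the
 * first cic hangs off ioc->cic and the rest chain through cic->list.
 * The helper name is hypothetical.
 */
static void example_link_cic(struct io_context *ioc,
			     struct cfq_io_context *cic)
{
	cic->ioc = ioc;
	if (!ioc->cic) {
		INIT_LIST_HEAD(&cic->list);	/* first context heads the ring */
		ioc->cic = cic;
	} else
		list_add(&cic->list, &ioc->cic->list);
}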
/*
* This is the per-process I/O subsystem state. It is refcounted and
* kmalloc'ed. Currently all fields are modified in process io context
unsigned long last_waited; /* Time last woken after wait for request */
int nr_batch_requests; /* Number of requests left in the batch */
+ spinlock_t lock;
+
struct as_io_context *aic;
+ struct cfq_io_context *cic;
};
void put_io_context(struct io_context *ioc);
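/*
 * A sketch of the teardown put_io_context() is expected to perform on
 * the last reference, mirroring the existing aic hook for the new cic
 * member; only fields visible above are used, the rest of the real
 * function is assumed.
 */
static void example_free_io_context(struct io_context *ioc)
{
	if (ioc->aic && ioc->aic->dtor)
		ioc->aic->dtor(ioc->aic);	/* anticipatory state */
	if (ioc->cic && ioc->cic->dtor)
		ioc->cic->dtor(ioc->cic);	/* cfq state */
	kfree(ioc);	/* the header comment says it is kmalloc'ed */
}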
struct request_list {
int count[2];
+	int starved[2];		/* set when an alloc in this direction fails */
mempool_t *rq_pool;
wait_queue_head_t wait[2];
+	wait_queue_head_t drain;	/* waited on until the queue is empty */
};
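/*
 * The intent behind starved[] and drain, sketched under the assumption
 * that the allocator marks a direction starved when it finds the pool
 * exhausted: a free in either direction re-kicks starved waiters, and
 * drain is woken once the queue is empty.
 */
static void example_freed_request(struct request_list *rl, int rw)
{
	rl->count[rw]--;
	if (rl->starved[rw ^ 1] && waitqueue_active(&rl->wait[rw ^ 1]))
		wake_up(&rl->wait[rw ^ 1]);	/* let the starved side retry */
	if (!rl->count[READ] && !rl->count[WRITE] &&
	    waitqueue_active(&rl->drain))
		wake_up(&rl->drain);		/* nothing left in flight */
}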
#define BLK_MAX_CDB 16
/* no. of sectors left to complete in the current segment */
unsigned int hard_cur_sectors;
- /* no. of segments left to submit in the current bio */
- unsigned short nr_cbio_segments;
- /* no. of sectors left to submit in the current bio */
- unsigned long nr_cbio_sectors;
-
- struct bio *cbio; /* next bio to submit */
- struct bio *bio; /* next unfinished bio to complete */
+	struct bio *bio;	/* head of the unfinished bio chain */
struct bio *biotail;
void *elevator_private;
__REQ_PM_SUSPEND, /* suspend request */
__REQ_PM_RESUME, /* resume request */
__REQ_PM_SHUTDOWN, /* shutdown request */
+ __REQ_BAR_PREFLUSH, /* barrier pre-flush done */
+ __REQ_BAR_POSTFLUSH, /* barrier post-flush */
__REQ_NR_BITS, /* stops here */
};
#define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND)
#define REQ_PM_RESUME (1 << __REQ_PM_RESUME)
#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN)
+#define REQ_BAR_PREFLUSH (1 << __REQ_BAR_PREFLUSH)
+#define REQ_BAR_POSTFLUSH (1 << __REQ_BAR_POSTFLUSH)
/*
* State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
struct bio_vec;
typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
typedef void (activity_fn) (void *data, int rw);
+typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
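/*
 * The shape an issue_flush_fn implementation takes, per the typedef
 * above: synchronously flush the device's write cache, reporting the
 * first failed sector through *error_sector on error. The body is a
 * hypothetical driver's internals.
 */
static int example_issue_flush(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	int err = 0;	/* e.g. err = example_hw_cache_flush(disk); */

	if (err && error_sector)
		*error_sector = 0;	/* failing sector, if the hw reports one */

	return err;
}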
enum blk_queue_state {
Queue_down,
*/
struct list_head queue_head;
struct request *last_merge;
- elevator_t elevator;
+ elevator_t *elevator;
/*
* the queue request freelist, one for reads and one for writes
unplug_fn *unplug_fn;
merge_bvec_fn *merge_bvec_fn;
activity_fn *activity_fn;
+ issue_flush_fn *issue_flush_fn;
/*
* Auto-unplugging state
* queue settings
*/
unsigned long nr_requests; /* Max # of requests */
+	unsigned int	nr_congestion_on;	/* congested above this many */
+	unsigned int	nr_congestion_off;	/* uncongested below this many */
+	unsigned int	nr_batching;		/* extra requests for a batching task */
unsigned short max_sectors;
+ unsigned short max_hw_sectors;
unsigned short max_phys_segments;
unsigned short max_hw_segments;
unsigned short hardsect_size;
*/
unsigned int sg_timeout;
unsigned int sg_reserved_size;
+
+ struct list_head drain_list;
};
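/*
 * nr_congestion_on/off give the congestion logic hysteresis around
 * nr_requests; the exact fractions below are an assumption, but the
 * shape is the point: flag congestion near full, clear it only once
 * clearly emptier, so the state cannot flap on every request.
 */
static void example_congestion_thresholds(request_queue_t *q)
{
	q->nr_congestion_on = q->nr_requests - (q->nr_requests / 8) + 1;
	q->nr_congestion_off = q->nr_requests - (q->nr_requests / 16) - 1;
	/* clamping to [1, nr_requests] elided */
}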
#define RQ_INACTIVE (-1)
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
+#define QUEUE_FLAG_ORDERED 8 /* supports ordered writes */
+#define QUEUE_FLAG_DRAIN 9 /* draining queue for sched switch */
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_pm_request(rq) \
((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))
+#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER)
+#define blk_barrier_preflush(rq) ((rq)->flags & REQ_BAR_PREFLUSH)
+#define blk_barrier_postflush(rq) ((rq)->flags & REQ_BAR_POSTFLUSH)
+
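/*
 * How the three tests above are meant to be read in a driver's
 * completion path; the function is illustrative only.
 */
static const char *example_barrier_stage(struct request *rq)
{
	if (blk_barrier_preflush(rq))
		return "pre-flush";	/* cache flush ahead of the barrier write */
	if (blk_barrier_postflush(rq))
		return "post-flush";	/* flush behind it; barrier complete */
	if (blk_barrier_rq(rq))
		return "barrier write";	/* the ordered write itself */
	return "normal";
}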
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
#define rq_data_dir(rq) ((rq)->flags & 1)
*/
#define blk_queue_headactive(q, head_active)
-/* current index into bio being processed for submission */
-#define blk_rq_idx(rq) ((rq)->cbio->bi_vcnt - (rq)->nr_cbio_segments)
-
-/* current bio vector being processed */
-#define blk_rq_vec(rq) (bio_iovec_idx((rq)->cbio, blk_rq_idx(rq)))
-
-/* current offset with respect to start of the segment being submitted */
-#define blk_rq_offset(rq) \
- (((rq)->hard_cur_sectors - (rq)->current_nr_sectors) << 9)
-
-/*
- * temporarily mapping a (possible) highmem bio (typically for PIO transfer)
- */
-
-/* Assumes rq->cbio != NULL */
-static inline char * rq_map_buffer(struct request *rq, unsigned long *flags)
-{
- return (__bio_kmap_irq(rq->cbio, blk_rq_idx(rq), flags)
- + blk_rq_offset(rq));
-}
-
-static inline void rq_unmap_buffer(char *buffer, unsigned long *flags)
-{
- __bio_kunmap_irq(buffer, flags);
-}
-
/*
* q->prep_rq_fn return values
*/
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
-extern int scsi_cmd_ioctl(struct gendisk *, unsigned int, unsigned long);
+extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
+extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(request_queue_t *q);
extern void blk_run_queue(request_queue_t *);
extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
extern struct request *blk_rq_map_user(request_queue_t *, int, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct request *, void __user *, struct bio *, unsigned int);
+extern int blk_rq_unmap_user(struct request *, struct bio *, unsigned int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *);
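/*
 * The intended pairing of the mapping helpers around blk_execute_rq(),
 * with hypothetical setup and error handling. Note the signature change
 * above: blk_rq_unmap_user() no longer takes the user pointer, so the
 * original bio has to be remembered across execution.
 */
static int example_user_io(request_queue_t *q, struct gendisk *disk,
			   void __user *ubuf, unsigned int len)
{
	struct request *rq = blk_rq_map_user(q, READ, ubuf, len);
	struct bio *bio;
	int err;

	if (!rq)
		return -ENOMEM;	/* assuming a NULL return on failure */

	bio = rq->bio;		/* completion may advance rq->bio */
	/* ... fill in rq->cmd for the device ... */
	err = blk_execute_rq(q, disk, rq);
	if (blk_rq_unmap_user(rq, bio, len))
		err = -EFAULT;

	return err;
}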
static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
return bdev->bd_disk->queue;
}
-static inline void blk_run_backing_dev(struct backing_dev_info *bdi)
+static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
+ struct page *page)
{
if (bdi && bdi->unplug_io_fn)
- bdi->unplug_io_fn(bdi);
+ bdi->unplug_io_fn(bdi, page);
}
static inline void blk_run_address_space(struct address_space *mapping)
{
if (mapping)
- blk_run_backing_dev(mapping->backing_dev_info);
+ blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
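/*
 * The new page argument lets an unplug_io_fn kick only the I/O a single
 * page is waiting on; blk_run_address_space() above passes NULL, which
 * is assumed to keep the old unplug-everything meaning. Sketch of a
 * conforming callback:
 */
static void example_unplug_io_fn(struct backing_dev_info *bdi,
				 struct page *page)
{
	if (!page) {
		/* unplug all pending I/O on this backing device */
	} else {
		/* unplug only what this page's I/O depends on */
	}
}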
/*
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *);
-extern int process_that_request_first(struct request *, unsigned int);
extern void end_request(struct request *req, int uptodate);
+/*
+ * end_that_request_first/chunk() take an uptodate argument: we account
+ * any value <= 0 as an io error. 0 means -EIO for compatibility reasons,
+ * any other value < 0 is the direct error type. An uptodate value of
+ * 1 indicates successful io completion.
+ */
+#define end_io_error(uptodate) (unlikely((uptodate) <= 0))
+
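/*
 * Typical use of end_io_error() in a completion handler; the function
 * and message are invented for illustration, and hard_nr_sectors (not
 * shown in this hunk) is the request's remaining sector count.
 */
static void example_end_io(struct request *rq, int uptodate)
{
	if (end_io_error(uptodate))
		printk(KERN_ERR "example: io error %d\n",
		       uptodate ? uptodate : -EIO);	/* 0 maps to -EIO */

	/* returns 0 once the whole request is done */
	if (!end_that_request_first(rq, uptodate, rq->hard_nr_sectors))
		end_that_request_last(rq);
}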
static inline void blkdev_dequeue_request(struct request *req)
{
BUG_ON(list_empty(&req->queuelist));
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
+extern void blk_queue_ordered(request_queue_t *, int);
+extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
+extern int blkdev_scsi_issue_flush_fn(request_queue_t *, struct gendisk *, sector_t *);
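/*
 * Driver-init sketch for the two registration hooks above. Whether the
 * int argument to blk_queue_ordered() is a plain on/off matching
 * QUEUE_FLAG_ORDERED is an assumption; example_issue_flush is the
 * sketch shown earlier.
 */
static void example_init_barriers(request_queue_t *q)
{
	blk_queue_ordered(q, 1);	/* advertise ordered-write support */
	blk_queue_issue_flush_fn(q, example_issue_flush);
}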
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(request_queue_t *);
+extern void __generic_unplug_device(request_queue_t *);
extern long nr_blockdev_pages(void);
+extern void blk_wait_queue_drained(request_queue_t *, int);
+extern void blk_finish_queue_drain(request_queue_t *);
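/*
 * The drain helpers pair around work that needs a quiesced, empty queue
 * (the scheduler switch QUEUE_FLAG_DRAIN alludes to); the meaning of
 * the int argument is an assumption here.
 */
static void example_quiesce(request_queue_t *q)
{
	blk_wait_queue_drained(q, 0);	/* stop new requests, wait out old */
	/* ... operate on the idle queue, e.g. swap elevators ... */
	blk_finish_queue_drain(q);	/* lift the drain and restart */
}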
int blk_get_queue(request_queue_t *);
request_queue_t *blk_alloc_queue(int);
extern long blk_congestion_wait(int rw, long timeout);
extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
-extern void blk_rq_prep_restart(struct request *);
+extern int blkdev_issue_flush(struct block_device *, sector_t *);
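/*
 * Caller-side sketch of blkdev_issue_flush(), e.g. from an fsync path;
 * the error_sector semantics are inferred from the signature.
 */
static int example_fsync_flush(struct block_device *bdev)
{
	sector_t error_sector;

	if (blkdev_issue_flush(bdev, &error_sector))
		return -EIO;	/* error_sector names the sector that failed */

	return 0;
}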
#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128