__REQ_PM_SUSPEND, /* suspend request */
__REQ_PM_RESUME, /* resume request */
__REQ_PM_SHUTDOWN, /* shutdown request */
+ __REQ_BAR_PREFLUSH, /* barrier pre-flush done */
+ __REQ_BAR_POSTFLUSH, /* barrier post-flush */
__REQ_NR_BITS, /* stops here */
};
#define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND)
#define REQ_PM_RESUME (1 << __REQ_PM_RESUME)
#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN)
+#define REQ_BAR_PREFLUSH (1 << __REQ_BAR_PREFLUSH)
+#define REQ_BAR_POSTFLUSH (1 << __REQ_BAR_POSTFLUSH)
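
A hedged illustration of the new bits in use: whoever queues the cache flushes around a barrier write would tag them by OR-ing these masks into rq->flags. The helper below is hypothetical, not part of this patch:

static void mydrv_mark_flush(struct request *flush_rq, int post)
{
	/* hypothetical: tag a flush queued before/after a barrier write */
	flush_rq->flags |= post ? REQ_BAR_POSTFLUSH : REQ_BAR_PREFLUSH;
}
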
/*
* State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
struct bio_vec;
typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
typedef void (activity_fn) (void *data, int rw);
+typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
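
A minimal sketch of a driver implementing this hook, assuming hypothetical mydrv_* helpers: it drains the device write cache and, on failure, may report a failed sector back through the sector_t pointer.

static int mydrv_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	struct mydrv_device *dev = disk->private_data;	/* hypothetical driver data */

	if (mydrv_flush_cache(dev)) {			/* hypothetical: drain write cache */
		if (error_sector)
			*error_sector = dev->last_error_sector;
		return -EIO;
	}
	return 0;
}

A driver would register this with blk_queue_issue_flush_fn(), declared further down in this patch.
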
enum blk_queue_state {
Queue_down,
unplug_fn *unplug_fn;
merge_bvec_fn *merge_bvec_fn;
activity_fn *activity_fn;
+ issue_flush_fn *issue_flush_fn;
/*
* Auto-unplugging state
unsigned int nr_congestion_off;
unsigned short max_sectors;
+ unsigned short max_hw_sectors;
unsigned short max_phys_segments;
unsigned short max_hw_segments;
unsigned short hardsect_size;
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
+#define QUEUE_FLAG_ORDERED 8 /* supports ordered writes */
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_pm_request(rq) \
((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))
+#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER)
+#define blk_barrier_preflush(rq) ((rq)->flags & REQ_BAR_PREFLUSH)
+#define blk_barrier_postflush(rq) ((rq)->flags & REQ_BAR_POSTFLUSH)
+
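A hedged sketch of a completion path using the new predicates (the mydrv_* names are hypothetical); barrier handling can be gated on the new queue flag in the usual test_bit() style:

static void mydrv_end_barrier(request_queue_t *q, struct request *rq)
{
	if (!test_bit(QUEUE_FLAG_ORDERED, &q->queue_flags) || !blk_barrier_rq(rq))
		return;

	if (blk_barrier_preflush(rq))
		mydrv_note_preflush(rq);	/* hypothetical: cache drained before the write */
	else if (blk_barrier_postflush(rq))
		mydrv_note_postflush(rq);	/* hypothetical: cache drained after the write */
}
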
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
#define rq_data_dir(rq) ((rq)->flags & 1)
extern int process_that_request_first(struct request *, unsigned int);
extern void end_request(struct request *req, int uptodate);
+/*
+ * end_that_request_first/chunk() takes an uptodate argument. We account
+ * any value <= 0 as an io error. 0 means -EIO for compatibility reasons,
+ * any other < 0 value is the direct error type. An uptodate value of
+ * 1 indicates successful io completion.
+ */
+#define end_io_error(uptodate) (unlikely((uptodate) <= 0))
+
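For illustration only (this helper is not part of the patch), the convention documented above maps to errnos like so:

static int uptodate_to_errno(int uptodate)
{
	if (!end_io_error(uptodate))
		return 0;				/* uptodate > 0: success */
	return uptodate < 0 ? uptodate : -EIO;		/* 0 -> -EIO, < 0 passed through */
}
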
static inline void blkdev_dequeue_request(struct request *req)
{
BUG_ON(list_empty(&req->queuelist));
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
+extern void blk_queue_ordered(request_queue_t *, int);
+extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
+extern int blkdev_scsi_issue_flush_fn(request_queue_t *, struct gendisk *, sector_t *);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
extern void blk_rq_prep_restart(struct request *);
+extern int blkdev_issue_flush(struct block_device *, sector_t *);
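
Putting the new exports together, a sketch of the two call sites this interface implies. The mydrv_* names are hypothetical, and blk_queue_ordered()'s int argument is assumed here to be a simple on/off flag:

static void mydrv_init_queue(request_queue_t *q)
{
	blk_queue_ordered(q, 1);			/* assumed: non-zero enables ordered writes */
	blk_queue_issue_flush_fn(q, mydrv_issue_flush);	/* hook from the sketch above */
}

static int flush_whole_device(struct block_device *bdev)
{
	sector_t error_sector;

	return blkdev_issue_flush(bdev, &error_sector);
}

A SCSI-backed driver could presumably register the exported blkdev_scsi_issue_flush_fn() directly, since its signature matches issue_flush_fn.
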
#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128