From: Ingo Molnar <mingo@elte.hu>

Introduces two new /sys/block values:

  /sys/block/*/queue/max_hw_sectors_kb
  /sys/block/*/queue/max_sectors_kb

max_hw_sectors_kb is the maximum request size (in kilobytes) that the
hardware/driver can handle and is read-only.  max_sectors_kb is the current
max_sectors value (also in kilobytes) and can be tuned by root.  A PAGE_SIZE
lower bound is enforced: writes below one page, or above max_hw_sectors_kb,
are rejected with -EINVAL.
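
For example (hda and the value 64 are placeholder names, not part of the
patch), the hardware limit can be inspected and the effective limit lowered
like this:

  # cat /sys/block/hda/queue/max_hw_sectors_kb
  # echo 64 > /sys/block/hda/queue/max_sectors_kb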

The updates are locking-safe (the queue lock is held while max_sectors and
the readahead window are updated together), and all affected layered drivers
(dm and md) have been converted to set their limits via
blk_queue_max_sectors() as well.  The patch has been in testing for a couple
of weeks as part of the voluntary-preempt patches and works fine; people use
it to reduce IDE IRQ handling latencies.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---
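
For reference, a minimal userspace sketch of the unit conversions the new
sysfs handlers below perform (the PAGE_CACHE_SHIFT value of 12, i.e. 4 KB
pages, is an assumption for illustration; the kernel uses its own,
arch-dependent definition):

  #include <stdio.h>

  /* assumed 4 KB pages for illustration; arch-dependent in the kernel */
  #define PAGE_CACHE_SHIFT 12

  int main(void)
  {
          unsigned long max_sectors_kb = 64; /* example sysfs write */

          /* 1 KB is two 512-byte sectors, hence the << 1 / >> 1 pairs */
          unsigned long max_sectors = max_sectors_kb << 1;

          /* readahead is kept in pages; KB <-> pages via PAGE_CACHE_SHIFT - 10 */
          unsigned long ra_pages = max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);

          printf("%lu KB = %lu sectors = %lu pages\n",
                 max_sectors_kb, max_sectors, ra_pages);
          return 0;
  }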

 25-akpm/drivers/block/ll_rw_blk.c |   70 +++++++++++++++++++++++++++++++++++---
 25-akpm/drivers/md/dm-table.c     |    2 -
 25-akpm/drivers/md/linear.c       |    2 -
 25-akpm/drivers/md/multipath.c    |    4 +-
 25-akpm/drivers/md/raid0.c        |    2 -
 25-akpm/drivers/md/raid1.c        |    4 +-
 25-akpm/include/linux/blkdev.h    |    1 
 7 files changed, 74 insertions(+), 11 deletions(-)

diff -puN drivers/block/ll_rw_blk.c~blk-max_sectors-tunables drivers/block/ll_rw_blk.c
--- 25/drivers/block/ll_rw_blk.c~blk-max_sectors-tunables	Wed Sep  8 14:23:46 2004
+++ 25-akpm/drivers/block/ll_rw_blk.c	Wed Sep  8 14:23:46 2004
@@ -353,7 +353,7 @@ void blk_queue_max_sectors(request_queue
 		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
 	}
 
-	q->max_sectors = max_sectors;
+	q->max_sectors = q->max_hw_sectors = max_sectors;
 }
 
 EXPORT_SYMBOL(blk_queue_max_sectors);
@@ -455,7 +455,8 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_sectors = t->max_hw_sectors =
+		min_not_zero(t->max_sectors,b->max_sectors);
 
 	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
 	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
@@ -2600,11 +2601,11 @@ end_io:
 			break;
 		}
 
-		if (unlikely(bio_sectors(bio) > q->max_sectors)) {
+		if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
 			printk("bio too big device %s (%u > %u)\n", 
 				bdevname(bio->bi_bdev, b),
 				bio_sectors(bio),
-				q->max_sectors);
+				q->max_hw_sectors);
 			goto end_io;
 		}
 
@@ -3246,13 +3247,61 @@ queue_ra_store(struct request_queue *q, 
 	unsigned long ra_kb;
 	ssize_t ret = queue_var_store(&ra_kb, page, count);
 
+	spin_lock_irq(q->queue_lock);
 	if (ra_kb > (q->max_sectors >> 1))
 		ra_kb = (q->max_sectors >> 1);
 
 	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
+static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
+{
+	int max_sectors_kb = q->max_sectors >> 1;
+
+	return queue_var_show(max_sectors_kb, (page));
+}
+
+static ssize_t
+queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
+{
+	unsigned long max_sectors_kb,
+			max_hw_sectors_kb = q->max_hw_sectors >> 1,
+			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
+	int ra_kb;
+
+	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
+		return -EINVAL;
+	/*
+	 * Take the queue lock to update the readahead and max_sectors
+	 * values synchronously:
+	 */
+	spin_lock_irq(q->queue_lock);
+	/*
+	 * Trim readahead window as well, if necessary:
+	 */
+	ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+	if (ra_kb > max_sectors_kb)
+		q->backing_dev_info.ra_pages =
+				max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
+
+	q->max_sectors = max_sectors_kb << 1;
+	spin_unlock_irq(q->queue_lock);
+
 	return ret;
 }
 
+static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+{
+	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+
+	return queue_var_show(max_hw_sectors_kb, (page));
+}
+
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -3265,9 +3314,22 @@ static struct queue_sysfs_entry queue_ra
 	.store = queue_ra_store,
 };
 
+static struct queue_sysfs_entry queue_max_sectors_entry = {
+	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_max_sectors_show,
+	.store = queue_max_sectors_store,
+};
+
+static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
+	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
+	.show = queue_max_hw_sectors_show,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
+	&queue_max_hw_sectors_entry.attr,
+	&queue_max_sectors_entry.attr,
 	NULL,
 };
 
diff -puN drivers/md/dm-table.c~blk-max_sectors-tunables drivers/md/dm-table.c
--- 25/drivers/md/dm-table.c~blk-max_sectors-tunables	Wed Sep  8 14:23:46 2004
+++ 25-akpm/drivers/md/dm-table.c	Wed Sep  8 14:23:46 2004
@@ -825,7 +825,7 @@ void dm_table_set_restrictions(struct dm
 	 * Make sure we obey the optimistic sub devices
 	 * restrictions.
 	 */
-	q->max_sectors = t->limits.max_sectors;
+	blk_queue_max_sectors(q, t->limits.max_sectors);
 	q->max_phys_segments = t->limits.max_phys_segments;
 	q->max_hw_segments = t->limits.max_hw_segments;
 	q->hardsect_size = t->limits.hardsect_size;
diff -puN drivers/md/linear.c~blk-max_sectors-tunables drivers/md/linear.c
--- 25/drivers/md/linear.c~blk-max_sectors-tunables	Wed Sep  8 14:23:46 2004
+++ 25-akpm/drivers/md/linear.c	Wed Sep  8 14:23:46 2004
@@ -154,7 +154,7 @@ static int linear_run (mddev_t *mddev)
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->size = rdev->size;
 		mddev->array_size += rdev->size;
diff -puN drivers/md/multipath.c~blk-max_sectors-tunables drivers/md/multipath.c
--- 25/drivers/md/multipath.c~blk-max_sectors-tunables	Wed Sep  8 14:23:46 2004
+++ 25-akpm/drivers/md/multipath.c	Wed Sep  8 14:23:46 2004
@@ -333,7 +333,7 @@ static int multipath_add_disk(mddev_t *m
 		 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-				mddev->queue->max_sectors = (PAGE_SIZE>>9);
+				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			conf->working_disks++;
 			rdev->raid_disk = path;
@@ -498,7 +498,7 @@ static int multipath_run (mddev_t *mddev
 		 * a merge_bvec_fn to be involved in multipath */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!rdev->faulty) 
 			conf->working_disks++;
diff -puN drivers/md/raid0.c~blk-max_sectors-tunables drivers/md/raid0.c
--- 25/drivers/md/raid0.c~blk-max_sectors-tunables	Wed Sep  8 14:23:46 2004
+++ 25-akpm/drivers/md/raid0.c	Wed Sep  8 14:23:46 2004
@@ -158,7 +158,7 @@ static int create_strip_zones (mddev_t *
 
 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!smallest || (rdev1->size <smallest->size))
 			smallest = rdev1;
diff -puN drivers/md/raid1.c~blk-max_sectors-tunables drivers/md/raid1.c
--- 25/drivers/md/raid1.c~blk-max_sectors-tunables	Wed Sep  8 14:23:46 2004
+++ 25-akpm/drivers/md/raid1.c	Wed Sep  8 14:23:46 2004
@@ -752,7 +752,7 @@ static int raid1_add_disk(mddev_t *mddev
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-				mddev->queue->max_sectors = (PAGE_SIZE>>9);
+				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			p->head_position = 0;
 			rdev->raid_disk = mirror;
@@ -1198,7 +1198,7 @@ static int run(mddev_t *mddev)
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;
 		if (!rdev->faulty && rdev->in_sync)
diff -puN include/linux/blkdev.h~blk-max_sectors-tunables include/linux/blkdev.h
--- 25/include/linux/blkdev.h~blk-max_sectors-tunables	Wed Sep  8 14:23:46 2004
+++ 25-akpm/include/linux/blkdev.h	Wed Sep  8 14:23:46 2004
@@ -362,6 +362,7 @@ struct request_queue
 	unsigned int		nr_batching;
 
 	unsigned short		max_sectors;
+	unsigned short		max_hw_sectors;
 	unsigned short		max_phys_segments;
 	unsigned short		max_hw_segments;
 	unsigned short		hardsect_size;
_