Updated BFQ to v8r3

Changes since v8r2:

* BUGFIX: Update weight-raising coefficient when switching from interactive
  to soft real-time.
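
In substance (see the "old_wr_coeff > 1" branch in the bfq-iosched.c hunk
below), v8r2 refreshed only the raising duration when an already weight-raised
queue received an interactive trigger, so the queue could keep a coefficient
left over from an earlier raising period; v8r3 resets the coefficient as well.
A minimal self-contained sketch of the corrected branch follows; the structs,
the wr_duration() helper and all numeric values are illustrative stand-ins,
not the kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures (illustrative only). */
struct bfq_data {
        unsigned int bfq_wr_coeff;      /* device-wide raising coefficient */
};

struct bfq_queue {
        unsigned int wr_coeff;          /* queue's current raising coefficient */
        unsigned long wr_cur_max_time;  /* remaining raising window, jiffies */
};

/* Illustrative stand-in for bfq_wr_duration(bfqd). */
static unsigned long wr_duration(const struct bfq_data *bfqd)
{
        (void)bfqd;
        return 6000;
}

/*
 * Corrected v8r3 logic for a queue that is already weight-raised
 * (old_wr_coeff > 1) when a new request arrives.
 */
static void update_wr_on_arrival(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                                 bool interactive, bool in_burst)
{
        if (interactive) {
                /*
                 * v8r2 refreshed only wr_cur_max_time here; v8r3 also
                 * resets the coefficient, which may still hold a value
                 * set by an earlier raising period.
                 */
                bfqq->wr_coeff = bfqd->bfq_wr_coeff;
                bfqq->wr_cur_max_time = wr_duration(bfqd);
        } else if (in_burst) {
                bfqq->wr_coeff = 1;     /* queues in a burst are not raised */
        }
}

int main(void)
{
        struct bfq_data bfqd = { .bfq_wr_coeff = 30 };
        struct bfq_queue bfqq = { .wr_coeff = 7 /* stale coefficient */ };

        update_wr_on_arrival(&bfqd, &bfqq, true, false);
        printf("wr_coeff=%u wr_cur_max_time=%lu\n",
               bfqq.wr_coeff, bfqq.wr_cur_max_time);
        return 0;
}

With the v8r2 logic the first assignment was missing, so wr_coeff would have
stayed at its stale value (7 in this toy run) even though the raising window
itself was refreshed.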
Evgenii Shatokhin 2016-09-09 17:55:10 +03:00
parent 779aaf6307
commit d621eef633
3 changed files with 116 additions and 110 deletions


@@ -1,16 +1,16 @@
From 0061399c3c07fb8d119c0d581b613b870e63b165 Mon Sep 17 00:00:00 2001
From d384ccf796a992e27691b7359ce54534db57e74c Mon Sep 17 00:00:00 2001
From: Paolo Valente <paolo.valente@linaro.org>
Date: Tue, 17 May 2016 08:28:04 +0200
Subject: [PATCH 4/4] block, bfq: turn BFQ-v7r11 for 4.7.0 into BFQ-v8r2 for
Subject: [PATCH 4/4] block, bfq: turn BFQ-v7r11 for 4.7.0 into BFQ-v8r3 for
4.7.0
---
block/Kconfig.iosched | 2 +-
block/bfq-cgroup.c | 480 +++++----
block/bfq-iosched.c | 2601 +++++++++++++++++++++++++++++--------------------
block/bfq-iosched.c | 2602 +++++++++++++++++++++++++++++--------------------
block/bfq-sched.c | 441 +++++++--
block/bfq.h | 708 +++++++-------
5 files changed, 2483 insertions(+), 1749 deletions(-)
5 files changed, 2484 insertions(+), 1749 deletions(-)
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index f78cd1a..6d92579 100644
@@ -881,7 +881,7 @@ index 5ee99ec..c83d90c 100644
static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index d1f648d..5bff378 100644
index d1f648d..3bc1f8b 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -7,25 +7,26 @@
@@ -1368,7 +1368,7 @@ index d1f648d..5bff378 100644
}
/*
@@ -856,25 +875,497 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -856,25 +875,498 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* queue. Then we add bfqq to the burst.
*/
bfq_add_to_burst(bfqd, bfqq);
@@ -1603,9 +1603,10 @@ index d1f648d..5bff378 100644
+ jiffies,
+ jiffies_to_msecs(bfqq->wr_cur_max_time));
+ } else if (old_wr_coeff > 1) {
+ if (interactive) /* update wr duration */
+ if (interactive) { /* update wr coeff and duration */
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+ else if (in_burst) {
+ } else if (in_burst) {
+ bfqq->wr_coeff = 1;
+ bfq_log_bfqq(bfqd, bfqq,
+ "wrais ending at %lu, rais_max_time %u",
@@ -1870,7 +1871,7 @@ index d1f648d..5bff378 100644
*/
prev = bfqq->next_rq;
next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
@@ -887,160 +1378,10 @@ static void bfq_add_request(struct request *rq)
@@ -887,160 +1379,10 @@ static void bfq_add_request(struct request *rq)
if (prev != bfqq->next_rq)
bfq_pos_tree_add_move(bfqd, bfqq);
@@ -2035,7 +2036,7 @@ index d1f648d..5bff378 100644
if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
time_is_before_jiffies(
bfqq->last_wr_start_finish +
@@ -1049,16 +1390,43 @@ add_bfqq_busy:
@@ -1049,16 +1391,43 @@ add_bfqq_busy:
bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
bfqd->wr_busy_queues++;
@@ -2083,7 +2084,7 @@ index d1f648d..5bff378 100644
if (bfqd->low_latency &&
(old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
bfqq->last_wr_start_finish = jiffies;
@@ -1106,6 +1474,9 @@ static void bfq_remove_request(struct request *rq)
@@ -1106,6 +1475,9 @@ static void bfq_remove_request(struct request *rq)
struct bfq_data *bfqd = bfqq->bfqd;
const int sync = rq_is_sync(rq);
@@ -2093,7 +2094,7 @@ index d1f648d..5bff378 100644
if (bfqq->next_rq == rq) {
bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
bfq_updated_next_req(bfqd, bfqq);
@@ -1119,8 +1490,25 @@ static void bfq_remove_request(struct request *rq)
@@ -1119,8 +1491,25 @@ static void bfq_remove_request(struct request *rq)
elv_rb_del(&bfqq->sort_list, rq);
if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
@@ -2120,7 +2121,7 @@ index d1f648d..5bff378 100644
/*
* Remove queue from request-position tree as it is empty.
*/
@@ -1134,9 +1522,7 @@ static void bfq_remove_request(struct request *rq)
@@ -1134,9 +1523,7 @@ static void bfq_remove_request(struct request *rq)
BUG_ON(bfqq->meta_pending == 0);
bfqq->meta_pending--;
}
@@ -2130,7 +2131,7 @@ index d1f648d..5bff378 100644
}
static int bfq_merge(struct request_queue *q, struct request **req,
@@ -1221,21 +1607,25 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq,
@@ -1221,21 +1608,25 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq,
bfqq->next_rq = rq;
bfq_remove_request(next);
@@ -2159,7 +2160,7 @@ index d1f648d..5bff378 100644
}
static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
@@ -1278,7 +1668,7 @@ static int bfq_rq_close_to_sector(void *io_struct, bool request,
@@ -1278,7 +1669,7 @@ static int bfq_rq_close_to_sector(void *io_struct, bool request,
sector_t sector)
{
return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
@@ -2168,7 +2169,7 @@ index d1f648d..5bff378 100644
}
static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
@@ -1400,7 +1790,7 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
@@ -1400,7 +1791,7 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
* throughput.
*/
bfqq->new_bfqq = new_bfqq;
@@ -2177,7 +2178,7 @@ index d1f648d..5bff378 100644
return new_bfqq;
}
@@ -1431,9 +1821,23 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
@@ -1431,9 +1822,23 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
}
/*
@@ -2204,7 +2205,7 @@ index d1f648d..5bff378 100644
* structure otherwise.
*
* The OOM queue is not allowed to participate to cooperation: in fact, since
@@ -1442,6 +1846,18 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
@@ -1442,6 +1847,18 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
* handle merging with the OOM queue would be quite complex and expensive
* to maintain. Besides, in such a critical condition as an out of memory,
* the benefits of queue merging may be little relevant, or even negligible.
@@ -2223,7 +2224,7 @@ index d1f648d..5bff378 100644
*/
static struct bfq_queue *
bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -1451,16 +1867,32 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -1451,16 +1868,32 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
if (bfqq->new_bfqq)
return bfqq->new_bfqq;
@@ -2259,7 +2260,7 @@ index d1f648d..5bff378 100644
unlikely(in_service_bfqq == &bfqd->oom_bfqq))
goto check_scheduled;
@@ -1482,7 +1914,15 @@ check_scheduled:
@@ -1482,7 +1915,15 @@ check_scheduled:
BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent);
@@ -2276,7 +2277,7 @@ index d1f648d..5bff378 100644
bfq_may_be_close_cooperator(bfqq, new_bfqq))
return bfq_setup_merge(bfqq, new_bfqq);
@@ -1498,46 +1938,11 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
@@ -1498,46 +1939,11 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
*/
if (!bfqq->bic)
return;
@@ -2324,7 +2325,7 @@ index d1f648d..5bff378 100644
}
static void bfq_get_bic_reference(struct bfq_queue *bfqq)
@@ -1562,6 +1967,40 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
@@ -1562,6 +1968,40 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
if (bfq_bfqq_IO_bound(bfqq))
bfq_mark_bfqq_IO_bound(new_bfqq);
bfq_clear_bfqq_IO_bound(bfqq);
@@ -2365,7 +2366,7 @@ index d1f648d..5bff378 100644
/*
* Grab a reference to the bic, to prevent it from being destroyed
* before being possibly touched by a bfq_split_bfqq().
@@ -1588,18 +2027,6 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
@@ -1588,18 +2028,6 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
bfq_put_queue(bfqq);
}
@@ -2384,7 +2385,7 @@ index d1f648d..5bff378 100644
static int bfq_allow_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
@@ -1637,30 +2064,86 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -1637,30 +2065,86 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq,
* to decide whether bio and rq can be merged.
*/
bfqq = new_bfqq;
@@ -2477,7 +2478,7 @@ index d1f648d..5bff378 100644
bfqd->in_service_queue = bfqq;
}
@@ -1676,31 +2159,6 @@ static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
@@ -1676,31 +2160,6 @@ static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
return bfqq;
}
@@ -2509,7 +2510,7 @@ index d1f648d..5bff378 100644
static void bfq_arm_slice_timer(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq = bfqd->in_service_queue;
@@ -1725,62 +2183,34 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
@@ -1725,62 +2184,34 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
* being too ill-treated, grant them a small fraction of the
* assigned budget before reducing the waiting time to
* BFQ_MIN_TT. This happened to help reduce latency.
@@ -2593,7 +2594,7 @@ index d1f648d..5bff378 100644
struct bfq_queue *bfqq = RQ_BFQQ(rq);
/*
@@ -1794,15 +2224,9 @@ static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
@@ -1794,15 +2225,9 @@ static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
* incrementing bfqq->dispatched.
*/
bfqq->dispatched++;
@@ -2610,7 +2611,7 @@ index d1f648d..5bff378 100644
}
/*
@@ -1822,18 +2246,12 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
@@ -1822,18 +2247,12 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
rq = rq_entry_fifo(bfqq->fifo.next);
@@ -2630,7 +2631,7 @@ index d1f648d..5bff378 100644
static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
BUG_ON(bfqq != bfqd->in_service_queue);
@@ -1850,12 +2268,15 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
@@ -1850,12 +2269,15 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_mark_bfqq_split_coop(bfqq);
if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
@@ -2652,7 +2653,7 @@ index d1f648d..5bff378 100644
bfq_del_bfqq_busy(bfqd, bfqq, 1);
} else {
bfq_activate_bfqq(bfqd, bfqq);
@@ -1882,10 +2303,19 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
@@ -1882,10 +2304,19 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
struct request *next_rq;
int budget, min_budget;
@@ -2674,7 +2675,7 @@ index d1f648d..5bff378 100644
bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
@@ -1894,7 +2324,7 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
@@ -1894,7 +2325,7 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
@@ -2683,7 +2684,7 @@ index d1f648d..5bff378 100644
switch (reason) {
/*
* Caveat: in all the following cases we trade latency
@@ -1936,14 +2366,10 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
@@ -1936,14 +2367,10 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
break;
case BFQ_BFQQ_BUDGET_TIMEOUT:
/*
@@ -2702,7 +2703,7 @@ index d1f648d..5bff378 100644
*/
budget = min(budget * 2, bfqd->bfq_max_budget);
break;
@@ -1960,17 +2386,49 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
@@ -1960,17 +2387,49 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
budget = min(budget * 4, bfqd->bfq_max_budget);
break;
case BFQ_BFQQ_NO_MORE_REQUESTS:
@@ -2759,7 +2760,7 @@ index d1f648d..5bff378 100644
*/
budget = bfqd->bfq_max_budget;
@@ -1981,65 +2439,105 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
@@ -1981,65 +2440,105 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
/*
@@ -2894,7 +2895,7 @@ index d1f648d..5bff378 100644
/*
* Calculate the bandwidth for the last slice. We use a 64 bit
@@ -2048,32 +2546,51 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -2048,32 +2547,51 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* and to avoid overflows.
*/
bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
@@ -2962,7 +2963,7 @@ index d1f648d..5bff378 100644
}
update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
@@ -2086,9 +2603,8 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -2086,9 +2604,8 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
int dev_type = blk_queue_nonrot(bfqd->queue);
if (bfqd->bfq_user_max_budget == 0) {
bfqd->bfq_max_budget =
@@ -2974,7 +2975,7 @@ index d1f648d..5bff378 100644
bfqd->bfq_max_budget);
}
if (bfqd->device_speed == BFQ_BFQD_FAST &&
@@ -2102,38 +2618,35 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -2102,38 +2619,35 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd->RT_prod = R_fast[dev_type] *
T_fast[dev_type];
}
@@ -3038,7 +3039,7 @@ index d1f648d..5bff378 100644
}
/*
@@ -2191,6 +2704,15 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -2191,6 +2705,15 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
@@ -3054,7 +3055,7 @@ index d1f648d..5bff378 100644
return max(bfqq->last_idle_bklogged +
HZ * bfqq->service_from_backlogged /
bfqd->bfq_wr_max_softrt_rate,
@@ -2198,13 +2720,21 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
@@ -2198,13 +2721,21 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
}
/*
@@ -3081,7 +3082,7 @@ index d1f648d..5bff378 100644
}
/**
@@ -2214,28 +2744,24 @@ static unsigned long bfq_infinity_from_now(unsigned long now)
@@ -2214,28 +2745,24 @@ static unsigned long bfq_infinity_from_now(unsigned long now)
* @compensate: if true, compensate for the time spent idling.
* @reason: the reason causing the expiration.
*
@@ -3127,7 +3128,7 @@ index d1f648d..5bff378 100644
*/
static void bfq_bfqq_expire(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
@@ -2243,40 +2769,53 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
@@ -2243,40 +2770,53 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
enum bfqq_expiration reason)
{
bool slow;
@@ -3203,7 +3204,7 @@ index d1f648d..5bff378 100644
bfq_clear_bfqq_IO_bound(bfqq);
if (bfqd->low_latency && bfqq->wr_coeff == 1)
@@ -2285,19 +2824,23 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
@@ -2285,19 +2825,23 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
RB_EMPTY_ROOT(&bfqq->sort_list)) {
/*
@@ -3235,7 +3236,7 @@ index d1f648d..5bff378 100644
/*
* The application is still waiting for the
* completion of one or more requests:
@@ -2314,7 +2857,7 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
@@ -2314,7 +2858,7 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
* happened to be in the past.
*/
bfqq->soft_rt_next_start =
@@ -3244,7 +3245,7 @@ index d1f648d..5bff378 100644
/*
* Schedule an update of soft_rt_next_start to when
* the task may be discovered to be isochronous.
@@ -2324,15 +2867,27 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
@@ -2324,15 +2868,27 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
}
bfq_log_bfqq(bfqd, bfqq,
@@ -3274,7 +3275,7 @@ index d1f648d..5bff378 100644
}
/*
@@ -2342,20 +2897,17 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
@@ -2342,20 +2898,17 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
*/
static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
{
@@ -3303,7 +3304,7 @@ index d1f648d..5bff378 100644
static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
{
bfq_log_bfqq(bfqq->bfqd, bfqq,
@@ -2397,10 +2949,12 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
@@ -2397,10 +2950,12 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
{
struct bfq_data *bfqd = bfqq->bfqd;
bool idling_boosts_thr, idling_boosts_thr_without_issues,
@@ -3317,7 +3318,7 @@ index d1f648d..5bff378 100644
/*
* The next variable takes into account the cases where idling
* boosts the throughput.
@@ -2422,7 +2976,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
@@ -2422,7 +2977,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
*/
idling_boosts_thr = !bfqd->hw_tag ||
(!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) &&
@@ -3326,7 +3327,7 @@ index d1f648d..5bff378 100644
/*
* The value of the next variable,
@@ -2463,74 +3017,27 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
@@ -2463,74 +3018,27 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
bfqd->wr_busy_queues == 0;
/*
@@ -3420,7 +3421,7 @@ index d1f648d..5bff378 100644
* (i) each of these processes must get the same throughput as
* the others;
* (ii) all these processes have the same I/O pattern
@@ -2552,26 +3059,53 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
@@ -2552,26 +3060,53 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
* words, only if sub-condition (i) holds, then idling is
* allowed, and the device tends to be prevented from queueing
* many requests, possibly of several processes. The reason
@@ -3494,7 +3495,7 @@ index d1f648d..5bff378 100644
*
* According to the above considerations, the next variable is
* true (only) if sub-condition (i) holds. To compute the
@@ -2579,7 +3113,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
@@ -2579,7 +3114,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
* the function bfq_symmetric_scenario(), but also check
* whether bfqq is being weight-raised, because
* bfq_symmetric_scenario() does not take into account also
@@ -3503,7 +3504,7 @@ index d1f648d..5bff378 100644
* bfq_weights_tree_add()).
*
* As a side note, it is worth considering that the above
@@ -2601,17 +3135,16 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
@@ -2601,17 +3136,16 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
* bfqq. Such a case is when bfqq became active in a burst of
* queue activations. Queues that became active during a large
* burst benefit only from throughput, as discussed in the
@@ -3526,7 +3527,7 @@ index d1f648d..5bff378 100644
/*
* We have now all the components we need to compute the return
@@ -2621,6 +3154,14 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
@@ -2621,6 +3155,14 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
* 2) idling either boosts the throughput (without issues), or
* is necessary to preserve service guarantees.
*/
@@ -3541,7 +3542,7 @@ index d1f648d..5bff378 100644
return bfq_bfqq_sync(bfqq) &&
(idling_boosts_thr_without_issues ||
idling_needed_for_service_guarantees);
@@ -2632,7 +3173,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
@@ -2632,7 +3174,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
* 1) the queue must remain in service and cannot be expired, and
* 2) the device must be idled to wait for the possible arrival of a new
* request for the queue.
@@ -3550,7 +3551,7 @@ index d1f648d..5bff378 100644
* why performing device idling is the best choice to boost the throughput
* and preserve service guarantees when bfq_bfqq_may_idle itself
* returns true.
@@ -2698,9 +3239,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
@@ -2698,9 +3240,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
*/
bfq_clear_bfqq_wait_request(bfqq);
del_timer(&bfqd->idle_slice_timer);
@@ -3560,7 +3561,7 @@ index d1f648d..5bff378 100644
}
goto keep_queue;
}
@@ -2745,14 +3284,11 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
@@ -2745,14 +3285,11 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
/*
@@ -3578,7 +3579,7 @@ index d1f648d..5bff378 100644
time_is_before_jiffies(bfqq->last_wr_start_finish +
bfqq->wr_cur_max_time)) {
bfqq->last_wr_start_finish = jiffies;
@@ -2811,13 +3347,29 @@ static int bfq_dispatch_request(struct bfq_data *bfqd,
@@ -2811,13 +3348,29 @@ static int bfq_dispatch_request(struct bfq_data *bfqd,
*/
if (!bfqd->rq_in_driver)
bfq_schedule_dispatch(bfqd);
@@ -3608,7 +3609,7 @@ index d1f648d..5bff378 100644
bfq_update_wr_data(bfqd, bfqq);
bfq_log_bfqq(bfqd, bfqq,
@@ -2833,9 +3385,7 @@ static int bfq_dispatch_request(struct bfq_data *bfqd,
@@ -2833,9 +3386,7 @@ static int bfq_dispatch_request(struct bfq_data *bfqd,
bfqd->in_service_bic = RQ_BIC(rq);
}
@@ -3619,7 +3620,7 @@ index d1f648d..5bff378 100644
goto expire;
return dispatched;
@@ -2881,8 +3431,8 @@ static int bfq_forced_dispatch(struct bfq_data *bfqd)
@@ -2881,8 +3432,8 @@ static int bfq_forced_dispatch(struct bfq_data *bfqd)
st = bfq_entity_service_tree(&bfqq->entity);
dispatched += __bfq_forced_dispatch_bfqq(bfqq);
@@ -3629,7 +3630,7 @@ index d1f648d..5bff378 100644
bfq_forget_idle(st);
}
@@ -2895,9 +3445,9 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
@@ -2895,9 +3446,9 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct bfq_queue *bfqq;
@@ -3640,7 +3641,7 @@ index d1f648d..5bff378 100644
if (bfqd->busy_queues == 0)
return 0;
@@ -2908,21 +3458,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
@@ -2908,21 +3459,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
if (!bfqq)
return 0;
@@ -3663,7 +3664,7 @@ index d1f648d..5bff378 100644
bfq_clear_bfqq_wait_request(bfqq);
BUG_ON(timer_pending(&bfqd->idle_slice_timer));
@@ -2933,6 +3469,8 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
@@ -2933,6 +3470,8 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
bfq_log_bfqq(bfqd, bfqq, "dispatched %s request",
bfq_bfqq_sync(bfqq) ? "sync" : "async");
@@ -3672,7 +3673,7 @@ index d1f648d..5bff378 100644
return 1;
}
@@ -2944,23 +3482,22 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
@@ -2944,23 +3483,22 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
*/
static void bfq_put_queue(struct bfq_queue *bfqq)
{
@@ -3701,7 +3702,7 @@ index d1f648d..5bff378 100644
if (bfq_bfqq_sync(bfqq))
/*
@@ -2973,7 +3510,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
@@ -2973,7 +3511,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
*/
hlist_del_init(&bfqq->burst_list_node);
@@ -3710,7 +3711,7 @@ index d1f648d..5bff378 100644
kmem_cache_free(bfq_pool, bfqq);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
@@ -3007,8 +3544,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
@@ -3007,8 +3545,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_schedule_dispatch(bfqd);
}
@@ -3720,7 +3721,7 @@ index d1f648d..5bff378 100644
bfq_put_cooperator(bfqq);
@@ -3019,26 +3555,7 @@ static void bfq_init_icq(struct io_cq *icq)
@@ -3019,26 +3556,7 @@ static void bfq_init_icq(struct io_cq *icq)
{
struct bfq_io_cq *bic = icq_to_bic(icq);
@@ -3748,7 +3749,7 @@ index d1f648d..5bff378 100644
}
static void bfq_exit_icq(struct io_cq *icq)
@@ -3046,21 +3563,21 @@ static void bfq_exit_icq(struct io_cq *icq)
@@ -3046,21 +3564,21 @@ static void bfq_exit_icq(struct io_cq *icq)
struct bfq_io_cq *bic = icq_to_bic(icq);
struct bfq_data *bfqd = bic_to_bfqd(bic);
@@ -3777,7 +3778,7 @@ index d1f648d..5bff378 100644
}
}
@@ -3068,7 +3585,8 @@ static void bfq_exit_icq(struct io_cq *icq)
@@ -3068,7 +3586,8 @@ static void bfq_exit_icq(struct io_cq *icq)
* Update the entity prio values; note that the new values will not
* be used until the next (re)activation.
*/
@@ -3787,7 +3788,7 @@ index d1f648d..5bff378 100644
{
struct task_struct *tsk = current;
int ioprio_class;
@@ -3100,7 +3618,7 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *b
@@ -3100,7 +3619,7 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *b
break;
}
@@ -3796,7 +3797,7 @@ index d1f648d..5bff378 100644
printk(KERN_CRIT "bfq_set_next_ioprio_data: new_ioprio %d\n",
bfqq->new_ioprio);
BUG();
@@ -3108,45 +3626,40 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *b
@@ -3108,45 +3627,40 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *b
bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
bfqq->entity.prio_changed = 1;
@@ -3856,7 +3857,7 @@ index d1f648d..5bff378 100644
}
static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -3155,8 +3668,9 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -3155,8 +3669,9 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
RB_CLEAR_NODE(&bfqq->entity.rb_node);
INIT_LIST_HEAD(&bfqq->fifo);
INIT_HLIST_NODE(&bfqq->burst_list_node);
@@ -3867,7 +3868,7 @@ index d1f648d..5bff378 100644
bfqq->bfqd = bfqd;
if (bic)
@@ -3166,6 +3680,7 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -3166,6 +3681,7 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
if (!bfq_class_idle(bfqq))
bfq_mark_bfqq_idle_window(bfqq);
bfq_mark_bfqq_sync(bfqq);
@@ -3875,7 +3876,7 @@ index d1f648d..5bff378 100644
} else
bfq_clear_bfqq_sync(bfqq);
bfq_mark_bfqq_IO_bound(bfqq);
@@ -3175,72 +3690,17 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -3175,72 +3691,17 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqq->pid = pid;
bfqq->wr_coeff = 1;
@@ -3954,7 +3955,7 @@ index d1f648d..5bff378 100644
}
static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
@@ -3263,44 +3723,60 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
@@ -3263,44 +3724,60 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
}
static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
@@ -4033,7 +4034,7 @@ index d1f648d..5bff378 100644
return bfqq;
}
@@ -3316,37 +3792,21 @@ static void bfq_update_io_thinktime(struct bfq_data *bfqd,
@@ -3316,37 +3793,21 @@ static void bfq_update_io_thinktime(struct bfq_data *bfqd,
bic->ttime.ttime_samples;
}
@@ -4084,7 +4085,7 @@ index d1f648d..5bff378 100644
}
/*
@@ -3364,7 +3824,8 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
@@ -3364,7 +3825,8 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
return;
/* Idle window just restored, statistics are meaningless. */
@@ -4094,7 +4095,7 @@ index d1f648d..5bff378 100644
return;
enable_idle = bfq_bfqq_idle_window(bfqq);
@@ -3404,22 +3865,13 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -3404,22 +3866,13 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_update_io_thinktime(bfqd, bic);
bfq_update_io_seektime(bfqd, bfqq, rq);
@@ -4119,7 +4120,7 @@ index d1f648d..5bff378 100644
bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
@@ -3433,14 +3885,15 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -3433,14 +3886,15 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* is small and the queue is not to be expired, then
* just exit.
*
@@ -4143,7 +4144,7 @@ index d1f648d..5bff378 100644
*/
if (small_req && !budget_timeout)
return;
@@ -3453,9 +3906,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -3453,9 +3907,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
*/
bfq_clear_bfqq_wait_request(bfqq);
del_timer(&bfqd->idle_slice_timer);
@@ -4153,7 +4154,7 @@ index d1f648d..5bff378 100644
/*
* The queue is not empty, because a new request just
@@ -3499,27 +3950,19 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq)
@@ -3499,27 +3951,19 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq)
*/
new_bfqq->allocated[rq_data_dir(rq)]++;
bfqq->allocated[rq_data_dir(rq)]--;
@@ -4184,7 +4185,7 @@ index d1f648d..5bff378 100644
rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &bfqq->fifo);
@@ -3528,8 +3971,8 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq)
@@ -3528,8 +3972,8 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq)
static void bfq_update_hw_tag(struct bfq_data *bfqd)
{
@@ -4195,7 +4196,7 @@ index d1f648d..5bff378 100644
if (bfqd->hw_tag == 1)
return;
@@ -3555,48 +3998,45 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
@@ -3555,48 +3999,45 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_data *bfqd = bfqq->bfqd;
@@ -4264,7 +4265,7 @@ index d1f648d..5bff378 100644
*/
if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
RB_EMPTY_ROOT(&bfqq->sort_list))
@@ -3608,10 +4048,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
@@ -3608,10 +4049,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
* or if we want to idle in case it has no pending requests.
*/
if (bfqd->in_service_queue == bfqq) {
@@ -4276,7 +4277,7 @@ index d1f648d..5bff378 100644
bfq_arm_slice_timer(bfqd);
goto out;
} else if (bfq_may_expire_for_budg_timeout(bfqq))
@@ -3682,14 +4119,14 @@ static void bfq_put_request(struct request *rq)
@@ -3682,14 +4120,14 @@ static void bfq_put_request(struct request *rq)
rq->elv.priv[1] = NULL;
bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
@@ -4293,7 +4294,7 @@ index d1f648d..5bff378 100644
*/
static struct bfq_queue *
bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
@@ -3727,11 +4164,8 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
@@ -3727,11 +4165,8 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
unsigned long flags;
bool split = false;
@@ -4306,7 +4307,7 @@ index d1f648d..5bff378 100644
if (!bic)
goto queue_fail;
@@ -3741,23 +4175,47 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
@@ -3741,23 +4176,47 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
new_queue:
bfqq = bic_to_bfqq(bic, is_sync);
if (!bfqq || bfqq == &bfqd->oom_bfqq) {
@@ -4361,7 +4362,7 @@ index d1f648d..5bff378 100644
bfqq = bfq_split_bfqq(bic, bfqq);
split = true;
if (!bfqq)
@@ -3766,9 +4224,8 @@ new_queue:
@@ -3766,9 +4225,8 @@ new_queue:
}
bfqq->allocated[rw]++;
@@ -4373,7 +4374,7 @@ index d1f648d..5bff378 100644
rq->elv.priv[0] = bic;
rq->elv.priv[1] = bfqq;
@@ -3783,7 +4240,6 @@ new_queue:
@@ -3783,7 +4241,6 @@ new_queue:
if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
bfqq->bic = bic;
if (split) {
@@ -4381,7 +4382,7 @@ index d1f648d..5bff378 100644
/*
* If the queue has just been split from a shared
* queue, restore the idle window and the possible
@@ -3793,6 +4249,9 @@ new_queue:
@@ -3793,6 +4250,9 @@ new_queue:
}
}
@@ -4391,7 +4392,7 @@ index d1f648d..5bff378 100644
spin_unlock_irqrestore(q->queue_lock, flags);
return 0;
@@ -3872,6 +4331,7 @@ static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
@@ -3872,6 +4332,7 @@ static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
cancel_work_sync(&bfqd->unplug_work);
}
@@ -4399,7 +4400,7 @@ index d1f648d..5bff378 100644
static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
struct bfq_queue **bfqq_ptr)
{
@@ -3880,9 +4340,9 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
@@ -3880,9 +4341,9 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
if (bfqq) {
@@ -4411,7 +4412,7 @@ index d1f648d..5bff378 100644
bfq_put_queue(bfqq);
*bfqq_ptr = NULL;
}
@@ -3904,6 +4364,7 @@ static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
@@ -3904,6 +4365,7 @@ static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
}
@@ -4419,7 +4420,7 @@ index d1f648d..5bff378 100644
static void bfq_exit_queue(struct elevator_queue *e)
{
@@ -3923,8 +4384,6 @@ static void bfq_exit_queue(struct elevator_queue *e)
@@ -3923,8 +4385,6 @@ static void bfq_exit_queue(struct elevator_queue *e)
bfq_shutdown_timer_wq(bfqd);
@@ -4428,7 +4429,7 @@ index d1f648d..5bff378 100644
BUG_ON(timer_pending(&bfqd->idle_slice_timer));
#ifdef CONFIG_BFQ_GROUP_IOSCHED
@@ -3973,11 +4432,14 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
@@ -3973,11 +4433,14 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
* will not attempt to free it.
*/
bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
@@ -4444,7 +4445,7 @@ index d1f648d..5bff378 100644
/*
* Trigger weight initialization, according to ioprio, at the
* oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
@@ -3996,9 +4458,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
@@ -3996,9 +4459,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
goto out_free;
bfq_init_root_group(bfqd->root_group, bfqd);
bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
@@ -4454,7 +4455,7 @@ index d1f648d..5bff378 100644
init_timer(&bfqd->idle_slice_timer);
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
@@ -4023,20 +4482,19 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
@@ -4023,20 +4483,19 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
bfqd->bfq_back_penalty = bfq_back_penalty;
bfqd->bfq_slice_idle = bfq_slice_idle;
bfqd->bfq_class_idle_last_service = 0;
@@ -4482,7 +4483,7 @@ index d1f648d..5bff378 100644
bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
bfqd->bfq_wr_max_time = 0;
bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
@@ -4048,16 +4506,15 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
@@ -4048,16 +4507,15 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
* video.
*/
bfqd->wr_busy_queues = 0;
@@ -4503,7 +4504,7 @@ index d1f648d..5bff378 100644
bfqd->device_speed = BFQ_BFQD_FAST;
return 0;
@@ -4161,10 +4618,8 @@ SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
@@ -4161,10 +4619,8 @@ SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1);
SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
@@ -4516,7 +4517,7 @@ index d1f648d..5bff378 100644
SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
@@ -4199,10 +4654,6 @@ STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
@@ -4199,10 +4655,6 @@ STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
INT_MAX, 0);
STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1);
@@ -4527,7 +4528,7 @@ index d1f648d..5bff378 100644
STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
@@ -4224,10 +4675,8 @@ static ssize_t bfq_weights_store(struct elevator_queue *e,
@@ -4224,10 +4676,8 @@ static ssize_t bfq_weights_store(struct elevator_queue *e,
static unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
{
@@ -4539,7 +4540,7 @@ index d1f648d..5bff378 100644
else
return bfq_default_max_budget;
}
@@ -4252,6 +4701,10 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e,
@@ -4252,6 +4702,10 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e,
return ret;
}
@@ -4550,7 +4551,7 @@ index d1f648d..5bff378 100644
static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
const char *page, size_t count)
{
@@ -4264,13 +4717,31 @@ static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
@@ -4264,13 +4718,31 @@ static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
else if (__data > INT_MAX)
__data = INT_MAX;
@@ -4583,7 +4584,7 @@ index d1f648d..5bff378 100644
static ssize_t bfq_low_latency_store(struct elevator_queue *e,
const char *page, size_t count)
{
@@ -4297,9 +4768,8 @@ static struct elv_fs_entry bfq_attrs[] = {
@@ -4297,9 +4769,8 @@ static struct elv_fs_entry bfq_attrs[] = {
BFQ_ATTR(back_seek_penalty),
BFQ_ATTR(slice_idle),
BFQ_ATTR(max_budget),
@@ -4594,7 +4595,7 @@ index d1f648d..5bff378 100644
BFQ_ATTR(low_latency),
BFQ_ATTR(wr_coeff),
BFQ_ATTR(wr_max_time),
@@ -4342,9 +4812,28 @@ static struct elevator_type iosched_bfq = {
@@ -4342,9 +4813,28 @@ static struct elevator_type iosched_bfq = {
.elevator_owner = THIS_MODULE,
};
@@ -4619,11 +4620,11 @@ index d1f648d..5bff378 100644
static int __init bfq_init(void)
{
int ret;
+ char msg[50] = "BFQ I/O-scheduler: v8r2";
+ char msg[50] = "BFQ I/O-scheduler: v8r3";
/*
* Can be 0 on HZ < 1000 setups.
@@ -4352,9 +4841,6 @@ static int __init bfq_init(void)
@@ -4352,9 +4842,6 @@ static int __init bfq_init(void)
if (bfq_slice_idle == 0)
bfq_slice_idle = 1;
@ -4633,7 +4634,7 @@ index d1f648d..5bff378 100644
#ifdef CONFIG_BFQ_GROUP_IOSCHED
ret = blkcg_policy_register(&blkcg_policy_bfq);
if (ret)
@@ -4370,23 +4856,34 @@ static int __init bfq_init(void)
@@ -4370,23 +4857,34 @@ static int __init bfq_init(void)
* installed on the reference devices (see the comments before the
* definitions of the two arrays).
*/
@@ -5425,13 +5426,13 @@ index a64fec1..7d73b9d 100644
bfqd->wr_busy_queues++;
}
diff --git a/block/bfq.h b/block/bfq.h
index f73c942..c6ba099 100644
index f73c942..49d28b9 100644
--- a/block/bfq.h
+++ b/block/bfq.h
@@ -1,5 +1,5 @@
/*
- * BFQ-v7r11 for 4.5.0: data structures and common functions prototypes.
+ * BFQ-v8r2 for 4.7.0: data structures and common functions prototypes.
+ * BFQ-v8r3 for 4.7.0: data structures and common functions prototypes.
*
* Based on ideas and code from CFQ:
* Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
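
A side note on one of the bfq-iosched.c hunks above: bfq_bfqq_softrt_next_start()
pushes a queue's next soft real-time eligibility forward, so that the service
received while backlogged remains compatible with bfqd->bfq_wr_max_softrt_rate.
A toy calculation of the first operand of the max() shown in that hunk, with
made-up numbers (HZ, the rate and the service figure are illustrative, not BFQ
defaults):

#include <stdio.h>

#define HZ 1000UL       /* illustrative tick rate: 1 jiffy = 1 ms */

int main(void)
{
        /* All values below are made up for illustration. */
        unsigned long last_idle_bklogged = 50000;     /* jiffies at last idle */
        unsigned long service_from_backlogged = 2048; /* sectors while backlogged */
        unsigned long wr_max_softrt_rate = 7000;      /* sectors/sec for soft rt */

        /*
         * First operand of the max() in bfq_bfqq_softrt_next_start(): the
         * queue is not treated as soft real-time again before the instant
         * at which its measured service matches wr_max_softrt_rate.
         */
        unsigned long next_start = last_idle_bklogged +
                HZ * service_from_backlogged / wr_max_softrt_rate;

        printf("soft_rt_next_start >= %lu jiffies\n", next_start);
        return 0;
}

At HZ=1000 this example defers eligibility by 2048 * 1000 / 7000 = 292 jiffies
(292 ms) past the instant the queue last emptied.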


@@ -1,7 +1,7 @@
Budget Fair Queueing I/O Scheduler
==================================
This patchset introduces BFQ-v8r2 into Linux 4.7.0.
This patchset introduces BFQ-v8r3 into Linux 4.7.0.
For further information: http://algogroup.unimore.it/people/paolo/disk_sched/.
The overall diffstat is the following:
@@ -18,6 +18,11 @@ The overall diffstat is the following:
CHANGELOG
v8r3
. BUGFIX Update weight-raising coefficient when switching from
interactive to soft real-time.
v8r2
. BUGFIX Removed variables that are not used if tracing is


@@ -208,7 +208,7 @@ Patch109: fs-aufs4.patch
Patch111: 0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.7.0.patch
Patch112: 0002-block-introduce-the-BFQ-v7r11-I-O-sched-for-4.7.0.patch
Patch113: 0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-for.patch
Patch114: 0004-block-bfq-turn-BFQ-v7r11-for-4.7.0-into-BFQ-v8r2-for.patch
Patch114: 0004-block-bfq-turn-BFQ-v7r11-for-4.7.0-into-BFQ-v8r3-for.patch
# Sanitizing kernel memory
# We do not use "Patch:" here because apply_patched would always apply it