Merge tag 'for-5.17/drivers-2022-01-11' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
- mtip32xx PCI cleanups (Bjorn)
- mtip32xx conversion to generic power management (Vaibhav)
- rsxx PCI power management cleanups (Bjorn)
- Remove the rsxx driver. This hardware never saw much adoption, and
  it has been end-of-lifed for a while. (Christoph)
- MD pull request from Song:
    - REQ_NOWAIT support (Vishal Verma), caller-side sketch after this list
    - raid6 benchmark optimization (Dirk Müller)
    - Fix for acct bioset (Xiao Ni)
    - Clean up max_queued_requests (Mariusz Tkaczyk)
    - PREEMPT_RT optimization (Davidlohr Bueso)
    - Use default_groups in kobj_type (Greg Kroah-Hartman)
- Use attribute groups in pktcdvd and rnbd (Greg)
- NVMe pull request from Christoph:
    - increment request genctr on completion (Keith Busch, Geliang Tang)
    - add an 'iopolicy' module parameter (Hannes Reinecke)
    - print out valid arguments when reading from /dev/nvme-fabrics
      (Hannes Reinecke)
- Use struct_group() in drbd (Kees)
- null_blk fixes (Ming)
- Get rid of congestion logic in pktcdvd (Neil)
- Floppy ejection hang fix (Tasos)
- Floppy max user request size fix (Xiongwei)
- Loop locking fix (Tetsuo)
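
The REQ_NOWAIT items above let md fail a bio immediately with BLK_STS_AGAIN (via bio_wouldblock_error()) instead of sleeping when a suspend, barrier, or reshape would otherwise block it. A minimal caller-side sketch follows; it is illustrative only, the demo_* names are not part of this series, and it assumes the 5.17-era bio_alloc(gfp, nr_iovecs) signature:

/* Illustrative sketch: submit a read with REQ_NOWAIT and handle the
 * "would block" completion that the md changes in this pull produce. */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void demo_end_io(struct bio *bio)
{
	if (bio->bi_status == BLK_STS_AGAIN) {
		/* md (or the underlying queue) would have had to sleep;
		 * retry later from a context that is allowed to block. */
	}
	bio_put(bio);
}

static void demo_submit_nowait(struct block_device *bdev, struct page *page,
			       sector_t sector)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ | REQ_NOWAIT;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = demo_end_io;
	submit_bio(bio);
}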
* tag 'for-5.17/drivers-2022-01-11' of git://git.kernel.dk/linux-block: (32 commits)
md: use default_groups in kobj_type
md: Move alloc/free acct bioset in to personality
lib/raid6: Use strict priority ranking for pq gen() benchmarking
lib/raid6: skip benchmark of non-chosen xor_syndrome functions
md: fix spelling of "its"
md: raid456 add nowait support
md: raid10 add nowait support
md: raid1 add nowait support
md: add support for REQ_NOWAIT
md: drop queue limitation for RAID1 and RAID10
md/raid5: play nice with PREEMPT_RT
block/rnbd-clt-sysfs: use default_groups in kobj_type
pktcdvd: convert to use attribute groups
block: null_blk: only set set->nr_maps as 3 if active poll_queues is > 0
nvme: add 'iopolicy' module parameter
nvme: drop unused variable ctrl in nvme_setup_cmd
nvme: increment request genctr on completion
nvme-fabrics: print out valid arguments when reading from /dev/nvme-fabrics
block: remove the rsxx driver
rsxx: Drop PCI legacy power management
...
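
The "md/raid5: play nice with PREEMPT_RT" commit above swaps the get_cpu()/put_cpu() pair guarding the per-CPU scratch space for a local_lock_t, since a long preempt-disabled section is not acceptable on PREEMPT_RT. A generic sketch of the pattern (not the raid5 code; the demo_* names are made up) is:

/* Generic local_lock pattern: protect per-CPU data without an unbounded
 * preempt-disabled section.  On !PREEMPT_RT local_lock() only disables
 * preemption, like get_cpu(); on PREEMPT_RT it takes a per-CPU spinlock
 * and the section stays preemptible. */
#include <linux/percpu.h>
#include <linux/local_lock.h>

struct demo_pcpu {
	local_lock_t lock;
	int scratch;
};

static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void demo_update(int v)
{
	local_lock(&demo_pcpu.lock);
	this_cpu_write(demo_pcpu.scratch, v);
	local_unlock(&demo_pcpu.lock);
}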
drivers/md/md-cluster.c:

@@ -574,7 +574,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
	int ret = 0;

	if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
		"node %d received it's own msg\n", le32_to_cpu(msg->slot)))
		"node %d received its own msg\n", le32_to_cpu(msg->slot)))
		return -1;
	switch (le32_to_cpu(msg->type)) {
	case METADATA_UPDATED:
drivers/md/md.c:

@@ -418,6 +418,12 @@ check_suspended:
	rcu_read_lock();
	if (is_suspended(mddev, bio)) {
		DEFINE_WAIT(__wait);
		/* Bail out if REQ_NOWAIT is set for the bio */
		if (bio->bi_opf & REQ_NOWAIT) {
			rcu_read_unlock();
			bio_wouldblock_error(bio);
			return;
		}
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);

@@ -3603,6 +3609,7 @@ static struct attribute *rdev_default_attrs[] = {
	&rdev_ppl_size.attr,
	NULL,
};
ATTRIBUTE_GROUPS(rdev_default);
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{

@@ -3652,7 +3659,7 @@ static const struct sysfs_ops rdev_sysfs_ops = {
static struct kobj_type rdev_ktype = {
	.release = rdev_free,
	.sysfs_ops = &rdev_sysfs_ops,
	.default_attrs = rdev_default_attrs,
	.default_groups = rdev_default_groups,
};

int md_rdev_init(struct md_rdev *rdev)

@@ -5788,6 +5795,7 @@ int md_run(struct mddev *mddev)
	int err;
	struct md_rdev *rdev;
	struct md_personality *pers;
	bool nowait = true;

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */

@@ -5858,8 +5866,13 @@ int md_run(struct mddev *mddev)
			}
		}
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
	}

	/* Set the NOWAIT flags if all underlying devices support it */
	if (nowait)
		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);

	if (!bioset_initialized(&mddev->bio_set)) {
		err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
		if (err)

@@ -5870,13 +5883,6 @@ int md_run(struct mddev *mddev)
		if (err)
			goto exit_bio_set;
	}
	if (mddev->level != 1 && mddev->level != 10 &&
	    !bioset_initialized(&mddev->io_acct_set)) {
		err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
				  offsetof(struct md_io_acct, bio_clone), 0);
		if (err)
			goto exit_sync_set;
	}

	spin_lock(&pers_lock);
	pers = find_pers(mddev->level, mddev->clevel);

@@ -6053,9 +6059,6 @@ bitmap_abort:
	module_put(pers->owner);
	md_bitmap_destroy(mddev);
abort:
	if (mddev->level != 1 && mddev->level != 10)
		bioset_exit(&mddev->io_acct_set);
exit_sync_set:
	bioset_exit(&mddev->sync_set);
exit_bio_set:
	bioset_exit(&mddev->bio_set);

@@ -7004,6 +7007,15 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	if (!mddev->thread)
		md_update_sb(mddev, 1);
	/*
	 * If the new disk does not support REQ_NOWAIT,
	 * disable on the whole MD.
	 */
	if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
		pr_info("%s: Disabling nowait because %s does not support nowait\n",
			mdname(mddev), bdevname(rdev->bdev, b));
		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
	}
	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.

@@ -8402,7 +8414,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
	spin_lock(&pers_lock);
	/* ensure module won't be unloaded */
	if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
		pr_warn("can't find md-cluster module or get it's reference.\n");
		pr_warn("can't find md-cluster module or get its reference.\n");
		spin_unlock(&pers_lock);
		return -ENOENT;
	}

@@ -8589,6 +8601,23 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);

int acct_bioset_init(struct mddev *mddev)
{
	int err = 0;

	if (!bioset_initialized(&mddev->io_acct_set))
		err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
			offsetof(struct md_io_acct, bio_clone), 0);
	return err;
}
EXPORT_SYMBOL_GPL(acct_bioset_init);

void acct_bioset_exit(struct mddev *mddev)
{
	bioset_exit(&mddev->io_acct_set);
}
EXPORT_SYMBOL_GPL(acct_bioset_exit);

static void md_end_io_acct(struct bio *bio)
{
	struct md_io_acct *md_io_acct = bio->bi_private;
drivers/md/md.h:

@@ -721,6 +721,8 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
			struct bio *bio, sector_t start, sector_t size);
int acct_bioset_init(struct mddev *mddev);
void acct_bioset_exit(struct mddev *mddev);
void md_account_bio(struct mddev *mddev, struct bio **bio);

extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
drivers/md/raid0.c:

@@ -356,7 +356,21 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);
static void free_conf(struct mddev *mddev, struct r0conf *conf)
{
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	free_conf(mddev, conf);
	acct_bioset_exit(mddev);
}

static int raid0_run(struct mddev *mddev)
{

@@ -370,11 +384,16 @@ static int raid0_run(struct mddev *mddev)
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (acct_bioset_init(mddev)) {
		pr_err("md/raid0:%s: alloc acct bioset failed.\n", mdname(mddev));
		return -ENOMEM;
	}

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
			goto exit_acct_set;
		mddev->private = conf;
	}
	conf = mddev->private;

@@ -413,17 +432,16 @@ static int raid0_run(struct mddev *mddev)
	dump_zones(mddev);

	ret = md_integrity_register(mddev);
	if (ret)
		goto free;

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
free:
	free_conf(mddev, conf);
exit_acct_set:
	acct_bioset_exit(mddev);
	return ret;
}

static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
drivers/md/raid1-10.c:

@@ -22,12 +22,6 @@

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* When there are this many requests queue to be written by
 * the raid thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

/* for managing resync I/O pages */
struct resync_pages {
	void *raid_bio;
drivers/md/raid1.c:

@@ -929,8 +929,10 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
	wake_up(&conf->wait_barrier);
}

static void _wait_barrier(struct r1conf *conf, int idx)
static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
{
	bool ret = true;

	/*
	 * We need to increase conf->nr_pending[idx] very early here,
	 * then raise_barrier() can be blocked when it waits for

@@ -961,7 +963,7 @@ static void _wait_barrier(struct r1conf *conf, int idx)
	 */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;
		return ret;

	/*
	 * After holding conf->resync_lock, conf->nr_pending[idx]

@@ -979,18 +981,27 @@ static void _wait_barrier(struct r1conf *conf, int idx)
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for the barrier in same barrier unit bucket to drop. */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    !atomic_read(&conf->barrier[idx]),
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);

	/* Return false when nowait flag is set */
	if (nowait) {
		ret = false;
	} else {
		wait_event_lock_irq(conf->wait_barrier,
				!conf->array_frozen &&
				!atomic_read(&conf->barrier[idx]),
				conf->resync_lock);
		atomic_inc(&conf->nr_pending[idx]);
	}

	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
	return ret;
}

static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{
	int idx = sector_to_idx(sector_nr);
	bool ret = true;

	/*
	 * Very similar to _wait_barrier(). The difference is, for read

@@ -1002,7 +1013,7 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
	atomic_inc(&conf->nr_pending[idx]);

	if (!READ_ONCE(conf->array_frozen))
		return;
		return ret;

	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);

@@ -1013,19 +1024,28 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for array to be unfrozen */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen,
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);

	/* Return false when nowait flag is set */
	if (nowait) {
		/* Return false when nowait flag is set */
		ret = false;
	} else {
		wait_event_lock_irq(conf->wait_barrier,
				!conf->array_frozen,
				conf->resync_lock);
		atomic_inc(&conf->nr_pending[idx]);
	}

	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
	return ret;
}

static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{
	int idx = sector_to_idx(sector_nr);

	_wait_barrier(conf, idx);
	return _wait_barrier(conf, idx, nowait);
}

static void _allow_barrier(struct r1conf *conf, int idx)

@@ -1236,7 +1256,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
	 * Still need barrier for READ in case that whole
	 * array is frozen.
	 */
	wait_read_barrier(conf, bio->bi_iter.bi_sector);
	if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
				bio->bi_opf & REQ_NOWAIT)) {
		bio_wouldblock_error(bio);
		return;
	}

	if (!r1_bio)
		r1_bio = alloc_r1bio(mddev, bio);

@@ -1336,6 +1360,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
		     bio->bi_iter.bi_sector, bio_end_sector(bio))) {

		DEFINE_WAIT(w);
		if (bio->bi_opf & REQ_NOWAIT) {
			bio_wouldblock_error(bio);
			return;
		}
		for (;;) {
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_IDLE);

@@ -1353,17 +1381,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	wait_barrier(conf, bio->bi_iter.bi_sector);
	if (!wait_barrier(conf, bio->bi_iter.bi_sector,
				bio->bi_opf & REQ_NOWAIT)) {
		bio_wouldblock_error(bio);
		return;
	}

	r1_bio = alloc_r1bio(mddev, bio);
	r1_bio->sectors = max_write_sectors;

	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		raid1_log(mddev, "wait queued");
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev. Record them by setting
	 * bios[x] to bio

@@ -1458,9 +1484,14 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
		r1_bio->state = 0;
		allow_barrier(conf, bio->bi_iter.bi_sector);

		if (bio->bi_opf & REQ_NOWAIT) {
			bio_wouldblock_error(bio);
			return;
		}
		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf, bio->bi_iter.bi_sector);
		wait_barrier(conf, bio->bi_iter.bi_sector, false);
		goto retry_write;
	}

@@ -1688,7 +1719,7 @@ static void close_sync(struct r1conf *conf)
	int idx;

	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
		_wait_barrier(conf, idx);
		_wait_barrier(conf, idx, false);
		_allow_barrier(conf, idx);
	}

@@ -3410,5 +3441,3 @@ MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");

module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
drivers/md/raid10.c:

@@ -952,8 +952,10 @@ static void lower_barrier(struct r10conf *conf)
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(struct r10conf *conf)
static bool wait_barrier(struct r10conf *conf, bool nowait)
{
	bool ret = true;

	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		struct bio_list *bio_list = current->bio_list;

@@ -967,27 +969,35 @@ static void wait_barrier(struct r10conf *conf)
		 * that queue to get the nr_pending
		 * count down.
		 */
		raid10_log(conf->mddev, "wait barrier");
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->barrier ||
				    (atomic_read(&conf->nr_pending) &&
				     bio_list &&
				     (!bio_list_empty(&bio_list[0]) ||
				      !bio_list_empty(&bio_list[1]))) ||
				     /* move on if recovery thread is
				      * blocked by us
				      */
				     (conf->mddev->thread->tsk == current &&
				      test_bit(MD_RECOVERY_RUNNING,
					       &conf->mddev->recovery) &&
				      conf->nr_queued > 0),
				    conf->resync_lock);
		/* Return false when nowait flag is set */
		if (nowait) {
			ret = false;
		} else {
			raid10_log(conf->mddev, "wait barrier");
			wait_event_lock_irq(conf->wait_barrier,
					    !conf->barrier ||
					    (atomic_read(&conf->nr_pending) &&
					     bio_list &&
					     (!bio_list_empty(&bio_list[0]) ||
					      !bio_list_empty(&bio_list[1]))) ||
					     /* move on if recovery thread is
					      * blocked by us
					      */
					     (conf->mddev->thread->tsk == current &&
					      test_bit(MD_RECOVERY_RUNNING,
						       &conf->mddev->recovery) &&
					      conf->nr_queued > 0),
					    conf->resync_lock);
		}
		conf->nr_waiting--;
		if (!conf->nr_waiting)
			wake_up(&conf->wait_barrier);
	}
	atomic_inc(&conf->nr_pending);
	/* Only increment nr_pending when we wait */
	if (ret)
		atomic_inc(&conf->nr_pending);
	spin_unlock_irq(&conf->resync_lock);
	return ret;
}

static void allow_barrier(struct r10conf *conf)

@@ -1098,21 +1108,30 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 * currently.
 * 2. If IO spans the reshape position. Need to wait for reshape to pass.
 */
static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
				 struct bio *bio, sector_t sectors)
{
	wait_barrier(conf);
	/* Bail out if REQ_NOWAIT is set for the bio */
	if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
		bio_wouldblock_error(bio);
		return false;
	}
	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    bio->bi_iter.bi_sector < conf->reshape_progress &&
	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
		raid10_log(conf->mddev, "wait reshape");
		allow_barrier(conf);
		if (bio->bi_opf & REQ_NOWAIT) {
			bio_wouldblock_error(bio);
			return false;
		}
		raid10_log(conf->mddev, "wait reshape");
		wait_event(conf->wait_barrier,
			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
			   conf->reshape_progress >= bio->bi_iter.bi_sector +
			   sectors);
		wait_barrier(conf);
		wait_barrier(conf, false);
	}
	return true;
}

static void raid10_read_request(struct mddev *mddev, struct bio *bio,

@@ -1157,7 +1176,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
		rcu_read_unlock();
	}

	regular_request_wait(mddev, conf, bio, r10_bio->sectors);
	if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
		return;
	rdev = read_balance(conf, r10_bio, &max_sectors);
	if (!rdev) {
		if (err_rdev) {

@@ -1179,7 +1199,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
		bio_chain(split, bio);
		allow_barrier(conf);
		submit_bio_noacct(bio);
		wait_barrier(conf);
		wait_barrier(conf, false);
		bio = split;
		r10_bio->master_bio = bio;
		r10_bio->sectors = max_sectors;

@@ -1338,7 +1358,7 @@ retry_wait:
		raid10_log(conf->mddev, "%s wait rdev %d blocked",
				__func__, blocked_rdev->raid_disk);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		wait_barrier(conf, false);
		goto retry_wait;
	}
}

@@ -1356,6 +1376,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
				    bio->bi_iter.bi_sector,
				    bio_end_sector(bio)))) {
		DEFINE_WAIT(w);
		/* Bail out if REQ_NOWAIT is set for the bio */
		if (bio->bi_opf & REQ_NOWAIT) {
			bio_wouldblock_error(bio);
			return;
		}
		for (;;) {
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_IDLE);

@@ -1368,7 +1393,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
	}

	sectors = r10_bio->sectors;
	regular_request_wait(mddev, conf, bio, sectors);
	if (!regular_request_wait(mddev, conf, bio, sectors))
		return;
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    (mddev->reshape_backwards
	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&

@@ -1380,6 +1406,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
		set_mask_bits(&mddev->sb_flags, 0,
			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
		md_wakeup_thread(mddev->thread);
		if (bio->bi_opf & REQ_NOWAIT) {
			allow_barrier(conf);
			bio_wouldblock_error(bio);
			return;
		}
		raid10_log(conf->mddev, "wait reshape metadata");
		wait_event(mddev->sb_wait,
			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));

@@ -1387,12 +1418,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
		conf->reshape_safe = mddev->reshape_position;
	}

	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		raid10_log(mddev, "wait queued");
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev. Record them by setting
	 * bios[x] to bio

@@ -1482,7 +1507,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
		bio_chain(split, bio);
		allow_barrier(conf);
		submit_bio_noacct(bio);
		wait_barrier(conf);
		wait_barrier(conf, false);
		bio = split;
		r10_bio->master_bio = bio;
	}

@@ -1607,7 +1632,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return -EAGAIN;

	wait_barrier(conf);
	if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
		bio_wouldblock_error(bio);
		return 0;
	}
	wait_barrier(conf, false);

	/*
	 * Check reshape again to avoid reshape happens after checking

@@ -1649,7 +1678,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
		allow_barrier(conf);
		/* Resend the fist split part */
		submit_bio_noacct(split);
		wait_barrier(conf);
		wait_barrier(conf, false);
	}
	div_u64_rem(bio_end, stripe_size, &remainder);
	if (remainder) {

@@ -1660,7 +1689,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
		/* Resend the second split part */
		submit_bio_noacct(bio);
		bio = split;
		wait_barrier(conf);
		wait_barrier(conf, false);
	}

	bio_start = bio->bi_iter.bi_sector;

@@ -1816,7 +1845,7 @@ retry_discard:
		end_disk_offset += geo->stride;
		atomic_inc(&first_r10bio->remaining);
		raid_end_discard_bio(r10_bio);
		wait_barrier(conf);
		wait_barrier(conf, false);
		goto retry_discard;
	}

@@ -2011,7 +2040,7 @@ static void print_conf(struct r10conf *conf)

static void close_sync(struct r10conf *conf)
{
	wait_barrier(conf);
	wait_barrier(conf, false);
	allow_barrier(conf);

	mempool_exit(&conf->r10buf_pool);

@@ -4819,7 +4848,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
	if (need_flush ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Need to update reshape_position in metadata */
		wait_barrier(conf);
		wait_barrier(conf, false);
		mddev->reshape_position = conf->reshape_progress;
		if (mddev->reshape_backwards)
			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)

@@ -5242,5 +5271,3 @@ MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");

module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
drivers/md/raid5.c:

@@ -2215,10 +2215,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
	struct r5conf *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	local_lock(&conf->percpu->lock);
	percpu = this_cpu_ptr(conf->percpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;

@@ -2271,13 +2270,14 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
			BUG();
	}

	if (overlap_clear && !sh->batch_head)
	if (overlap_clear && !sh->batch_head) {
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
	}
	local_unlock(&conf->percpu->lock);
}

static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)

@@ -5686,6 +5686,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
	struct stripe_head *sh;
	int stripe_sectors;

	/* We need to handle this when io_uring supports discard/trim */
	if (WARN_ON_ONCE(bi->bi_opf & REQ_NOWAIT))
		return;

	if (mddev->reshape_position != MaxSector)
		/* Skip discard while reshape is happening */
		return;

@@ -5819,6 +5823,17 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
	last_sector = bio_end_sector(bi);
	bi->bi_next = NULL;

	/* Bail out if conflicts with reshape and REQ_NOWAIT is set */
	if ((bi->bi_opf & REQ_NOWAIT) &&
	    (conf->reshape_progress != MaxSector) &&
	    (mddev->reshape_backwards
	     ? (logical_sector > conf->reshape_progress && logical_sector <= conf->reshape_safe)
	     : (logical_sector >= conf->reshape_safe && logical_sector < conf->reshape_progress))) {
		bio_wouldblock_error(bi);
		if (rw == WRITE)
			md_write_end(mddev);
		return true;
	}
	md_account_bio(mddev, &bi);
	prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
	for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) {

@@ -7052,6 +7067,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
		return -ENOMEM;
	}

	local_lock_init(&percpu->lock);
	return 0;
}

@@ -7446,12 +7462,19 @@ static int raid5_run(struct mddev *mddev)
	struct md_rdev *rdev;
	struct md_rdev *journal_dev = NULL;
	sector_t reshape_offset = 0;
	int i;
	int i, ret = 0;
	long long min_offset_diff = 0;
	int first = 1;

	if (mddev_init_writes_pending(mddev) < 0)
	if (acct_bioset_init(mddev)) {
		pr_err("md/raid456:%s: alloc acct bioset failed.\n", mdname(mddev));
		return -ENOMEM;
	}

	if (mddev_init_writes_pending(mddev) < 0) {
		ret = -ENOMEM;
		goto exit_acct_set;
	}

	if (mddev->recovery_cp != MaxSector)
		pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",

@@ -7482,7 +7505,8 @@ static int raid5_run(struct mddev *mddev)
	    (mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
		pr_notice("md/raid:%s: array cannot have both journal and bitmap\n",
			  mdname(mddev));
		return -EINVAL;
		ret = -EINVAL;
		goto exit_acct_set;
	}

	if (mddev->reshape_position != MaxSector) {

@@ -7507,13 +7531,15 @@ static int raid5_run(struct mddev *mddev)
		if (journal_dev) {
			pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n",
				mdname(mddev));
			return -EINVAL;
			ret = -EINVAL;
			goto exit_acct_set;
		}

		if (mddev->new_level != mddev->level) {
			pr_warn("md/raid:%s: unsupported reshape required - aborting.\n",
				mdname(mddev));
			return -EINVAL;
			ret = -EINVAL;
			goto exit_acct_set;
		}
		old_disks = mddev->raid_disks - mddev->delta_disks;
		/* reshape_position must be on a new-stripe boundary, and one

@@ -7529,7 +7555,8 @@ static int raid5_run(struct mddev *mddev)
		if (sector_div(here_new, chunk_sectors * new_data_disks)) {
			pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
				mdname(mddev));
			return -EINVAL;
			ret = -EINVAL;
			goto exit_acct_set;
		}
		reshape_offset = here_new * chunk_sectors;
		/* here_new is the stripe we will write to */

@@ -7551,7 +7578,8 @@ static int raid5_run(struct mddev *mddev)
			else if (mddev->ro == 0) {
				pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n",
					mdname(mddev));
				return -EINVAL;
				ret = -EINVAL;
				goto exit_acct_set;
			}
		} else if (mddev->reshape_backwards
		    ? (here_new * chunk_sectors + min_offset_diff <=

@@ -7561,7 +7589,8 @@ static int raid5_run(struct mddev *mddev)
			/* Reading from the same stripe as writing to - bad */
			pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n",
				mdname(mddev));
			return -EINVAL;
			ret = -EINVAL;
			goto exit_acct_set;
		}
		pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
		/* OK, we should be able to continue; */

@@ -7585,8 +7614,10 @@ static int raid5_run(struct mddev *mddev)
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);
	if (IS_ERR(conf)) {
		ret = PTR_ERR(conf);
		goto exit_acct_set;
	}

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
		if (!journal_dev) {

@@ -7783,7 +7814,10 @@ abort:
	free_conf(conf);
	mddev->private = NULL;
	pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
	return -EIO;
	ret = -EIO;
exit_acct_set:
	acct_bioset_exit(mddev);
	return ret;
}

static void raid5_free(struct mddev *mddev, void *priv)

@@ -7791,6 +7825,7 @@ static void raid5_free(struct mddev *mddev, void *priv)
	struct r5conf *conf = priv;

	free_conf(conf);
	acct_bioset_exit(mddev);
	mddev->to_remove = &raid5_attrs_group;
}
drivers/md/raid5.h:

@@ -4,6 +4,7 @@

#include <linux/raid/xor.h>
#include <linux/dmaengine.h>
#include <linux/local_lock.h>

/*
 *

@@ -640,7 +641,8 @@ struct r5conf {
					     * lists and performing address
					     * conversions
					     */
		int scribble_obj_size;
		int scribble_obj_size;
		local_lock_t lock;
	} __percpu *percpu;
	int scribble_disks;
	int scribble_sectors;