[Devel] [PATCH RHEL7 COMMIT] Revert "ploop: Remove now unused PLOOP_REQ_RELOC_S branches"
Konstantin Khorenko
khorenko at virtuozzo.com
Tue May 21 17:44:28 MSK 2019
The commit is pushed to "branch-rh7-3.10.0-957.12.2.vz7.96.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-957.12.2.vz7.96.1
------>
commit 2c5a610c21fb2c71bb2c106bf850cf108d55318c
Author: Kirill Tkhai <ktkhai at virtuozzo.com>
Date: Tue May 21 17:44:26 2019 +0300
Revert "ploop: Remove now unused PLOOP_REQ_RELOC_S branches"
This reverts commit cc8aaafb78d725b0447d5fb2d7929d55e5292b27.
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
=====================
Patchset description:
ploop: Return maintenance mode
This patch set enables maintenance mode for vstorage.
Also, the file /sys/block/ploopXXX/pstate/native_discard was added,
which shows whether maintenance-mode based discard should be used
or not.
https://jira.sw.ru/browse/PSBM-94662
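For illustration only (not part of the patchset): a minimal user-space sketch
of how the new attribute could be checked. It assumes the attribute prints a
single 0/1 flag; the device name "ploop12345" is a made-up placeholder.

/*
 * Sketch: read /sys/block/ploopXXX/pstate/native_discard to see whether
 * the device uses native discard or falls back to maintenance-mode based
 * discard.  The 0/1 output format is an assumption, and "ploop12345" is
 * a hypothetical device name.
 */
#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/sys/block/ploop12345/pstate/native_discard", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("native_discard: %s", buf);
	fclose(f);
	return 0;
}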
Kirill Tkhai (8):
Revert "ploop: Remove now unused PLOOP_E_ZERO_INDEX and PLOOP_E_DELTA_ZERO_INDEX branches"
Revert "ploop: Remove now unused PLOOP_REQ_RELOC_S branches"
Revert "ploop: Remove now unused PLOOP_REQ_DISCARD branches"
Revert "ploop: Remove now unused PLOOP_REQ_ZERO branches"
Revert "ploop: Remove obsolete ioctls"
Partial revert "ploop: Enable native discard support for kaio engine"
ploop: Return maintaince mode ioctls again
ploop: Show whether device supports native discard
---
drivers/block/ploop/dev.c | 54 +++++++++++++++++++++++++++++++++++++++----
drivers/block/ploop/events.h | 1 +
drivers/block/ploop/io_kaio.c | 1 +
drivers/block/ploop/map.c | 28 +++++++++++++++++++---
include/linux/ploop/ploop.h | 13 ++++++++++-
5 files changed, 89 insertions(+), 8 deletions(-)
diff --git a/drivers/block/ploop/dev.c b/drivers/block/ploop/dev.c
index 9a7c6aa4f2bf..e9501f45da0c 100644
--- a/drivers/block/ploop/dev.c
+++ b/drivers/block/ploop/dev.c
@@ -1291,6 +1291,7 @@ static void ploop_complete_request(struct ploop_request * preq)
WARN_ON(!preq->error && test_bit(PLOOP_REQ_ISSUE_FLUSH, &preq->state));
if (test_bit(PLOOP_REQ_RELOC_A, &preq->state) ||
+ test_bit(PLOOP_REQ_RELOC_S, &preq->state) ||
test_bit(PLOOP_REQ_RELOC_N, &preq->state)) {
if (preq->error)
set_bit(PLOOP_S_ABORT, &plo->state);
@@ -1768,6 +1769,7 @@ static inline bool preq_is_special(struct ploop_request * preq)
return state & (PLOOP_REQ_MERGE_FL |
PLOOP_REQ_RELOC_A_FL |
+ PLOOP_REQ_RELOC_S_FL |
PLOOP_REQ_RELOC_N_FL);
}
@@ -1865,7 +1867,8 @@ ploop_entry_request(struct ploop_request * preq)
preq->req_rw |= REQ_SYNC;
restart:
- if (test_bit(PLOOP_REQ_RELOC_A, &preq->state)) {
+ if (test_bit(PLOOP_REQ_RELOC_A, &preq->state) ||
+ test_bit(PLOOP_REQ_RELOC_S, &preq->state)) {
err = ploop_entry_reloc_req(preq, &iblk);
if (err)
goto error;
@@ -2055,7 +2058,8 @@ ploop_entry_request(struct ploop_request * preq)
if (delta) {
if (delta == top_delta) {
/* Block exists in top delta. Good. */
- if (plo->maintenance_type == PLOOP_MNTN_GROW) {
+ if (plo->maintenance_type == PLOOP_MNTN_GROW ||
+ plo->maintenance_type == PLOOP_MNTN_RELOC) {
spin_lock_irq(&plo->lock);
ploop_add_lockout(preq, 0);
spin_unlock_irq(&plo->lock);
@@ -2241,7 +2245,36 @@ static void ploop_req_state_process(struct ploop_request * preq)
ploop_entry_request(preq);
break;
+ case PLOOP_E_RELOC_COMPLETE:
+ BUG_ON (!test_bit(PLOOP_REQ_RELOC_S, &preq->state));
+ if (!preq->error) {
+ spin_lock_irq(&plo->lock);
+ if (!list_empty(&preq->delay_list)) {
+ struct ploop_request *pr;
+ pr = list_entry(preq->delay_list.next,
+ struct ploop_request, list);
+ list_splice_init(&preq->delay_list,
+ plo->ready_queue.prev);
+ }
+ spin_unlock_irq(&plo->lock);
+ preq->req_cluster = ~0U;
+ preq->src_iblock = ~0U; /* redundant */
+ preq->dst_cluster = ~0U; /* redundant */
+ preq->dst_iblock = ~0U; /* redundant */
+ preq->eng_state = PLOOP_E_ENTRY;
+ goto restart;
+ }
+ /* drop down to PLOOP_E_COMPLETE case ... */
case PLOOP_E_COMPLETE:
+ if (unlikely(test_bit(PLOOP_REQ_RELOC_S, &preq->state) &&
+ preq->error)) {
+ printk("RELOC_S completed with err %d"
+ " (%u %u %u %u %u)\n",
+ preq->error, preq->req_cluster, preq->iblock,
+ preq->src_iblock, preq->dst_cluster,
+ preq->dst_iblock);
+ }
+
if (!preq->error &&
test_bit(PLOOP_REQ_TRANS, &preq->state)) {
u32 iblk;
@@ -2363,8 +2396,21 @@ static void ploop_req_state_process(struct ploop_request * preq)
top_delta = ploop_top_delta(plo);
sbl.head = sbl.tail = preq->aux_bio;
- top_delta->ops->allocate(top_delta, preq, &sbl,
- cluster_size_in_sec(plo));
+ /* Relocated data write required sync before BAT update
+ * this will happen inside index_update */
+
+ if (test_bit(PLOOP_REQ_RELOC_S, &preq->state)) {
+ preq->eng_state = PLOOP_E_DATA_WBI;
+ plo->st.bio_out++;
+ preq->iblock = preq->dst_iblock;
+ top_delta->io.ops->submit(&top_delta->io, preq,
+ preq->req_rw, &sbl,
+ preq->iblock,
+ cluster_size_in_sec(plo));
+ } else {
+ top_delta->ops->allocate(top_delta, preq, &sbl,
+ cluster_size_in_sec(plo));
+ }
break;
}
case PLOOP_E_RELOC_NULLIFY:
diff --git a/drivers/block/ploop/events.h b/drivers/block/ploop/events.h
index 7e0b8e4ad9f7..57f72e6b3b80 100644
--- a/drivers/block/ploop/events.h
+++ b/drivers/block/ploop/events.h
@@ -42,6 +42,7 @@
{ 1 << PLOOP_REQ_TRANS, "T"}, \
{ 1 << PLOOP_REQ_MERGE, "M"}, \
{ 1 << PLOOP_REQ_RELOC_A, "RA"}, \
+ { 1 << PLOOP_REQ_RELOC_S, "RS"}, \
{ 1 << PLOOP_REQ_RELOC_N, "RN"})
#define PREQ_FORMAT "preq=0x%p cluster=0x%x iblock=0x%x size=0x%x eng_state=0x%lx state=%s rw=%s"
diff --git a/drivers/block/ploop/io_kaio.c b/drivers/block/ploop/io_kaio.c
index 940250e2a260..daad912c3a91 100644
--- a/drivers/block/ploop/io_kaio.c
+++ b/drivers/block/ploop/io_kaio.c
@@ -68,6 +68,7 @@ static void kaio_complete_io_state(struct ploop_request * preq)
int need_fua = !!(preq->req_rw & REQ_FUA);
unsigned long state = READ_ONCE(preq->state);
int reloc = !!(state & (PLOOP_REQ_RELOC_A_FL|
+ PLOOP_REQ_RELOC_S_FL|
PLOOP_REQ_RELOC_N_FL));
if (preq->error || !(preq->req_rw & REQ_FUA) ||
diff --git a/drivers/block/ploop/map.c b/drivers/block/ploop/map.c
index e2893ab78468..e03df205b0ab 100644
--- a/drivers/block/ploop/map.c
+++ b/drivers/block/ploop/map.c
@@ -894,6 +894,8 @@ void ploop_index_update(struct ploop_request * preq)
map_index_t blk;
int old_level;
struct page * page;
+ unsigned long state = READ_ONCE(preq->state);
+ int do_fsync_if_delayed = 0;
/* No way back, we are going to initiate index write. */
@@ -946,7 +948,13 @@ void ploop_index_update(struct ploop_request * preq)
will do the FLUSH */
preq->req_rw &= ~REQ_FLUSH;
- ploop_index_wb_proceed_or_delay(preq, 0);
+ /* Relocate requires consistent index update */
+ if (state & (PLOOP_REQ_RELOC_A_FL|PLOOP_REQ_RELOC_S_FL)) {
+ preq->req_index_update_rw |= (REQ_FLUSH | REQ_FUA);
+ do_fsync_if_delayed = 1;
+ }
+
+ ploop_index_wb_proceed_or_delay(preq, do_fsync_if_delayed);
return;
enomem:
@@ -1009,11 +1017,25 @@ static void map_wb_complete_post_process(struct ploop_map *map,
{
struct ploop_device *plo = map->plo;
- if (likely(err || !test_bit(PLOOP_REQ_RELOC_A, &preq->state))) {
+ if (likely(err ||
+ (!test_bit(PLOOP_REQ_RELOC_A, &preq->state) &&
+ !test_bit(PLOOP_REQ_RELOC_S, &preq->state)))) {
+
requeue_req(preq, PLOOP_E_COMPLETE);
return;
}
+ if (test_bit(PLOOP_REQ_RELOC_S, &preq->state)) {
+ spin_lock_irq(&plo->lock);
+ del_lockout(preq);
+ map_release(preq->map);
+ preq->map = NULL;
+ spin_unlock_irq(&plo->lock);
+
+ requeue_req(preq, PLOOP_E_RELOC_COMPLETE);
+ return;
+ }
+
BUG_ON (!test_bit(PLOOP_REQ_RELOC_A, &preq->state));
BUG_ON (!preq->aux_bio);
@@ -1136,7 +1158,7 @@ static void map_wb_complete(struct map_node * m, int err)
state = READ_ONCE(preq->state);
/* Relocate requires consistent index update */
- if (state & PLOOP_REQ_RELOC_A_FL) {
+ if (state & (PLOOP_REQ_RELOC_A_FL|PLOOP_REQ_RELOC_S_FL)) {
rw |= (REQ_FLUSH | REQ_FUA);
do_fsync_if_delayed = 1;
}
diff --git a/include/linux/ploop/ploop.h b/include/linux/ploop/ploop.h
index 0bf75437ade3..4beaa8a9f5d1 100644
--- a/include/linux/ploop/ploop.h
+++ b/include/linux/ploop/ploop.h
@@ -486,6 +486,7 @@ enum
PLOOP_REQ_TRANS,
PLOOP_REQ_MERGE,
PLOOP_REQ_RELOC_A, /* 'A' stands for allocate() */
+ PLOOP_REQ_RELOC_S, /* Obsolete: 'S' stands for submit() */
PLOOP_REQ_RELOC_N, /* 'N' stands for "nullify" */
PLOOP_REQ_RSYNC,
PLOOP_REQ_KAIO_FSYNC, /*force image fsync by KAIO module */
@@ -498,12 +499,14 @@ enum
#define PLOOP_REQ_MERGE_FL (1 << PLOOP_REQ_MERGE)
#define PLOOP_REQ_RELOC_A_FL (1 << PLOOP_REQ_RELOC_A)
+#define PLOOP_REQ_RELOC_S_FL (1 << PLOOP_REQ_RELOC_S) /* Obsolete */
#define PLOOP_REQ_RELOC_N_FL (1 << PLOOP_REQ_RELOC_N)
enum
{
PLOOP_E_ENTRY, /* Not yet processed */
PLOOP_E_COMPLETE, /* Complete. Maybe, with an error */
+ PLOOP_E_RELOC_COMPLETE, /* Reloc complete. Maybe, with an error */
PLOOP_E_INDEX_READ, /* Reading an index page */
PLOOP_E_TRANS_INDEX_READ,/* Reading a trans index page */
PLOOP_E_DELTA_READ, /* Write request reads data from previos delta */
@@ -562,7 +565,15 @@ struct ploop_request
iblock_t iblock;
- unsigned long ppb_state;
+ /* relocation info */
+ union {
+ struct {
+ iblock_t src_iblock;
+ iblock_t dst_iblock;
+ };
+ unsigned long ppb_state;
+ };
+ cluster_t dst_cluster;
struct rb_node reloc_link;
/* State specific information */