Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
drbd: Local variable renames: e -> peer_req
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
commit db830c464b
parent 6c852beca1
committed by Philipp Reisner
drivers/block/drbd/drbd_worker.c

@@ -80,47 +80,47 @@ void drbd_md_io_complete(struct bio *bio, int error)
 /* reads on behalf of the partner,
  * "submitted" by the receiver
  */
-void drbd_endio_read_sec_final(struct drbd_peer_request *e) __releases(local)
+void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_conf *mdev = e->mdev;
+	struct drbd_conf *mdev = peer_req->mdev;
 
 	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
-	mdev->read_cnt += e->i.size >> 9;
-	list_del(&e->w.list);
+	mdev->read_cnt += peer_req->i.size >> 9;
+	list_del(&peer_req->w.list);
 	if (list_empty(&mdev->read_ee))
 		wake_up(&mdev->ee_wait);
-	if (test_bit(__EE_WAS_ERROR, &e->flags))
+	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
 		__drbd_chk_io_error(mdev, false);
 	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
-	drbd_queue_work(&mdev->tconn->data.work, &e->w);
+	drbd_queue_work(&mdev->tconn->data.work, &peer_req->w);
 	put_ldev(mdev);
 }
 
 /* writes on behalf of the partner, or resync writes,
  * "submitted" by the receiver, final stage. */
-static void drbd_endio_write_sec_final(struct drbd_peer_request *e) __releases(local)
+static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_conf *mdev = e->mdev;
+	struct drbd_conf *mdev = peer_req->mdev;
 	sector_t e_sector;
 	int do_wake;
 	u64 block_id;
 	int do_al_complete_io;
 
-	/* after we moved e to done_ee,
+	/* after we moved peer_req to done_ee,
 	 * we may no longer access it,
 	 * it may be freed/reused already!
 	 * (as soon as we release the req_lock) */
-	e_sector = e->i.sector;
-	do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
-	block_id = e->block_id;
+	e_sector = peer_req->i.sector;
+	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
+	block_id = peer_req->block_id;
 
 	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
-	mdev->writ_cnt += e->i.size >> 9;
-	list_del(&e->w.list); /* has been on active_ee or sync_ee */
-	list_add_tail(&e->w.list, &mdev->done_ee);
+	mdev->writ_cnt += peer_req->i.size >> 9;
+	list_del(&peer_req->w.list); /* has been on active_ee or sync_ee */
+	list_add_tail(&peer_req->w.list, &mdev->done_ee);
 
 	/*
 	 * Do not remove from the write_requests tree here: we did not send the
@@ -132,7 +132,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *e) __releases(l
 
 	do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);
 
-	if (test_bit(__EE_WAS_ERROR, &e->flags))
+	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
 		__drbd_chk_io_error(mdev, false);
 	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
@@ -154,20 +154,20 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *e) __releases(l
  */
 void drbd_endio_sec(struct bio *bio, int error)
 {
-	struct drbd_peer_request *e = bio->bi_private;
-	struct drbd_conf *mdev = e->mdev;
+	struct drbd_peer_request *peer_req = bio->bi_private;
+	struct drbd_conf *mdev = peer_req->mdev;
 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
 	int is_write = bio_data_dir(bio) == WRITE;
 
 	if (error && __ratelimit(&drbd_ratelimit_state))
 		dev_warn(DEV, "%s: error=%d s=%llus\n",
 				is_write ? "write" : "read", error,
-				(unsigned long long)e->i.sector);
+				(unsigned long long)peer_req->i.sector);
 	if (!error && !uptodate) {
 		if (__ratelimit(&drbd_ratelimit_state))
 			dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
 					is_write ? "write" : "read",
-					(unsigned long long)e->i.sector);
+					(unsigned long long)peer_req->i.sector);
 		/* strange behavior of some lower level drivers...
 		 * fail the request by clearing the uptodate flag,
 		 * but do not return any error?! */
@@ -175,14 +175,14 @@ void drbd_endio_sec(struct bio *bio, int error)
 	}
 
 	if (error)
-		set_bit(__EE_WAS_ERROR, &e->flags);
+		set_bit(__EE_WAS_ERROR, &peer_req->flags);
 
 	bio_put(bio); /* no need for the bio anymore */
-	if (atomic_dec_and_test(&e->pending_bios)) {
+	if (atomic_dec_and_test(&peer_req->pending_bios)) {
 		if (is_write)
-			drbd_endio_write_sec_final(e);
+			drbd_endio_write_sec_final(peer_req);
 		else
-			drbd_endio_read_sec_final(e);
+			drbd_endio_read_sec_final(peer_req);
 	}
 }
 
@@ -248,11 +248,11 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 }
 
 void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
-		  struct drbd_peer_request *e, void *digest)
+		  struct drbd_peer_request *peer_req, void *digest)
 {
 	struct hash_desc desc;
 	struct scatterlist sg;
-	struct page *page = e->pages;
+	struct page *page = peer_req->pages;
 	struct page *tmp;
 	unsigned len;
 
@@ -269,7 +269,7 @@ void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
 		page = tmp;
 	}
 	/* and now the last, possibly only partially used page */
-	len = e->i.size & (PAGE_SIZE - 1);
+	len = peer_req->i.size & (PAGE_SIZE - 1);
 	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
 	crypto_hash_update(&desc, &sg, sg.length);
 	crypto_hash_final(&desc, digest);
@@ -298,7 +298,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
 /* TODO merge common code with w_e_end_ov_req */
 int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
+	struct drbd_peer_request *peer_req =
+		container_of(w, struct drbd_peer_request, w);
 	int digest_size;
 	void *digest;
 	int ok = 1;
@@ -306,22 +307,22 @@ int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	if (unlikely(cancel))
 		goto out;
 
-	if (likely((e->flags & EE_WAS_ERROR) != 0))
+	if (likely((peer_req->flags & EE_WAS_ERROR) != 0))
 		goto out;
 
 	digest_size = crypto_hash_digestsize(mdev->csums_tfm);
 	digest = kmalloc(digest_size, GFP_NOIO);
 	if (digest) {
-		sector_t sector = e->i.sector;
-		unsigned int size = e->i.size;
-		drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
+		sector_t sector = peer_req->i.sector;
+		unsigned int size = peer_req->i.size;
+		drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
 		/* Free e and pages before send.
 		 * In case we block on congestion, we could otherwise run into
 		 * some distributed deadlock, if the other side blocks on
 		 * congestion as well, because our receiver blocks in
 		 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-		drbd_free_ee(mdev, e);
-		e = NULL;
+		drbd_free_ee(mdev, peer_req);
+		peer_req = NULL;
 		inc_rs_pending(mdev);
 		ok = drbd_send_drequest_csum(mdev, sector, size,
 					     digest, digest_size,
@@ -333,8 +334,8 @@ int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	}
 
 out:
-	if (e)
-		drbd_free_ee(mdev, e);
+	if (peer_req)
+		drbd_free_ee(mdev, peer_req);
 
 	if (unlikely(!ok))
 		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
@@ -345,7 +346,7 @@ out:
 
 static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 {
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 
 	if (!get_ldev(mdev))
 		return -EIO;
@@ -355,17 +356,17 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 
 	/* GFP_TRY, because if there is no memory available right now, this may
 	 * be rescheduled for later. It is "only" background resync, after all. */
-	e = drbd_alloc_ee(mdev, ID_SYNCER /* unused */, sector, size, GFP_TRY);
-	if (!e)
+	peer_req = drbd_alloc_ee(mdev, ID_SYNCER /* unused */, sector, size, GFP_TRY);
+	if (!peer_req)
 		goto defer;
 
-	e->w.cb = w_e_send_csum;
+	peer_req->w.cb = w_e_send_csum;
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_add(&e->w.list, &mdev->read_ee);
+	list_add(&peer_req->w.list, &mdev->read_ee);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	atomic_add(size >> 9, &mdev->rs_sect_ev);
-	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
+	if (drbd_submit_ee(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
 		return 0;
 
 	/* If it failed because of ENOMEM, retry should help. If it failed
@@ -373,10 +374,10 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 	 * retry may or may not help.
 	 * If it does not, you may need to force disconnect. */
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_del(&e->w.list);
+	list_del(&peer_req->w.list);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	drbd_free_ee(mdev, e);
+	drbd_free_ee(mdev, peer_req);
 defer:
 	put_ldev(mdev);
 	return -EAGAIN;
@@ -901,19 +902,19 @@ out:
 }
 
 /* helper */
-static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *e)
+static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
 {
-	if (drbd_ee_has_active_page(e)) {
+	if (drbd_ee_has_active_page(peer_req)) {
 		/* This might happen if sendpage() has not finished */
-		int i = (e->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
+		int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
 		atomic_add(i, &mdev->pp_in_use_by_net);
 		atomic_sub(i, &mdev->pp_in_use);
 		spin_lock_irq(&mdev->tconn->req_lock);
-		list_add_tail(&e->w.list, &mdev->net_ee);
+		list_add_tail(&peer_req->w.list, &mdev->net_ee);
 		spin_unlock_irq(&mdev->tconn->req_lock);
 		wake_up(&drbd_pp_wait);
 	} else
-		drbd_free_ee(mdev, e);
+		drbd_free_ee(mdev, peer_req);
 }
 
 /**
@@ -924,28 +925,28 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_requ
  */
 int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
+	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
 	int ok;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev, e);
+		drbd_free_ee(mdev, peer_req);
 		dec_unacked(mdev);
 		return 1;
 	}
 
-	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
-		ok = drbd_send_block(mdev, P_DATA_REPLY, e);
+	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+		ok = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
 	} else {
 		if (__ratelimit(&drbd_ratelimit_state))
 			dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
-			    (unsigned long long)e->i.sector);
+			    (unsigned long long)peer_req->i.sector);
 
-		ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
+		ok = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
 	}
 
 	dec_unacked(mdev);
 
-	move_to_net_ee_or_free(mdev, e);
+	move_to_net_ee_or_free(mdev, peer_req);
 
 	if (unlikely(!ok))
 		dev_err(DEV, "drbd_send_block() failed\n");
@@ -960,26 +961,26 @@ int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
  */
 int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
+	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
 	int ok;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev, e);
+		drbd_free_ee(mdev, peer_req);
 		dec_unacked(mdev);
 		return 1;
 	}
 
 	if (get_ldev_if_state(mdev, D_FAILED)) {
-		drbd_rs_complete_io(mdev, e->i.sector);
+		drbd_rs_complete_io(mdev, peer_req->i.sector);
 		put_ldev(mdev);
 	}
 
 	if (mdev->state.conn == C_AHEAD) {
-		ok = drbd_send_ack(mdev, P_RS_CANCEL, e);
-	} else if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+		ok = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
+	} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
 		if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
 			inc_rs_pending(mdev);
-			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
+			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
 		} else {
 			if (__ratelimit(&drbd_ratelimit_state))
 				dev_err(DEV, "Not sending RSDataReply, "
@@ -989,17 +990,17 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	} else {
 		if (__ratelimit(&drbd_ratelimit_state))
 			dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
-			    (unsigned long long)e->i.sector);
+			    (unsigned long long)peer_req->i.sector);
 
-		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
+		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
 
 		/* update resync data with failure */
-		drbd_rs_failed_io(mdev, e->i.sector, e->i.size);
+		drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
 	}
 
 	dec_unacked(mdev);
 
-	move_to_net_ee_or_free(mdev, e);
+	move_to_net_ee_or_free(mdev, peer_req);
 
 	if (unlikely(!ok))
 		dev_err(DEV, "drbd_send_block() failed\n");
@@ -1008,26 +1009,26 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 
 int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
+	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
 	struct digest_info *di;
 	int digest_size;
 	void *digest = NULL;
 	int ok, eq = 0;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev, e);
+		drbd_free_ee(mdev, peer_req);
 		dec_unacked(mdev);
 		return 1;
 	}
 
 	if (get_ldev(mdev)) {
-		drbd_rs_complete_io(mdev, e->i.sector);
+		drbd_rs_complete_io(mdev, peer_req->i.sector);
 		put_ldev(mdev);
 	}
 
-	di = e->digest;
+	di = peer_req->digest;
 
-	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
 		/* quick hack to try to avoid a race against reconfiguration.
 		 * a real fix would be much more involved,
 		 * introducing more locking mechanisms */
@@ -1037,31 +1038,31 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 			digest = kmalloc(digest_size, GFP_NOIO);
 		}
 		if (digest) {
-			drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
+			drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
 			eq = !memcmp(digest, di->digest, digest_size);
 			kfree(digest);
 		}
 
 		if (eq) {
-			drbd_set_in_sync(mdev, e->i.sector, e->i.size);
+			drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
 			/* rs_same_csums unit is BM_BLOCK_SIZE */
-			mdev->rs_same_csum += e->i.size >> BM_BLOCK_SHIFT;
-			ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
+			mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
+			ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
 		} else {
 			inc_rs_pending(mdev);
-			e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
-			e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
+			peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
+			peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
 			kfree(di);
-			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
+			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
 		}
 	} else {
-		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
+		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
 		if (__ratelimit(&drbd_ratelimit_state))
 			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
 	}
 
 	dec_unacked(mdev);
-	move_to_net_ee_or_free(mdev, e);
+	move_to_net_ee_or_free(mdev, peer_req);
 
 	if (unlikely(!ok))
 		dev_err(DEV, "drbd_send_block/ack() failed\n");
@@ -1071,9 +1072,9 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 /* TODO merge common code with w_e_send_csum */
 int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
-	sector_t sector = e->i.sector;
-	unsigned int size = e->i.size;
+	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+	sector_t sector = peer_req->i.sector;
+	unsigned int size = peer_req->i.size;
 	int digest_size;
 	void *digest;
 	int ok = 1;
@@ -1088,8 +1089,8 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 		goto out;
 	}
 
-	if (likely(!(e->flags & EE_WAS_ERROR)))
-		drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+	if (likely(!(peer_req->flags & EE_WAS_ERROR)))
+		drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);
 	else
 		memset(digest, 0, digest_size);
 
@@ -1098,8 +1099,8 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	 * some distributed deadlock, if the other side blocks on
 	 * congestion as well, because our receiver blocks in
 	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-	drbd_free_ee(mdev, e);
-	e = NULL;
+	drbd_free_ee(mdev, peer_req);
+	peer_req = NULL;
 	inc_rs_pending(mdev);
 	ok = drbd_send_drequest_csum(mdev, sector, size,
 				     digest, digest_size,
@@ -1109,8 +1110,8 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	kfree(digest);
 
 out:
-	if (e)
-		drbd_free_ee(mdev, e);
+	if (peer_req)
+		drbd_free_ee(mdev, peer_req);
 	dec_unacked(mdev);
 	return ok;
 }
@@ -1128,16 +1129,16 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
 
 int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
+	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
 	struct digest_info *di;
 	void *digest;
-	sector_t sector = e->i.sector;
-	unsigned int size = e->i.size;
+	sector_t sector = peer_req->i.sector;
+	unsigned int size = peer_req->i.size;
 	int digest_size;
 	int ok, eq = 0;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev, e);
+		drbd_free_ee(mdev, peer_req);
 		dec_unacked(mdev);
 		return 1;
 	}
@@ -1145,17 +1146,17 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
 	 * the resync lru has been cleaned up already */
 	if (get_ldev(mdev)) {
-		drbd_rs_complete_io(mdev, e->i.sector);
+		drbd_rs_complete_io(mdev, peer_req->i.sector);
 		put_ldev(mdev);
 	}
 
-	di = e->digest;
+	di = peer_req->digest;
 
-	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
 		digest_size = crypto_hash_digestsize(mdev->verify_tfm);
 		digest = kmalloc(digest_size, GFP_NOIO);
 		if (digest) {
-			drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+			drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);
 
 			D_ASSERT(digest_size == di->digest_size);
 			eq = !memcmp(digest, di->digest, digest_size);
@@ -1168,7 +1169,7 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	 * some distributed deadlock, if the other side blocks on
 	 * congestion as well, because our receiver blocks in
 	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-	drbd_free_ee(mdev, e);
+	drbd_free_ee(mdev, peer_req);
 	if (!eq)
 		drbd_ov_oos_found(mdev, sector, size);
 	else
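Editor's note: the rename is mechanical, but the idiom it feeds is worth seeing in isolation. Every worker callback in this patch recovers the enclosing peer request from its embedded struct drbd_work via container_of(w, struct drbd_peer_request, w). Below is a minimal, self-contained sketch of that pattern; the struct names and the simplified container_of macro are stand-ins for illustration, not DRBD's or the kernel's actual definitions.

#include <stddef.h>
#include <stdio.h>

/* Simplified form of the kernel's container_of(): map a pointer to an
 * embedded member back to the structure that contains it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work {
	int (*cb)(struct work *w);
};

/* stand-in for struct drbd_peer_request, which embeds a work item */
struct peer_request {
	unsigned long long sector;
	struct work w;
};

static int send_csum(struct work *w)
{
	/* recover the enclosing request from the embedded work item,
	 * as w_e_send_csum() does in the patch above */
	struct peer_request *peer_req = container_of(w, struct peer_request, w);

	printf("peer request at sector %llu\n", peer_req->sector);
	return 1;
}

int main(void)
{
	struct peer_request req = { .sector = 1234, .w = { .cb = send_csum } };

	/* a work queue would only ever hand the callback &req.w */
	return req.w.cb(&req.w) ? 0 : 1;
}

Since the callback receives only the embedded member, naming the recovered container peer_req rather than e makes every later field access (peer_req->i.sector, peer_req->flags) self-describing, which is the point of the rename.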