This is a note to let you know that I've just added the patch titled

    libceph: encapsulate advancing msg page

to the 3.4-stable tree, which can be found at:
    
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     0064-libceph-encapsulate-advancing-msg-page.patch
and it can be found in the queue-3.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <[email protected]> know about it.


From 1a8d8ec0fb2260e062574c95480e6d74b7d19181 Mon Sep 17 00:00:00 2001
From: Alex Elder <[email protected]>
Date: Mon, 11 Jun 2012 14:57:13 -0500
Subject: libceph: encapsulate advancing msg page

From: Alex Elder <[email protected]>

(cherry picked from commit 84ca8fc87fcf4ab97bb8acdb59bf97bb4820cb14)

In write_partial_msg_pages(), once all the data from a page has been
sent, we advance to the next one.  Put the code that takes care of
this into its own function.

While modifying write_partial_msg_pages(), make its local variable
"in_trail" be Boolean, and use the local variable "msg" (which is
just the connection's current out_msg pointer) consistently.

Signed-off-by: Alex Elder <[email protected]>
Reviewed-by: Sage Weil <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
 net/ceph/messenger.c |   58 +++++++++++++++++++++++++++++----------------------
 1 file changed, 34 insertions(+), 24 deletions(-)

--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -891,6 +891,33 @@ static void iter_bio_next(struct bio **b
 }
 #endif
 
+static void out_msg_pos_next(struct ceph_connection *con, struct page *page,
+                       size_t len, size_t sent, bool in_trail)
+{
+       struct ceph_msg *msg = con->out_msg;
+
+       BUG_ON(!msg);
+       BUG_ON(!sent);
+
+       con->out_msg_pos.data_pos += sent;
+       con->out_msg_pos.page_pos += sent;
+       if (sent == len) {
+               con->out_msg_pos.page_pos = 0;
+               con->out_msg_pos.page++;
+               con->out_msg_pos.did_page_crc = false;
+               if (in_trail)
+                       list_move_tail(&page->lru,
+                                      &msg->trail->head);
+               else if (msg->pagelist)
+                       list_move_tail(&page->lru,
+                                      &msg->pagelist->head);
+#ifdef CONFIG_BLOCK
+               else if (msg->bio)
+                       iter_bio_next(&msg->bio_iter, &msg->bio_seg);
+#endif
+       }
+}
+
 /*
  * Write as much message data payload as we can.  If we finish, queue
  * up the footer.
@@ -906,11 +933,11 @@ static int write_partial_msg_pages(struc
        bool do_datacrc = !con->msgr->nocrc;
        int ret;
        int total_max_write;
-       int in_trail = 0;
+       bool in_trail = false;
        size_t trail_len = (msg->trail ? msg->trail->length : 0);
 
        dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
-            con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
+            con, msg, con->out_msg_pos.page, msg->nr_pages,
             con->out_msg_pos.page_pos);
 
 #ifdef CONFIG_BLOCK
@@ -934,13 +961,12 @@ static int write_partial_msg_pages(struc
 
                /* have we reached the trail part of the data? */
                if (con->out_msg_pos.data_pos >= data_len - trail_len) {
-                       in_trail = 1;
+                       in_trail = true;
 
                        total_max_write = data_len - con->out_msg_pos.data_pos;
 
                        page = list_first_entry(&msg->trail->head,
                                                struct page, lru);
-                       max_write = PAGE_SIZE;
                } else if (msg->pages) {
                        page = msg->pages[con->out_msg_pos.page];
                } else if (msg->pagelist) {
@@ -964,14 +990,14 @@ static int write_partial_msg_pages(struc
                if (do_datacrc && !con->out_msg_pos.did_page_crc) {
                        void *base;
                        u32 crc;
-                       u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
+                       u32 tmpcrc = le32_to_cpu(msg->footer.data_crc);
                        char *kaddr;
 
                        kaddr = kmap(page);
                        BUG_ON(kaddr == NULL);
                        base = kaddr + con->out_msg_pos.page_pos + bio_offset;
                        crc = crc32c(tmpcrc, base, len);
-                       con->out_msg->footer.data_crc = cpu_to_le32(crc);
+                       msg->footer.data_crc = cpu_to_le32(crc);
                        con->out_msg_pos.did_page_crc = true;
                }
                ret = ceph_tcp_sendpage(con->sock, page,
@@ -984,30 +1010,14 @@ static int write_partial_msg_pages(struc
                if (ret <= 0)
                        goto out;
 
-               con->out_msg_pos.data_pos += ret;
-               con->out_msg_pos.page_pos += ret;
-               if (ret == len) {
-                       con->out_msg_pos.page_pos = 0;
-                       con->out_msg_pos.page++;
-                       con->out_msg_pos.did_page_crc = false;
-                       if (in_trail)
-                               list_move_tail(&page->lru,
-                                              &msg->trail->head);
-                       else if (msg->pagelist)
-                               list_move_tail(&page->lru,
-                                              &msg->pagelist->head);
-#ifdef CONFIG_BLOCK
-                       else if (msg->bio)
-                               iter_bio_next(&msg->bio_iter, &msg->bio_seg);
-#endif
-               }
+               out_msg_pos_next(con, page, len, (size_t) ret, in_trail);
        }
 
        dout("write_partial_msg_pages %p msg %p done\n", con, msg);
 
        /* prepare and queue up footer, too */
        if (!do_datacrc)
-               con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
+               msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
        con_out_kvec_reset(con);
        prepare_write_message_footer(con);
        ret = 1;


Patches currently in stable-queue which might be from [email protected] are

queue-3.4/0073-libceph-clear-CONNECTING-in-ceph_con_close.patch
queue-3.4/0020-ceph-ensure-auth-ops-are-defined-before-use.patch
queue-3.4/0025-ceph-add-auth-buf-in-prepare_write_connect.patch
queue-3.4/0021-ceph-have-get_authorizer-methods-return-pointers.patch
queue-3.4/0026-libceph-avoid-unregistering-osd-request-when-not-reg.patch
queue-3.4/0077-libceph-distinguish-two-phases-of-connect-sequence.patch
queue-3.4/0045-libceph-provide-osd-number-when-creating-osd.patch
queue-3.4/0059-libceph-transition-socket-state-prior-to-actual-conn.patch
queue-3.4/0005-crush-fix-memory-leak-when-destroying-tree-buckets.patch
queue-3.4/0002-crush-adjust-local-retry-threshold.patch
queue-3.4/0023-ceph-return-pointer-from-prepare_connect_authorizer.patch
queue-3.4/0055-libceph-make-ceph_con_revoke_message-a-msg-op.patch
queue-3.4/0036-rbd-Fix-ceph_snap_context-size-calculation.patch
queue-3.4/0054-libceph-make-ceph_con_revoke-a-msg-operation.patch
queue-3.4/0066-libceph-move-init_bio_-functions-up.patch
queue-3.4/0018-ceph-define-ceph_auth_handshake-type.patch
queue-3.4/0063-libceph-encapsulate-out-message-data-setup.patch
queue-3.4/0076-libceph-separate-banner-and-connect-writes.patch
queue-3.4/0040-libceph-rename-socket-callbacks.patch
queue-3.4/0011-ceph-messenger-reset-connection-kvec-caller.patch
queue-3.4/0070-libceph-don-t-change-socket-state-on-sock-event.patch
queue-3.4/0061-libceph-use-con-get-put-methods.patch
queue-3.4/0074-libceph-clear-NEGOTIATING-when-done.patch
queue-3.4/0019-ceph-messenger-reduce-args-to-create_authorizer.patch
queue-3.4/0041-libceph-rename-kvec_reset-and-kvec_add-functions.patch
queue-3.4/0047-libceph-embed-ceph-connection-structure-in-mon_clien.patch
queue-3.4/0029-libceph-use-con-get-put-ops-from-osd_client.patch
queue-3.4/0051-libceph-tweak-ceph_alloc_msg.patch
queue-3.4/0064-libceph-encapsulate-advancing-msg-page.patch
queue-3.4/0075-libceph-define-and-use-an-explicit-CONNECTED-state.patch
queue-3.4/0060-libceph-fix-NULL-dereference-in-reset_connection.patch
queue-3.4/0015-ceph-messenger-check-prepare_write_connect-result.patch
queue-3.4/0003-crush-be-more-tolerant-of-nonsensical-crush-maps.patch
queue-3.4/0028-libceph-osd_client-don-t-drop-reply-reference-too-ea.patch
queue-3.4/0038-libceph-eliminate-connection-state-DEAD.patch
queue-3.4/0014-ceph-don-t-set-WRITE_PENDING-too-early.patch
queue-3.4/0049-libceph-init-monitor-connection-when-opening.patch
queue-3.4/0016-ceph-messenger-rework-prepare_connect_authorizer.patch
queue-3.4/0068-libceph-don-t-use-bio_iter-as-a-flag.patch
queue-3.4/0058-libceph-fix-overflow-in-osdmap_apply_incremental.patch
queue-3.4/0048-libceph-drop-connection-refcounting-for-mon_client.patch
queue-3.4/0039-libceph-kill-bad_proto-ceph-connection-op.patch
queue-3.4/0031-libceph-flush-msgr-queue-during-mon_client-shutdown.patch
queue-3.4/0013-ceph-drop-msgr-argument-from-prepare_write_connect.patch
queue-3.4/0030-rbd-Clear-ceph_msg-bio_iter-for-retransmitted-messag.patch
queue-3.4/0043-libceph-start-separating-connection-flags-from-state.patch
queue-3.4/0046-libceph-set-CLOSED-state-bit-in-con_init.patch
queue-3.4/0057-libceph-fix-overflow-in-osdmap_decode.patch
queue-3.4/0009-ceph-messenger-change-read_partial-to-take-end-arg.patch
queue-3.4/0017-ceph-messenger-check-return-from-get_authorizer.patch
queue-3.4/0056-libceph-fix-overflow-in-__decode_pool_names.patch
queue-3.4/0001-crush-clean-up-types-const-ness.patch
queue-3.4/0072-libceph-don-t-touch-con-state-in-con_close_socket.patch
queue-3.4/0044-libceph-start-tracking-connection-socket-state.patch
queue-3.4/0071-libceph-just-set-SOCK_CLOSED-when-state-changes.patch
queue-3.4/0022-ceph-use-info-returned-by-get_authorizer.patch
queue-3.4/0065-libceph-don-t-mark-footer-complete-before-it-is.patch
queue-3.4/0027-libceph-fix-pg_temp-updates.patch
queue-3.4/0079-libceph-add-some-fine-ASCII-art.patch
queue-3.4/0008-ceph-messenger-update-to-in-read_partial-caller.patch
queue-3.4/0007-ceph-messenger-use-read_partial-in-read_partial_mess.patch
queue-3.4/0078-libceph-small-changes-to-messenger.c.patch
queue-3.4/0010-libceph-don-t-reset-kvec-in-prepare_write_banner.patch
queue-3.4/0052-libceph-have-messages-point-to-their-connection.patch
queue-3.4/0067-libceph-move-init-of-bio_iter.patch
queue-3.4/0050-libceph-fully-initialize-connection-in-con_init.patch
queue-3.4/0053-libceph-have-messages-take-a-connection-reference.patch
queue-3.4/0012-ceph-messenger-send-banner-in-process_connect.patch
queue-3.4/0024-ceph-rename-prepare_connect_authorizer.patch
queue-3.4/0042-libceph-embed-ceph-messenger-structure-in-ceph_clien.patch
queue-3.4/0069-libceph-SOCK_CLOSED-is-a-flag-not-a-state.patch
queue-3.4/0004-crush-fix-tree-node-weight-lookup.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html