This is a note to let you know that I've just added the patch titled

    libceph: set peer name on con_open, not init

to the 3.4-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     0080-libceph-set-peer-name-on-con_open-not-init.patch
and it can be found in the queue-3.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <[email protected]> know about it.


From 59e72ea89b6ed9d4dedd6bd25dbebef20c4e8e31 Mon Sep 17 00:00:00 2001
From: Sage Weil <[email protected]>
Date: Wed, 27 Jun 2012 12:24:08 -0700
Subject: libceph: set peer name on con_open, not init

From: Sage Weil <[email protected]>

(cherry picked from commit b7a9e5dd40f17a48a72f249b8bbc989b63bae5fd)

The peer name may change on each open attempt, even when the connection is
reused.

Signed-off-by: Sage Weil <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
 fs/ceph/mds_client.c           |    7 ++++---
 include/linux/ceph/messenger.h |    4 ++--
 net/ceph/messenger.c           |   12 +++++++-----
 net/ceph/mon_client.c          |    4 ++--
 net/ceph/osd_client.c          |   10 ++++++----
 5 files changed, 21 insertions(+), 16 deletions(-)

--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -394,8 +394,7 @@ static struct ceph_mds_session *register
        s->s_seq = 0;
        mutex_init(&s->s_mutex);
 
-       ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr,
-               CEPH_ENTITY_TYPE_MDS, mds);
+       ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
 
        spin_lock_init(&s->s_gen_ttl_lock);
        s->s_cap_gen = 0;
@@ -437,7 +436,8 @@ static struct ceph_mds_session *register
        mdsc->sessions[mds] = s;
        atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
 
-       ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
+       ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
+                     ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
 
        return s;
 
@@ -2529,6 +2529,7 @@ static void send_mds_reconnect(struct ce
        session->s_seq = 0;
 
        ceph_con_open(&session->s_con,
+                     CEPH_ENTITY_TYPE_MDS, mds,
                      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
 
        /* replay unsafe requests */
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -224,9 +224,9 @@ extern void ceph_messenger_init(struct c
 
 extern void ceph_con_init(struct ceph_connection *con, void *private,
                        const struct ceph_connection_operations *ops,
-                       struct ceph_messenger *msgr, __u8 entity_type,
-                       __u64 entity_num);
+                       struct ceph_messenger *msgr);
 extern void ceph_con_open(struct ceph_connection *con,
+                         __u8 entity_type, __u64 entity_num,
                          struct ceph_entity_addr *addr);
 extern bool ceph_con_opened(struct ceph_connection *con);
 extern void ceph_con_close(struct ceph_connection *con);
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -523,12 +523,17 @@ EXPORT_SYMBOL(ceph_con_close);
 /*
  * Reopen a closed connection, with a new peer address.
  */
-void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
+void ceph_con_open(struct ceph_connection *con,
+                  __u8 entity_type, __u64 entity_num,
+                  struct ceph_entity_addr *addr)
 {
        dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
        set_bit(OPENING, &con->state);
        WARN_ON(!test_and_clear_bit(CLOSED, &con->state));
 
+       con->peer_name.type = (__u8) entity_type;
+       con->peer_name.num = cpu_to_le64(entity_num);
+
        memcpy(&con->peer_addr, addr, sizeof(*addr));
        con->delay = 0;      /* reset backoff memory */
        queue_con(con);
@@ -548,7 +553,7 @@ bool ceph_con_opened(struct ceph_connect
  */
 void ceph_con_init(struct ceph_connection *con, void *private,
        const struct ceph_connection_operations *ops,
-       struct ceph_messenger *msgr, __u8 entity_type, __u64 entity_num)
+       struct ceph_messenger *msgr)
 {
        dout("con_init %p\n", con);
        memset(con, 0, sizeof(*con));
@@ -558,9 +563,6 @@ void ceph_con_init(struct ceph_connectio
 
        con_sock_state_init(con);
 
-       con->peer_name.type = (__u8) entity_type;
-       con->peer_name.num = cpu_to_le64(entity_num);
-
        mutex_init(&con->mutex);
        INIT_LIST_HEAD(&con->out_queue);
        INIT_LIST_HEAD(&con->out_sent);
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -143,11 +143,11 @@ static int __open_session(struct ceph_mo
                monc->want_next_osdmap = !!monc->want_next_osdmap;
 
                ceph_con_init(&monc->con, monc, &mon_con_ops,
-                       &monc->client->msgr,
-                       CEPH_ENTITY_TYPE_MON, monc->cur_mon);
+                       &monc->client->msgr);
 
                dout("open_session mon%d opening\n", monc->cur_mon);
                ceph_con_open(&monc->con,
+                             CEPH_ENTITY_TYPE_MON, monc->cur_mon,
                              &monc->monmap->mon_inst[monc->cur_mon].addr);
 
                /* initiatiate authentication handshake */
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -639,8 +639,7 @@ static struct ceph_osd *create_osd(struc
        INIT_LIST_HEAD(&osd->o_osd_lru);
        osd->o_incarnation = 1;
 
-       ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr,
-               CEPH_ENTITY_TYPE_OSD, onum);
+       ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
 
        INIT_LIST_HEAD(&osd->o_keepalive_item);
        return osd;
@@ -750,7 +749,8 @@ static int __reset_osd(struct ceph_osd_c
                ret = -EAGAIN;
        } else {
                ceph_con_close(&osd->o_con);
-               ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
+               ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
+                             &osdc->osdmap->osd_addr[osd->o_osd]);
                osd->o_incarnation++;
        }
        return ret;
@@ -1005,7 +1005,9 @@ static int __map_request(struct ceph_osd
                dout("map_request osd %p is osd%d\n", req->r_osd, o);
                __insert_osd(osdc, req->r_osd);
 
-               ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
+               ceph_con_open(&req->r_osd->o_con,
+                             CEPH_ENTITY_TYPE_OSD, o,
+                             &osdc->osdmap->osd_addr[o]);
        }
 
        if (req->r_osd) {

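For readers tracking the API change: with this patch, the peer's entity type
and number are supplied on every ceph_con_open() call instead of being fixed
once at ceph_con_init() time, so a connection that is reused can be
re-pointed at a different peer. A minimal caller-side sketch (not part of the
patch; names mirror the osd_client hunks above, and "new_osd" is a
hypothetical stand-in for whichever OSD the updated map selects):

    /* Initialize once; no peer identity is recorded here any more. */
    ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

    /*
     * Each (re)open names the peer explicitly, so the same connection
     * can target a different OSD after a mapping change.
     */
    ceph_con_close(&osd->o_con);
    ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, new_osd,
                  &osdc->osdmap->osd_addr[new_osd]);
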

Patches currently in stable-queue which might be from [email protected] are

queue-3.4/0073-libceph-clear-CONNECTING-in-ceph_con_close.patch
queue-3.4/0020-ceph-ensure-auth-ops-are-defined-before-use.patch
queue-3.4/0025-ceph-add-auth-buf-in-prepare_write_connect.patch
queue-3.4/0021-ceph-have-get_authorizer-methods-return-pointers.patch
queue-3.4/0026-libceph-avoid-unregistering-osd-request-when-not-reg.patch
queue-3.4/0077-libceph-distinguish-two-phases-of-connect-sequence.patch
queue-3.4/0045-libceph-provide-osd-number-when-creating-osd.patch
queue-3.4/0059-libceph-transition-socket-state-prior-to-actual-conn.patch
queue-3.4/0084-libceph-prevent-the-race-of-incoming-work-during-tea.patch
queue-3.4/0005-crush-fix-memory-leak-when-destroying-tree-buckets.patch
queue-3.4/0002-crush-adjust-local-retry-threshold.patch
queue-3.4/0091-libceph-fix-fault-locking-close-socket-on-lossy-faul.patch
queue-3.4/0088-libceph-re-initialize-bio_iter-on-start-of-message-r.patch
queue-3.4/0023-ceph-return-pointer-from-prepare_connect_authorizer.patch
queue-3.4/0055-libceph-make-ceph_con_revoke_message-a-msg-op.patch
queue-3.4/0090-libceph-reset-connection-retry-on-successfully-negot.patch
queue-3.4/0054-libceph-make-ceph_con_revoke-a-msg-operation.patch
queue-3.4/0098-libceph-clean-up-con-flags.patch
queue-3.4/0093-libceph-move-ceph_con_send-closed-check-under-the-co.patch
queue-3.4/0066-libceph-move-init_bio_-functions-up.patch
queue-3.4/0018-ceph-define-ceph_auth_handshake-type.patch
queue-3.4/0063-libceph-encapsulate-out-message-data-setup.patch
queue-3.4/0076-libceph-separate-banner-and-connect-writes.patch
queue-3.4/0040-libceph-rename-socket-callbacks.patch
queue-3.4/0011-ceph-messenger-reset-connection-kvec-caller.patch
queue-3.4/0032-libceph-fix-messenger-retry.patch
queue-3.4/0070-libceph-don-t-change-socket-state-on-sock-event.patch
queue-3.4/0061-libceph-use-con-get-put-methods.patch
queue-3.4/0074-libceph-clear-NEGOTIATING-when-done.patch
queue-3.4/0019-ceph-messenger-reduce-args-to-create_authorizer.patch
queue-3.4/0041-libceph-rename-kvec_reset-and-kvec_add-functions.patch
queue-3.4/0047-libceph-embed-ceph-connection-structure-in-mon_clien.patch
queue-3.4/0029-libceph-use-con-get-put-ops-from-osd_client.patch
queue-3.4/0051-libceph-tweak-ceph_alloc_msg.patch
queue-3.4/0064-libceph-encapsulate-advancing-msg-page.patch
queue-3.4/0075-libceph-define-and-use-an-explicit-CONNECTED-state.patch
queue-3.4/0082-libceph-allow-sock-transition-from-CONNECTING-to-CLO.patch
queue-3.4/0015-ceph-messenger-check-prepare_write_connect-result.patch
queue-3.4/0003-crush-be-more-tolerant-of-nonsensical-crush-maps.patch
queue-3.4/0028-libceph-osd_client-don-t-drop-reply-reference-too-ea.patch
queue-3.4/0014-ceph-don-t-set-WRITE_PENDING-too-early.patch
queue-3.4/0049-libceph-init-monitor-connection-when-opening.patch
queue-3.4/0016-ceph-messenger-rework-prepare_connect_authorizer.patch
queue-3.4/0097-libceph-replace-connection-state-bits-with-states.patch
queue-3.4/0068-libceph-don-t-use-bio_iter-as-a-flag.patch
queue-3.4/0062-libceph-drop-ceph_con_get-put-helpers-and-nref-membe.patch
queue-3.4/0089-libceph-protect-ceph_con_open-with-mutex.patch
queue-3.4/0048-libceph-drop-connection-refcounting-for-mon_client.patch
queue-3.4/0031-libceph-flush-msgr-queue-during-mon_client-shutdown.patch
queue-3.4/0094-libceph-drop-gratuitous-socket-close-calls-in-con_wo.patch
queue-3.4/0013-ceph-drop-msgr-argument-from-prepare_write_connect.patch
queue-3.4/0080-libceph-set-peer-name-on-con_open-not-init.patch
queue-3.4/0043-libceph-start-separating-connection-flags-from-state.patch
queue-3.4/0046-libceph-set-CLOSED-state-bit-in-con_init.patch
queue-3.4/0085-libceph-report-socket-read-write-error-message.patch
queue-3.4/0083-libceph-initialize-msgpool-message-types.patch
queue-3.4/0092-libceph-move-msgr-clear_standby-under-con-mutex-prot.patch
queue-3.4/0095-libceph-close-socket-directly-from-ceph_con_close.patch
queue-3.4/0009-ceph-messenger-change-read_partial-to-take-end-arg.patch
queue-3.4/0096-libceph-drop-unnecessary-CLOSED-check-in-socket-stat.patch
queue-3.4/0017-ceph-messenger-check-return-from-get_authorizer.patch
queue-3.4/0086-libceph-fix-mutex-coverage-for-ceph_con_close.patch
queue-3.4/0001-crush-clean-up-types-const-ness.patch
queue-3.4/0072-libceph-don-t-touch-con-state-in-con_close_socket.patch
queue-3.4/0037-ceph-check-PG_Private-flag-before-accessing-page-pri.patch
queue-3.4/0044-libceph-start-tracking-connection-socket-state.patch
queue-3.4/0099-libceph-clear-all-flags-on-con_close.patch
queue-3.4/0071-libceph-just-set-SOCK_CLOSED-when-state-changes.patch
queue-3.4/0022-ceph-use-info-returned-by-get_authorizer.patch
queue-3.4/0065-libceph-don-t-mark-footer-complete-before-it-is.patch
queue-3.4/0027-libceph-fix-pg_temp-updates.patch
queue-3.4/0079-libceph-add-some-fine-ASCII-art.patch
queue-3.4/0008-ceph-messenger-update-to-in-read_partial-caller.patch
queue-3.4/0007-ceph-messenger-use-read_partial-in-read_partial_mess.patch
queue-3.4/0078-libceph-small-changes-to-messenger.c.patch
queue-3.4/0010-libceph-don-t-reset-kvec-in-prepare_write_banner.patch
queue-3.4/0087-libceph-resubmit-linger-ops-when-pg-mapping-changes.patch
queue-3.4/0052-libceph-have-messages-point-to-their-connection.patch
queue-3.4/0067-libceph-move-init-of-bio_iter.patch
queue-3.4/0081-libceph-initialize-mon_client-con-only-once.patch
queue-3.4/0050-libceph-fully-initialize-connection-in-con_init.patch
queue-3.4/0053-libceph-have-messages-take-a-connection-reference.patch
queue-3.4/0012-ceph-messenger-send-banner-in-process_connect.patch
queue-3.4/0024-ceph-rename-prepare_connect_authorizer.patch
queue-3.4/0042-libceph-embed-ceph-messenger-structure-in-ceph_clien.patch
queue-3.4/0069-libceph-SOCK_CLOSED-is-a-flag-not-a-state.patch
queue-3.4/0004-crush-fix-tree-node-weight-lookup.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
