- Problem: a sender may broadcast a large message (> 65K) followed by small
messages (< 65K). MDS currently loops over all destinations and unicasts
the fragments of a large message to each destination in turn, while
non-fragmented messages are multicast to all destinations at once. A
destination late in the unicast loop can therefore receive a small message
before the fragments that were broadcast ahead of it, i.e. the broadcast
messages arrive out of order. For example, this can trigger OUT OF ORDER
errors on the IMMNDs during IMMD sync.
- Solution: multicast each fragmented message as well, so that broadcast
messages arrive in the order they were sent (a standalone sketch of the
new dispatch decision follows).
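- Illustration (not part of the change itself): a minimal, standalone C
sketch of the per-fragment dispatch decision this patch adds to
mdtm_frag_and_send(). The enum, struct, and helper names below are
stand-ins rather than the real MDS types; only the branch condition
mirrors the diff.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    enum snd_type { SNDTYPE_NORMAL, SNDTYPE_BCAST, SNDTYPE_RBCAST };

    struct send_req {            /* stand-in for MDTM_SEND_REQ */
            enum snd_type snd_type;
            int version;         /* stand-in for req->msg_arch_word & 0x7 */
    };

    static bool tipc_mcast_enabled = true;  /* stand-in for the MDS flag */

    /* Stand-ins for mdtm_mcast_sendto()/mdtm_sendto(); 0 means success. */
    static int mcast_send(const void *frag, size_t len)
    {
            (void)frag;
            printf("multicast %zu-byte fragment to all destinations\n", len);
            return 0;
    }

    static int ucast_send(const void *frag, size_t len)
    {
            (void)frag;
            printf("unicast %zu-byte fragment to one destination\n", len);
            return 0;
    }

    /* Per fragment: broadcasts on a new-enough protocol version go out as
     * one multicast, so fragments keep their order relative to the small,
     * non-fragmented broadcast messages that are already multicast. */
    static int send_fragment(const struct send_req *req,
                             const void *frag, size_t len)
    {
            if ((req->snd_type == SNDTYPE_BCAST ||
                 req->snd_type == SNDTYPE_RBCAST) &&
                req->version > 0 && tipc_mcast_enabled)
                    return mcast_send(frag, len);
            return ucast_send(frag, len);
    }

    int main(void)
    {
            struct send_req req = { .snd_type = SNDTYPE_BCAST, .version = 1 };
            char frag[1024] = { 0 };

            /* A fragmented broadcast: every fragment takes the multicast
             * path; stop on the first failed fragment, as the patch does. */
            for (int i = 0; i < 3; i++)
                    if (send_fragment(&req, frag, sizeof(frag)) != 0)
                            return 1;
            return 0;
    }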
---
 src/mds/mds_c_sndrcv.c |  3 +--
 src/mds/mds_dt_tipc.c  | 55 ++++++++++++++++++++++++++++++++++++++++----------
 2 files changed, 45 insertions(+), 13 deletions(-)

diff --git a/src/mds/mds_c_sndrcv.c b/src/mds/mds_c_sndrcv.c
index 703bc8e..7850ac7 100644
--- a/src/mds/mds_c_sndrcv.c
+++ b/src/mds/mds_c_sndrcv.c
@@ -4496,8 +4496,7 @@ static uint32_t mcm_pvt_process_svc_bcast_common(
                                              info_result->key.vdest_id, req, 0,
                                              info_result->key.adest, pri);
                if ((svc_cb->subtn_info->prev_ver_sub_count == 0) &&
-                   (tipc_mode_enabled) && (tipc_mcast_enabled) &&
-                   (to_msg.bcast_buff_len < MDS_DIRECT_BUF_MAXSIZE)) {
+                   (tipc_mode_enabled) && (tipc_mcast_enabled)) {
                        m_MDS_LOG_DBG(
                            "MDTM: Break while(1) prev_ver_sub_count: %d  svc_id =%s(%d)  to_msg.bcast_buff_len: %d ",
                            svc_cb->subtn_info->prev_ver_sub_count,
diff --git a/src/mds/mds_dt_tipc.c b/src/mds/mds_dt_tipc.c
index a3abff5..656f79c 100644
--- a/src/mds/mds_dt_tipc.c
+++ b/src/mds/mds_dt_tipc.c
@@ -2856,6 +2856,7 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num,
        uint16_t frag_val = 0;
        uint32_t sum_mds_hdr_plus_mdtm_hdr_plus_len;
        int version = req->msg_arch_word & 0x7;
+       uint32_t ret = NCSCC_RC_SUCCESS;
 
        if (version > 1) {
                sum_mds_hdr_plus_mdtm_hdr_plus_len =
@@ -2952,11 +2953,23 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num,
                                        free(body);
                                        return NCSCC_RC_FAILURE;
                                }
-                               m_MDS_LOG_DBG(
-                                   "MDTM:Sending message with Service Seqno=%d, Fragment Seqnum=%d, frag_num=%d, TO Dest_Tipc_id=<0x%08x:%u>",
-                                   req->svc_seq_num, seq_num, frag_val,
-                                   id.node, id.ref);
-                               mdtm_sendto(body, len_buf, id);
+                               if (((req->snd_type == MDS_SENDTYPE_RBCAST) ||
+                                    (req->snd_type == MDS_SENDTYPE_BCAST)) &&
+                                   (version > 0) && (tipc_mcast_enabled)) {
+                                       m_MDS_LOG_DBG(
+                                           "MDTM:Send Multicast message with Service Seqno=%d, Fragment Seqnum=%d, frag_num=%d "
+                                           "From svc_id = %s(%d) TO svc_id = %s(%d)",
+                                           req->svc_seq_num, seq_num, frag_val,
+                                           get_svc_names(req->src_svc_id), req->src_svc_id,
+                                           get_svc_names(req->dest_svc_id), req->dest_svc_id);
+                                       ret = mdtm_mcast_sendto(body, len_buf, req);
+                               } else {
+                                       m_MDS_LOG_DBG(
+                                           "MDTM:Sending message with Service Seqno=%d, Fragment Seqnum=%d, frag_num=%d, TO Dest_Tipc_id=<0x%08x:%u>",
+                                           req->svc_seq_num, seq_num, frag_val,
+                                           id.node, id.ref);
+                                       ret = mdtm_sendto(body, len_buf, id);
+                               }
                                m_MMGR_REMOVE_FROM_START(
                                    &usrbuf,
                                    len_buf -
@@ -2965,6 +2978,9 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num,
                                len =
                                    len - (len_buf -
                                           sum_mds_hdr_plus_mdtm_hdr_plus_len);
+                               if (ret != NCSCC_RC_SUCCESS)
+                                       // Failed to send a fragmented msg, stop sending
+                                       break;
                        } else {
                                p8 = (uint8_t *)m_MMGR_DATA_AT_START(
                                    usrbuf, len_buf - MDTM_FRAG_HDR_PLUS_LEN_2,
@@ -2984,11 +3000,23 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num,
                                        free(body);
                                        return NCSCC_RC_FAILURE;
                                }
-                               m_MDS_LOG_DBG(
-                                   "MDTM:Sending message with Service Seqno=%d, Fragment Seqnum=%d, frag_num=%d, TO Dest_Tipc_id=<0x%08x:%u>",
-                                   req->svc_seq_num, seq_num, frag_val,
-                                   id.node, id.ref);
-                               mdtm_sendto(body, len_buf, id);
+                               if (((req->snd_type == MDS_SENDTYPE_RBCAST) ||
+                                    (req->snd_type == MDS_SENDTYPE_BCAST)) &&
+                                   (version > 0) && (tipc_mcast_enabled)) {
+                                       m_MDS_LOG_DBG(
+                                           "MDTM:Send Multicast message with Service Seqno=%d, Fragment Seqnum=%d, frag_num=%d "
+                                           "From svc_id = %s(%d) TO svc_id = %s(%d)",
+                                           req->svc_seq_num, seq_num, frag_val,
+                                           get_svc_names(req->src_svc_id), req->src_svc_id,
+                                           get_svc_names(req->dest_svc_id), req->dest_svc_id);
+                                       ret = mdtm_mcast_sendto(body, len_buf, req);
+                               } else {
+                                       m_MDS_LOG_DBG(
+                                           "MDTM:Sending message with Service Seqno=%d, Fragment Seqnum=%d, frag_num=%d, TO Dest_Tipc_id=<0x%08x:%u>",
+                                           req->svc_seq_num, seq_num, frag_val,
+                                           id.node, id.ref);
+                                       ret = mdtm_sendto(body, len_buf, id);
+                               }
                                m_MMGR_REMOVE_FROM_START(
                                    &usrbuf,
                                    (len_buf - MDTM_FRAG_HDR_PLUS_LEN_2));
@@ -2997,12 +3025,15 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num,
                                    len - (len_buf - MDTM_FRAG_HDR_PLUS_LEN_2);
                                if (len == 0)
                                        break;
+                               if (ret != NCSCC_RC_SUCCESS)
+                                       // Failed to send a fragmented msg, stop sending
+                                       break;
                        }
                }
                i++;
                frag_val = 0;
        }
-       return NCSCC_RC_SUCCESS;
+       return ret;
 }
 
 /*********************************************************
@@ -3134,6 +3165,8 @@ static uint32_t mdtm_mcast_sendto(void *buffer, size_t size,
                m_MDS_LOG_INFO("MDTM: Successfully sent message");
                return NCSCC_RC_SUCCESS;
        } else {
+               m_MDS_LOG_ERR("MDTM: Failed to send Multicast message err: %s",
+                             strerror(errno));
                return NCSCC_RC_FAILURE;
        }
 }
-- 
2.7.4