Re: [devel] [PATCH 1/1] mds: Disable mds flow control for mds broadcast/multicast message [#3101]

2019-10-20 Thread Minh Hon Chau

Hi Thuan,

The patch is acked and I pushed it.

The commit message may cause you a misunderstanding, but in this context
it does not mention anything regarding configuration, so I hope it's ok.


Another comment inline.

Thanks

Minh

On 21/10/19 1:56 pm, Tran Thuan wrote:

Hi Minh,

I suggest the commit message as follows:
mds: skip flow control for bcast/mcast message if tipc multicast
enabled.
because "disable mds flow control" may be misunderstood as overriding the
configuration MDS_TIPC_FCTRL_ENABLED

And another comment with [Thuan] inline. Thanks.

Best Regards,
ThuanTr

-Original Message-
From: Minh Chau 
Sent: Thursday, October 17, 2019 10:00 AM
To: hans.nordeb...@ericsson.com; thuan.t...@dektech.com.au;
gary@dektech.com.au; vu.m.ngu...@dektech.com.au
Cc: opensaf-devel@lists.sourceforge.net; Minh Chau

Subject: [PATCH 1/1] mds: Disable mds flow control for mds
broadcast/multicast message [#3101]

The mds flow control has been disabled for broadcast/multicast unfragmented
messages if tipc multicast is enabled. This patch revisits and continues with
fragmented messages.
---
  src/mds/mds_tipc_fctrl_intf.cc   | 47

  src/mds/mds_tipc_fctrl_msg.h | 11 +++---
  src/mds/mds_tipc_fctrl_portid.cc | 47
++--
  src/mds/mds_tipc_fctrl_portid.h  |  3 ++-
  4 files changed, 69 insertions(+), 39 deletions(-)

diff --git a/src/mds/mds_tipc_fctrl_intf.cc b/src/mds/mds_tipc_fctrl_intf.cc
index b803bfe..fe3dbd5 100644
--- a/src/mds/mds_tipc_fctrl_intf.cc
+++ b/src/mds/mds_tipc_fctrl_intf.cc
@@ -133,7 +133,7 @@ uint32_t process_flow_event(const Event& evt) {
kChunkAckSize, sock_buf_size);
portid_map[TipcPortId::GetUniqueId(evt.id_)] = portid;
rc = portid->ReceiveData(evt.mseq_, evt.mfrag_,
-evt.fseq_, evt.svc_id_);
+evt.fseq_, evt.svc_id_, evt.snd_type_, is_mcast_enabled);
  } else if (evt.type_ == Event::Type::kEvtRcvIntro) {
portid = new TipcPortId(evt.id_, data_sock_fd,
kChunkAckSize, sock_buf_size); @@ -147,7 +147,7 @@ uint32_t
process_flow_event(const Event& evt) {
} else {
  if (evt.type_ == Event::Type::kEvtRcvData) {
rc = portid->ReceiveData(evt.mseq_, evt.mfrag_,
-  evt.fseq_, evt.svc_id_);
+  evt.fseq_, evt.svc_id_, evt.snd_type_, is_mcast_enabled);
  }
  if (evt.type_ == Event::Type::kEvtRcvChunkAck) {
portid->ReceiveChunkAck(evt.fseq_, evt.chunk_size_); @@ -430,6 +430,7
@@ uint32_t mds_tipc_fctrl_drop_data(uint8_t *buffer, uint16_t len,
  
HeaderMessage header;

header.Decode(buffer);
+  Event* pevt = nullptr;
// if mds support flow control
if ((header.pro_ver_ & MDS_PROT_VER_MASK) == MDS_PROT_FCTRL) {
  if (header.pro_id_ == MDS_PROT_FCTRL_ID) { @@ -438,9 +439,10 @@
uint32_t mds_tipc_fctrl_drop_data(uint8_t *buffer, uint16_t len,
  ChunkAck ack;
  ack.Decode(buffer);
  // send to the event thread
-if (m_NCS_IPC_SEND(_events,
-new Event(Event::Type::kEvtSendChunkAck, id, ack.svc_id_,
-header.mseq_, header.mfrag_, ack.acked_fseq_,
ack.chunk_size_),
+pevt = new Event(Event::Type::kEvtSendChunkAck, id, ack.svc_id_,
+header.mseq_, header.mfrag_, ack.acked_fseq_);
+pevt->chunk_size_ = ack.chunk_size_;
+if (m_NCS_IPC_SEND(_events, pevt,
  NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
m_MDS_LOG_ERR("FCTRL: Failed to send msg to mbx_events,
Error[%s]",
strerror(errno));
@@ -453,9 +455,9 @@ uint32_t mds_tipc_fctrl_drop_data(uint8_t *buffer,
uint16_t len,
DataMessage data;
data.Decode(buffer);
// send to the event thread
-  if (m_NCS_IPC_SEND(_events,
-  new Event(Event::Type::kEvtDropData, id, data.svc_id_,
-  header.mseq_, header.mfrag_, header.fseq_),
+  pevt = new Event(Event::Type::kEvtDropData, id, data.svc_id_,
+  header.mseq_, header.mfrag_, header.fseq_);
+  if (m_NCS_IPC_SEND(_events, pevt,
NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
  m_MDS_LOG_ERR("FCTRL: Failed to send msg to mbx_events, Error[%s]",
  strerror(errno));
@@ -474,6 +476,7 @@ uint32_t mds_tipc_fctrl_rcv_data(uint8_t *buffer,
uint16_t len,
  
HeaderMessage header;

header.Decode(buffer);
+  Event* pevt = nullptr;
// if mds support flow control
if ((header.pro_ver_ & MDS_PROT_VER_MASK) == MDS_PROT_FCTRL) {
  if (header.pro_id_ == MDS_PROT_FCTRL_ID) { @@ -482,9 +485,10 @@
uint32_t mds_tipc_fctrl_rcv_data(uint8_t *buffer, uint16_t len,
  ChunkAck ack;
  ack.Decode(buffer);
  // send to the event thread
-if (m_NCS_IPC_SEND(_events,
-new Event(Event::Type::kEvtRcvChunkAck, id, ack.svc_id_,
-header.mseq_, header.mfrag_, ack.acked_fseq_,
ack.chunk_size_),
+pevt = new Event(Event::Type::kEvtRcvChunkAck, 

Re: [devel] [PATCH 1/1] mds: Disable mds flow control for mds broadcast/multicast message [#3101]

2019-10-20 Thread Tran Thuan
Hi Minh,

I suggest the commit message as follows:
mds: skip flow control for bcast/mcast message if tipc multicast
enabled.
because "disable mds flow control" may be misunderstood as overriding the
configuration MDS_TIPC_FCTRL_ENABLED

And another comment with [Thuan] inline. Thanks.

Best Regards,
ThuanTr

-Original Message-
From: Minh Chau  
Sent: Thursday, October 17, 2019 10:00 AM
To: hans.nordeb...@ericsson.com; thuan.t...@dektech.com.au;
gary@dektech.com.au; vu.m.ngu...@dektech.com.au
Cc: opensaf-devel@lists.sourceforge.net; Minh Chau

Subject: [PATCH 1/1] mds: Disable mds flow control for mds
broadcast/multicast message [#3101]

The mds flow control has been disabled for broadcast/multicast unfragmented
messages if tipc multicast is enabled. This patch revisits and continues with
fragmented messages.
---
 src/mds/mds_tipc_fctrl_intf.cc   | 47

 src/mds/mds_tipc_fctrl_msg.h | 11 +++---
 src/mds/mds_tipc_fctrl_portid.cc | 47
++--
 src/mds/mds_tipc_fctrl_portid.h  |  3 ++-
 4 files changed, 69 insertions(+), 39 deletions(-)

diff --git a/src/mds/mds_tipc_fctrl_intf.cc b/src/mds/mds_tipc_fctrl_intf.cc
index b803bfe..fe3dbd5 100644
--- a/src/mds/mds_tipc_fctrl_intf.cc
+++ b/src/mds/mds_tipc_fctrl_intf.cc
@@ -133,7 +133,7 @@ uint32_t process_flow_event(const Event& evt) {
   kChunkAckSize, sock_buf_size);
   portid_map[TipcPortId::GetUniqueId(evt.id_)] = portid;
   rc = portid->ReceiveData(evt.mseq_, evt.mfrag_,
-evt.fseq_, evt.svc_id_);
+evt.fseq_, evt.svc_id_, evt.snd_type_, is_mcast_enabled);
 } else if (evt.type_ == Event::Type::kEvtRcvIntro) {
   portid = new TipcPortId(evt.id_, data_sock_fd,
   kChunkAckSize, sock_buf_size); @@ -147,7 +147,7 @@ uint32_t
process_flow_event(const Event& evt) {
   } else {
 if (evt.type_ == Event::Type::kEvtRcvData) {
   rc = portid->ReceiveData(evt.mseq_, evt.mfrag_,
-  evt.fseq_, evt.svc_id_);
+  evt.fseq_, evt.svc_id_, evt.snd_type_, is_mcast_enabled);
 }
 if (evt.type_ == Event::Type::kEvtRcvChunkAck) {
   portid->ReceiveChunkAck(evt.fseq_, evt.chunk_size_); @@ -430,6 +430,7
@@ uint32_t mds_tipc_fctrl_drop_data(uint8_t *buffer, uint16_t len,
 
   HeaderMessage header;
   header.Decode(buffer);
+  Event* pevt = nullptr;
   // if mds support flow control
   if ((header.pro_ver_ & MDS_PROT_VER_MASK) == MDS_PROT_FCTRL) {
 if (header.pro_id_ == MDS_PROT_FCTRL_ID) { @@ -438,9 +439,10 @@
uint32_t mds_tipc_fctrl_drop_data(uint8_t *buffer, uint16_t len,
 ChunkAck ack;
 ack.Decode(buffer);
 // send to the event thread
-if (m_NCS_IPC_SEND(_events,
-new Event(Event::Type::kEvtSendChunkAck, id, ack.svc_id_,
-header.mseq_, header.mfrag_, ack.acked_fseq_,
ack.chunk_size_),
+pevt = new Event(Event::Type::kEvtSendChunkAck, id, ack.svc_id_,
+header.mseq_, header.mfrag_, ack.acked_fseq_);
+pevt->chunk_size_ = ack.chunk_size_;
+if (m_NCS_IPC_SEND(_events, pevt,
 NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
   m_MDS_LOG_ERR("FCTRL: Failed to send msg to mbx_events,
Error[%s]",
   strerror(errno));
@@ -453,9 +455,9 @@ uint32_t mds_tipc_fctrl_drop_data(uint8_t *buffer,
uint16_t len,
   DataMessage data;
   data.Decode(buffer);
   // send to the event thread
-  if (m_NCS_IPC_SEND(_events,
-  new Event(Event::Type::kEvtDropData, id, data.svc_id_,
-  header.mseq_, header.mfrag_, header.fseq_),
+  pevt = new Event(Event::Type::kEvtDropData, id, data.svc_id_,
+  header.mseq_, header.mfrag_, header.fseq_);
+  if (m_NCS_IPC_SEND(_events, pevt,
   NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
 m_MDS_LOG_ERR("FCTRL: Failed to send msg to mbx_events, Error[%s]",
 strerror(errno));
@@ -474,6 +476,7 @@ uint32_t mds_tipc_fctrl_rcv_data(uint8_t *buffer,
uint16_t len,
 
   HeaderMessage header;
   header.Decode(buffer);
+  Event* pevt = nullptr;
   // if mds support flow control
   if ((header.pro_ver_ & MDS_PROT_VER_MASK) == MDS_PROT_FCTRL) {
 if (header.pro_id_ == MDS_PROT_FCTRL_ID) { @@ -482,9 +485,10 @@
uint32_t mds_tipc_fctrl_rcv_data(uint8_t *buffer, uint16_t len,
 ChunkAck ack;
 ack.Decode(buffer);
 // send to the event thread
-if (m_NCS_IPC_SEND(_events,
-new Event(Event::Type::kEvtRcvChunkAck, id, ack.svc_id_,
-header.mseq_, header.mfrag_, ack.acked_fseq_,
ack.chunk_size_),
+pevt = new Event(Event::Type::kEvtRcvChunkAck, id, ack.svc_id_,
+header.mseq_, header.mfrag_, ack.acked_fseq_);
+pevt->chunk_size_ = ack.chunk_size_;
+if (m_NCS_IPC_SEND(_events, pevt,
 NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
   m_MDS_LOG_ERR("FCTRL: Failed to send msg to mbx_events,
Error[%s]",
   

Re: [devel] [PATCH 1/1] mds: Disable mds flow control for mds broadcast/multicast message [#3101]

2019-10-20 Thread Gary Lee

Hi Minh

ack (review only)

Thanks

On 17/10/19 2:00 pm, Minh Chau wrote:

The mds flow control has been disabled for broadcast/multicast unfragmented
messages if tipc multicast is enabled. This patch revisits and continues
with fragmented messages.
---
  src/mds/mds_tipc_fctrl_intf.cc   | 47 
  src/mds/mds_tipc_fctrl_msg.h | 11 +++---
  src/mds/mds_tipc_fctrl_portid.cc | 47 ++--
  src/mds/mds_tipc_fctrl_portid.h  |  3 ++-
  4 files changed, 69 insertions(+), 39 deletions(-)

diff --git a/src/mds/mds_tipc_fctrl_intf.cc b/src/mds/mds_tipc_fctrl_intf.cc
index b803bfe..fe3dbd5 100644
--- a/src/mds/mds_tipc_fctrl_intf.cc
+++ b/src/mds/mds_tipc_fctrl_intf.cc
@@ -133,7 +133,7 @@ uint32_t process_flow_event(const Event& evt) {
kChunkAckSize, sock_buf_size);
portid_map[TipcPortId::GetUniqueId(evt.id_)] = portid;
rc = portid->ReceiveData(evt.mseq_, evt.mfrag_,
-evt.fseq_, evt.svc_id_);
+evt.fseq_, evt.svc_id_, evt.snd_type_, is_mcast_enabled);
  } else if (evt.type_ == Event::Type::kEvtRcvIntro) {
portid = new TipcPortId(evt.id_, data_sock_fd,
kChunkAckSize, sock_buf_size);
@@ -147,7 +147,7 @@ uint32_t process_flow_event(const Event& evt) {
} else {
  if (evt.type_ == Event::Type::kEvtRcvData) {
rc = portid->ReceiveData(evt.mseq_, evt.mfrag_,
-  evt.fseq_, evt.svc_id_);
+  evt.fseq_, evt.svc_id_, evt.snd_type_, is_mcast_enabled);
  }
  if (evt.type_ == Event::Type::kEvtRcvChunkAck) {
portid->ReceiveChunkAck(evt.fseq_, evt.chunk_size_);
@@ -430,6 +430,7 @@ uint32_t mds_tipc_fctrl_drop_data(uint8_t *buffer, uint16_t 
len,
  
HeaderMessage header;

header.Decode(buffer);
+  Event* pevt = nullptr;
// if mds support flow control
if ((header.pro_ver_ & MDS_PROT_VER_MASK) == MDS_PROT_FCTRL) {
  if (header.pro_id_ == MDS_PROT_FCTRL_ID) {
@@ -438,9 +439,10 @@ uint32_t mds_tipc_fctrl_drop_data(uint8_t *buffer, 
uint16_t len,
  ChunkAck ack;
  ack.Decode(buffer);
  // send to the event thread
-if (m_NCS_IPC_SEND(_events,
-new Event(Event::Type::kEvtSendChunkAck, id, ack.svc_id_,
-header.mseq_, header.mfrag_, ack.acked_fseq_, ack.chunk_size_),
+pevt = new Event(Event::Type::kEvtSendChunkAck, id, ack.svc_id_,
+header.mseq_, header.mfrag_, ack.acked_fseq_);
+pevt->chunk_size_ = ack.chunk_size_;
+if (m_NCS_IPC_SEND(_events, pevt,
  NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
m_MDS_LOG_ERR("FCTRL: Failed to send msg to mbx_events, Error[%s]",
strerror(errno));
@@ -453,9 +455,9 @@ uint32_t mds_tipc_fctrl_drop_data(uint8_t *buffer, uint16_t 
len,
DataMessage data;
data.Decode(buffer);
// send to the event thread
-  if (m_NCS_IPC_SEND(_events,
-  new Event(Event::Type::kEvtDropData, id, data.svc_id_,
-  header.mseq_, header.mfrag_, header.fseq_),
+  pevt = new Event(Event::Type::kEvtDropData, id, data.svc_id_,
+  header.mseq_, header.mfrag_, header.fseq_);
+  if (m_NCS_IPC_SEND(_events, pevt,
NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
  m_MDS_LOG_ERR("FCTRL: Failed to send msg to mbx_events, Error[%s]",
  strerror(errno));
@@ -474,6 +476,7 @@ uint32_t mds_tipc_fctrl_rcv_data(uint8_t *buffer, uint16_t 
len,
  
HeaderMessage header;

header.Decode(buffer);
+  Event* pevt = nullptr;
// if mds support flow control
if ((header.pro_ver_ & MDS_PROT_VER_MASK) == MDS_PROT_FCTRL) {
  if (header.pro_id_ == MDS_PROT_FCTRL_ID) {
@@ -482,9 +485,10 @@ uint32_t mds_tipc_fctrl_rcv_data(uint8_t *buffer, uint16_t 
len,
  ChunkAck ack;
  ack.Decode(buffer);
  // send to the event thread
-if (m_NCS_IPC_SEND(_events,
-new Event(Event::Type::kEvtRcvChunkAck, id, ack.svc_id_,
-header.mseq_, header.mfrag_, ack.acked_fseq_, ack.chunk_size_),
+pevt = new Event(Event::Type::kEvtRcvChunkAck, id, ack.svc_id_,
+header.mseq_, header.mfrag_, ack.acked_fseq_);
+pevt->chunk_size_ = ack.chunk_size_;
+if (m_NCS_IPC_SEND(_events, pevt,
  NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
m_MDS_LOG_ERR("FCTRL: Failed to send msg to mbx_events, Error[%s]",
strerror(errno));
@@ -494,9 +498,9 @@ uint32_t mds_tipc_fctrl_rcv_data(uint8_t *buffer, uint16_t 
len,
  Nack nack;
  nack.Decode(buffer);
  // send to the event thread
-if (m_NCS_IPC_SEND(_events,
-new Event(Event::Type::kEvtRcvNack, id, nack.svc_id_,
-header.mseq_, header.mfrag_, nack.nacked_fseq_),
+pevt = new Event(Event::Type::kEvtRcvNack, id, nack.svc_id_,
+header.mseq_, header.mfrag_, nack.nacked_fseq_);
+if 

[devel] [PATCH 1/1] mds: Disable mds flow control for mds broadcast/multicast message [#3101]

2019-10-16 Thread Minh Chau
The mds flow control has been disabled for broadcast/multicast unfragmented
messages if tipc multicast is enabled. This patch revisits and continues
with fragmented messages.
---
 src/mds/mds_tipc_fctrl_intf.cc   | 47 
 src/mds/mds_tipc_fctrl_msg.h | 11 +++---
 src/mds/mds_tipc_fctrl_portid.cc | 47 ++--
 src/mds/mds_tipc_fctrl_portid.h  |  3 ++-
 4 files changed, 69 insertions(+), 39 deletions(-)

diff --git a/src/mds/mds_tipc_fctrl_intf.cc b/src/mds/mds_tipc_fctrl_intf.cc
index b803bfe..fe3dbd5 100644
--- a/src/mds/mds_tipc_fctrl_intf.cc
+++ b/src/mds/mds_tipc_fctrl_intf.cc
@@ -133,7 +133,7 @@ uint32_t process_flow_event(const Event& evt) {
   kChunkAckSize, sock_buf_size);
   portid_map[TipcPortId::GetUniqueId(evt.id_)] = portid;
   rc = portid->ReceiveData(evt.mseq_, evt.mfrag_,
-evt.fseq_, evt.svc_id_);
+evt.fseq_, evt.svc_id_, evt.snd_type_, is_mcast_enabled);
 } else if (evt.type_ == Event::Type::kEvtRcvIntro) {
   portid = new TipcPortId(evt.id_, data_sock_fd,
   kChunkAckSize, sock_buf_size);
@@ -147,7 +147,7 @@ uint32_t process_flow_event(const Event& evt) {
   } else {
 if (evt.type_ == Event::Type::kEvtRcvData) {
   rc = portid->ReceiveData(evt.mseq_, evt.mfrag_,
-  evt.fseq_, evt.svc_id_);
+  evt.fseq_, evt.svc_id_, evt.snd_type_, is_mcast_enabled);
 }
 if (evt.type_ == Event::Type::kEvtRcvChunkAck) {
   portid->ReceiveChunkAck(evt.fseq_, evt.chunk_size_);
@@ -430,6 +430,7 @@ uint32_t mds_tipc_fctrl_drop_data(uint8_t *buffer, uint16_t 
len,
 
   HeaderMessage header;
   header.Decode(buffer);
+  Event* pevt = nullptr;
   // if mds support flow control
   if ((header.pro_ver_ & MDS_PROT_VER_MASK) == MDS_PROT_FCTRL) {
 if (header.pro_id_ == MDS_PROT_FCTRL_ID) {
@@ -438,9 +439,10 @@ uint32_t mds_tipc_fctrl_drop_data(uint8_t *buffer, 
uint16_t len,
 ChunkAck ack;
 ack.Decode(buffer);
 // send to the event thread
-if (m_NCS_IPC_SEND(_events,
-new Event(Event::Type::kEvtSendChunkAck, id, ack.svc_id_,
-header.mseq_, header.mfrag_, ack.acked_fseq_, ack.chunk_size_),
+pevt = new Event(Event::Type::kEvtSendChunkAck, id, ack.svc_id_,
+header.mseq_, header.mfrag_, ack.acked_fseq_);
+pevt->chunk_size_ = ack.chunk_size_;
+if (m_NCS_IPC_SEND(_events, pevt,
 NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
   m_MDS_LOG_ERR("FCTRL: Failed to send msg to mbx_events, Error[%s]",
   strerror(errno));
@@ -453,9 +455,9 @@ uint32_t mds_tipc_fctrl_drop_data(uint8_t *buffer, uint16_t 
len,
   DataMessage data;
   data.Decode(buffer);
   // send to the event thread
-  if (m_NCS_IPC_SEND(_events,
-  new Event(Event::Type::kEvtDropData, id, data.svc_id_,
-  header.mseq_, header.mfrag_, header.fseq_),
+  pevt = new Event(Event::Type::kEvtDropData, id, data.svc_id_,
+  header.mseq_, header.mfrag_, header.fseq_);
+  if (m_NCS_IPC_SEND(_events, pevt,
   NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
 m_MDS_LOG_ERR("FCTRL: Failed to send msg to mbx_events, Error[%s]",
 strerror(errno));
@@ -474,6 +476,7 @@ uint32_t mds_tipc_fctrl_rcv_data(uint8_t *buffer, uint16_t 
len,
 
   HeaderMessage header;
   header.Decode(buffer);
+  Event* pevt = nullptr;
   // if mds support flow control
   if ((header.pro_ver_ & MDS_PROT_VER_MASK) == MDS_PROT_FCTRL) {
 if (header.pro_id_ == MDS_PROT_FCTRL_ID) {
@@ -482,9 +485,10 @@ uint32_t mds_tipc_fctrl_rcv_data(uint8_t *buffer, uint16_t 
len,
 ChunkAck ack;
 ack.Decode(buffer);
 // send to the event thread
-if (m_NCS_IPC_SEND(_events,
-new Event(Event::Type::kEvtRcvChunkAck, id, ack.svc_id_,
-header.mseq_, header.mfrag_, ack.acked_fseq_, ack.chunk_size_),
+pevt = new Event(Event::Type::kEvtRcvChunkAck, id, ack.svc_id_,
+header.mseq_, header.mfrag_, ack.acked_fseq_);
+pevt->chunk_size_ = ack.chunk_size_;
+if (m_NCS_IPC_SEND(_events, pevt,
 NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
   m_MDS_LOG_ERR("FCTRL: Failed to send msg to mbx_events, Error[%s]",
   strerror(errno));
@@ -494,9 +498,9 @@ uint32_t mds_tipc_fctrl_rcv_data(uint8_t *buffer, uint16_t 
len,
 Nack nack;
 nack.Decode(buffer);
 // send to the event thread
-if (m_NCS_IPC_SEND(_events,
-new Event(Event::Type::kEvtRcvNack, id, nack.svc_id_,
-header.mseq_, header.mfrag_, nack.nacked_fseq_),
+pevt = new Event(Event::Type::kEvtRcvNack, id, nack.svc_id_,
+header.mseq_, header.mfrag_, nack.nacked_fseq_);
+if (m_NCS_IPC_SEND(_events, pevt,
 NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
   m_MDS_LOG_ERR("FCTRL: Failed to send