Hi Jon, What triggers the nodes to have non-applied members? Does this depend on the discovery domain specified when creating a bearer?
Can you please explain this concept? I have been testing my patches for listing the monitoring peers, but I am unable to verify the above. regards Partha ________________________________________ From: Jon Maloy [[email protected]] Sent: 14 May 2016 02:07 To: [email protected]; Parthasarathy Bhuvaragan; Ying Xue; Richard Alpe; Jon Maloy Cc: [email protected] Subject: [PATCH net-next v6 4/4] tipc: debugging_tracing_profiling support for neighbor monitoring code NOT TO BE RELEASED!!!! - Atomic counters for profiling, to trace behavior of and interaction between local name spaces, and with namespaces in other VMs. - Printout function for above. The counters are written to the kernel log as a side effect of doing "tipc bearer list". - Function to print out whole monitor table in a comprehensible format. Also dumped to kernel log with "tipc bearer list". - Conditional, timeout driven printout of both of the above: - Periodic printout for all nodes. - Periodically for one configurable node. - At specific events, e.g. just after a link went up. - Tracing printouts for each individual function, according to same conditioning as above. Only feasible for very small clusters. - Disabled the "send RESET on bearer disable" (== "peer RESTART") feature in node.c for test purposes. This feature doesn't allow nodes themselves to discover peer loss by monitoring, which we need for our testing here. - An experimental "discovery rate control" function. Not yet working, and uncertain if it is needed. Be careful to not let the printouts disturb the working of the monitoring: - Beyond 10 nodes tracing printouts should be enabled for only one node. - Beyond 30 nodes tracing printouts should be disabled, and periodical statistics and monitor printouts made for only one node. 
Signed-off-by: Jon Maloy <[email protected]> --- net/tipc/addr.c | 1 + net/tipc/bearer.c | 15 +- net/tipc/core.h | 2 +- net/tipc/link.c | 39 +++- net/tipc/monitor.c | 621 +++++++++++++++++++++++++++++++++++++++++++++++++++-- net/tipc/monitor.h | 152 ++++++++++++- net/tipc/node.c | 4 +- 7 files changed, 815 insertions(+), 19 deletions(-) diff --git a/net/tipc/addr.c b/net/tipc/addr.c index 48fd3b5..a5b2d8e 100644 --- a/net/tipc/addr.c +++ b/net/tipc/addr.c @@ -38,6 +38,7 @@ #include "addr.h" #include "core.h" + /** * in_own_cluster - test for cluster inclusion; <0.0.0> always matches */ diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 9a70e1d..25c8ebb 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -419,7 +419,8 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb, if ((delta > 0) && pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) goto drop; - + if ((msg_user(buf_msg(skb)) == LINK_PROTOCOL) && (msg_type(buf_msg(skb)) == STATE_MSG)) + incr(net, msg_destnode(buf_msg(skb)), &stt_snd,&stt_snd_own, &stt_snd_peer); skb_reset_network_header(skb); skb->dev = dev; skb->protocol = htons(ETH_P_TIPC); @@ -532,6 +533,9 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev, b = rcu_dereference_rtnl(dev->tipc_ptr); if (likely(b && (skb->pkt_type <= PACKET_BROADCAST))) { skb->next = NULL; + if ((msg_user(buf_msg(skb)) == LINK_PROTOCOL) && (msg_type(buf_msg(skb)) == STATE_MSG)) + incr(dev_net(dev), msg_prevnode(buf_msg(skb)), &stt_rcv,&stt_rcv_own, &stt_rcv_peer); + tipc_rcv(dev_net(dev), skb, b); rcu_read_unlock(); return NET_RX_SUCCESS; @@ -675,7 +679,6 @@ static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg, nla_nest_end(msg->skb, prop); nla_nest_end(msg->skb, attrs); genlmsg_end(msg->skb, hdr); - return 0; prop_msg_full: @@ -688,6 +691,8 @@ msg_full: return -EMSGSIZE; } +void pr_mon(struct net *net, bool cond); +void pr_stats(struct net *net, bool cond); int tipc_nl_bearer_dump(struct sk_buff *skb, struct 
netlink_callback *cb) { int err; @@ -697,9 +702,13 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb) struct net *net = sock_net(skb->sk); struct tipc_net *tn = net_generic(net, tipc_net_id); + if (i == MAX_BEARERS) return 0; + pr_mon(net, 1); + pr_stats(net, 1); + msg.skb = skb; msg.portid = NETLINK_CB(cb->skb).portid; msg.seq = cb->nlh->nlmsg_seq; @@ -717,6 +726,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb) rtnl_unlock(); cb->args[0] = i; + return skb->len; } @@ -977,6 +987,7 @@ int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb) rtnl_unlock(); cb->args[0] = i; + return skb->len; } diff --git a/net/tipc/core.h b/net/tipc/core.h index 2e359ab..802d227 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -72,7 +72,7 @@ struct tipc_monitor; #define NODE_HTABLE_SIZE 512 #define MAX_BEARERS 3 -#define TIPC_DEF_MON_THRESHOLD 32 +#define TIPC_DEF_MON_THRESHOLD 2 extern int tipc_net_id __read_mostly; extern int sysctl_tipc_rmem[3] __read_mostly; diff --git a/net/tipc/link.c b/net/tipc/link.c index adc0a68..4c28b68 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -457,6 +457,9 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id, __skb_queue_head_init(&l->deferdq); skb_queue_head_init(&l->wakeupq); skb_queue_head_init(l->inputq); + if (!link_is_bc_rcvlink(l) && !link_is_bc_sndlink(l) && (bearer_id != 3)) { + incr (net, peer, &lcnt, &lown, &lpeer); + } return true; } @@ -713,20 +716,30 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) u16 bc_acked = l->bc_rcvlink->acked; struct tipc_mon_state *mstate = &l->mon_state; + atomic_inc(&link_tim); + switch (l->state) { case LINK_ESTABLISHED: case LINK_SYNCHING: mtyp = STATE_MSG; link_profile_stats(l); tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id); - if (mstate->reset || (l->silent_intv_cnt > l->abort_limit)) + if (mstate->reset || (l->silent_intv_cnt > l->abort_limit)) { + if 
(!mstate->reset) + incr(l->net, l->addr, &prb_aborts, + &prb_aborts_own, &prb_aborts_peer); return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); + } state = bc_acked != bc_snt; state |= l->bc_rcvlink->rcv_unacked; state |= l->rcv_unacked; state |= skb_queue_len(&l->transmq); state |= skb_queue_len(&l->deferdq); probe = mstate->probing; + if (probe) + incr(l->net, l->addr, &snt_probes,&snt_probes_own, &snt_probes_peer); + else if (l->silent_intv_cnt) + incr(l->net, l->addr, &snt_mons,&snt_mons_own, &snt_mons_peer); probe |= l->silent_intv_cnt; if (probe || mstate->monitoring) l->silent_intv_cnt++; @@ -750,7 +763,8 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) if (state || probe || setup) tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq); - + else + atomic_inc(&idle_tim); return rc; } @@ -1263,6 +1277,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, if (!skb_queue_empty(dfq)) rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt; + skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE, tipc_max_domain_size, l->addr, tipc_own_addr(l->net), 0, 0, 0); @@ -1285,6 +1300,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2); if (mtyp == STATE_MSG) { + incr(l->net, l->addr, &snt_states,&snt_states_own,&snt_states_peer); msg_set_seq_gap(hdr, rcvgap); msg_set_size(hdr, INT_H_SIZE); msg_set_probe(hdr, probe); @@ -1431,6 +1447,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, /* Update own priority if peer's priority is higher */ if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI)) l->priority = peers_prio; + if (link_is_up(l)) { + if (msg_peer_stopping(hdr)) + incr(l->net, l->addr, &peer_fail,&peer_fail_own, &peer_fail_peer); + else if ((mtyp == RESET_MSG)) + incr(l->net, l->addr, &peer_rst,&peer_rst_own, &peer_rst_peer); + } /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ if 
(msg_peer_stopping(hdr)) @@ -1459,6 +1481,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, l->priority = peers_prio; rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); } + if (l->silent_intv_cnt == 2) + incr(l->net, l->addr, &intv2,&intv2_own,&intv2_peer); + else if (l->silent_intv_cnt == 3) + incr(l->net, l->addr, &intv3,&intv3_own,&intv3_peer); + else if (l->silent_intv_cnt == 4) + incr(l->net, l->addr, &intv4,&intv4_own,&intv4_peer); l->silent_intv_cnt = 0; l->stats.recv_states++; @@ -1490,6 +1518,13 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, tipc_link_advance_backlog(l, xmitq); if (unlikely(!skb_queue_empty(&l->wakeupq))) link_prepare_wakeup(l); + + if (rcvgap || (msg_probe(hdr))) + incr(l->net, l->addr, &snt_rsps,&snt_rsps_own,&snt_rsps_peer); + + if (msg_probe(hdr)) + incr(l->net, l->addr, &rcv_probes,&rcv_probes_own,&rcv_probes_peer); + incr(l->net, l->addr, &rcv_states,&rcv_states_own,&rcv_states_peer); } exit: kfree_skb(skb); diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index f1d4162..9a153f9 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -43,7 +43,7 @@ /* struct tipc_mon_domain: domain record to be transferred between peers * @len: actual size of domain record * @gen: current generation of sender's domain - * @ack_gen: most recent generation of self's domain acked by peer + * @acked_gen: most recent generation of self's domain acked by peer * @member_cnt: number of domain member nodes described in this record * @up_map: bit map indicating which of the members the sender considers up * @members: identity of the domain members @@ -53,13 +53,13 @@ struct tipc_mon_domain { u16 gen; u16 ack_gen; u16 member_cnt; + u16 sqno; u64 up_map; u32 members[MAX_MON_DOMAIN]; }; /* struct tipc_peer: state of a peer node and its domain * @addr: tipc node identity of peer - * @head_map: shows which other nodes currently consider peer 'up' * @domain: most recent domain record from peer * @hash: 
position in hashed lookup list * @list: position in linked list, in circular ascending order by 'addr' @@ -79,6 +79,7 @@ struct tipc_peer { bool is_head : 1; bool is_local : 1; unsigned int down_cnt : 6; + u32 upcnt; }; struct tipc_monitor { @@ -91,6 +92,8 @@ struct tipc_monitor { u16 dom_gen; struct net *net; struct timer_list timer; +/// dbg + struct timer_list st_timer; }; static struct tipc_monitor *tipc_monitor(struct net *net, int bearer_id) @@ -151,6 +154,8 @@ static struct tipc_peer *get_peer(struct tipc_monitor *mon, u32 addr) struct tipc_peer *peer; unsigned int thash = tipc_hashfn(addr); + atomic_inc(&m_peer); + hlist_for_each_entry(peer, &mon->peers[thash], hash) { if (peer->addr == addr) return peer; @@ -167,7 +172,8 @@ static struct tipc_peer *get_self(struct net *net, int bearer_id) /* mon_identify_lost_members() : - identify amd mark potentially lost members */ -static void mon_identify_lost_members(struct tipc_peer *peer, +static void mon_identify_lost_members(struct tipc_monitor *mon, + struct tipc_peer *peer, struct tipc_mon_domain *dom_bef, int applied_bef) { @@ -199,29 +205,38 @@ static void mon_identify_lost_members(struct tipc_peer *peer, } } -/* mon_apply_domain() : match a peer's domain record against monitor list +/* mon_apply_domain() : apply a peer's domain record in monitor list */ static void mon_apply_domain(struct tipc_monitor *mon, struct tipc_peer *peer) { struct tipc_mon_domain *dom = peer->domain; struct tipc_peer *member; + u32 addr; int i; if (!dom || !peer->is_up) return; + atomic_inc(&match_dom); + pr2(" APPLY BEGIN: dom from peer %x, mmbrcnt %u\n", peer->addr, dom->member_cnt); /* Scan across domain members and match against monitor list */ peer->applied = 0; member = peer_nxt(peer); for (i = 0; i < dom->member_cnt; i++) { addr = dom->members[i]; - if (addr != member->addr) + + atomic_inc(&match_dom_iter); + + if (addr != member->addr) { + pr2(" APPLY TRUNC: mismatch\n"); return; + } peer->applied++; member = 
peer_nxt(member); } + pr2(" APPLY END\n"); } /* mon_update_local_domain() : update after peer addition/removal/up/down @@ -238,13 +253,19 @@ static void mon_update_local_domain(struct tipc_monitor *mon) /* Update local domain size based on current size of cluster */ member_cnt = dom_size(mon->peer_cnt) - 1; + if (self->applied != member_cnt) + atomic_inc(&dom_chg); //dbg + self->applied = member_cnt; + pr2(" LOCAL DOM UPDATE BEGIN\n"); + atomic_inc(&upd_dom_rec); /* Update native and cached outgoing local domain records */ dom->len = dom_rec_len(dom, member_cnt); diff = dom->member_cnt != member_cnt; dom->member_cnt = member_cnt; for (i = 0; i < member_cnt; i++) { + atomic_inc(&upd_dom_rec_iter); peer = peer_nxt(peer); diff |= dom->members[i] != peer->addr; dom->members[i] = peer->addr; @@ -259,7 +280,19 @@ static void mon_update_local_domain(struct tipc_monitor *mon) cache->gen = htons(dom->gen); cache->member_cnt = htons(member_cnt); cache->up_map = cpu_to_be64(dom->up_map); +/// >>> test + if (self->addr == 0x1001001) { + u32 dummy_peer = 0x10020fa; + for (i = member_cnt; i < 16; i++){ + cache->member_cnt = htons(++member_cnt); + cache->members[i] = htonl(dummy_peer); + cache->len = htons(ntohs(cache->len) + 4); + dummy_peer++; + } + } +/// <<< test mon_apply_domain(mon, self); + pr2(" LOCAL DOM UPDATE END\n"); } /* mon_update_neighbors() : update preceding neighbors of added/removed peer @@ -269,11 +302,17 @@ static void mon_update_neighbors(struct tipc_monitor *mon, { int dz, i; + pr2(" UPD NEIGH BEGIN \n"); + atomic_inc(&upd_dom); + dz = dom_size(mon->peer_cnt); for (i = 0; i < dz; i++) { + atomic_inc(&upd_dom_iter); mon_apply_domain(mon, peer); peer = peer_prev(peer); } + pr2(" UPD: matched domains to %x\n", peer->addr); + pr2(" UPD NEIGH END \n"); } /* mon_assign_roles() : reassign peer roles after a network change @@ -286,25 +325,36 @@ static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head) struct tipc_peer *self = mon->self; int i = 
0; - for (; peer != self; peer = peer_nxt(peer)) { - peer->is_local = false; + atomic_inc(&ass_r); + pr2(" ASSGN_ROLES BEGIN\n"); - /* Update domain member */ + for (; peer != mon->self; peer = peer_nxt(peer)) { + if (peer->is_up && (peer != mon->self) && (peer->is_local || peer->is_head)) //dbg + decr(mon->net, peer->addr, &mon_links, &monown, &monpeer); + peer->is_local = false; + atomic_inc(&ass_r_iter); + /* Update domain members */ if (i++ < head->applied) { + pr2(" ASSGN_ROLES mmbr %x to head %x\n", peer->addr, head->addr); peer->is_head = false; if (head == self) peer->is_local = true; + if (peer->is_up && peer->is_local)//dbg + incr(mon->net, peer->addr, &mon_links, &monown, &monpeer); continue; } /* Assign next domain head */ if (!peer->is_up) continue; + incr(mon->net, peer->addr, &mon_links, &monown, &monpeer); + pr2(" ASSGN_ROLES new head %x\n", head->addr); if (peer->is_head) break; head = peer; head->is_head = true; i = 0; } + pr2(" ASSGN_ROLES END\n"); mon->list_gen++; } @@ -315,6 +365,8 @@ void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id) struct tipc_peer *self = get_self(net, bearer_id); struct tipc_peer *peer, *prev, *head; + pr2("<<< %x <<<\n", tipc_own_addr(net)); + pr2("%x: REMOVING Link %x/%u\n", tipc_own_addr(net), addr, bearer_id); write_lock_bh(&mon->lock); peer = get_peer(mon, addr); if (!peer) @@ -338,6 +390,9 @@ void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id) } } mon_assign_roles(mon, head); + +// if (mon->self->addr == 0x1001002) +// pr_mon(net, 1); exit: write_unlock_bh(&mon->lock); } @@ -380,6 +435,8 @@ void tipc_mon_peer_up(struct net *net, u32 addr, int bearer_id) struct tipc_peer *self = get_self(net, bearer_id); struct tipc_peer *peer, *head; + pr2("<<< %x <<<\n", tipc_own_addr(net)); + pr2("UP BEGIN: Link %x/%u \n", addr, bearer_id); write_lock_bh(&mon->lock); peer = get_peer(mon, addr); if (!peer && !tipc_mon_add_peer(mon, addr, &peer)) @@ -389,8 +446,22 @@ void tipc_mon_peer_up(struct net 
*net, u32 addr, int bearer_id) if (head == self) mon_update_local_domain(mon); mon_assign_roles(mon, head); + + peer->upcnt++; + incr(net, addr, &ucur, &ucurown,&ucurpeer); + incr(net, addr, &link_up, &up_own_part,&up_oth_part); + if (atomic_read(&link_up) == atomic_read(&lcnt)) + printk("All %u Links UP!!\n", atomic_read(&lcnt)); + if (peer->upcnt == 2) + atomic_inc(&up_2); //dbg + if (peer->upcnt == 3) + atomic_inc(&up_3); //dbg + if (peer->upcnt > 3) + atomic_inc(&up_many); //dbg exit: write_unlock_bh(&mon->lock); +// pr_mon(net, 1); + pr2("UP END\n"); } void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id) @@ -401,6 +472,9 @@ void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id) struct tipc_mon_domain *dom; int applied; +// printk("D:%u->%u\n", tipc_own_addr(net)&0xfff, addr &0xfff); + pr2("<<< %x <<<\n", tipc_own_addr(net)); + pr2("DOWN BEGIN: %x/%u\n", addr, bearer_id); write_lock_bh(&mon->lock); peer = get_peer(mon, addr); if (!peer) { @@ -412,9 +486,13 @@ void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id) dom = peer->domain; peer->domain = NULL; if (peer->is_head) - mon_identify_lost_members(peer, dom, applied); + mon_identify_lost_members(mon, peer, dom, applied); kfree(dom); peer->is_up = false; + if (peer->is_head || peer->is_local){//dbg + decr(net, addr, &mon_links, &monown, &monpeer); //dbg + } + peer->is_head = false; peer->is_local = false; peer->down_cnt = 0; @@ -423,7 +501,12 @@ void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id) mon_update_local_domain(mon); mon_assign_roles(mon, head); exit: + pr2("DOWN END\n"); + write_unlock_bh(&mon->lock); + incr(net, addr, &link_down, &down_own_part,&down_oth_part); + incr(net, addr, &dcur, &dcurown,&dcurpeer); + decr(net, addr, &ucur, &ucurown,&ucurpeer); } /* tipc_mon_rcv - process monitor domain event message @@ -459,8 +542,11 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr, state->acked_gen = acked_gen; /* Drop duplicate unless we 
are waiting for a probe response */ - if (check_gen && !more(new_gen, old_gen) && !probing) + if (check_gen && !more(new_gen, old_gen) && !probing) { + if (arrv_dom->member_cnt) + atomic_inc(&dom_rcv_dupl); return; + } write_lock_bh(&mon->lock); peer = get_peer(mon, addr); @@ -473,9 +559,11 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr, /* Task is done if duplicate record */ if (check_gen && !more(new_gen, old_gen)) goto exit; - + pr2("<<< %x <<<\n", tipc_own_addr(net)); + pr2("RCV BEGIN: <<< %x\n",addr); state->peer_gen = new_gen; state->check_gen = true; + atomic_inc(&dom_rcv); /* Cache current domain record for later use */ dom_bef.member_cnt = 0; @@ -501,8 +589,9 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr, /* Update peers affected by this domain record */ applied_bef = peer->applied; mon_apply_domain(mon, peer); - mon_identify_lost_members(peer, &dom_bef, applied_bef); + mon_identify_lost_members(mon, peer, &dom_bef, applied_bef); mon_assign_roles(mon, peer_head(peer)); + pr2("RCV END\n"); exit: write_unlock_bh(&mon->lock); } @@ -526,6 +615,8 @@ void tipc_mon_prep(struct net *net, void *data, int *dlen, dom->gen = htons(gen); dom->ack_gen = htons(state->peer_gen); dom->member_cnt = 0; + if (!state->monitoring) + atomic_inc(&ddom_snd); return; } /* Send the full record */ @@ -534,6 +625,7 @@ void tipc_mon_prep(struct net *net, void *data, int *dlen, memcpy(data, &mon->cache, *dlen); read_unlock_bh(&mon->lock); dom->ack_gen = htons(state->peer_gen); + atomic_inc(&dom_snd); } void tipc_mon_get_state(struct net *net, u32 addr, @@ -555,6 +647,11 @@ void tipc_mon_get_state(struct net *net, u32 addr, state->probing = state->acked_gen != mon->dom_gen; state->probing |= peer->down_cnt; state->reset |= peer->down_cnt >= 4; + if (state->reset) { + incr(net, peer->addr, &mon_reset, &mon_reset_own, &mon_reset_peer); + //printk(" R:%u->%u\n", tipc_own_addr(net)&0xfff, addr &0xfff); + //printk("<<<%u: ret RESET for %u\n", 
tipc_own_addr(net)&0xfff,addr&0xfff); + } state->monitoring = peer->is_local; state->monitoring |= peer->is_head; state->list_gen = mon->list_gen; @@ -609,6 +706,7 @@ int tipc_mon_create(struct net *net, int bearer_id) INIT_LIST_HEAD(&self->list); setup_timer(&mon->timer, mon_timeout, (unsigned long)mon); mod_timer(&mon->timer, jiffies + msecs_to_jiffies(MON_TIMEOUT)); + stats_init(mon); return 0; } @@ -619,6 +717,8 @@ void tipc_mon_delete(struct net *net, int bearer_id) struct tipc_peer *self = get_self(net, bearer_id); struct tipc_peer *peer, *tmp; + stats_stop(mon); + write_lock_bh(&mon->lock); tn->monitors[bearer_id] = NULL; list_for_each_entry_safe(peer, tmp, &self->list, list) { @@ -634,3 +734,500 @@ void tipc_mon_delete(struct net *net, int bearer_id) kfree(self); kfree(mon); } + + + + + + + + + + + +////////////////////////////// DEBUG /////////////////// +////////////////////////////// DEBUG /////////////////// +////////////////////////////// DEBUG /////////////////// + + +atomic_t link_up = ATOMIC_INIT(0); +atomic_t link_down = ATOMIC_INIT(0); +atomic_t lcnt = ATOMIC_INIT(0); +atomic_t link_tim = ATOMIC_INIT(0); +atomic_t link_snt = ATOMIC_INIT(0); +atomic_t dom_snd = ATOMIC_INIT(0); +atomic_t ddom_snd = ATOMIC_INIT(0); +atomic_t dom_rcv = ATOMIC_INIT(0); +atomic_t dom_rcv_dupl = ATOMIC_INIT(0); +atomic_t ass_r = ATOMIC_INIT(0); +atomic_t ass_r_iter = ATOMIC_INIT(0); +atomic_t upd_dom = ATOMIC_INIT(0); +atomic_t upd_dom_iter = ATOMIC_INIT(0); +atomic_t upd_dom_rec = ATOMIC_INIT(0); +atomic_t upd_dom_rec_iter = ATOMIC_INIT(0); +atomic_t match_dom = ATOMIC_INIT(0); +atomic_t match_dom_iter = ATOMIC_INIT(0); +atomic_t m_peer = ATOMIC_INIT(0); +atomic_t dom_chg = ATOMIC_INIT(0); +extern uint cnt; + +atomic_t up_2 = ATOMIC_INIT(0); +atomic_t up_3 = ATOMIC_INIT(0); +atomic_t up_many = ATOMIC_INIT(0); +atomic_t up_oth_part = ATOMIC_INIT(0); +atomic_t up_own_part = ATOMIC_INIT(0); +atomic_t down_own_part = ATOMIC_INIT(0); +atomic_t down_oth_part = ATOMIC_INIT(0); 
+atomic_t up_curr = ATOMIC_INIT(0); +atomic_t down_curr = ATOMIC_INIT(0); + +atomic_t idle_tim = ATOMIC_INIT(0); +atomic_t mon_prb = ATOMIC_INIT(0); +atomic_t sil_prb = ATOMIC_INIT(0); + +atomic_t lown = ATOMIC_INIT(0); +atomic_t lpeer = ATOMIC_INIT(0); +atomic_t mon_links = ATOMIC_INIT(0); +atomic_t monown = ATOMIC_INIT(0); +atomic_t monpeer = ATOMIC_INIT(0); +atomic_t ucur = ATOMIC_INIT(0); +atomic_t ucurown = ATOMIC_INIT(0); +atomic_t ucurpeer = ATOMIC_INIT(0); +atomic_t dcur = ATOMIC_INIT(0); +atomic_t dcurown = ATOMIC_INIT(0); +atomic_t dcurpeer = ATOMIC_INIT(0); + +atomic_t snt_states= ATOMIC_INIT(0); +atomic_t snt_states_own= ATOMIC_INIT(0); +atomic_t snt_states_peer= ATOMIC_INIT(0); +atomic_t snt_probes= ATOMIC_INIT(0); +atomic_t snt_probes_own= ATOMIC_INIT(0); +atomic_t snt_probes_peer= ATOMIC_INIT(0); +atomic_t snt_rsps= ATOMIC_INIT(0); +atomic_t snt_rsps_own= ATOMIC_INIT(0); +atomic_t snt_rsps_peer= ATOMIC_INIT(0); +atomic_t snt_mons= ATOMIC_INIT(0); +atomic_t snt_mons_own= ATOMIC_INIT(0); +atomic_t snt_mons_peer= ATOMIC_INIT(0); +atomic_t rcv_states= ATOMIC_INIT(0); +atomic_t rcv_states_own= ATOMIC_INIT(0); +atomic_t rcv_states_peer= ATOMIC_INIT(0); +atomic_t rcv_probes= ATOMIC_INIT(0); +atomic_t rcv_probes_own= ATOMIC_INIT(0); +atomic_t rcv_probes_peer= ATOMIC_INIT(0); +atomic_t rcv_rsps= ATOMIC_INIT(0); +atomic_t rcv_rsps_own= ATOMIC_INIT(0); +atomic_t rcv_rsps_peer= ATOMIC_INIT(0); +atomic_t rcv_mons= ATOMIC_INIT(0); +atomic_t rcv_mons_own= ATOMIC_INIT(0); +atomic_t rcv_mons_peer= ATOMIC_INIT(0); +atomic_t mon_reset= ATOMIC_INIT(0); +atomic_t mon_reset_own= ATOMIC_INIT(0); +atomic_t mon_reset_peer= ATOMIC_INIT(0); + +atomic_t stt_snd= ATOMIC_INIT(0); +atomic_t stt_snd_own= ATOMIC_INIT(0); +atomic_t stt_snd_peer= ATOMIC_INIT(0); +atomic_t stt_rcv= ATOMIC_INIT(0); +atomic_t stt_rcv_own= ATOMIC_INIT(0); +atomic_t stt_rcv_peer= ATOMIC_INIT(0); +atomic_t prb_aborts= ATOMIC_INIT(0); +atomic_t prb_aborts_own= ATOMIC_INIT(0); +atomic_t prb_aborts_peer= 
ATOMIC_INIT(0); +atomic_t peer_fail= ATOMIC_INIT(0); +atomic_t peer_fail_own= ATOMIC_INIT(0); +atomic_t peer_fail_peer= ATOMIC_INIT(0); +atomic_t peer_rst= ATOMIC_INIT(0); +atomic_t peer_rst_own= ATOMIC_INIT(0); +atomic_t peer_rst_peer= ATOMIC_INIT(0); + +atomic_t intv2 = ATOMIC_INIT(0); +atomic_t intv2_own = ATOMIC_INIT(0); +atomic_t intv2_peer = ATOMIC_INIT(0); +atomic_t intv3 = ATOMIC_INIT(0); +atomic_t intv3_own = ATOMIC_INIT(0); +atomic_t intv3_peer = ATOMIC_INIT(0); +atomic_t intv4 = ATOMIC_INIT(0); +atomic_t intv4_own = ATOMIC_INIT(0); +atomic_t intv4_peer = ATOMIC_INIT(0); + + +DEFINE_SPINLOCK(pr_lock); + +bool condition(struct net *net) +{ + u32 own = tipc_own_addr(net) & 0xfff; +// return true; + if ((own == PRNODE) || (own == (500 + PRNODE))) + return true; + return false; +} + + +const char dashes[] = "+-----------------------------------------------------------------------------+\n"; +const char head1[] = "| Neighbor Monitoring Table for |\n"; +const char head2[] = "| Table Generation Number of Peers Bearer |\n"; +const char head3[] = "| Node | State | Domain | State of Applied / [Non-Applied] Domain Members |\n"; +const char head4[] = "| | | Gener- | |\n"; +const char head5[] = "| | | ation | 5 10 15 20 25 30 35 40 45 |\n"; +const char spaces[] = " \n"; + +#define sprintp(__line, __pos, fmt, arg...)\ +({ \ + int __p = __pos;\ + do { \ + char __str[128];\ + sprintf(__str, fmt, ## arg); \ + memcpy(&__line[__pos], __str, strlen(__str));\ + __p += strlen(__str);\ + } while(0);\ + __p;\ +}) + +#define STATE_POS 13 +#define GEN_POS 21 +#define APPL_POS 29 +#define MAX_POS (strlen(spaces) - strlen(" 255.4095.4095:u ]")) + +void pr_mon(struct net *net, bool cond) +{ + struct tipc_monitor *mon; + char line[128]; + u32 own = tipc_own_addr(net); + u32 addr; + struct tipc_peer *p; + struct tipc_mon_domain *d; + int bearer_id, max_applied = 0; + int nappl_pos, pos, appl, i; + char *state; + + if(!cond) + return; + + spin_lock(&pr_lock); + + for (bearer_id = 0; 
bearer_id < MAX_BEARERS; bearer_id++) { + mon = tipc_monitor(net, bearer_id); + if (!mon) + continue; + p = mon->self; + do { + if (p->applied > max_applied) + max_applied = p->applied; + } while ((p = peer_nxt(p)) != mon->self); + + /* Output formatted header */ + printk(dashes); + strcpy(line, head1); + sprintp(line, 50, "<%u.%u.%u>", tipc_zone(own), + tipc_cluster(own), tipc_node(own)); + printk(line); + strcpy(line, head2); + sprintp(line, 27, "%u", mon->list_gen); + sprintp(line, 50, "%u", mon->peer_cnt); + sprintp(line, 64, "%s", "ens7"); + printk(line); + printk(dashes); + printk(head3); + printk(head4); + printk(head5); + printk(dashes); + + /* Output a formatted line for each peer */ + do { + nappl_pos = APPL_POS + max_applied + (max_applied / 5 + 2); + strcpy(line, spaces); + pos = (p->is_head || p->is_local) ? 1 : 2; + sprintp(line, pos, "%u.%u.%u", tipc_zone(p->addr), + tipc_cluster(p->addr), tipc_node(p->addr)); + sprintp(line, STATE_POS, "%s", p->is_up ? "Up" : "Down"); + d = p->domain; + if (!d) { + sprintp(line, GEN_POS, "%u", 0); + printk(line); + continue; + } + sprintp(line, GEN_POS, "%u", d->gen); + + /* Applied members */ + appl = p->applied; + pos = APPL_POS; + for (i = 0; i < appl; i++) { + if (i && !(i % 5)) + pos = sprintp(line, pos, " "); + state = map_get(d->up_map, i) ? 
"u" : "d"; + pos = sprintp(line, pos, "%s", state); + } + pos = sprintp(line, nappl_pos, "["); + if (d->member_cnt <= appl) { + sprintp(line, pos, "]"); + printk(line); + continue; + } + BUG_ON(d->member_cnt > 25); + /* Non-applied members */ + for (i = appl; i < d->member_cnt; i++) { + if (pos > MAX_POS) { + sprintp(line, pos, " ]"); + printk(line); + strcpy(line, spaces); + if (nappl_pos > MAX_POS) + nappl_pos = APPL_POS; + if ((d->member_cnt - i) > 3) + nappl_pos = APPL_POS; + pos = sprintp(line, nappl_pos, "["); + } + addr = d->members[i]; + pos = sprintp(line, pos, " %u.%u.%u", + tipc_zone(addr), + tipc_cluster(addr), + tipc_node(addr)); + state = map_get(d->up_map, i) ? ":u" : ":d"; + pos = sprintp(line, pos, "%s", state); + } + sprintp(line, pos, " ]"); + printk(line); + } while ((p = peer_nxt(p)) != mon->self); + printk("\n"); + spin_unlock(&pr_lock); + } +} + +void pr_stats(struct net *net, bool cond) +{ + u32 own = tipc_own_addr(net); + u32 strcv = atomic_read(&stt_rcv); + u32 strcvo = atomic_read(&stt_rcv_own); + u32 strcvp = atomic_read(&stt_rcv_peer); + u32 stsnd = atomic_read(&stt_snd); + u32 stsndo = atomic_read(&stt_snd_own); + u32 stsndp = atomic_read(&stt_snd_peer); + u32 diff, diffo, diffp; + + if (!cond) + return; + + printk("---------------- Statistics for <%u.%u.%u> ----------------\n", + tipc_zone(own), tipc_cluster(own), tipc_node(own)); + + printk(" Total This VM Peer VM \n"); + + if (tipc_node(own) > 500) + printk(" N > 1.1.500 N < 1.1.500\n"); + else + printk(" N < 1.1.500 N > 1.1.500\n"); + printk("----------------------------------------------------------\n"); + printk("Links:\n"); + printk(" Total: %7i %7i %7i \n", + atomic_read(&lcnt), atomic_read(&lown), atomic_read(&lpeer)); + + printk(" Monitored: %7i %7i %7i \n", + atomic_read(&mon_links),atomic_read(&monown), + atomic_read(&monpeer)); + + printk(" Current Up: %7i %7i %7i\n", + atomic_read(&ucur), atomic_read(&ucurown), + atomic_read(&ucurpeer)); + + printk(" Current Down: %7i %7i 
%7i\n", + atomic_read(&lcnt) - atomic_read(&ucur), + atomic_read(&lown) - atomic_read(&ucurown), + atomic_read(&lpeer) - atomic_read(&ucurpeer)); + + printk(" Total Ups: %7i %7i %7i\n", + atomic_read(&link_up),atomic_read(&up_own_part), + atomic_read(&up_oth_part)); + printk(" Downs: %7i %7i %7i\n", + atomic_read(&link_down),atomic_read(&down_own_part), + atomic_read(&down_oth_part)); + printk(" Multiple Ups:\n"); + printk(" Twice: %7i\n",atomic_read(&up_2)); + printk(" Three Times: %7i\n", atomic_read(&up_3)); + printk(" Many Times: %7i\n", atomic_read(&up_many)); + + printk("STATES Sent:\n"); + printk(" Total: %7i %7i %7i\n", + atomic_read(&snt_states),atomic_read(&snt_states_own), + atomic_read(&snt_states_peer)); + printk(" Monitoring Probes: %7i %7i %7i\n", + atomic_read(&snt_mons),atomic_read(&snt_mons_own), + atomic_read(&snt_mons_peer)); + printk(" Confirm Probes: %7i %7i %7i\n", + atomic_read(&snt_probes),atomic_read(&snt_probes_own), + atomic_read(&snt_probes_peer)); + printk(" Probe Responses: %7i %7i %7i\n", + atomic_read(&snt_rsps),atomic_read(&snt_rsps_own), + atomic_read(&snt_rsps_peer)); + printk(" Domain Records: %7i\n", atomic_read(&dom_snd)); + printk(" Dummy Domain Records: %7i\n", atomic_read(&ddom_snd)); + printk("STATES Received:\n"); + printk(" Total: %7i %7i %7i\n", + atomic_read(&rcv_states),atomic_read(&rcv_states_own), + atomic_read(&rcv_states_peer)); + printk(" Probes: %7i %7i %7i\n", + atomic_read(&rcv_probes),atomic_read(&rcv_probes_own), + atomic_read(&rcv_probes_peer)); + printk(" Domains Records: %7i\n", atomic_read(&dom_rcv) + + atomic_read(&dom_rcv_dupl)); + printk(" Applied: %7i\n", atomic_read(&dom_rcv)); + printk(" Dropped: %7i\n", atomic_read(&dom_rcv_dupl)); + + + diff = stsnd > strcv ? stsnd - strcv : 0; + diffo = stsndo > strcvo ? stsndo - strcvo : 0; + diffp = stsndp > strcvp ? stsndp - strcvp : 0; + + printk("STATES Lost:\n"); + printk(" Total: %7i %7i %7i\n", + diff, diffo, diffp); + + stsnd = stsnd ? 
stsnd : 1000000; + stsndo = stsndo ? stsndo : 100000000; + stsndp = stsndp ? stsndp : 100000000; + printk(" %%: %7i %7i %7i\n", + ((diff)*100)/stsnd, ((diffo)*100)/stsndo, ((diffp)*100)/stsndp); + + printk("Failure Reasons:\n"); + printk(" Probing: %7i %7i %7i\n", + atomic_read(&prb_aborts),atomic_read(&prb_aborts_own), + atomic_read(&prb_aborts_peer)); + printk(" (2 missed): %7i %7i %7i\n", + atomic_read(&intv2),atomic_read(&intv2_own), + atomic_read(&intv2_peer)); + printk(" (3 missed): %7i %7i %7i\n", + atomic_read(&intv3),atomic_read(&intv3_own), + atomic_read(&intv3_peer)); + printk(" (4 missed): %7i %7i %7i\n", + atomic_read(&intv4),atomic_read(&intv4_own), + atomic_read(&intv4_peer)); + + printk(" Rcv Peer RESET: %7i %7i %7i\n", + atomic_read(&peer_rst),atomic_read(&peer_rst_own), + atomic_read(&peer_rst_peer)); + printk(" Rcv Peer RESTART: %7i %7i %7i\n", + atomic_read(&peer_fail),atomic_read(&peer_fail_own), + atomic_read(&peer_fail_peer)); + printk(" Monitor trigged RESET:%7i %7i %7i\n", + atomic_read(&mon_reset),atomic_read(&mon_reset_own), + atomic_read(&mon_reset_peer)); + printk("Timers:\n"); + printk(" Total: %7i\n", atomic_read(&link_tim)); + printk(" Idle: %7i\n", atomic_read(&idle_tim)); + printk(" Sent Domain Rec: %7i\n", atomic_read(&dom_snd)); + printk("Function Calls:\n"); + printk(" assign_roles(): %7i\n", atomic_read(&ass_r)); + printk(" iterations: %7i\n", atomic_read(&ass_r_iter)); + printk(" update_local_domain():%7i\n", atomic_read(&upd_dom_rec)); + printk(" iterations: %7i\n", atomic_read(&upd_dom_rec_iter)); + printk(" update_neighbors(): %7i\n", atomic_read(&upd_dom)); + printk(" iterations: %7i\n", atomic_read(&upd_dom_iter)); + printk(" apply_domain(): %7i\n", atomic_read(&match_dom)); + printk(" iterations: %7i\n", atomic_read(&match_dom_iter)); + printk(" get_peer(): %7i\n", atomic_read(&m_peer)); + printk(" domain size changes: %7i\n", atomic_read(&dom_chg)); + + atomic_set(&snt_states, 0); + atomic_set(&snt_states_own, 0); + 
atomic_set(&snt_states_peer, 0); + atomic_set(&snt_probes, 0); + atomic_set(&snt_probes_own, 0); + atomic_set(&snt_probes_peer, 0); + atomic_set(&snt_rsps, 0); + atomic_set(&snt_rsps_own, 0); + atomic_set(&snt_rsps_peer, 0); + atomic_set(&snt_mons, 0); + atomic_set(&snt_mons_own, 0); + atomic_set(&snt_mons_peer, 0); + atomic_set(&rcv_states, 0); + atomic_set(&rcv_states_own, 0); + atomic_set(&rcv_states_peer, 0); + atomic_set(&rcv_probes, 0); + atomic_set(&rcv_probes_own, 0); + atomic_set(&rcv_probes_peer, 0); + atomic_set(&rcv_rsps, 0); + atomic_set(&rcv_rsps_own, 0); + atomic_set(&rcv_rsps_peer, 0); + atomic_set(&rcv_mons, 0); + atomic_set(&rcv_mons_own, 0); + atomic_set(&rcv_mons_peer, 0); + atomic_set(&mon_reset,0); + atomic_set(&mon_reset_own,0); + atomic_set(&mon_reset_peer,0); + + atomic_set(&link_tim,0); + atomic_set(&idle_tim,0); + atomic_set(&dom_snd,0); + atomic_set(&dom_rcv,0); + atomic_set(&ddom_snd,0); + atomic_set(&dom_rcv_dupl,0); + atomic_set(&ass_r,0); + atomic_set(&ass_r_iter,0); + atomic_set(&upd_dom,0); + atomic_set(&upd_dom_iter,0); + atomic_set(&upd_dom_rec,0); + atomic_set(&upd_dom_rec_iter,0); + atomic_set(&match_dom,0); + atomic_set(&match_dom_iter,0); + atomic_set(&m_peer,0); + atomic_set(&dom_chg,0); + + atomic_set(&stt_snd,0); + atomic_set(&stt_snd_own,0); + atomic_set(&stt_snd_peer,0); + atomic_set(&stt_rcv,0); + atomic_set(&stt_rcv_own,0); + atomic_set(&stt_rcv_peer,0); + atomic_set(&prb_aborts,0); + atomic_set(&prb_aborts_own,0); + atomic_set(&prb_aborts_peer,0); + atomic_set(&peer_rst,0); + atomic_set(&peer_rst_own,0); + atomic_set(&peer_rst_peer,0); + atomic_set(&peer_fail,0); + atomic_set(&peer_fail_own,0); + atomic_set(&peer_fail_peer,0); + + atomic_set(&intv2,0); + atomic_set(&intv2_own,0); + atomic_set(&intv2_peer,0); + atomic_set(&intv3,0); + atomic_set(&intv3_own,0); + atomic_set(&intv3_peer,0); + atomic_set(&intv4,0); + atomic_set(&intv4_own,0); + atomic_set(&intv4_peer,0); +} + + +void pr_newnode_stats(struct net *net) +{ + 
return; + pr_stats(net, condition(net)); +} + +#define STATS_INTV 20 //sec +#define TIMEOUT (STATS_INTV * 1000) + +static void stats_timeout(unsigned long m) +{ + struct tipc_monitor *mon = (void*)m; + pr_stats(mon->net, 1); + write_lock_bh(&mon->lock); + //pr_mon(mon->net, condition(mon->net)); + write_unlock_bh(&mon->lock); + mod_timer(&mon->st_timer, jiffies + msecs_to_jiffies(TIMEOUT)); +} + +void stats_init(struct tipc_monitor *mon) +{ + if (!condition(mon->net)) + return; + setup_timer(&mon->st_timer, stats_timeout, (unsigned long)mon); + mod_timer(&mon->st_timer, jiffies + msecs_to_jiffies(TIMEOUT)); +} + +void stats_stop(struct tipc_monitor *mon) +{ + if (condition(mon->net)) + del_timer_sync(&mon->st_timer); +} diff --git a/net/tipc/monitor.h b/net/tipc/monitor.h index 15dcc21..2457f71 100644 --- a/net/tipc/monitor.h +++ b/net/tipc/monitor.h @@ -38,7 +38,6 @@ /* struct tipc_mon_state: link instance's cache of monitor list and domain state * @list_gen: current generation of this node's monitor list - * @gen: current generation of this node's local domain * @peer_gen: most recent domain generation received from peer * @acked_gen: most recent generation of self's domain acked by peer * @monitoring: this peer endpoint should continuously monitored @@ -70,4 +69,155 @@ void tipc_mon_get_state(struct net *net, u32 addr, void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id); extern const int tipc_max_domain_size; + + + + + +//////////////////// DEBUG///////////////////////////////////// +//////////////////// DEBUG///////////////////////////////////// +//////////////////// DEBUG///////////////////////////////////// +//////////////////// DEBUG///////////////////////////////////// + +extern atomic_t lcnt; +extern atomic_t mon_links; +extern atomic_t link_down; +extern atomic_t link_up; +extern atomic_t snt_states; +extern atomic_t state_rate; +extern atomic_t mon_links; + +extern atomic_t link_tim; +extern atomic_t link_snt; +extern atomic_t dom_snd; 
+extern atomic_t dom_rcv; +extern atomic_t ddom_snd; +extern atomic_t dom_rcv_dupl; +extern atomic_t ass_r; +extern atomic_t ass_r_iter; +extern atomic_t upd_dom; +extern atomic_t upd_dom_iter; +extern atomic_t upd_dom_rec; +extern atomic_t upd_dom_rec_iter; +extern atomic_t match_dom; +extern atomic_t match_dom_iter; +extern atomic_t m_peer; +extern atomic_t dom_chg; +extern atomic_t up_2; +extern atomic_t up_3; +extern atomic_t up_many; +extern atomic_t up_oth_part; +extern atomic_t up_own_part; +extern atomic_t down_own_part; +extern atomic_t down_oth_part; +extern atomic_t up_curr; +extern atomic_t down_curr; + +extern atomic_t idle_tim; +extern atomic_t mon_prb; +extern atomic_t sil_prb; +extern atomic_t mon_rsp; + +extern atomic_t lown; +extern atomic_t lpeer; +extern atomic_t mon_links; +extern atomic_t monown; +extern atomic_t monpeer; +extern atomic_t ucur; +extern atomic_t ucurown; +extern atomic_t ucurpeer; +extern atomic_t dcur; +extern atomic_t dcurown; +extern atomic_t dcurpeer; + +extern atomic_t snt_states; +extern atomic_t snt_states_own; +extern atomic_t snt_states_peer; +extern atomic_t snt_probes; +extern atomic_t snt_probes_own; +extern atomic_t snt_probes_peer; +extern atomic_t snt_rsps; +extern atomic_t snt_rsps_own; +extern atomic_t snt_rsps_peer; +extern atomic_t snt_mons; +extern atomic_t snt_mons_own; +extern atomic_t snt_mons_peer; +extern atomic_t rcv_states; +extern atomic_t rcv_states_own; +extern atomic_t rcv_states_peer; +extern atomic_t rcv_probes; +extern atomic_t rcv_probes_own; +extern atomic_t rcv_probes_peer; +extern atomic_t rcv_rsps; +extern atomic_t rcv_rsps_own; +extern atomic_t rcv_rsps_peer; +extern atomic_t rcv_mons; +extern atomic_t rcv_mons_own; +extern atomic_t rcv_mons_peer; +extern atomic_t mon_reset; +extern atomic_t mon_reset_own; +extern atomic_t mon_reset_peer; + +extern atomic_t stt_snd; +extern atomic_t stt_snd_own; +extern atomic_t stt_snd_peer; +extern atomic_t stt_rcv; +extern atomic_t stt_rcv_own; +extern 
atomic_t stt_rcv_peer; +extern atomic_t prb_aborts; +extern atomic_t prb_aborts_own; +extern atomic_t prb_aborts_peer; +extern atomic_t peer_fail; +extern atomic_t peer_fail_own; +extern atomic_t peer_fail_peer; +extern atomic_t peer_rst; +extern atomic_t peer_rst_own; +extern atomic_t peer_rst_peer; +extern atomic_t intv2; +extern atomic_t intv2_own; +extern atomic_t intv2_peer; +extern atomic_t intv3; +extern atomic_t intv3_own; +extern atomic_t intv3_peer; +extern atomic_t intv4; +extern atomic_t intv4_own; +extern atomic_t intv4_peer; + +#define PRNODE 4 + +void pr_newnode_stats(struct net *net); +void stats_init(struct tipc_monitor *mon); +void stats_stop(struct tipc_monitor *mon); +bool condition(struct net *net); + +#define pr2 if(0)printk +//#define pr2 if(condition(mon->net))printk +//#define pr2 printk + +static inline bool own_vm(struct net *net, u32 peer) +{ + if (((tipc_own_addr(net) &0xfff) > 500) && ((peer &0xfff) > 500)) + return true; + if (((tipc_own_addr(net) &0xfff) < 500) && ((peer &0xfff) < 500)) + return true; + return false; +} + +static inline void incr(struct net *net, u32 peer, atomic_t *all, atomic_t *here, atomic_t *there) +{ + atomic_inc(all); + if (own_vm(net, peer)) + atomic_inc(here); + else + atomic_inc(there); +} +static inline void decr(struct net *net, u32 peer, atomic_t *all, atomic_t *here, atomic_t *there) +{ + atomic_dec(all); + if (own_vm(net, peer)) + atomic_dec(here); + else + atomic_dec(there); +} + #endif diff --git a/net/tipc/node.c b/net/tipc/node.c index 79670e7..95eca96 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -325,6 +325,7 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities) } tn->num_nodes++; tn->prev_setup = jiffies_to_msecs(jiffies); + pr_newnode_stats(net); //dbg n->addr = addr; n->net = net; n->capabilities = capabilities; @@ -662,7 +663,7 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT); 
tipc_link_fsm_evt(l, LINK_RESET_EVT); tipc_link_reset(l); - tipc_link_build_reset_msg(l, xmitq); + // tipc_link_build_reset_msg(l, xmitq); *maddr = &n->links[*bearer_id].maddr; node_lost_contact(n, &le->inputq); tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id); @@ -703,6 +704,7 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) if (delete) { kfree(l); le->link = NULL; + decr(n->net, n->addr, &lcnt, &lown, &lpeer); n->link_cnt--; } } else { -- 1.9.1 ------------------------------------------------------------------------------ Mobile security can be enabling, not merely restricting. Employees who bring their own devices (BYOD) to work are irked by the imposition of MDM restrictions. Mobile Device Manager Plus allows you to control only the apps on BYO-devices by containerizing them, leaving personal data untouched! https://ad.doubleclick.net/ddm/clk/304595813;131938128;j _______________________________________________ tipc-discussion mailing list [email protected] https://lists.sourceforge.net/lists/listinfo/tipc-discussion
