Dear all,
   As requested, please find attached the patches (they should be orthogonal)
to tcp_westwood.c. The changes are listed below:

* westwood_comments.diff: Updates the comments to point to the essential
papers about TCP Westwood.

* westwood_bugfix.diff: Fixes a subtle bug that caused the first bandwidth
sample to be wrong.

* westwood_faster_filter.diff: The bandwidth estimate filter is now
initialized with the first bandwidth sample, in order to achieve better
performance on small file transfers (a short standalone sketch of the
filter follows this list).

* westwood_rtt_min_reset.diff: RTT_min is now reset each time a timeout
event occurs, in order to cope with hard handovers in wireless
scenarios such as UMTS. A new inline function, update_rtt_min, has been
added.
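
For context on the faster filter, below is a minimal standalone sketch (not
part of the attached patches) of how the bandwidth low-pass filter behaves.
It assumes the 7/8 EWMA coefficient used by westwood_do_filter() and a
made-up first sample; the numbers are illustrative only.

#include <stdio.h>

typedef unsigned int u32;

/* EWMA step used by Westwood+: new = (7 * old + sample) / 8 */
static u32 westwood_do_filter(u32 a, u32 b)
{
	return ((7 * a) + b) >> 3;
}

int main(void)
{
	u32 sample = 80000;	/* hypothetical first sample, i.e. bk / delta */
	u32 bw_est = 0;
	int i;

	/* Seeded with zero, the estimate approaches the real bandwidth
	 * only gradually; a small file transfer may finish first. */
	for (i = 0; i < 8; i++)
		bw_est = westwood_do_filter(bw_est, sample);
	printf("seeded with 0: %u after 8 samples\n", bw_est);

	/* With westwood_faster_filter.diff the first sample seeds both
	 * bw_ns_est and bw_est directly, so the estimate is usable at once. */
	bw_est = sample;
	printf("seeded with first sample: %u immediately\n", bw_est);
	return 0;
}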

Signed-off-by: Luca De Cicco <[EMAIL PROTECTED]>

Best Regards,
Luca De Cicco
Politecnico di Bari
Index: ipv4/tcp_westwood.c
===================================================================
--- ipv4.orig/tcp_westwood.c	2006-06-06 18:08:03.000000000 +0200
+++ ipv4/tcp_westwood.c	2006-06-06 18:09:44.000000000 +0200
@@ -22,6 +22,7 @@
 	u32    accounted;
 	u32    rtt;
 	u32    rtt_min;          /* minimum observed RTT */
+	u16    first_ack;        /* flag which indicates that this is the first ack */
 };
 
 
@@ -52,6 +53,7 @@
 	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
 	w->rtt_win_sx = tcp_time_stamp;
 	w->snd_una = tcp_sk(sk)->snd_una;
+	w->first_ack = 1;
 }
 
 /*
@@ -184,6 +186,15 @@
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct westwood *w = inet_csk_ca(sk);
+
+	/* Initialise w->snd_una with the first acked sequence number in order
+	 * to fix the mismatch between tp->snd_una and w->snd_una for the
+	 * first bandwidth sample
+	 */
+	if (w->first_ack && (event == CA_EVENT_FAST_ACK || event == CA_EVENT_SLOW_ACK)) {
+		w->snd_una = tp->snd_una;
+		w->first_ack = 0;
+	}
 
 	switch(event) {
 	case CA_EVENT_FAST_ACK:
Index: ipv4/tcp_westwood.c
===================================================================
--- ipv4.orig/tcp_westwood.c	2006-06-06 17:56:13.000000000 +0200
+++ ipv4/tcp_westwood.c	2006-06-06 17:57:13.000000000 +0200
@@ -1,9 +1,29 @@
 /*
- * TCP Westwood+
+ * TCP Westwood+: end-to-end bandwidth estimation for TCP
  *
- *	Angelo Dell'Aera:	TCP Westwood+ support
+ *      Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
+ *      Luca De Cicco: current support of TCP Westwood+ and author of the
+ *                     latest patches: updated RTT_min and initial bandwidth estimate
+ *
+ * Support at http://c3lab.poliba.it/index.php/Westwood
+ * Main references in literature:
+ *
+ * - S. Mascolo, C. Casetti, M. Gerla et al.
+ *   "TCP Westwood: bandwidth estimation for enhanced transport over
+ *    wireless links", Proc. ACM Mobicom 2001
+ *
+ * - L. Grieco, S. Mascolo
+ *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP", ACM Computer
+ *   Comm. Review, 2004
+ *
+ * - A. Dell'Aera, L. Grieco, S. Mascolo
+ *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
+ *    A Performance Evaluation Over the Internet", Proc. ICC 2004, Paris, June 2004
+ *
+ * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
+ * ssthresh after packet loss. The probing phase works as in the original Reno.
  */
 
 #include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/module.h>
Index: ipv4/tcp_westwood.c
===================================================================
--- ipv4.orig/tcp_westwood.c	2006-06-06 18:14:50.000000000 +0200
+++ ipv4/tcp_westwood.c	2006-06-06 18:16:49.000000000 +0200
@@ -65,8 +65,16 @@
 
 static inline void westwood_filter(struct westwood *w, u32 delta)
 {
-	w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
-	w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
+	/*
+	 * If the filter is empty, fill it with the first bandwidth sample
+	 */
+	if (w->bw_ns_est == 0 && w->bw_est == 0) {
+		w->bw_ns_est = w->bk / delta;
+		w->bw_est = w->bw_ns_est;
+	} else {
+		w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
+		w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
+	}
 }
 
 /*
Index: ipv4/tcp_westwood.c
===================================================================
--- ipv4.orig/tcp_westwood.c	2006-06-06 18:10:22.000000000 +0200
+++ ipv4/tcp_westwood.c	2006-06-06 18:13:07.000000000 +0200
@@ -15,6 +15,7 @@
 struct westwood {
 	u32    bw_ns_est;        /* first bandwidth estimation..not too smoothed 8) */
 	u32    bw_est;           /* bandwidth estimate */
+	u16    reset_rtt_min;    /* Reset RTT min to next RTT sample */
 	u32    rtt_win_sx;       /* here starts a new evaluation... */
 	u32    bk;
 	u32    snd_una;          /* used for evaluating the number of acked bytes */
@@ -50,6 +51,7 @@
         w->bw_est = 0;
         w->accounted = 0;
         w->cumul_ack = 0;
+	w->reset_rtt_min = 1;
 	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
 	w->rtt_win_sx = tcp_time_stamp;
 	w->snd_una = tcp_sk(sk)->snd_una;
@@ -110,6 +112,18 @@
 	}
 }
 
+static inline void update_rtt_min(struct sock *sk)
+{
+	struct westwood *w = inet_csk_ca(sk);
+
+	if (w->reset_rtt_min) {
+		w->rtt_min = w->rtt;
+		w->reset_rtt_min = 0;
+	} else {
+		w->rtt_min = min(w->rtt, w->rtt_min);
+	}
+}
+
 /*
  * @westwood_fast_bw
  * It is called when we are in fast path. In particular it is called when
@@ -125,7 +139,7 @@
 
 	w->bk += tp->snd_una - w->snd_una;
 	w->snd_una = tp->snd_una;
-	w->rtt_min = min(w->rtt, w->rtt_min);
+	update_rtt_min(sk);
 }
 
 /*
@@ -207,12 +221,14 @@
 
 	case CA_EVENT_FRTO:
 		tp->snd_ssthresh = westwood_bw_rttmin(sk);
+		/* Update RTT_min when the next ack arrives */
+		w->reset_rtt_min = 1;
 		break;
 
 	case CA_EVENT_SLOW_ACK:
 		westwood_update_window(sk);
 		w->bk += westwood_acked_count(sk);
-		w->rtt_min = min(w->rtt, w->rtt_min);
+		update_rtt_min(sk);
 		break;
 
 	default:
