patch-2.4.10 linux/net/ipv4/tcp_input.c
- Lines: 353
- Date: Thu Sep 20 14:12:56 2001
- Orig file: v2.4.9/linux/net/ipv4/tcp_input.c
- Orig date: Mon Aug 27 12:41:49 2001
diff -u --recursive --new-file v2.4.9/linux/net/ipv4/tcp_input.c linux/net/ipv4/tcp_input.c
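Every hunk in this patch is the same mechanical substitution: the old three-argument min()/max() macros become min_t()/max_t() in 2.4.10, with the result type spelled out as the first argument. A minimal sketch of the helpers, assuming they match the include/linux/kernel.h definitions of that era:

/* GCC statement expressions; each operand is evaluated once. */
#define min_t(type, x, y) \
	({ type __x = (x); type __y = (y); __x < __y ? __x : __y; })
#define max_t(type, x, y) \
	({ type __x = (x); type __y = (y); __x > __y ? __x : __y; })

Casting both operands to one explicit type before comparing avoids the classic signed/unsigned trap: in a call such as min_t(int, 3*sndmem, sysctl_tcp_wmem[2]) below, both values are compared as int instead of letting C's usual arithmetic conversions pick a possibly unsigned common type.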
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_input.c,v 1.235 2001/08/13 18:56:12 davem Exp $
+ * Version: $Id: tcp_input.c,v 1.236 2001/09/18 22:29:09 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -168,7 +168,7 @@
if (quickacks==0)
quickacks=2;
if (quickacks > tp->ack.quick)
- tp->ack.quick = min(unsigned int, quickacks, TCP_MAX_QUICKACKS);
+ tp->ack.quick = min_t(unsigned int, quickacks, TCP_MAX_QUICKACKS);
}
void tcp_enter_quickack_mode(struct tcp_opt *tp)
@@ -198,7 +198,7 @@
int sndmem = tp->mss_clamp+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
if (sk->sndbuf < 3*sndmem)
- sk->sndbuf = min(int, 3*sndmem, sysctl_tcp_wmem[2]);
+ sk->sndbuf = min_t(int, 3*sndmem, sysctl_tcp_wmem[2]);
}
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -262,7 +262,7 @@
incr = __tcp_grow_window(sk, tp, skb);
if (incr) {
- tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh + incr, tp->window_clamp);
+ tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh + incr, tp->window_clamp);
tp->ack.quick |= 1;
}
}
@@ -282,7 +282,7 @@
while (tcp_win_from_space(rcvmem) < tp->advmss)
rcvmem += 128;
if (sk->rcvbuf < 4*rcvmem)
- sk->rcvbuf = min(int, 4*rcvmem, sysctl_tcp_rmem[2]);
+ sk->rcvbuf = min_t(int, 4*rcvmem, sysctl_tcp_rmem[2]);
}
/* 4. Try to fixup all. It is made immediately after connection enters
@@ -304,16 +304,16 @@
tp->window_clamp = maxwin;
if (sysctl_tcp_app_win && maxwin>4*tp->advmss)
- tp->window_clamp = max(u32, maxwin-(maxwin>>sysctl_tcp_app_win), 4*tp->advmss);
+ tp->window_clamp = max_t(u32, maxwin-(maxwin>>sysctl_tcp_app_win), 4*tp->advmss);
}
/* Force reservation of one segment. */
if (sysctl_tcp_app_win &&
tp->window_clamp > 2*tp->advmss &&
tp->window_clamp + tp->advmss > maxwin)
- tp->window_clamp = max(u32, 2*tp->advmss, maxwin-tp->advmss);
+ tp->window_clamp = max_t(u32, 2*tp->advmss, maxwin-tp->advmss);
- tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh, tp->window_clamp);
+ tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh, tp->window_clamp);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
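The clamp set in this hunk reserves a 1/2^tcp_app_win slice of the largest possible window for application-side buffering, while never letting the clamp fall below four full-sized segments. A sketch of the arithmetic with assumed numbers (sysctl_tcp_app_win = 2, maxwin = 65535, advmss = 1460):

typedef unsigned int u32;
#define max_t(type, x, y) ({ type __x = (x); type __y = (y); __x > __y ? __x : __y; })

static u32 app_win_clamp(u32 maxwin, u32 advmss, int app_win)
{
	/* 65535 - (65535 >> 2) = 49152 vs. 4*1460 = 5840 -> 49152 */
	return max_t(u32, maxwin - (maxwin >> app_win), 4 * advmss);
}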
@@ -338,7 +338,7 @@
!(sk->userlocks&SOCK_RCVBUF_LOCK) &&
!tcp_memory_pressure &&
atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
- sk->rcvbuf = min(int, atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
+ sk->rcvbuf = min_t(int, atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
}
if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf) {
app_win += ofo_win;
@@ -346,11 +346,11 @@
app_win >>= 1;
if (app_win > tp->ack.rcv_mss)
app_win -= tp->ack.rcv_mss;
- app_win = max(unsigned int, app_win, 2*tp->advmss);
+ app_win = max_t(unsigned int, app_win, 2*tp->advmss);
if (!ofo_win)
- tp->window_clamp = min(u32, tp->window_clamp, app_win);
- tp->rcv_ssthresh = min(u32, tp->window_clamp, 2*tp->advmss);
+ tp->window_clamp = min_t(u32, tp->window_clamp, app_win);
+ tp->rcv_ssthresh = min_t(u32, tp->window_clamp, 2*tp->advmss);
}
}
@@ -472,7 +472,7 @@
/* no previous measure. */
tp->srtt = m<<3; /* take the measured time to be rtt */
tp->mdev = m<<2; /* make sure rto = 3*rtt */
- tp->mdev_max = tp->rttvar = max(u32, tp->mdev, TCP_RTO_MIN);
+ tp->mdev_max = tp->rttvar = max_t(u32, tp->mdev, TCP_RTO_MIN);
tp->rtt_seq = tp->snd_nxt;
}
}
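The shifts in this hunk are the usual Van Jacobson fixed point: srtt is stored scaled by 8 and mdev by 4, so the smoothing updates and the retransmit timeout can be formed with shifts instead of divisions. A hedged sketch of the convention (not the kernel's exact estimator):

typedef unsigned int u32;

static u32 srtt8;			/* smoothed RTT, scaled by 8 */

static void rtt_sample(u32 m)		/* m: one measured RTT */
{
	if (srtt8 == 0)
		srtt8 = m << 3;		/* first sample: srtt = m */
	else
		srtt8 += m - (srtt8 >> 3);	/* srtt += (m - srtt)/8 */
}

Dividing srtt8 by 8 recovers the estimate in plain units, which is roughly how the RTO is later derived from srtt and the variance term.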
@@ -575,7 +575,7 @@
tp->ca_state == TCP_CA_Open) {
/* Cong. avoidance phase, cwnd is reliable. */
if (!(dst->mxlock&(1<<RTAX_SSTHRESH)))
- dst->ssthresh = max(u32, tp->snd_cwnd>>1, tp->snd_ssthresh);
+ dst->ssthresh = max_t(u32, tp->snd_cwnd>>1, tp->snd_ssthresh);
if (!(dst->mxlock&(1<<RTAX_CWND)))
dst->cwnd = (dst->cwnd + tp->snd_cwnd)>>1;
} else {
@@ -617,7 +617,7 @@
else if (cwnd > tp->snd_ssthresh)
cwnd = tp->snd_ssthresh;
- return min(u32, cwnd, tp->snd_cwnd_clamp);
+ return min_t(u32, cwnd, tp->snd_cwnd_clamp);
}
/* Initialize metrics on socket. */
@@ -668,7 +668,7 @@
tp->srtt = dst->rtt;
if (dst->rttvar > tp->mdev) {
tp->mdev = dst->rttvar;
- tp->mdev_max = tp->rttvar = max(u32, tp->mdev, TCP_RTO_MIN);
+ tp->mdev_max = tp->rttvar = max_t(u32, tp->mdev, TCP_RTO_MIN);
}
tcp_set_rto(tp);
tcp_bound_rto(tp);
@@ -693,7 +693,7 @@
static void tcp_update_reordering(struct tcp_opt *tp, int metric, int ts)
{
if (metric > tp->reordering) {
- tp->reordering = min(unsigned int, TCP_MAX_REORDERING, metric);
+ tp->reordering = min_t(unsigned int, TCP_MAX_REORDERING, metric);
/* This exciting event is worth to be remembered. 8) */
if (ts)
@@ -848,12 +848,12 @@
if (sacked&TCPCB_RETRANS) {
if ((dup_sack && in_sack) &&
(sacked&TCPCB_SACKED_ACKED))
- reord = min(int, fack_count, reord);
+ reord = min_t(int, fack_count, reord);
} else {
/* If it was in a hole, we detected reordering. */
if (fack_count < prior_fackets &&
!(sacked&TCPCB_SACKED_ACKED))
- reord = min(int, fack_count, reord);
+ reord = min_t(int, fack_count, reord);
}
/* Nothing to do; acked frame is about to be dropped. */
@@ -885,7 +885,7 @@
*/
if (!(sacked & TCPCB_RETRANS) &&
fack_count < prior_fackets)
- reord = min(int, fack_count, reord);
+ reord = min_t(int, fack_count, reord);
if (sacked & TCPCB_LOST) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
@@ -901,7 +901,7 @@
tp->fackets_out = fack_count;
} else {
if (dup_sack && (sacked&TCPCB_RETRANS))
- reord = min(int, fack_count, reord);
+ reord = min_t(int, fack_count, reord);
}
/* D-SACK. We can detect redundant retransmission
@@ -1019,7 +1019,7 @@
}
tcp_sync_left_out(tp);
- tp->reordering = min(unsigned int, tp->reordering, sysctl_tcp_reordering);
+ tp->reordering = min_t(unsigned int, tp->reordering, sysctl_tcp_reordering);
tp->ca_state = TCP_CA_Loss;
tp->high_seq = tp->snd_nxt;
TCP_ECN_queue_cwr(tp);
@@ -1177,7 +1177,7 @@
* recovery more?
*/
if (tp->packets_out <= tp->reordering &&
- tp->sacked_out >= max(u32, tp->packets_out/2, sysctl_tcp_reordering) &&
+ tp->sacked_out >= max_t(u32, tp->packets_out/2, sysctl_tcp_reordering) &&
!tcp_may_send_now(sk, tp)) {
/* We have nothing to send. This connection is limited
* either by receiver window or by application.
@@ -1194,8 +1194,8 @@
*/
static void tcp_check_reno_reordering(struct tcp_opt *tp, int addend)
{
- u32 holes = min(unsigned int,
- max(unsigned int, tp->lost_out, 1),
+ u32 holes = min_t(unsigned int,
+ max_t(unsigned int, tp->lost_out, 1),
tp->packets_out);
if (tp->sacked_out + holes > tp->packets_out) {
@@ -1291,7 +1291,7 @@
*/
static __inline__ void tcp_moderate_cwnd(struct tcp_opt *tp)
{
- tp->snd_cwnd = min(u32, tp->snd_cwnd,
+ tp->snd_cwnd = min_t(u32, tp->snd_cwnd,
tcp_packets_in_flight(tp)+tcp_max_burst(tp));
tp->snd_cwnd_stamp = tcp_time_stamp;
}
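tcp_moderate_cwnd() caps the congestion window at what is actually in flight plus a small burst allowance, so that a window restored by an undo cannot dump a long burst into the network at once. A minimal sketch, with the burst allowance assumed to be 3 segments:

typedef unsigned int u32;
#define min_t(type, x, y) ({ type __x = (x); type __y = (y); __x < __y ? __x : __y; })

static u32 moderate_cwnd(u32 snd_cwnd, u32 packets_in_flight)
{
	u32 max_burst = 3;		/* assumed tcp_max_burst() value */
	return min_t(u32, snd_cwnd, packets_in_flight + max_burst);
}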
@@ -1308,7 +1308,7 @@
if (decr && tp->snd_cwnd > tp->snd_ssthresh/2)
tp->snd_cwnd -= decr;
- tp->snd_cwnd = min(u32, tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+ tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
@@ -1340,7 +1340,7 @@
static void tcp_undo_cwr(struct tcp_opt *tp, int undo)
{
if (tp->prior_ssthresh) {
- tp->snd_cwnd = max(unsigned int,
+ tp->snd_cwnd = max_t(unsigned int,
tp->snd_cwnd, tp->snd_ssthresh<<1);
if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
@@ -1348,7 +1348,7 @@
TCP_ECN_withdraw_cwr(tp);
}
} else {
- tp->snd_cwnd = max(unsigned int, tp->snd_cwnd, tp->snd_ssthresh);
+ tp->snd_cwnd = max_t(unsigned int, tp->snd_cwnd, tp->snd_ssthresh);
}
tcp_moderate_cwnd(tp);
tp->snd_cwnd_stamp = tcp_time_stamp;
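The undo path restores the window after a loss reaction that turned out to be spurious: with a saved prior_ssthresh the window springs back to at least twice the reduced ssthresh, otherwise only to ssthresh itself. A simplified sketch of that rule, using flattened fields:

typedef unsigned int u32;
#define max_t(type, x, y) ({ type __x = (x); type __y = (y); __x > __y ? __x : __y; })

static u32 undo_cwnd(u32 snd_cwnd, u32 snd_ssthresh, u32 prior_ssthresh)
{
	if (prior_ssthresh)
		return max_t(u32, snd_cwnd, snd_ssthresh << 1);
	return max_t(u32, snd_cwnd, snd_ssthresh);
}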
@@ -1450,7 +1450,7 @@
static __inline__ void tcp_complete_cwr(struct tcp_opt *tp)
{
- tp->snd_cwnd = min(u32, tp->snd_cwnd, tp->snd_ssthresh);
+ tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_ssthresh);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
@@ -1836,7 +1836,7 @@
*/
} else {
tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0,
- min(u32, tp->rto << tp->backoff, TCP_RTO_MAX));
+ min_t(u32, tp->rto << tp->backoff, TCP_RTO_MAX));
}
}
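The zero-window probe timer backs off exponentially (rto << backoff) but is capped at TCP_RTO_MAX. A sketch in jiffies, with TCP_RTO_MAX assumed to be 120*HZ:

typedef unsigned int u32;
#define min_t(type, x, y) ({ type __x = (x); type __y = (y); __x < __y ? __x : __y; })

static u32 probe0_timeout(u32 rto, int backoff)
{
	const u32 rto_max = 120 * 100;	/* assumed: 120 s at HZ=100 */
	return min_t(u32, rto << backoff, rto_max);
}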
@@ -2323,7 +2323,7 @@
tp->dsack = 1;
tp->duplicate_sack[0].start_seq = seq;
tp->duplicate_sack[0].end_seq = end_seq;
- tp->eff_sacks = min(unsigned int, tp->num_sacks+1, 4-tp->tstamp_ok);
+ tp->eff_sacks = min_t(unsigned int, tp->num_sacks+1, 4-tp->tstamp_ok);
}
}
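The recurring min_t(..., tp->num_sacks + ..., 4 - tp->tstamp_ok) bound in this and the following hunks is pure option-space arithmetic: a TCP header has 40 bytes of options, a SACK option costs 2 bytes plus 8 per block, and timestamps occupy 12 aligned bytes, so at most 4 blocks fit without timestamps and 3 with them. The same arithmetic as a sketch:

/* 40 option bytes, minus 12 when timestamps are negotiated; each
 * SACK block is 8 bytes after a 2-byte kind/length header. */
static int max_sack_blocks(int tstamp_ok)
{
	int space = 40 - (tstamp_ok ? 12 : 0);
	return (space - 2) / 8;		/* 4 without timestamps, 3 with */
}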
@@ -2376,7 +2376,7 @@
* Decrease num_sacks.
*/
tp->num_sacks--;
- tp->eff_sacks = min(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+ tp->eff_sacks = min_t(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
for(i=this_sack; i < tp->num_sacks; i++)
sp[i] = sp[i+1];
continue;
@@ -2438,7 +2438,7 @@
sp->start_seq = seq;
sp->end_seq = end_seq;
tp->num_sacks++;
- tp->eff_sacks = min(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+ tp->eff_sacks = min_t(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
}
/* RCV.NXT advances, some SACKs should be eaten. */
@@ -2475,7 +2475,7 @@
}
if (num_sacks != tp->num_sacks) {
tp->num_sacks = num_sacks;
- tp->eff_sacks = min(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+ tp->eff_sacks = min_t(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
}
}
@@ -2541,7 +2541,7 @@
if (tp->dsack) {
tp->dsack = 0;
- tp->eff_sacks = min(unsigned int, tp->num_sacks, 4-tp->tstamp_ok);
+ tp->eff_sacks = min_t(unsigned int, tp->num_sacks, 4-tp->tstamp_ok);
}
/* Queue data for delivery to the user.
@@ -2558,7 +2558,7 @@
tp->ucopy.len &&
sk->lock.users &&
!tp->urg_data) {
- int chunk = min(unsigned int, skb->len, tp->ucopy.len);
+ int chunk = min_t(unsigned int, skb->len, tp->ucopy.len);
__set_current_state(TASK_RUNNING);
@@ -2807,7 +2807,7 @@
if (offset < 0) BUG();
if (size > 0) {
- size = min(int, copy, size);
+ size = min_t(int, copy, size);
if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
BUG();
TCP_SKB_CB(nskb)->end_seq += size;
@@ -2886,7 +2886,7 @@
if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf)
tcp_clamp_window(sk, tp);
else if (tcp_memory_pressure)
- tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh, 4*tp->advmss);
+ tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh, 4*tp->advmss);
tcp_collapse_ofo_queue(sk);
tcp_collapse(sk, sk->receive_queue.next,
@@ -2941,7 +2941,7 @@
if (tp->ca_state == TCP_CA_Open &&
sk->socket && !test_bit(SOCK_NOSPACE, &sk->socket->flags)) {
/* Limited by application or receiver window. */
- u32 win_used = max(u32, tp->snd_cwnd_used, 2);
+ u32 win_used = max_t(u32, tp->snd_cwnd_used, 2);
if (win_used < tp->snd_cwnd) {
tp->snd_ssthresh = tcp_current_ssthresh(tp);
tp->snd_cwnd = (tp->snd_cwnd+win_used)>>1;
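This block performs congestion-window validation in the spirit of what later became RFC 2861: when the sender was limited by the application or the receiver window rather than by cwnd, the unused part of the window decays, with cwnd pulled halfway toward what was really used. A simplified sketch:

typedef unsigned int u32;
#define max_t(type, x, y) ({ type __x = (x); type __y = (y); __x > __y ? __x : __y; })

static u32 decay_unused_cwnd(u32 snd_cwnd, u32 snd_cwnd_used)
{
	u32 win_used = max_t(u32, snd_cwnd_used, 2);

	if (win_used < snd_cwnd)
		snd_cwnd = (snd_cwnd + win_used) >> 1;
	return snd_cwnd;
}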
@@ -2967,10 +2967,10 @@
int sndmem, demanded;
sndmem = tp->mss_clamp+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
- demanded = max(unsigned int, tp->snd_cwnd, tp->reordering+1);
+ demanded = max_t(unsigned int, tp->snd_cwnd, tp->reordering+1);
sndmem *= 2*demanded;
if (sndmem > sk->sndbuf)
- sk->sndbuf = min(int, sndmem, sysctl_tcp_wmem[2]);
+ sk->sndbuf = min_t(int, sndmem, sysctl_tcp_wmem[2]);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
@@ -3520,7 +3520,7 @@
if (tp->wscale_ok == 0) {
tp->snd_wscale = tp->rcv_wscale = 0;
- tp->window_clamp = min(u32, tp->window_clamp, 65535);
+ tp->window_clamp = min_t(u32, tp->window_clamp, 65535);
}
if (tp->saw_tstamp) {
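The 65535 clamp in the last hunk exists because the raw window field in the TCP header is 16 bits; without a negotiated window scale option (RFC 1323) neither side can advertise more. A one-line sketch:

typedef unsigned int u32;
#define min_t(type, x, y) ({ type __x = (x); type __y = (y); __x < __y ? __x : __y; })

static u32 clamp_unscaled_window(u32 window_clamp)
{
	return min_t(u32, window_clamp, 65535U);	/* 2^16 - 1 */
}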