tcp_out.c
1 /**
2  * @file
3  * Transmission Control Protocol, outgoing traffic
4  *
5  * The output functions of TCP.
6  *
7  */
8 
9 /*
10  * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
11  * All rights reserved.
12  *
13  * Redistribution and use in source and binary forms, with or without modification,
14  * are permitted provided that the following conditions are met:
15  *
16  * 1. Redistributions of source code must retain the above copyright notice,
17  * this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright notice,
19  * this list of conditions and the following disclaimer in the documentation
20  * and/or other materials provided with the distribution.
21  * 3. The name of the author may not be used to endorse or promote products
22  * derived from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
25  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
27  * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
28  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
29  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
32  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
33  * OF SUCH DAMAGE.
34  *
35  * This file is part of the lwIP TCP/IP stack.
36  *
37  * Author: Adam Dunkels <adam@sics.se>
38  *
39  */
40 
41 #include "lwip/opt.h"
42 
43 #if LWIP_TCP /* don't build if not configured for use in lwipopts.h */
44 
45 #include "lwip/tcp_impl.h"
46 #include "lwip/def.h"
47 #include "lwip/mem.h"
48 #include "lwip/memp.h"
49 #include "lwip/sys.h"
50 #include "lwip/ip_addr.h"
51 #include "lwip/netif.h"
52 #include "lwip/inet_chksum.h"
53 #include "lwip/stats.h"
54 #include "lwip/snmp.h"
55 
56 #include <string.h>
57 
58 /* Define some copy-macros for checksum-on-copy so that the code looks
59  nicer by preventing too many ifdef's. */
60 #if TCP_CHECKSUM_ON_COPY
61 #define TCP_DATA_COPY(dst, src, len, seg) do { \
62  tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), \
63  len, &seg->chksum, &seg->chksum_swapped); \
64  seg->flags |= TF_SEG_DATA_CHECKSUMMED; } while(0)
65 #define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) \
66  tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), len, chksum, chksum_swapped);
67 #else /* TCP_CHECKSUM_ON_COPY*/
68 #define TCP_DATA_COPY(dst, src, len, seg) MEMCPY(dst, src, len)
69 #define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) MEMCPY(dst, src, len)
70 #endif /* TCP_CHECKSUM_ON_COPY*/
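/* Editorial note (not part of the original sources): with TCP_CHECKSUM_ON_COPY
 * enabled, TCP_DATA_COPY/TCP_DATA_COPY2 compute the Internet checksum of the
 * payload while it is being copied into the pbuf and accumulate it per segment
 * (seg->chksum / seg->chksum_swapped). tcp_output_segment() later only
 * checksums the TCP header via inet_chksum_pseudo_partial() and folds the
 * stored payload sum into it, so the data is not traversed a second time. */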
71 
74 #ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK
75 #define TCP_CHECKSUM_ON_COPY_SANITY_CHECK 0
76 #endif
77 
78 /* Forward declarations.*/
79 static void tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb);
80 
91 static struct pbuf *
92 tcp_output_alloc_header(struct tcp_pcb *pcb, u16_t optlen, u16_t datalen,
93  u32_t seqno_be /* already in network byte order */)
94 {
95  struct tcp_hdr *tcphdr;
96  struct pbuf *p = pbuf_alloc(PBUF_IP, TCP_HLEN + optlen + datalen, PBUF_RAM);
97  if (p != NULL) {
98  LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr",
99  (p->len >= TCP_HLEN + optlen));
100  tcphdr = (struct tcp_hdr *)p->payload;
101  tcphdr->src = htons(pcb->local_port);
102  tcphdr->dest = htons(pcb->remote_port);
103  tcphdr->seqno = seqno_be;
104  tcphdr->ackno = htonl(pcb->rcv_nxt);
105  TCPH_HDRLEN_FLAGS_SET(tcphdr, (5 + optlen / 4), TCP_ACK);
106  tcphdr->wnd = htons(pcb->rcv_ann_wnd);
107  tcphdr->chksum = 0;
108  tcphdr->urgp = 0;
109 
110  /* If we're sending a packet, update the announced right window edge */
111  pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd;
112  }
113  return p;
114 }
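/* Editorial note: tcp_output_alloc_header() is the shared helper for the
 * "header-only" transmissions in this file: tcp_send_empty_ack(),
 * tcp_keepalive() and tcp_zero_window_probe() all use it to obtain a pbuf
 * with room for the TCP header, options and (optionally) a little data, with
 * ports, seqno, ackno, window and the ACK flag already filled in. */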
115 
122 err_t
123 tcp_send_fin(struct tcp_pcb *pcb)
124 {
125  /* first, try to add the fin to the last unsent segment */
126  if (pcb->unsent != NULL) {
127  struct tcp_seg *last_unsent;
128  for (last_unsent = pcb->unsent; last_unsent->next != NULL;
129  last_unsent = last_unsent->next);
130 
131  if ((TCPH_FLAGS(last_unsent->tcphdr) & (TCP_SYN | TCP_FIN | TCP_RST)) == 0) {
132  /* no SYN/FIN/RST flag in the header, we can add the FIN flag */
133  TCPH_SET_FLAG(last_unsent->tcphdr, TCP_FIN);
134  return ERR_OK;
135  }
136  }
 137  /* could not piggyback the FIN: enqueue a segment carrying only the FIN flag */
138  return tcp_enqueue_flags(pcb, TCP_FIN);
139 }
140 
155 static struct tcp_seg *
156 tcp_create_segment(struct tcp_pcb *pcb, struct pbuf *p, u8_t flags, u32_t seqno, u8_t optflags)
157 {
158  struct tcp_seg *seg;
159  u8_t optlen = LWIP_TCP_OPT_LENGTH(optflags);
160 
161  if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) {
162  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_create_segment: no memory.\n"));
163  pbuf_free(p);
164  return NULL;
165  }
166  seg->flags = optflags;
167  seg->next = NULL;
168  seg->p = p;
169  seg->dataptr = p->payload;
170  seg->len = p->tot_len - optlen;
171 #if TCP_OVERSIZE_DBGCHECK
172  seg->oversize_left = 0;
173 #endif /* TCP_OVERSIZE_DBGCHECK */
174 #if TCP_CHECKSUM_ON_COPY
175  seg->chksum = 0;
176  seg->chksum_swapped = 0;
177  /* check optflags */
178  LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED",
179  (optflags & TF_SEG_DATA_CHECKSUMMED) == 0);
180 #endif /* TCP_CHECKSUM_ON_COPY */
181 
182  /* build TCP header */
183  if (pbuf_header(p, TCP_HLEN)) {
184  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_create_segment: no room for TCP header in pbuf.\n"));
185  TCP_STATS_INC(tcp.err);
186  tcp_seg_free(seg);
187  return NULL;
188  }
189  seg->tcphdr = (struct tcp_hdr *)seg->p->payload;
190  seg->tcphdr->src = htons(pcb->local_port);
191  seg->tcphdr->dest = htons(pcb->remote_port);
192  seg->tcphdr->seqno = htonl(seqno);
193  /* ackno is set in tcp_output */
194  TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), flags);
195  /* wnd and chksum are set in tcp_output */
196  seg->tcphdr->urgp = 0;
197  return seg;
198 }
199 
215 #if TCP_OVERSIZE
216 static struct pbuf *
217 tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length,
218  u16_t *oversize, struct tcp_pcb *pcb, u8_t apiflags,
219  u8_t first_seg)
220 {
221  struct pbuf *p;
222  u16_t alloc = length;
223 
224 #if LWIP_NETIF_TX_SINGLE_PBUF
225  LWIP_UNUSED_ARG(max_length);
226  LWIP_UNUSED_ARG(pcb);
227  LWIP_UNUSED_ARG(apiflags);
228  LWIP_UNUSED_ARG(first_seg);
229  /* always create MSS-sized pbufs */
230  alloc = TCP_MSS;
231 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
232  if (length < max_length) {
233  /* Should we allocate an oversized pbuf, or just the minimum
234  * length required? If tcp_write is going to be called again
235  * before this segment is transmitted, we want the oversized
236  * buffer. If the segment will be transmitted immediately, we can
237  * save memory by allocating only length. We use a simple
238  * heuristic based on the following information:
239  *
240  * Did the user set TCP_WRITE_FLAG_MORE?
241  *
242  * Will the Nagle algorithm defer transmission of this segment?
243  */
244  if ((apiflags & TCP_WRITE_FLAG_MORE) ||
245  (!(pcb->flags & TF_NODELAY) &&
246  (!first_seg ||
247  pcb->unsent != NULL ||
248  pcb->unacked != NULL))) {
249  alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(length + TCP_OVERSIZE));
250  }
251  }
252 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
253  p = pbuf_alloc(layer, alloc, PBUF_RAM);
254  if (p == NULL) {
255  return NULL;
256  }
257  LWIP_ASSERT("need unchained pbuf", p->next == NULL);
258  *oversize = p->len - length;
259  /* trim p->len to the currently used size */
260  p->len = p->tot_len = length;
261  return p;
262 }
263 #else /* TCP_OVERSIZE */
264 #define tcp_pbuf_prealloc(layer, length, mx, os, pcb, api, fst) pbuf_alloc((layer), (length), PBUF_RAM)
265 #endif /* TCP_OVERSIZE */
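/* Editorial example with hypothetical configuration values (and
 * LWIP_NETIF_TX_SINGLE_PBUF disabled): with TCP_OVERSIZE == 536 and
 * pcb->mss == 1460, a call tcp_pbuf_prealloc(PBUF_TRANSPORT, 100, 1460, ...)
 * made while Nagle would defer the segment allocates
 * LWIP_MIN(1460, LWIP_MEM_ALIGN_SIZE(100 + 536)) bytes but trims p->len back
 * to 100; *oversize then reports the spare room, so a later tcp_write() can
 * append data to this pbuf instead of allocating a new one. */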
266 
267 #if TCP_CHECKSUM_ON_COPY
268 
269 static void
270 tcp_seg_add_chksum(u16_t chksum, u16_t len, u16_t *seg_chksum,
271  u8_t *seg_chksum_swapped)
272 {
273  u32_t helper;
274  /* add chksum to old chksum and fold to u16_t */
275  helper = chksum + *seg_chksum;
276  chksum = FOLD_U32T(helper);
277  if ((len & 1) != 0) {
278  *seg_chksum_swapped = 1 - *seg_chksum_swapped;
279  chksum = SWAP_BYTES_IN_WORD(chksum);
280  }
281  *seg_chksum = chksum;
282 }
283 #endif /* TCP_CHECKSUM_ON_COPY */
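/* Editorial note on the swap logic above: the running checksum is kept 16-bit
 * aligned to the start of the segment's data. When a chunk of odd length is
 * added, every byte that follows lands on the opposite alignment, so the
 * accumulated sum is byte-swapped and *seg_chksum_swapped is toggled; adding
 * a second odd-length chunk swaps it back. tcp_output_segment() un-swaps once
 * more if the flag is still set before folding the sum into the pseudo-header
 * checksum. */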
284 
291 static err_t
292 tcp_write_checks(struct tcp_pcb *pcb, u16_t len)
293 {
294  /* connection is in invalid state for data transmission? */
295  if ((pcb->state != ESTABLISHED) &&
296  (pcb->state != CLOSE_WAIT) &&
297  (pcb->state != SYN_SENT) &&
298  (pcb->state != SYN_RCVD)) {
299  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n"));
300  return ERR_CONN;
301  } else if (len == 0) {
302  return ERR_OK;
303  }
304 
305  /* fail on too much data */
306  if (len > pcb->snd_buf) {
307  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 3, ("tcp_write: too much data (len=%"U16_F" > snd_buf=%"U16_F")\n",
308  len, pcb->snd_buf));
309  pcb->flags |= TF_NAGLEMEMERR;
310  return ERR_MEM;
311  }
312 
313  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen));
314 
315  /* If total number of pbufs on the unsent/unacked queues exceeds the
316  * configured maximum, return an error */
317  /* check for configured max queuelen and possible overflow */
318  if ((pcb->snd_queuelen >= TCP_SND_QUEUELEN) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) {
319  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 3, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n",
320  pcb->snd_queuelen, TCP_SND_QUEUELEN));
321  TCP_STATS_INC(tcp.memerr);
322  pcb->flags |= TF_NAGLEMEMERR;
323  return ERR_MEM;
324  }
325  if (pcb->snd_queuelen != 0) {
326  LWIP_ASSERT("tcp_write: pbufs on queue => at least one queue non-empty",
327  pcb->unacked != NULL || pcb->unsent != NULL);
328  } else {
329  LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty",
330  pcb->unacked == NULL && pcb->unsent == NULL);
331  }
332  return ERR_OK;
333 }
334 
351 err_t
352 tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags)
353 {
354  struct pbuf *concat_p = NULL;
355  struct tcp_seg *last_unsent = NULL, *seg = NULL, *prev_seg = NULL, *queue = NULL;
356  u16_t pos = 0; /* position in 'arg' data */
357  u16_t queuelen;
358  u8_t optlen = 0;
359  u8_t optflags = 0;
360 #if TCP_OVERSIZE
361  u16_t oversize = 0;
362  u16_t oversize_used = 0;
363 #endif /* TCP_OVERSIZE */
364 #if TCP_CHECKSUM_ON_COPY
365  u16_t concat_chksum = 0;
366  u8_t concat_chksum_swapped = 0;
367  u16_t concat_chksummed = 0;
368 #endif /* TCP_CHECKSUM_ON_COPY */
369  err_t err;
370 
371 #if LWIP_NETIF_TX_SINGLE_PBUF
372  /* Always copy to try to create single pbufs for TX */
373  apiflags |= TCP_WRITE_FLAG_COPY;
374 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
375 
376  LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n",
377  (void *)pcb, arg, len, (u16_t)apiflags));
378  LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)",
379  arg != NULL, return ERR_ARG;);
380 
381  err = tcp_write_checks(pcb, len);
382  if (err != ERR_OK) {
383  return err;
384  }
385  queuelen = pcb->snd_queuelen;
386 
387 #if LWIP_TCP_TIMESTAMPS
388  if ((pcb->flags & TF_TIMESTAMP)) {
389  optflags = TF_SEG_OPTS_TS;
390  optlen = LWIP_TCP_OPT_LENGTH(TF_SEG_OPTS_TS);
391  }
392 #endif /* LWIP_TCP_TIMESTAMPS */
393 
394 
395  /*
396  * TCP segmentation is done in three phases with increasing complexity:
397  *
398  * 1. Copy data directly into an oversized pbuf.
399  * 2. Chain a new pbuf to the end of pcb->unsent.
400  * 3. Create new segments.
401  *
402  * We may run out of memory at any point. In that case we must
403  * return ERR_MEM and not change anything in pcb. Therefore, all
404  * changes are recorded in local variables and committed at the end
405  * of the function. Some pcb fields are maintained in local copies:
406  *
407  * queuelen = pcb->snd_queuelen
408  * oversize = pcb->unsent_oversize
409  *
410  * These variables are set consistently by the phases:
411  *
412  * seg points to the last segment tampered with.
413  *
414  * pos records progress as data is segmented.
415  */
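  /* Editorial walk-through with hypothetical numbers: assume mss = 1460, no
   * options, the last unsent segment already holds 1000 bytes and its pbuf was
   * preallocated with 200 bytes of oversize; the application now writes 800
   * bytes. Phase 1 copies 200 bytes into the spare tail (pos = 200,
   * space = 460 - 200 = 260). Phase 2 allocates a 260-byte pbuf, keeps it in
   * concat_p and advances pos to 460. Phase 3 creates one new 340-byte segment
   * in 'queue'. Only after all three phases succeed are the pbufs and segments
   * actually attached to the pcb. */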
416 
417  /* Find the tail of the unsent queue. */
418  if (pcb->unsent != NULL) {
419  u16_t space;
420  u16_t unsent_optlen;
421 
422  /* @todo: this could be sped up by keeping last_unsent in the pcb */
423  for (last_unsent = pcb->unsent; last_unsent->next != NULL;
424  last_unsent = last_unsent->next);
425 
426  /* Usable space at the end of the last unsent segment */
427  unsent_optlen = LWIP_TCP_OPT_LENGTH(last_unsent->flags);
428  space = pcb->mss - (last_unsent->len + unsent_optlen);
429 
430  /*
431  * Phase 1: Copy data directly into an oversized pbuf.
432  *
433  * The number of bytes copied is recorded in the oversize_used
434  * variable. The actual copying is done at the bottom of the
435  * function.
436  */
437 #if TCP_OVERSIZE
438 #if TCP_OVERSIZE_DBGCHECK
 439  /* check that pcb->unsent_oversize matches last_unsent->oversize_left */
440  LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)",
441  pcb->unsent_oversize == last_unsent->oversize_left);
442 #endif /* TCP_OVERSIZE_DBGCHECK */
443  oversize = pcb->unsent_oversize;
444  if (oversize > 0) {
445  LWIP_ASSERT("inconsistent oversize vs. space", oversize_used <= space);
446  seg = last_unsent;
447  oversize_used = oversize < len ? oversize : len;
448  pos += oversize_used;
449  oversize -= oversize_used;
450  space -= oversize_used;
451  }
452  /* now we are either finished or oversize is zero */
 453  LWIP_ASSERT("inconsistent oversize vs. len", (oversize == 0) || (pos == len));
454 #endif /* TCP_OVERSIZE */
455 
456  /*
457  * Phase 2: Chain a new pbuf to the end of pcb->unsent.
458  *
459  * We don't extend segments containing SYN/FIN flags or options
460  * (len==0). The new pbuf is kept in concat_p and pbuf_cat'ed at
461  * the end.
462  */
463  if ((pos < len) && (space > 0) && (last_unsent->len > 0)) {
464  u16_t seglen = space < len - pos ? space : len - pos;
465  seg = last_unsent;
466 
467  /* Create a pbuf with a copy or reference to seglen bytes. We
468  * can use PBUF_RAW here since the data appears in the middle of
469  * a segment. A header will never be prepended. */
470  if (apiflags & TCP_WRITE_FLAG_COPY) {
471  /* Data is copied */
472  if ((concat_p = tcp_pbuf_prealloc(PBUF_RAW, seglen, space, &oversize, pcb, apiflags, 1)) == NULL) {
 473  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2,
 474  ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n",
475  seglen));
476  goto memerr;
477  }
478 #if TCP_OVERSIZE_DBGCHECK
479  last_unsent->oversize_left = oversize;
480 #endif /* TCP_OVERSIZE_DBGCHECK */
481  TCP_DATA_COPY2(concat_p->payload, (u8_t*)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped);
482 #if TCP_CHECKSUM_ON_COPY
483  concat_chksummed += seglen;
484 #endif /* TCP_CHECKSUM_ON_COPY */
485  } else {
486  /* Data is not copied */
487  if ((concat_p = pbuf_alloc(PBUF_RAW, seglen, PBUF_ROM)) == NULL) {
 488  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2,
 489  ("tcp_write: could not allocate memory for zero-copy pbuf\n"));
490  goto memerr;
491  }
492 #if TCP_CHECKSUM_ON_COPY
493  /* calculate the checksum of nocopy-data */
494  tcp_seg_add_chksum(~inet_chksum((u8_t*)arg + pos, seglen), seglen,
495  &concat_chksum, &concat_chksum_swapped);
496  concat_chksummed += seglen;
497 #endif /* TCP_CHECKSUM_ON_COPY */
498  /* reference the non-volatile payload data */
499  concat_p->payload = (u8_t*)arg + pos;
500  }
501 
502  pos += seglen;
503  queuelen += pbuf_clen(concat_p);
504  }
505  } else {
506 #if TCP_OVERSIZE
507  LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)",
508  pcb->unsent_oversize == 0);
509 #endif /* TCP_OVERSIZE */
510  }
511 
512  /*
513  * Phase 3: Create new segments.
514  *
515  * The new segments are chained together in the local 'queue'
516  * variable, ready to be appended to pcb->unsent.
517  */
518  while (pos < len) {
519  struct pbuf *p;
520  u16_t left = len - pos;
521  u16_t max_len = pcb->mss - optlen;
522  u16_t seglen = left > max_len ? max_len : left;
523 #if TCP_CHECKSUM_ON_COPY
524  u16_t chksum = 0;
525  u8_t chksum_swapped = 0;
526 #endif /* TCP_CHECKSUM_ON_COPY */
527 
528  if (apiflags & TCP_WRITE_FLAG_COPY) {
529  /* If copy is set, memory should be allocated and data copied
530  * into pbuf */
531  if ((p = tcp_pbuf_prealloc(PBUF_TRANSPORT, seglen + optlen, pcb->mss, &oversize, pcb, apiflags, queue == NULL)) == NULL) {
532  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen));
533  goto memerr;
534  }
535  LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen",
536  (p->len >= seglen));
537  TCP_DATA_COPY2((char *)p->payload + optlen, (u8_t*)arg + pos, seglen, &chksum, &chksum_swapped);
538  } else {
539  /* Copy is not set: First allocate a pbuf for holding the data.
540  * Since the referenced data is available at least until it is
541  * sent out on the link (as it has to be ACKed by the remote
542  * party) we can safely use PBUF_ROM instead of PBUF_REF here.
543  */
544  struct pbuf *p2;
545 #if TCP_OVERSIZE
546  LWIP_ASSERT("oversize == 0", oversize == 0);
547 #endif /* TCP_OVERSIZE */
548  if ((p2 = pbuf_alloc(PBUF_TRANSPORT, seglen, PBUF_ROM)) == NULL) {
549  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_write: could not allocate memory for zero-copy pbuf\n"));
550  goto memerr;
551  }
552 #if TCP_CHECKSUM_ON_COPY
553  /* calculate the checksum of nocopy-data */
554  chksum = ~inet_chksum((u8_t*)arg + pos, seglen);
555 #endif /* TCP_CHECKSUM_ON_COPY */
556  /* reference the non-volatile payload data */
557  p2->payload = (u8_t*)arg + pos;
558 
559  /* Second, allocate a pbuf for the headers. */
560  if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) {
561  /* If allocation fails, we have to deallocate the data pbuf as
562  * well. */
563  pbuf_free(p2);
564  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_write: could not allocate memory for header pbuf\n"));
565  goto memerr;
566  }
567  /* Concatenate the headers and data pbufs together. */
568  pbuf_cat(p/*header*/, p2/*data*/);
569  }
570 
571  queuelen += pbuf_clen(p);
572 
573  /* Now that there are more segments queued, we check again if the
574  * length of the queue exceeds the configured maximum or
575  * overflows. */
576  if ((queuelen > TCP_SND_QUEUELEN) || (queuelen > TCP_SNDQUEUELEN_OVERFLOW)) {
577  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_write: queue too long %"U16_F" (%"U16_F")\n", queuelen, TCP_SND_QUEUELEN));
578  pbuf_free(p);
579  goto memerr;
580  }
581 
582  if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) {
583  goto memerr;
584  }
585 #if TCP_OVERSIZE_DBGCHECK
586  seg->oversize_left = oversize;
587 #endif /* TCP_OVERSIZE_DBGCHECK */
588 #if TCP_CHECKSUM_ON_COPY
589  seg->chksum = chksum;
590  seg->chksum_swapped = chksum_swapped;
591  seg->flags |= TF_SEG_DATA_CHECKSUMMED;
592 #endif /* TCP_CHECKSUM_ON_COPY */
593  /* Fix dataptr for the nocopy case */
594  if ((apiflags & TCP_WRITE_FLAG_COPY) == 0) {
595  seg->dataptr = (u8_t*)arg + pos;
596  }
597 
598  /* first segment of to-be-queued data? */
599  if (queue == NULL) {
600  queue = seg;
601  } else {
602  /* Attach the segment to the end of the queued segments */
603  LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL);
604  prev_seg->next = seg;
605  }
606  /* remember last segment of to-be-queued data for next iteration */
607  prev_seg = seg;
608 
609  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n",
610  ntohl(seg->tcphdr->seqno),
611  ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg)));
612 
613  pos += seglen;
614  }
615 
616  /*
617  * All three segmentation phases were successful. We can commit the
618  * transaction.
619  */
620 
621  /*
622  * Phase 1: If data has been added to the preallocated tail of
623  * last_unsent, we update the length fields of the pbuf chain.
624  */
625 #if TCP_OVERSIZE
626  if (oversize_used > 0) {
627  struct pbuf *p;
628  /* Bump tot_len of whole chain, len of tail */
629  for (p = last_unsent->p; p; p = p->next) {
630  p->tot_len += oversize_used;
631  if (p->next == NULL) {
632  TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, last_unsent);
633  p->len += oversize_used;
634  }
635  }
636  last_unsent->len += oversize_used;
637 #if TCP_OVERSIZE_DBGCHECK
638  last_unsent->oversize_left -= oversize_used;
639 #endif /* TCP_OVERSIZE_DBGCHECK */
640  }
641  pcb->unsent_oversize = oversize;
642 #endif /* TCP_OVERSIZE */
643 
644  /*
645  * Phase 2: concat_p can be concatenated onto last_unsent->p
646  */
647  if (concat_p != NULL) {
648  LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty",
649  (last_unsent != NULL));
650  pbuf_cat(last_unsent->p, concat_p);
651  last_unsent->len += concat_p->tot_len;
652 #if TCP_CHECKSUM_ON_COPY
653  if (concat_chksummed) {
654  tcp_seg_add_chksum(concat_chksum, concat_chksummed, &last_unsent->chksum,
655  &last_unsent->chksum_swapped);
656  last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED;
657  }
658 #endif /* TCP_CHECKSUM_ON_COPY */
659  }
660 
661  /*
662  * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that
663  * is harmless
664  */
665  if (last_unsent == NULL) {
666  pcb->unsent = queue;
667  } else {
668  last_unsent->next = queue;
669  }
670 
671  /*
672  * Finally update the pcb state.
673  */
674  pcb->snd_lbb += len;
675  pcb->snd_buf -= len;
676  pcb->snd_queuelen = queuelen;
677 
678  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"S16_F" (after enqueued)\n",
679  pcb->snd_queuelen));
680  if (pcb->snd_queuelen != 0) {
681  LWIP_ASSERT("tcp_write: valid queue length",
682  pcb->unacked != NULL || pcb->unsent != NULL);
683  }
684 
685  /* Set the PSH flag in the last segment that we enqueued. */
686  if (seg != NULL && seg->tcphdr != NULL && ((apiflags & TCP_WRITE_FLAG_MORE)==0)) {
687  TCPH_SET_FLAG(seg->tcphdr, TCP_PSH);
688  }
689 
690  return ERR_OK;
691 memerr:
692  pcb->flags |= TF_NAGLEMEMERR;
693  TCP_STATS_INC(tcp.memerr);
694 
695  if (concat_p != NULL) {
696  pbuf_free(concat_p);
697  }
698  if (queue != NULL) {
699  tcp_segs_free(queue);
700  }
701  if (pcb->snd_queuelen != 0) {
702  LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL ||
703  pcb->unsent != NULL);
704  }
705  LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"S16_F" (with mem err)\n", pcb->snd_queuelen));
706  return ERR_MEM;
707 }
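/* Editorial usage sketch (not from the original file): a typical raw-API
 * caller copies its buffer into the stack and then asks for transmission,
 * handling ERR_MEM by retrying later, e.g. from the sent callback:
 *
 *   err_t err = tcp_write(pcb, data, len, TCP_WRITE_FLAG_COPY);
 *   if (err == ERR_OK) {
 *     tcp_output(pcb);        // push the queued segments out
 *   } else if (err == ERR_MEM) {
 *     // queue or snd_buf full: wait for tcp_sent() and try again
 *   }
 *
 * 'data' and 'len' are placeholders; tcp_write() only queues data and never
 * transmits by itself. */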
708 
719 err_t
720 tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags)
721 {
722  struct pbuf *p;
723  struct tcp_seg *seg;
724  u8_t optflags = 0;
725  u8_t optlen = 0;
726 
727  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen));
728 
729  LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)",
730  (flags & (TCP_SYN | TCP_FIN)) != 0);
731 
732  /* check for configured max queuelen and possible overflow */
733  if ((pcb->snd_queuelen >= TCP_SND_QUEUELEN) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) {
734  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 3, ("tcp_enqueue_flags: too long queue %"U16_F" (max %"U16_F")\n",
735  pcb->snd_queuelen, TCP_SND_QUEUELEN));
736  TCP_STATS_INC(tcp.memerr);
737  pcb->flags |= TF_NAGLEMEMERR;
738  return ERR_MEM;
739  }
740 
741  if (flags & TCP_SYN) {
742  optflags = TF_SEG_OPTS_MSS;
743  }
744 #if LWIP_TCP_TIMESTAMPS
745  if ((pcb->flags & TF_TIMESTAMP)) {
746  optflags |= TF_SEG_OPTS_TS;
747  }
748 #endif /* LWIP_TCP_TIMESTAMPS */
749  optlen = LWIP_TCP_OPT_LENGTH(optflags);
750 
751  /* tcp_enqueue_flags is always called with either SYN or FIN in flags.
752  * We need one available snd_buf byte to do that.
753  * This means we can't send FIN while snd_buf==0. A better fix would be to
754  * not include SYN and FIN sequence numbers in the snd_buf count. */
755  if (pcb->snd_buf == 0) {
756  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 3, ("tcp_enqueue_flags: no send buffer available\n"));
757  TCP_STATS_INC(tcp.memerr);
758  return ERR_MEM;
759  }
760 
761  /* Allocate pbuf with room for TCP header + options */
762  if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) {
763  pcb->flags |= TF_NAGLEMEMERR;
764  TCP_STATS_INC(tcp.memerr);
765  return ERR_MEM;
766  }
767  LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen",
768  (p->len >= optlen));
769 
770  /* Allocate memory for tcp_seg, and fill in fields. */
771  if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) {
772  pcb->flags |= TF_NAGLEMEMERR;
773  TCP_STATS_INC(tcp.memerr);
774  return ERR_MEM;
775  }
776  LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % MEM_ALIGNMENT) == 0);
777  LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0);
778 
 779  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE,
 780  ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n",
781  ntohl(seg->tcphdr->seqno),
782  ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg),
783  (u16_t)flags));
784 
785  /* Now append seg to pcb->unsent queue */
786  if (pcb->unsent == NULL) {
787  pcb->unsent = seg;
788  } else {
789  struct tcp_seg *useg;
790  for (useg = pcb->unsent; useg->next != NULL; useg = useg->next);
791  useg->next = seg;
792  }
793 #if TCP_OVERSIZE
794  /* The new unsent tail has no space */
795  pcb->unsent_oversize = 0;
796 #endif /* TCP_OVERSIZE */
797 
798  /* SYN and FIN bump the sequence number */
799  if ((flags & TCP_SYN) || (flags & TCP_FIN)) {
800  pcb->snd_lbb++;
801  /* optlen does not influence snd_buf */
802  pcb->snd_buf--;
803  }
804  if (flags & TCP_FIN) {
805  pcb->flags |= TF_FIN;
806  }
807 
808  /* update number of segments on the queues */
809  pcb->snd_queuelen += pbuf_clen(seg->p);
810  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"S16_F" (after enqueued)\n", pcb->snd_queuelen));
811  if (pcb->snd_queuelen != 0) {
812  LWIP_ASSERT("tcp_enqueue_flags: invalid queue length",
813  pcb->unacked != NULL || pcb->unsent != NULL);
814  }
815 
816  return ERR_OK;
817 }
818 
819 
820 #if LWIP_TCP_TIMESTAMPS
 821 /* Build a timestamp option (12 bytes long) at the specified options pointer.
822  *
823  * @param pcb tcp_pcb
824  * @param opts option pointer where to store the timestamp option
825  */
826 static void
827 tcp_build_timestamp_option(struct tcp_pcb *pcb, u32_t *opts)
828 {
829  /* Pad with two NOP options to make everything nicely aligned */
830  opts[0] = PP_HTONL(0x0101080A);
831  opts[1] = htonl(sys_now());
832  opts[2] = htonl(pcb->ts_recent);
833 }
834 #endif
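/* Editorial note on the constant above: 0x0101080A is the option byte sequence
 * 0x01 0x01 0x08 0x0A, i.e. two NOP options for alignment followed by
 * kind = 8 (timestamp) and length = 10. Together with the two 32-bit values
 * (TSval = sys_now(), TSecr = pcb->ts_recent) this makes up the 12 bytes
 * mentioned in the comment. */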
835 
840 err_t
841 tcp_send_empty_ack(struct tcp_pcb *pcb)
842 {
843  struct pbuf *p;
844  struct tcp_hdr *tcphdr;
845  u8_t optlen = 0;
846 
847 #if LWIP_TCP_TIMESTAMPS
848  if (pcb->flags & TF_TIMESTAMP) {
849  optlen = LWIP_TCP_OPT_LENGTH(TF_SEG_OPTS_TS);
850  }
851 #endif
852 
853  p = tcp_output_alloc_header(pcb, optlen, 0, htonl(pcb->snd_nxt));
854  if (p == NULL) {
855  LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: (ACK) could not allocate pbuf\n"));
856  return ERR_BUF;
857  }
858  tcphdr = (struct tcp_hdr *)p->payload;
 859  LWIP_DEBUGF(TCP_OUTPUT_DEBUG,
 860  ("tcp_output: sending ACK for %"U32_F"\n", pcb->rcv_nxt));
861  /* remove ACK flags from the PCB, as we send an empty ACK now */
862  pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW);
863 
864  /* NB. MSS option is only sent on SYNs, so ignore it here */
865 #if LWIP_TCP_TIMESTAMPS
866  pcb->ts_lastacksent = pcb->rcv_nxt;
867 
868  if (pcb->flags & TF_TIMESTAMP) {
869  tcp_build_timestamp_option(pcb, (u32_t *)(tcphdr + 1));
870  }
871 #endif
872 
873 #if CHECKSUM_GEN_TCP
874  tcphdr->chksum = inet_chksum_pseudo(p, &(pcb->local_ip), &(pcb->remote_ip),
875  IP_PROTO_TCP, p->tot_len);
876 #endif
877 #if LWIP_NETIF_HWADDRHINT
878  ip_output_hinted(p, &(pcb->local_ip), &(pcb->remote_ip), pcb->ttl, pcb->tos,
879  IP_PROTO_TCP, &(pcb->addr_hint));
880 #else /* LWIP_NETIF_HWADDRHINT*/
881  ip_output(p, &(pcb->local_ip), &(pcb->remote_ip), pcb->ttl, pcb->tos,
882  IP_PROTO_TCP);
883 #endif /* LWIP_NETIF_HWADDRHINT*/
884  pbuf_free(p);
885 
886  return ERR_OK;
887 }
888 
896 err_t
897 tcp_output(struct tcp_pcb *pcb)
898 {
899  struct tcp_seg *seg, *useg;
900  u32_t wnd, snd_nxt;
901 #if TCP_CWND_DEBUG
902  s16_t i = 0;
903 #endif /* TCP_CWND_DEBUG */
904 
905  /* First, check if we are invoked by the TCP input processing
906  code. If so, we do not output anything. Instead, we rely on the
907  input processing code to call us when input processing is done
908  with. */
909  if (tcp_input_pcb == pcb) {
910  return ERR_OK;
911  }
912 
913  wnd = LWIP_MIN(pcb->snd_wnd, pcb->cwnd);
914 
915  seg = pcb->unsent;
916 
917  /* If the TF_ACK_NOW flag is set and no data will be sent (either
918  * because the ->unsent queue is empty or because the window does
919  * not allow it), construct an empty ACK segment and send it.
920  *
921  * If data is to be sent, we will just piggyback the ACK (see below).
922  */
923  if (pcb->flags & TF_ACK_NOW &&
924  (seg == NULL ||
925  ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd)) {
926  return tcp_send_empty_ack(pcb);
927  }
928 
929  /* useg should point to last segment on unacked queue */
930  useg = pcb->unacked;
931  if (useg != NULL) {
932  for (; useg->next != NULL; useg = useg->next);
933  }
934 
935 #if TCP_OUTPUT_DEBUG
936  if (seg == NULL) {
937  LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: nothing to send (%p)\n",
938  (void*)pcb->unsent));
939  }
940 #endif /* TCP_OUTPUT_DEBUG */
941 #if TCP_CWND_DEBUG
942  if (seg == NULL) {
943  LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"U16_F
944  ", cwnd %"U16_F", wnd %"U32_F
945  ", seg == NULL, ack %"U32_F"\n",
946  pcb->snd_wnd, pcb->cwnd, wnd, pcb->lastack));
947  } else {
 948  LWIP_DEBUGF(TCP_CWND_DEBUG,
 949  ("tcp_output: snd_wnd %"U16_F", cwnd %"U16_F", wnd %"U32_F
950  ", effwnd %"U32_F", seq %"U32_F", ack %"U32_F"\n",
951  pcb->snd_wnd, pcb->cwnd, wnd,
952  ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len,
953  ntohl(seg->tcphdr->seqno), pcb->lastack));
954  }
955 #endif /* TCP_CWND_DEBUG */
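  /* Editorial note on the loop condition below: ntohl(seg->tcphdr->seqno) -
   * pcb->lastack is how far this segment's first byte lies beyond the last
   * acknowledged byte; adding seg->len gives the amount of data that would be
   * outstanding after sending it, which must fit into wnd = min(snd_wnd, cwnd).
   * Hypothetical numbers: lastack = 1000, seqno = 1400, len = 500, wnd = 1000
   * -> 1400 - 1000 + 500 = 900 <= 1000, so the segment may be sent. */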
956  /* data available and window allows it to be sent? */
957  while (seg != NULL &&
958  ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len <= wnd) {
959  LWIP_ASSERT("RST not expected here!",
960  (TCPH_FLAGS(seg->tcphdr) & TCP_RST) == 0);
961  /* Stop sending if the nagle algorithm would prevent it
962  * Don't stop:
963  * - if tcp_write had a memory error before (prevent delayed ACK timeout) or
964  * - if FIN was already enqueued for this PCB (SYN is always alone in a segment -
965  * either seg->next != NULL or pcb->unacked == NULL;
 966  * RST is not sent using tcp_write/tcp_output.
967  */
968  if((tcp_do_output_nagle(pcb) == 0) &&
969  ((pcb->flags & (TF_NAGLEMEMERR | TF_FIN)) == 0)){
970  break;
971  }
972 #if TCP_CWND_DEBUG
973  LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"U16_F", cwnd %"U16_F", wnd %"U32_F", effwnd %"U32_F", seq %"U32_F", ack %"U32_F", i %"S16_F"\n",
974  pcb->snd_wnd, pcb->cwnd, wnd,
975  ntohl(seg->tcphdr->seqno) + seg->len -
976  pcb->lastack,
977  ntohl(seg->tcphdr->seqno), pcb->lastack, i));
978  ++i;
979 #endif /* TCP_CWND_DEBUG */
980 
981  pcb->unsent = seg->next;
982 
983  if (pcb->state != SYN_SENT) {
984  TCPH_SET_FLAG(seg->tcphdr, TCP_ACK);
985  pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW);
986  }
987 
988  tcp_output_segment(seg, pcb);
989  snd_nxt = ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg);
990  if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) {
991  pcb->snd_nxt = snd_nxt;
992  }
993  /* put segment on unacknowledged list if length > 0 */
994  if (TCP_TCPLEN(seg) > 0) {
995  seg->next = NULL;
996  /* unacked list is empty? */
997  if (pcb->unacked == NULL) {
998  pcb->unacked = seg;
999  useg = seg;
1000  /* unacked list is not empty? */
1001  } else {
1002  /* In the case of fast retransmit, the packet should not go to the tail
1003  * of the unacked queue, but rather somewhere before it. We need to check for
1004  * this case. -STJ Jul 27, 2004 */
1005  if (TCP_SEQ_LT(ntohl(seg->tcphdr->seqno), ntohl(useg->tcphdr->seqno))) {
1006  /* add segment to before tail of unacked list, keeping the list sorted */
1007  struct tcp_seg **cur_seg = &(pcb->unacked);
1008  while (*cur_seg &&
1009  TCP_SEQ_LT(ntohl((*cur_seg)->tcphdr->seqno), ntohl(seg->tcphdr->seqno))) {
1010  cur_seg = &((*cur_seg)->next );
1011  }
1012  seg->next = (*cur_seg);
1013  (*cur_seg) = seg;
1014  } else {
1015  /* add segment to tail of unacked list */
1016  useg->next = seg;
1017  useg = useg->next;
1018  }
1019  }
1020  /* do not queue empty segments on the unacked list */
1021  } else {
1022  tcp_seg_free(seg);
1023  }
1024  seg = pcb->unsent;
1025  }
1026 #if TCP_OVERSIZE
1027  if (pcb->unsent == NULL) {
1028  /* last unsent has been removed, reset unsent_oversize */
1029  pcb->unsent_oversize = 0;
1030  }
1031 #endif /* TCP_OVERSIZE */
1032 
1033  if (seg != NULL && pcb->persist_backoff == 0 &&
1034  ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > pcb->snd_wnd) {
1035  /* prepare for persist timer */
1036  pcb->persist_cnt = 0;
1037  pcb->persist_backoff = 1;
1038  }
1039 
1040  pcb->flags &= ~TF_NAGLEMEMERR;
1041  return ERR_OK;
1042 }
1043 
1050 static void
1051 tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb)
1052 {
1053  u16_t len;
1054  struct netif *netif;
1055  u32_t *opts;
1056 
 1058  snmp_inc_tcpoutsegs();
 1059 
1060  /* The TCP header has already been constructed, but the ackno and
1061  wnd fields remain. */
1062  seg->tcphdr->ackno = htonl(pcb->rcv_nxt);
1063 
1064  /* advertise our receive window size in this TCP segment */
1065  seg->tcphdr->wnd = htons(pcb->rcv_ann_wnd);
1066 
1067  pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd;
1068 
1069  /* Add any requested options. NB MSS option is only set on SYN
1070  packets, so ignore it here */
1071  LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % MEM_ALIGNMENT) == 0);
1072  opts = (u32_t *)(void *)(seg->tcphdr + 1);
1073  if (seg->flags & TF_SEG_OPTS_MSS) {
1074  TCP_BUILD_MSS_OPTION(*opts);
1075  opts += 1;
1076  }
1077 #if LWIP_TCP_TIMESTAMPS
1078  pcb->ts_lastacksent = pcb->rcv_nxt;
1079 
1080  if (seg->flags & TF_SEG_OPTS_TS) {
1081  tcp_build_timestamp_option(pcb, opts);
1082  opts += 3;
1083  }
1084 #endif
1085 
1086  /* If we don't have a local IP address, we get one by
1087  calling ip_route(). */
1088  if (ip_addr_isany(&(pcb->local_ip))) {
1089  netif = ip_route(&(pcb->remote_ip));
1090  if (netif == NULL) {
1091  return;
1092  }
1093  ip_addr_copy(pcb->local_ip, netif->ip_addr);
1094  }
1095 
1096  /* Set retransmission timer running if it is not currently enabled */
1097  if(pcb->rtime == -1) {
1098  pcb->rtime = 0;
1099  }
1100 
1101  if (pcb->rttest == 0) {
1102  pcb->rttest = tcp_ticks;
1103  pcb->rtseq = ntohl(seg->tcphdr->seqno);
1104 
1105  LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_output_segment: rtseq %"U32_F"\n", pcb->rtseq));
1106  }
1107  LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output_segment: %"U32_F":%"U32_F"\n",
1108  htonl(seg->tcphdr->seqno), htonl(seg->tcphdr->seqno) +
1109  seg->len));
1110 
1111  len = (u16_t)((u8_t *)seg->tcphdr - (u8_t *)seg->p->payload);
1112 
1113  seg->p->len -= len;
1114  seg->p->tot_len -= len;
1115 
1116  seg->p->payload = seg->tcphdr;
1117 
1118  seg->tcphdr->chksum = 0;
1119 #if CHECKSUM_GEN_TCP
1120 #if TCP_CHECKSUM_ON_COPY
1121  {
1122  u32_t acc;
1123 #if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
1124  u16_t chksum_slow = inet_chksum_pseudo(seg->p, &(pcb->local_ip),
1125  &(pcb->remote_ip),
1126  IP_PROTO_TCP, seg->p->tot_len);
1127 #endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */
1128  if ((seg->flags & TF_SEG_DATA_CHECKSUMMED) == 0) {
1129  LWIP_ASSERT("data included but not checksummed",
1130  seg->p->tot_len == (TCPH_HDRLEN(seg->tcphdr) * 4));
1131  }
1132 
1133  /* rebuild TCP header checksum (TCP header changes for retransmissions!) */
1134  acc = inet_chksum_pseudo_partial(seg->p, &(pcb->local_ip),
1135  &(pcb->remote_ip),
1136  IP_PROTO_TCP, seg->p->tot_len, TCPH_HDRLEN(seg->tcphdr) * 4);
1137  /* add payload checksum */
1138  if (seg->chksum_swapped) {
1139  seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum);
1140  seg->chksum_swapped = 0;
1141  }
1142  acc += (u16_t)~(seg->chksum);
1143  seg->tcphdr->chksum = FOLD_U32T(acc);
1144 #if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
1145  if (chksum_slow != seg->tcphdr->chksum) {
 1146  LWIP_DEBUGF(TCP_DEBUG | LWIP_DBG_LEVEL_WARNING,
 1147  ("tcp_output_segment: calculated checksum is %"X16_F" instead of %"X16_F"\n",
1148  seg->tcphdr->chksum, chksum_slow));
1149  seg->tcphdr->chksum = chksum_slow;
1150  }
1151 #endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */
1152  }
1153 #else /* TCP_CHECKSUM_ON_COPY */
1154  seg->tcphdr->chksum = inet_chksum_pseudo(seg->p, &(pcb->local_ip),
1155  &(pcb->remote_ip),
1156  IP_PROTO_TCP, seg->p->tot_len);
1157 #endif /* TCP_CHECKSUM_ON_COPY */
1158 #endif /* CHECKSUM_GEN_TCP */
1159  TCP_STATS_INC(tcp.xmit);
1160 
1161 #if LWIP_NETIF_HWADDRHINT
1162  ip_output_hinted(seg->p, &(pcb->local_ip), &(pcb->remote_ip), pcb->ttl, pcb->tos,
1163  IP_PROTO_TCP, &(pcb->addr_hint));
1164 #else /* LWIP_NETIF_HWADDRHINT*/
1165  ip_output(seg->p, &(pcb->local_ip), &(pcb->remote_ip), pcb->ttl, pcb->tos,
1166  IP_PROTO_TCP);
1167 #endif /* LWIP_NETIF_HWADDRHINT*/
1168 }
1169 
1190 void
1191 tcp_rst(u32_t seqno, u32_t ackno,
1192  ip_addr_t *local_ip, ip_addr_t *remote_ip,
1193  u16_t local_port, u16_t remote_port)
1194 {
1195  struct pbuf *p;
1196  struct tcp_hdr *tcphdr;
1197  p = pbuf_alloc(PBUF_IP, TCP_HLEN, PBUF_RAM);
1198  if (p == NULL) {
1199  LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n"));
1200  return;
1201  }
1202  LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr",
1203  (p->len >= sizeof(struct tcp_hdr)));
1204 
1205  tcphdr = (struct tcp_hdr *)p->payload;
1206  tcphdr->src = htons(local_port);
1207  tcphdr->dest = htons(remote_port);
1208  tcphdr->seqno = htonl(seqno);
1209  tcphdr->ackno = htonl(ackno);
1210  TCPH_HDRLEN_FLAGS_SET(tcphdr, TCP_HLEN/4, TCP_RST | TCP_ACK);
1211  tcphdr->wnd = PP_HTONS(TCP_WND);
1212  tcphdr->chksum = 0;
1213  tcphdr->urgp = 0;
1214 
1215 #if CHECKSUM_GEN_TCP
1216  tcphdr->chksum = inet_chksum_pseudo(p, local_ip, remote_ip,
1217  IP_PROTO_TCP, p->tot_len);
1218 #endif
1219  TCP_STATS_INC(tcp.xmit);
 1220  snmp_inc_tcpoutsegs();
 1221  /* Send output with hardcoded TTL since we have no access to the pcb */
1222  ip_output(p, local_ip, remote_ip, TCP_TTL, 0, IP_PROTO_TCP);
1223  pbuf_free(p);
1224  LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_rst: seqno %"U32_F" ackno %"U32_F".\n", seqno, ackno));
1225 }
1226 
1234 void
1235 tcp_rexmit_rto(struct tcp_pcb *pcb)
1236 {
1237  struct tcp_seg *seg;
1238 
1239  if (pcb->unacked == NULL) {
1240  return;
1241  }
1242 
1243  /* Move all unacked segments to the head of the unsent queue */
1244  for (seg = pcb->unacked; seg->next != NULL; seg = seg->next);
1245  /* concatenate unsent queue after unacked queue */
1246  seg->next = pcb->unsent;
1247  /* unsent queue is the concatenated queue (of unacked, unsent) */
1248  pcb->unsent = pcb->unacked;
1249  /* unacked queue is now empty */
1250  pcb->unacked = NULL;
1251 
1252  /* increment number of retransmissions */
1253  ++pcb->nrtx;
1254 
1255  /* Don't take any RTT measurements after retransmitting. */
1256  pcb->rttest = 0;
1257 
1258  /* Do the actual retransmission */
1259  tcp_output(pcb);
1260 }
1261 
1269 void
1270 tcp_rexmit(struct tcp_pcb *pcb)
1271 {
1272  struct tcp_seg *seg;
1273  struct tcp_seg **cur_seg;
1274 
1275  if (pcb->unacked == NULL) {
1276  return;
1277  }
1278 
1279  /* Move the first unacked segment to the unsent queue */
1280  /* Keep the unsent queue sorted. */
1281  seg = pcb->unacked;
1282  pcb->unacked = seg->next;
1283 
1284  cur_seg = &(pcb->unsent);
1285  while (*cur_seg &&
1286  TCP_SEQ_LT(ntohl((*cur_seg)->tcphdr->seqno), ntohl(seg->tcphdr->seqno))) {
1287  cur_seg = &((*cur_seg)->next );
1288  }
1289  seg->next = *cur_seg;
1290  *cur_seg = seg;
1291 
1292  ++pcb->nrtx;
1293 
1294  /* Don't take any rtt measurements after retransmitting. */
1295  pcb->rttest = 0;
1296 
1297  /* Do the actual retransmission. */
 1298  snmp_inc_tcpretranssegs();
 1299  /* No need to call tcp_output: we are always called from tcp_input()
1300  and thus tcp_output directly returns. */
1301 }
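/* Editorial note: tcp_rexmit_rto() and tcp_rexmit() differ in scope. After a
 * retransmission timeout the whole unacked queue is prepended to unsent and
 * re-sent by tcp_output(); tcp_rexmit() moves only the first unacked segment
 * back into the (sorted) unsent queue and leaves the actual transmission to
 * the caller, since it runs from tcp_input() where tcp_output() returns
 * immediately. Both bump pcb->nrtx and suppress RTT sampling. */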
1302 
1303 
1309 void
1310 tcp_rexmit_fast(struct tcp_pcb *pcb)
1311 {
1312  if (pcb->unacked != NULL && !(pcb->flags & TF_INFR)) {
1313  /* This is fast retransmit. Retransmit the first unacked segment. */
 1314  LWIP_DEBUGF(TCP_FR_DEBUG,
 1315  ("tcp_receive: dupacks %"U16_F" (%"U32_F
1316  "), fast retransmit %"U32_F"\n",
1317  (u16_t)pcb->dupacks, pcb->lastack,
1318  ntohl(pcb->unacked->tcphdr->seqno)));
1319  tcp_rexmit(pcb);
1320 
1321  /* Set ssthresh to half of the minimum of the current
1322  * cwnd and the advertised window */
1323  if (pcb->cwnd > pcb->snd_wnd) {
1324  pcb->ssthresh = pcb->snd_wnd / 2;
1325  } else {
1326  pcb->ssthresh = pcb->cwnd / 2;
1327  }
1328 
1329  /* The minimum value for ssthresh should be 2 MSS */
1330  if (pcb->ssthresh < 2*pcb->mss) {
 1331  LWIP_DEBUGF(TCP_FR_DEBUG,
 1332  ("tcp_receive: The minimum value for ssthresh %"U16_F
1333  " should be min 2 mss %"U16_F"...\n",
1334  pcb->ssthresh, 2*pcb->mss));
1335  pcb->ssthresh = 2*pcb->mss;
1336  }
1337 
1338  pcb->cwnd = pcb->ssthresh + 3 * pcb->mss;
1339  pcb->flags |= TF_INFR;
1340  }
1341 }
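/* Editorial note: this follows the usual fast-retransmit/fast-recovery entry
 * pattern (cf. RFC 2581): ssthresh is set to half of min(cwnd, snd_wnd) but
 * never below 2*MSS, cwnd is inflated to ssthresh + 3*MSS to account for the
 * three duplicate ACKs, and TF_INFR marks the pcb as being in fast recovery
 * so this block runs only once per recovery episode. */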
1342 
1343 
1352 void
1353 tcp_keepalive(struct tcp_pcb *pcb)
1354 {
1355  struct pbuf *p;
1356  struct tcp_hdr *tcphdr;
1357 
1358  LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: sending KEEPALIVE probe to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
1359  ip4_addr1_16(&pcb->remote_ip), ip4_addr2_16(&pcb->remote_ip),
1360  ip4_addr3_16(&pcb->remote_ip), ip4_addr4_16(&pcb->remote_ip)));
1361 
1362  LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: tcp_ticks %"U32_F" pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n",
1363  tcp_ticks, pcb->tmr, pcb->keep_cnt_sent));
1364 
1365  p = tcp_output_alloc_header(pcb, 0, 0, htonl(pcb->snd_nxt - 1));
1366  if(p == NULL) {
 1367  LWIP_DEBUGF(TCP_DEBUG,
 1368  ("tcp_keepalive: could not allocate memory for pbuf\n"));
1369  return;
1370  }
1371  tcphdr = (struct tcp_hdr *)p->payload;
1372 
1373 #if CHECKSUM_GEN_TCP
1374  tcphdr->chksum = inet_chksum_pseudo(p, &pcb->local_ip, &pcb->remote_ip,
1375  IP_PROTO_TCP, p->tot_len);
1376 #endif
1377  TCP_STATS_INC(tcp.xmit);
1378 
1379  /* Send output to IP */
1380 #if LWIP_NETIF_HWADDRHINT
1381  ip_output_hinted(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, 0, IP_PROTO_TCP,
1382  &(pcb->addr_hint));
1383 #else /* LWIP_NETIF_HWADDRHINT*/
1384  ip_output(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, 0, IP_PROTO_TCP);
1385 #endif /* LWIP_NETIF_HWADDRHINT*/
1386 
1387  pbuf_free(p);
1388 
1389  LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: seqno %"U32_F" ackno %"U32_F".\n",
1390  pcb->snd_nxt - 1, pcb->rcv_nxt));
1391 }
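/* Editorial note: the probe built above carries seqno snd_nxt - 1 and no
 * data, i.e. a byte the peer has already acknowledged. A live peer answers
 * with a plain ACK, which is enough to keep the connection alive; a dead peer
 * stays silent, and the keepalive handling in the stack's slow timer
 * eventually gives up on the connection. */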
1392 
1393 
1402 void
1403 tcp_zero_window_probe(struct tcp_pcb *pcb)
1404 {
1405  struct pbuf *p;
1406  struct tcp_hdr *tcphdr;
1407  struct tcp_seg *seg;
1408  u16_t len;
1409  u8_t is_fin;
1410 
 1411  LWIP_DEBUGF(TCP_DEBUG,
 1412  ("tcp_zero_window_probe: sending ZERO WINDOW probe to %"
1413  U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
1414  ip4_addr1_16(&pcb->remote_ip), ip4_addr2_16(&pcb->remote_ip),
1415  ip4_addr3_16(&pcb->remote_ip), ip4_addr4_16(&pcb->remote_ip)));
1416 
 1417  LWIP_DEBUGF(TCP_DEBUG,
 1418  ("tcp_zero_window_probe: tcp_ticks %"U32_F
1419  " pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n",
1420  tcp_ticks, pcb->tmr, pcb->keep_cnt_sent));
1421 
1422  seg = pcb->unacked;
1423 
1424  if(seg == NULL) {
1425  seg = pcb->unsent;
1426  }
1427  if(seg == NULL) {
1428  return;
1429  }
1430 
1431  is_fin = ((TCPH_FLAGS(seg->tcphdr) & TCP_FIN) != 0) && (seg->len == 0);
1432  /* we want to send one seqno: either FIN or data (no options) */
1433  len = is_fin ? 0 : 1;
1434 
1435  p = tcp_output_alloc_header(pcb, 0, len, seg->tcphdr->seqno);
1436  if(p == NULL) {
1437  LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: no memory for pbuf\n"));
1438  return;
1439  }
1440  tcphdr = (struct tcp_hdr *)p->payload;
1441 
1442  if (is_fin) {
1443  /* FIN segment, no data */
1444  TCPH_FLAGS_SET(tcphdr, TCP_ACK | TCP_FIN);
1445  } else {
1446  /* Data segment, copy in one byte from the head of the unacked queue */
1447  *((char *)p->payload + TCP_HLEN) = *(char *)seg->dataptr;
1448  }
1449 
1450 #if CHECKSUM_GEN_TCP
1451  tcphdr->chksum = inet_chksum_pseudo(p, &pcb->local_ip, &pcb->remote_ip,
1452  IP_PROTO_TCP, p->tot_len);
1453 #endif
1454  TCP_STATS_INC(tcp.xmit);
1455 
1456  /* Send output to IP */
1457 #if LWIP_NETIF_HWADDRHINT
1458  ip_output_hinted(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, 0, IP_PROTO_TCP,
1459  &(pcb->addr_hint));
1460 #else /* LWIP_NETIF_HWADDRHINT*/
1461  ip_output(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, 0, IP_PROTO_TCP);
1462 #endif /* LWIP_NETIF_HWADDRHINT*/
1463 
1464  pbuf_free(p);
1465 
1466  LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: seqno %"U32_F
1467  " ackno %"U32_F".\n",
1468  pcb->snd_nxt - 1, pcb->rcv_nxt));
1469 }
1470 #endif /* LWIP_TCP */