author    Takashi Iwai <tiwai@suse.de>    2019-07-30 11:24:42 +0200
committer Takashi Iwai <tiwai@suse.de>    2019-07-30 11:24:42 +0200
commit    72b3601ccc80e9afd6b757785df0a6e2c8d1e40f (patch)
tree      3e73074831ec85ba20441de40a354a55d50f1e4b
parent    841c4f04e610142aec773bb7986cd1eed283526d (diff)
parent    2c34bb11be79060e9e85989b58fb8e68df2aef22 (diff)
Merge branch 'users/mkubecek/SLE15/for-next' into SLE15
Pull TCP fixes from Michal Kubecek.

suse-commit: 9a98ebe1ad63b7939e515de5b3d8fbe958869379
-rw-r--r--  include/net/tcp.h     | 14
-rw-r--r--  net/ipv4/tcp_output.c | 11
-rw-r--r--  net/sctp/stream.c     | 10
3 files changed, 33 insertions(+), 2 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d5ad52583018..0f24b39017fa 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1993,4 +1993,18 @@ static inline void tcp_listendrop(const struct sock *sk)
enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
+static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
+{
+ struct sk_buff *skb = tcp_write_queue_head(sk);
+
+ return skb == tcp_send_head(sk) ? NULL : skb;
+}
+
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+ struct sk_buff *skb = tcp_send_head(sk);
+
+ return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
+}
+
#endif /* _TCP_H */
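
A note on the two helpers added above: in this SLE15 (pre-queue-split) kernel, sent-but-unacked skbs and not-yet-sent skbs live on the same write queue, and tcp_send_head() points at the first unsent skb. The following stand-alone user-space sketch models that layout to illustrate what tcp_rtx_queue_head() and tcp_rtx_queue_tail() return; all names and values in it are illustrative, not kernel APIs.

#include <stdio.h>

struct model_skb { int id; };

#define QLEN 5

int main(void)
{
	struct model_skb queue[QLEN] = { {1}, {2}, {3}, {4}, {5} };
	int send_head = 3;	/* index of the first not-yet-sent skb */

	/* tcp_rtx_queue_head(): the queue head, unless nothing was sent
	 * yet (head == send head), in which case the rtx queue is empty. */
	struct model_skb *rtx_head = (send_head == 0) ? NULL : &queue[0];

	/* tcp_rtx_queue_tail(): the skb just before the send head, or the
	 * overall queue tail when everything was already transmitted
	 * (NULL here when nothing was sent, to keep the model defined). */
	struct model_skb *rtx_tail = (send_head == 0) ? NULL :
		(send_head < QLEN) ? &queue[send_head - 1] : &queue[QLEN - 1];

	printf("rtx head: skb %d\n", rtx_head ? rtx_head->id : -1);
	printf("rtx tail: skb %d\n", rtx_tail ? rtx_tail->id : -1);
	return 0;
}
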
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3436b32a9bfa..e755f09dc104 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1263,6 +1263,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *buff;
int nsize, old_factor;
+ long limit;
int nlen;
u8 flags;
@@ -1273,7 +1274,15 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
if (nsize < 0)
nsize = 0;
- if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf + 0x20000))
+ /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
+ * We need some allowance to not penalize applications setting small
+ * SO_SNDBUF values.
+ * Also allow first and last skb in retransmit queue to be split.
+ */
+ limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
+ if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
+ skb != tcp_rtx_queue_head(sk) &&
+ skb != tcp_rtx_queue_tail(sk)))
return -ENOMEM;
if (skb_unclone(skb, gfp))
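
For context on the hunk above: the old check refused the split as soon as half of sk_wmem_queued exceeded sk_sndbuf + 128 KiB, which could fail retransmits on sockets configured with a small SO_SNDBUF. The sketch below is a stand-alone user-space model of the relaxed check; the per-skb truesize overhead is an assumed example value (the kernel computes it via SKB_TRUESIZE(GSO_MAX_SIZE)), so treat it as illustrative only.

#include <stdio.h>
#include <stdbool.h>

#define GSO_MAX_SIZE          65536L	/* upstream value */
#define SKB_OVERHEAD_EST        512L	/* assumed truesize overhead per skb */
#define SKB_TRUESIZE_EST(len)	((len) + SKB_OVERHEAD_EST)

/* true means "refuse the split" (-ENOMEM in tcp_fragment()). */
static bool refuse_fragment(long wmem_queued, long sndbuf,
			    bool is_rtx_head, bool is_rtx_tail)
{
	/* New limit: two full-size GSO skbs of slack on top of sk_sndbuf,
	 * and never refuse for the first or last skb of the rtx queue. */
	long limit = sndbuf + 2 * SKB_TRUESIZE_EST(GSO_MAX_SIZE);

	return (wmem_queued >> 1) > limit && !is_rtx_head && !is_rtx_tail;
}

int main(void)
{
	/* Small SO_SNDBUF (16 KiB) with the queue overshot by one large
	 * skb: the old check (limit = sndbuf + 0x20000) would refuse,
	 * the new one still allows the split. */
	printf("refuse: %d\n", refuse_fragment(296000, 16384, false, false));
	return 0;
}
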
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 1dd4c88565e2..cda97cd5ecbb 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -40,6 +40,8 @@ int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp)
struct sctp_stream *stream;
int i;
+ gfp |= __GFP_NOWARN;
+
stream = kzalloc(sizeof(*stream), gfp);
if (!stream)
return -ENOMEM;
@@ -63,9 +65,14 @@ int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp)
struct sctp_stream *stream = asoc->stream;
int i;
+ gfp |= __GFP_NOWARN;
+
/* Initial stream->out size may be very big, so free it and alloc
- * a new one with new outcnt to save memory.
+ * a new one with new outcnt to save memory if needed.
*/
+ if (asoc->c.sinit_num_ostreams == stream->outcnt)
+ goto in;
+
kfree(stream->out);
stream->outcnt = asoc->c.sinit_num_ostreams;
stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
@@ -75,6 +82,7 @@ int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp)
for (i = 0; i < stream->outcnt; i++)
stream->out[i].state = SCTP_STREAM_OPEN;
+in:
stream->incnt = asoc->c.sinit_max_instreams;
stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp);
if (!stream->in) {
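
The sctp_stream_init() change shown above boils down to reusing the existing stream->out array when the requested stream count already matches the current allocation, rather than freeing and reallocating it. Below is a stand-alone user-space sketch of that reuse pattern; the names are illustrative and not the kernel's.

#include <stdlib.h>

struct out_stream { int state; };

struct stream_model {
	struct out_stream *out;
	size_t outcnt;
};

/* Resize the outgoing stream array, reusing it when the size is unchanged.
 * Returns 0 on success, -1 on allocation failure. */
static int stream_model_resize(struct stream_model *s, size_t requested)
{
	if (requested == s->outcnt)
		return 0;	/* same count: keep the current array */

	free(s->out);
	s->out = calloc(requested, sizeof(*s->out));
	if (!s->out) {
		s->outcnt = 0;
		return -1;
	}
	s->outcnt = requested;
	return 0;
}

int main(void)
{
	struct stream_model s = { 0 };

	return stream_model_resize(&s, 4);	/* first call allocates */
}
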