* [PATCH v5 5/5] sctp: Pass sk_buff_head explicitly to sctp_ulpq_tail_event().
@ 2019-04-11 22:02 David Miller
From: David Miller @ 2019-04-11 22:02 UTC (permalink / raw)
To: netdev; +Cc: marcelo.leitner, lucien.xin, nhorman
Now the SKB list implementation assumption can be removed.
And now that we know that the list head is always non-NULL
we can remove the code blocks dealing with that as well.
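
For reference, a minimal sketch of the calling convention this patch
converges on, pieced together from the callers in the diff below.
deliver_event() is an illustrative wrapper, not a function from the
tree, and the include lines are approximate:

#include <linux/skbuff.h>
#include <net/sctp/ulpqueue.h>
#include <net/sctp/ulpevent.h>

/* Illustrative caller: collect the event's SKB on a local list head
 * and hand that list to sctp_ulpq_tail_event() explicitly, instead of
 * letting the callee infer the list from skb->prev.
 */
static int deliver_event(struct sctp_ulpq *ulpq,
			 struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));

	/* The callee recovers the event from the first SKB via
	 * __skb_peek()/sctp_skb2event() and splices the whole list
	 * onto the destination queue.
	 */
	return sctp_ulpq_tail_event(ulpq, &temp);
}

Passing the list head explicitly makes the single-SKB and multi-SKB
cases identical, which is what lets the "if (skb_list)" branch in
sctp_ulpq_tail_event() go away.
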
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/net/sctp/ulpqueue.h | 2 +-
net/sctp/stream_interleave.c | 2 +-
net/sctp/ulpqueue.c | 29 +++++++++++------------------
3 files changed, 13 insertions(+), 20 deletions(-)
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index bb0ecba3db2b..f4ac7117ff29 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -59,7 +59,7 @@ void sctp_ulpq_free(struct sctp_ulpq *);
int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
/* Add a new event for propagation to the ULP. */
-int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
+int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sk_buff_head *skb_list);
/* Renege previously received chunks. */
void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index 2c50627bdfdb..25e0b7e5189c 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -1317,7 +1317,7 @@ static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *even
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
- return sctp_ulpq_tail_event(ulpq, event);
+ return sctp_ulpq_tail_event(ulpq, &temp);
}
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index a698f1a509bf..7cdc3623fa35 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -130,7 +130,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
*/
if (event) {
event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
- sctp_ulpq_tail_event(ulpq, event);
+ sctp_ulpq_tail_event(ulpq, &temp);
}
return event_eor;
@@ -194,18 +194,17 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}
-/* If the SKB of 'event' is on a list, it is the first such member
- * of that list.
- */
-int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
+int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
{
struct sock *sk = ulpq->asoc->base.sk;
struct sctp_sock *sp = sctp_sk(sk);
- struct sk_buff_head *queue, *skb_list;
- struct sk_buff *skb = sctp_event2skb(event);
+ struct sctp_ulpevent *event;
+ struct sk_buff_head *queue;
+ struct sk_buff *skb;
int clear_pd = 0;
- skb_list = (struct sk_buff_head *) skb->prev;
+ skb = __skb_peek(skb_list);
+ event = sctp_skb2event(skb);
/* If the socket is just going to throw this away, do not
* even try to deliver it.
@@ -258,13 +257,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
}
}
- /* If we are harvesting multiple skbs they will be
- * collected on a list.
- */
- if (skb_list)
- skb_queue_splice_tail_init(skb_list, queue);
- else
- __skb_queue_tail(queue, skb);
+ skb_queue_splice_tail_init(skb_list, queue);
/* Did we just complete partial delivery and need to get
* rolling again? Move pending data to the receive
@@ -757,7 +750,7 @@ static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
* sctp_ulpevent for very first SKB on the temp' list.
*/
if (event)
- sctp_ulpq_tail_event(ulpq, event);
+ sctp_ulpq_tail_event(ulpq, &temp);
}
}
@@ -957,7 +950,7 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
if (event) {
/* see if we have more ordered that we can deliver */
sctp_ulpq_retrieve_ordered(ulpq, event);
- sctp_ulpq_tail_event(ulpq, event);
+ sctp_ulpq_tail_event(ulpq, &temp);
}
}
@@ -1087,7 +1080,7 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
- sctp_ulpq_tail_event(ulpq, event);
+ sctp_ulpq_tail_event(ulpq, &temp);
sctp_ulpq_set_pd(ulpq);
return;
}
--
2.20.1
* Re: [PATCH v5 5/5] sctp: Pass sk_buff_head explicitly to sctp_ulpq_tail_event().
From: Marcelo Ricardo Leitner @ 2019-04-11 22:19 UTC (permalink / raw)
To: David Miller; +Cc: netdev, lucien.xin, nhorman
On Thu, Apr 11, 2019 at 03:02:07PM -0700, David Miller wrote:
>
> Now the SKB list implementation assumption can be removed.
>
> And now that we know that the list head is always non-NULL
> we can remove the code blocks dealing with that as well.
>
> Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Thanks!