* [PATCH v5 4/5] sctp: Make sctp_enqueue_event tak an skb list.
From: David Miller @ 2019-04-10 21:20 UTC
To: netdev

Pass an skb list, instead of a single event. Then everything trickles
down and we always have the events on a non-empty list.

We then need a list-creating stub to place into .enqueue_event for
sctp_stream_interleave_1.
Signed-off-by: David S. Miller <davem@davemloft.net>
---
net/sctp/stream_interleave.c | 49 ++++++++++++++++++++++++++----------
net/sctp/ulpqueue.c          |  5 ++--
2 files changed, 39 insertions(+), 15 deletions(-)
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index a6bc42121e35..2c50627bdfdb 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -484,14 +484,15 @@ static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
}
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
+ struct sk_buff_head *skb_list)
{
- struct sk_buff *skb = sctp_event2skb(event);
struct sock *sk = ulpq->asoc->base.sk;
struct sctp_sock *sp = sctp_sk(sk);
- struct sk_buff_head *skb_list;
+ struct sctp_ulpevent *event;
+ struct sk_buff *skb;
- skb_list = (struct sk_buff_head *)skb->prev;
+ skb = __skb_peek(skb_list);
+ event = sctp_skb2event(skb);
if (sk->sk_shutdown & RCV_SHUTDOWN &&
(sk->sk_shutdown & SEND_SHUTDOWN ||
@@ -858,19 +859,24 @@ static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
event = sctp_intl_reasm(ulpq, event);
- if (event && event->msg_flags & MSG_EOR) {
+ if (event) {
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
- event = sctp_intl_order(ulpq, event);
+ if (event->msg_flags & MSG_EOR)
+ event = sctp_intl_order(ulpq, event);
}
} else {
event = sctp_intl_reasm_uo(ulpq, event);
+ if (event) {
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ }
}
if (event) {
event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
- sctp_enqueue_event(ulpq, event);
+ sctp_enqueue_event(ulpq, &temp);
}
return event_eor;
@@ -944,20 +950,27 @@ static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
struct sctp_ulpevent *event;
+ struct sk_buff_head temp;
if (!skb_queue_empty(&ulpq->reasm)) {
do {
event = sctp_intl_retrieve_first(ulpq);
- if (event)
- sctp_enqueue_event(ulpq, event);
+ if (event) {
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ sctp_enqueue_event(ulpq, &temp);
+ }
} while (event);
}
if (!skb_queue_empty(&ulpq->reasm_uo)) {
do {
event = sctp_intl_retrieve_first_uo(ulpq);
- if (event)
- sctp_enqueue_event(ulpq, event);
+ if (event) {
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ sctp_enqueue_event(ulpq, &temp);
+ }
} while (event);
}
}
@@ -1059,7 +1072,7 @@ static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
if (event) {
sctp_intl_retrieve_ordered(ulpq, event);
- sctp_enqueue_event(ulpq, event);
+ sctp_enqueue_event(ulpq, &temp);
}
}
@@ -1326,6 +1339,16 @@ static struct sctp_stream_interleave sctp_stream_interleave_0 = {
.handle_ftsn = sctp_handle_fwdtsn,
};
+static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff_head temp;
+
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ return sctp_enqueue_event(ulpq, &temp);
+}
+
static struct sctp_stream_interleave sctp_stream_interleave_1 = {
.data_chunk_len = sizeof(struct sctp_idata_chunk),
.ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk),
@@ -1334,7 +1357,7 @@ static struct sctp_stream_interleave sctp_stream_interleave_1 = {
.assign_number = sctp_chunk_assign_mid,
.validate_data = sctp_validate_idata,
.ulpevent_data = sctp_ulpevent_idata,
- .enqueue_event = sctp_enqueue_event,
+ .enqueue_event = do_sctp_enqueue_event,
.renege_events = sctp_renege_events,
.start_pd = sctp_intl_start_pd,
.abort_pd = sctp_intl_abort_pd,
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index b22f558adc49..a698f1a509bf 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -116,12 +116,13 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
event = sctp_ulpq_reasm(ulpq, event);
/* Do ordering if needed. */
- if ((event) && (event->msg_flags & MSG_EOR)) {
+ if (event) {
/* Create a temporary list to collect chunks on. */
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
- event = sctp_ulpq_order(ulpq, event);
+ if (event->msg_flags & MSG_EOR)
+ event = sctp_ulpq_order(ulpq, event);
}
/* Send event to the ULP. 'event' is the sctp_ulpevent for
--
2.20.1
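The pattern is easier to see outside the diff. Below is a minimal,
self-contained userspace C sketch of the calling convention the patch
introduces; event_list, enqueue_event() and enqueue_one() are
illustrative stand-ins for the kernel's sk_buff_head,
sctp_enqueue_event() and the do_sctp_enqueue_event() stub, not real
kernel API:

#include <assert.h>
#include <stdio.h>

struct event {
	struct event *next;
	int msg_flags;
};

/* Stand-in for struct sk_buff_head. */
struct event_list {
	struct event *head;
	struct event *tail;
};

static void list_init(struct event_list *list)
{
	list->head = list->tail = NULL;
}

static void list_queue_tail(struct event_list *list, struct event *ev)
{
	ev->next = NULL;
	if (list->tail)
		list->tail->next = ev;
	else
		list->head = ev;
	list->tail = ev;
}

/* Consumer side: like the reworked sctp_enqueue_event(), peek the head
 * (cf. __skb_peek()) and rely on the caller's guarantee that the list
 * is never empty. */
static int enqueue_event(struct event_list *list)
{
	struct event *ev = list->head;

	assert(ev);	/* the invariant the patch establishes */
	printf("enqueued event, msg_flags=%#x\n", ev->msg_flags);
	return 1;
}

/* Caller side: wrap a single event in a temporary on-stack list,
 * modeled on the do_sctp_enqueue_event() stub the patch adds for
 * sctp_stream_interleave_1. */
static int enqueue_one(struct event *ev)
{
	struct event_list temp;

	list_init(&temp);
	list_queue_tail(&temp, ev);
	return enqueue_event(&temp);
}

int main(void)
{
	struct event ev = { .msg_flags = 0x80 /* stands in for MSG_EOR */ };

	return enqueue_one(&ev) ? 0 : 1;
}

The design point is visible in the first hunk: the old code recovered
the list head by casting skb->prev to a struct sk_buff_head *, which
only works because sk_buff_head shares its layout with the sk_buff
list pointers. Passing the list explicitly drops that aliasing trick
and lets the consumer use a plain __skb_peek().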
* [PATCH v5 4/5] sctp: Make sctp_enqueue_event tak an skb list.
From: David Miller @ 2019-04-11 22:02 UTC
To: netdev; +Cc: marcelo.leitner, lucien.xin, nhorman

Pass an skb list, instead of a single event. Then everything trickles
down and we always have the events on a non-empty list.

We then need a list-creating stub to place into .enqueue_event for
sctp_stream_interleave_1.
Signed-off-by: David S. Miller <davem@davemloft.net>
---
net/sctp/stream_interleave.c | 49 ++++++++++++++++++++++++++----------
net/sctp/ulpqueue.c          |  5 ++--
2 files changed, 39 insertions(+), 15 deletions(-)
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index a6bc42121e35..2c50627bdfdb 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -484,14 +484,15 @@ static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
}
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
+ struct sk_buff_head *skb_list)
{
- struct sk_buff *skb = sctp_event2skb(event);
struct sock *sk = ulpq->asoc->base.sk;
struct sctp_sock *sp = sctp_sk(sk);
- struct sk_buff_head *skb_list;
+ struct sctp_ulpevent *event;
+ struct sk_buff *skb;
- skb_list = (struct sk_buff_head *)skb->prev;
+ skb = __skb_peek(skb_list);
+ event = sctp_skb2event(skb);
if (sk->sk_shutdown & RCV_SHUTDOWN &&
(sk->sk_shutdown & SEND_SHUTDOWN ||
@@ -858,19 +859,24 @@ static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
event = sctp_intl_reasm(ulpq, event);
- if (event && event->msg_flags & MSG_EOR) {
+ if (event) {
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
- event = sctp_intl_order(ulpq, event);
+ if (event->msg_flags & MSG_EOR)
+ event = sctp_intl_order(ulpq, event);
}
} else {
event = sctp_intl_reasm_uo(ulpq, event);
+ if (event) {
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ }
}
if (event) {
event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
- sctp_enqueue_event(ulpq, event);
+ sctp_enqueue_event(ulpq, &temp);
}
return event_eor;
@@ -944,20 +950,27 @@ static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
struct sctp_ulpevent *event;
+ struct sk_buff_head temp;
if (!skb_queue_empty(&ulpq->reasm)) {
do {
event = sctp_intl_retrieve_first(ulpq);
- if (event)
- sctp_enqueue_event(ulpq, event);
+ if (event) {
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ sctp_enqueue_event(ulpq, &temp);
+ }
} while (event);
}
if (!skb_queue_empty(&ulpq->reasm_uo)) {
do {
event = sctp_intl_retrieve_first_uo(ulpq);
- if (event)
- sctp_enqueue_event(ulpq, event);
+ if (event) {
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ sctp_enqueue_event(ulpq, &temp);
+ }
} while (event);
}
}
@@ -1059,7 +1072,7 @@ static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
if (event) {
sctp_intl_retrieve_ordered(ulpq, event);
- sctp_enqueue_event(ulpq, event);
+ sctp_enqueue_event(ulpq, &temp);
}
}
@@ -1326,6 +1339,16 @@ static struct sctp_stream_interleave sctp_stream_interleave_0 = {
.handle_ftsn = sctp_handle_fwdtsn,
};
+static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff_head temp;
+
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ return sctp_enqueue_event(ulpq, &temp);
+}
+
static struct sctp_stream_interleave sctp_stream_interleave_1 = {
.data_chunk_len = sizeof(struct sctp_idata_chunk),
.ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk),
@@ -1334,7 +1357,7 @@ static struct sctp_stream_interleave sctp_stream_interleave_1 = {
.assign_number = sctp_chunk_assign_mid,
.validate_data = sctp_validate_idata,
.ulpevent_data = sctp_ulpevent_idata,
- .enqueue_event = sctp_enqueue_event,
+ .enqueue_event = do_sctp_enqueue_event,
.renege_events = sctp_renege_events,
.start_pd = sctp_intl_start_pd,
.abort_pd = sctp_intl_abort_pd,
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index b22f558adc49..a698f1a509bf 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -116,12 +116,13 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
event = sctp_ulpq_reasm(ulpq, event);
/* Do ordering if needed. */
- if ((event) && (event->msg_flags & MSG_EOR)) {
+ if (event) {
/* Create a temporary list to collect chunks on. */
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
- event = sctp_ulpq_order(ulpq, event);
+ if (event->msg_flags & MSG_EOR)
+ event = sctp_ulpq_order(ulpq, event);
}
/* Send event to the ULP. 'event' is the sctp_ulpevent for
--
2.20.1
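For the ordering path, here is a small runnable userspace model of the
control flow the patch reshapes in sctp_ulpq_tail_data() and
sctp_ulpevent_idata(). The stand-ins reasm() and order() are
pass-throughs for sctp_ulpq_reasm()/sctp_ulpq_order(), and MSG_EOR is
taken from <sys/socket.h>; the point is that the temporary list is now
built whenever reassembly yields an event, and only the ordering step
stays behind the MSG_EOR check:

#include <stdio.h>
#include <sys/socket.h>	/* MSG_EOR */

struct event {
	int msg_flags;
};

/* Pass-through stand-ins for sctp_ulpq_reasm()/sctp_ulpq_order(). */
static struct event *reasm(struct event *ev) { return ev; }
static struct event *order(struct event *ev) { return ev; }

static void tail_data(struct event *ev)
{
	ev = reasm(ev);
	if (ev) {
		/* Post-patch: build the temporary list for every event
		 * that comes back from reassembly ... */
		printf("init temp list, queue event (flags=%#x)\n",
		       ev->msg_flags);
		/* ... and keep only the ordering step conditional. */
		if (ev->msg_flags & MSG_EOR)
			ev = order(ev);
	}
	if (ev)
		printf("hand non-empty list to enqueue_event\n");
}

int main(void)
{
	struct event full = { .msg_flags = MSG_EOR };
	struct event partial = { .msg_flags = 0 };

	tail_data(&full);	/* complete message: ordered, then enqueued */
	tail_data(&partial);	/* partial delivery: enqueued unordered */
	return 0;
}

This is why partial-delivery events (no MSG_EOR yet) now also reach
sctp_enqueue_event() wrapped in a list, so the non-empty-list
invariant holds for every caller.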
* Re: [PATCH v5 4/5] sctp: Make sctp_enqueue_event tak an skb list.
From: Marcelo Ricardo Leitner @ 2019-04-11 22:19 UTC
To: David Miller; +Cc: netdev, lucien.xin, nhorman
On Thu, Apr 11, 2019 at 03:02:04PM -0700, David Miller wrote:
>
> Pass an skb list, instead of a single event. Then everything trickles
> down and we always have the events on a non-empty list.
>
> We then need a list-creating stub to place into .enqueue_event for
> sctp_stream_interleave_1.
>
> Signed-off-by: David S. Miller <davem@davemloft.net>
Typo on summary, s/tak /take /
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>