// h2/proto/streams/send.rs

1use super::{
2    store, Buffer, Codec, Config, Counts, Frame, Prioritize, Prioritized, Store, Stream, StreamId,
3    StreamIdOverflow, WindowSize,
4};
5use crate::codec::UserError;
6use crate::frame::{self, Reason};
7use crate::proto::{self, Error, Initiator};
8
9use bytes::Buf;
10use tokio::io::AsyncWrite;
11
12use std::cmp::Ordering;
13use std::io;
14use std::task::{Context, Poll, Waker};
15
/// Manages state transitions related to outbound frames.
#[derive(Debug)]
pub(super) struct Send {
    /// Stream identifier to use for next initialized stream.
    ///
    /// Held as a `Result` so that once the ID space is exhausted
    /// (`StreamIdOverflow`), every subsequent attempt to open a stream
    /// fails rather than wrapping around.
    next_stream_id: Result<StreamId, StreamIdOverflow>,

    /// Any streams with a higher ID are ignored.
    ///
    /// This starts as MAX, but is lowered when a GOAWAY is received.
    ///
    /// > After sending a GOAWAY frame, the sender can discard frames for
    /// > streams initiated by the receiver with identifiers higher than
    /// > the identified last stream.
    max_stream_id: StreamId,

    /// Initial window size of locally initiated streams
    init_window_sz: WindowSize,

    /// Prioritization layer
    ///
    /// Owns the pending-send / pending-open queues and connection-level
    /// flow-control capacity; all outbound frames are funneled through it.
    prioritize: Prioritize,

    /// Whether the peer permits PUSH_PROMISE frames.
    ///
    /// Starts `true` and is updated from the peer's SETTINGS frame; when
    /// `false`, `send_push_promise` returns `PeerDisabledServerPush`.
    is_push_enabled: bool,

    /// If extended connect protocol is enabled.
    is_extended_connect_protocol_enabled: bool,
}
42
/// A value to detect which public API has called `poll_reset`.
#[derive(Debug)]
pub(crate) enum PollReset {
    /// `poll_reset` was called from an API that is still awaiting headers.
    AwaitingHeaders,
    /// `poll_reset` was called from an API that is streaming a body.
    Streaming,
}
49
impl Send {
    /// Create a new `Send`
    pub fn new(config: &Config) -> Self {
        Send {
            // The peer's advertised initial window size bounds how much data
            // may be sent per stream before WINDOW_UPDATEs arrive.
            init_window_sz: config.remote_init_window_sz,
            // No GOAWAY received yet, so no upper bound on stream IDs.
            max_stream_id: StreamId::MAX,
            next_stream_id: Ok(config.local_next_stream_id),
            prioritize: Prioritize::new(config),
            // Assume push is allowed until the peer's SETTINGS says otherwise
            // (see `apply_remote_settings`).
            is_push_enabled: true,
            is_extended_connect_protocol_enabled: false,
        }
    }

    /// Returns the initial send window size
    pub fn init_window_sz(&self) -> WindowSize {
        self.init_window_sz
    }

    /// Allocates the next locally-initiated stream ID and advances the
    /// counter, or fails with `OverflowedStreamId` if the ID space is
    /// exhausted.
    pub fn open(&mut self) -> Result<StreamId, UserError> {
        let stream_id = self.ensure_next_stream_id()?;
        self.next_stream_id = stream_id.next_id();
        Ok(stream_id)
    }

    /// Reserves a locally-initiated stream ID.
    ///
    /// Mechanically identical to `open`; kept as a separate entry point so
    /// call sites document intent — presumably used when reserving an ID for
    /// a promised (pushed) stream rather than an actively opened one
    /// (NOTE(review): confirm against callers).
    pub fn reserve_local(&mut self) -> Result<StreamId, UserError> {
        let stream_id = self.ensure_next_stream_id()?;
        self.next_stream_id = stream_id.next_id();
        Ok(stream_id)
    }

    /// Rejects connection-specific header fields that are illegal in HTTP/2
    /// (RFC 7540 §8.1.2.2). `TE` is permitted only with the exact value
    /// `trailers`.
    fn check_headers(fields: &http::HeaderMap) -> Result<(), UserError> {
        // 8.1.2.2. Connection-Specific Header Fields
        if fields.contains_key(http::header::CONNECTION)
            || fields.contains_key(http::header::TRANSFER_ENCODING)
            || fields.contains_key(http::header::UPGRADE)
            || fields.contains_key("keep-alive")
            || fields.contains_key("proxy-connection")
        {
            tracing::debug!("illegal connection-specific headers found");
            return Err(UserError::MalformedHeaders);
        } else if let Some(te) = fields.get(http::header::TE) {
            if te != "trailers" {
                tracing::debug!("illegal connection-specific headers found");
                return Err(UserError::MalformedHeaders);
            }
        }
        Ok(())
    }

    /// Queues a PUSH_PROMISE frame for sending.
    ///
    /// Fails if the peer has disabled server push via SETTINGS, or if the
    /// promise carries illegal connection-specific headers.
    pub fn send_push_promise<B>(
        &mut self,
        frame: frame::PushPromise,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        if !self.is_push_enabled {
            return Err(UserError::PeerDisabledServerPush);
        }

        tracing::trace!(
            "send_push_promise; frame={:?}; init_window={:?}",
            frame,
            self.init_window_sz
        );

        Self::check_headers(frame.fields())?;

        // Queue the frame for sending
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);

        Ok(())
    }

    /// Queues a HEADERS frame, transitioning the stream into the send-open
    /// (or half-closed, if `END_STREAM` is set) state.
    pub fn send_headers<B>(
        &mut self,
        frame: frame::Headers,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        tracing::trace!(
            "send_headers; frame={:?}; init_window={:?}",
            frame,
            self.init_window_sz
        );

        Self::check_headers(frame.fields())?;

        let end_stream = frame.is_end_stream();

        // Update the state
        stream.state.send_open(end_stream)?;

        // Locally-initiated streams (that are not pending pushes) go through
        // the open queue so concurrency limits can be enforced before the
        // HEADERS actually hits the wire.
        let mut pending_open = false;
        if counts.peer().is_local_init(frame.stream_id()) && !stream.is_pending_push {
            self.prioritize.queue_open(stream);
            pending_open = true;
        }

        // Queue the frame for sending
        //
        // This call expects that, since new streams are in the open queue, new
        // streams won't be pushed on pending_send.
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);

        // Need to notify the connection when pushing onto pending_open since
        // queue_frame only notifies for pending_send.
        if pending_open {
            if let Some(task) = task.take() {
                task.wake();
            }
        }

        Ok(())
    }

    /// Send interim informational headers (1xx responses) without changing stream state.
    /// This allows multiple interim informational responses to be sent before the final response.
    pub fn send_interim_informational_headers<B>(
        &mut self,
        frame: frame::Headers,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        _counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        tracing::trace!(
            "send_interim_informational_headers; frame={:?}; stream_id={:?}",
            frame,
            frame.stream_id()
        );

        // Validate headers
        Self::check_headers(frame.fields())?;

        debug_assert!(frame.is_informational(),
            "Frame must be informational (1xx status code) at this point. Validation should happen at the public API boundary.");
        debug_assert!(!frame.is_end_stream(),
            "Informational frames must not have end_stream flag set. Validation should happen at the internal send informational header streams.");

        // Queue the frame for sending WITHOUT changing stream state
        // This is the key difference from send_headers - we don't call stream.state.send_open()
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);

        Ok(())
    }

    /// Send an explicit RST_STREAM frame
    pub fn send_reset<B>(
        &mut self,
        reason: Reason,
        initiator: Initiator,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) {
        // Snapshot state before mutating it below; these drive both the
        // trace output and the early-return decisions.
        let is_reset = stream.state.is_reset();
        let is_closed = stream.state.is_closed();
        let is_empty = stream.pending_send.is_empty();
        let stream_id = stream.id;

        tracing::trace!(
            "send_reset(..., reason={:?}, initiator={:?}, stream={:?}, ..., \
             is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \
             state={:?} \
             ",
            reason,
            initiator,
            stream_id,
            is_reset,
            is_closed,
            is_empty,
            stream.state
        );

        if is_reset {
            // Don't double reset
            tracing::trace!(
                " -> not sending RST_STREAM ({:?} is already reset)",
                stream_id
            );
            return;
        }

        // Transition the state to reset no matter what.
        stream.set_reset(reason, initiator);

        // If closed AND the send queue is flushed, then the stream cannot be
        // reset explicitly, either. Implicit resets can still be queued.
        if is_closed && is_empty {
            tracing::trace!(
                " -> not sending explicit RST_STREAM ({:?} was closed \
                 and send queue was flushed)",
                stream_id
            );
            return;
        }

        // Clear all pending outbound frames.
        // Note that we don't call `self.recv_err` because we want to enqueue
        // the reset frame before transitioning the stream inside
        // `reclaim_all_capacity`.
        self.prioritize.clear_queue(buffer, stream);

        let frame = frame::Reset::new(stream.id, reason);

        tracing::trace!("send_reset -- queueing; frame={:?}", frame);
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);
        // Return any flow-control capacity the stream held to the connection
        // pool; done last so the RST frame is already queued (see note above).
        self.prioritize.reclaim_all_capacity(stream, counts);
    }

    /// Marks the stream to be reset later (a "scheduled" reset) rather than
    /// queueing an RST_STREAM frame immediately. Reserved capacity is
    /// reclaimed and the stream is scheduled for sending so the reset gets
    /// flushed. No-op if the stream is already closed.
    pub fn schedule_implicit_reset(
        &mut self,
        stream: &mut store::Ptr,
        reason: Reason,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) {
        if stream.state.is_closed() {
            // Stream is already closed, nothing more to do
            return;
        }

        stream.state.set_scheduled_reset(reason);

        self.prioritize.reclaim_reserved_capacity(stream, counts);
        self.prioritize.schedule_send(stream, task);
    }

    /// Queues a DATA frame; flow-control accounting is handled by the
    /// prioritization layer.
    pub fn send_data<B>(
        &mut self,
        frame: frame::Data<B>,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError>
    where
        B: Buf,
    {
        self.prioritize
            .send_data(frame, buffer, stream, counts, task)
    }

    /// Queues a trailers HEADERS frame and closes the send half of the
    /// stream. Errors if the stream is not currently streaming a body.
    pub fn send_trailers<B>(
        &mut self,
        frame: frame::Headers,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        // TODO: Should this logic be moved into state.rs?
        if !stream.state.is_send_streaming() {
            return Err(UserError::UnexpectedFrameType);
        }

        stream.state.send_close();

        tracing::trace!("send_trailers -- queuing; frame={:?}", frame);
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);

        // Release any excess capacity
        self.prioritize.reserve_capacity(0, stream, counts);

        Ok(())
    }

    /// Drives queued frames out to the codec; delegates to the
    /// prioritization layer.
    pub fn poll_complete<T, B>(
        &mut self,
        cx: &mut Context,
        buffer: &mut Buffer<Frame<B>>,
        store: &mut Store,
        counts: &mut Counts,
        dst: &mut Codec<T, Prioritized<B>>,
    ) -> Poll<io::Result<()>>
    where
        T: AsyncWrite + Unpin,
        B: Buf,
    {
        self.prioritize
            .poll_complete(cx, buffer, store, counts, dst)
    }

    /// Request capacity to send data
    pub fn reserve_capacity(
        &mut self,
        capacity: WindowSize,
        stream: &mut store::Ptr,
        counts: &mut Counts,
    ) {
        self.prioritize.reserve_capacity(capacity, stream, counts)
    }

    /// Polls for newly-available send capacity on `stream`.
    ///
    /// Returns `Ready(None)` once the send half is no longer streaming,
    /// `Pending` (registering the waker) until capacity increases, and
    /// `Ready(Some(Ok(capacity)))` when it has.
    pub fn poll_capacity(
        &mut self,
        cx: &Context,
        stream: &mut store::Ptr,
    ) -> Poll<Option<Result<WindowSize, UserError>>> {
        if !stream.state.is_send_streaming() {
            return Poll::Ready(None);
        }

        if !stream.send_capacity_inc {
            stream.wait_send(cx);
            return Poll::Pending;
        }

        // Consume the "capacity increased" edge so the next poll blocks
        // until another increase arrives.
        stream.send_capacity_inc = false;

        Poll::Ready(Some(Ok(self.capacity(stream))))
    }

    /// Current available stream send capacity
    pub fn capacity(&self, stream: &mut store::Ptr) -> WindowSize {
        stream.capacity(self.prioritize.max_buffer_size())
    }

    /// Resolves once the stream has a reset reason; registers the waker and
    /// returns `Pending` otherwise. `mode` records which public API polled.
    pub fn poll_reset(
        &self,
        cx: &Context,
        stream: &mut Stream,
        mode: PollReset,
    ) -> Poll<Result<Reason, crate::Error>> {
        match stream.state.ensure_reason(mode)? {
            Some(reason) => Poll::Ready(Ok(reason)),
            None => {
                stream.wait_send(cx);
                Poll::Pending
            }
        }
    }

    /// Applies a connection-level WINDOW_UPDATE from the peer.
    pub fn recv_connection_window_update(
        &mut self,
        frame: frame::WindowUpdate,
        store: &mut Store,
        counts: &mut Counts,
    ) -> Result<(), Reason> {
        self.prioritize
            .recv_connection_window_update(frame.size_increment(), store, counts)
    }

    /// Applies a stream-level window increment. On flow-control overflow the
    /// stream is reset with FLOW_CONTROL_ERROR and the error is propagated.
    pub fn recv_stream_window_update<B>(
        &mut self,
        sz: WindowSize,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), Reason> {
        if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) {
            tracing::debug!("recv_stream_window_update !!; err={:?}", e);

            self.send_reset(
                Reason::FLOW_CONTROL_ERROR,
                Initiator::Library,
                buffer,
                stream,
                counts,
                task,
            );

            return Err(e);
        }

        Ok(())
    }

    /// Handles a GOAWAY from the peer: `max_stream_id` is lowered to the
    /// advertised last-stream-id. The peer may never *raise* that value
    /// across successive GOAWAYs, so an increase is a protocol error.
    pub(super) fn recv_go_away(&mut self, last_stream_id: StreamId) -> Result<(), Error> {
        if last_stream_id > self.max_stream_id {
            // The remote endpoint sent a `GOAWAY` frame indicating a stream
            // that we never sent, or that we have already terminated on account
            // of previous `GOAWAY` frame. In either case, that is illegal.
            // (When sending multiple `GOAWAY`s, "Endpoints MUST NOT increase
            // the value they send in the last stream identifier, since the
            // peers might already have retried unprocessed requests on another
            // connection.")
            proto_err!(conn:
                "recv_go_away: last_stream_id ({:?}) > max_stream_id ({:?})",
                last_stream_id, self.max_stream_id,
            );
            return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
        }

        self.max_stream_id = last_stream_id;
        Ok(())
    }

    /// Abandons all pending outbound frames for `stream` and returns its
    /// flow-control capacity to the connection, typically after an error.
    pub fn handle_error<B>(
        &mut self,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
    ) {
        // Clear all pending outbound frames
        self.prioritize.clear_queue(buffer, stream);
        self.prioritize.reclaim_all_capacity(stream, counts);
    }

    /// Applies a SETTINGS frame received from the peer: extended-CONNECT and
    /// push flags are recorded, and a changed initial window size is applied
    /// as a delta to every active stream's send window.
    pub fn apply_remote_settings<B>(
        &mut self,
        settings: &frame::Settings,
        buffer: &mut Buffer<Frame<B>>,
        store: &mut Store,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), Error> {
        if let Some(val) = settings.is_extended_connect_protocol_enabled() {
            self.is_extended_connect_protocol_enabled = val;
        }

        // Applies an update to the remote endpoint's initial window size.
        //
        // Per RFC 7540 §6.9.2:
        //
        // In addition to changing the flow-control window for streams that are
        // not yet active, a SETTINGS frame can alter the initial flow-control
        // window size for streams with active flow-control windows (that is,
        // streams in the "open" or "half-closed (remote)" state). When the
        // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust
        // the size of all stream flow-control windows that it maintains by the
        // difference between the new value and the old value.
        //
        // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available
        // space in a flow-control window to become negative. A sender MUST
        // track the negative flow-control window and MUST NOT send new
        // flow-controlled frames until it receives WINDOW_UPDATE frames that
        // cause the flow-control window to become positive.
        if let Some(val) = settings.initial_window_size() {
            let old_val = self.init_window_sz;
            self.init_window_sz = val;

            match val.cmp(&old_val) {
                Ordering::Less => {
                    // We must decrease the (remote) window on every open stream.
                    let dec = old_val - val;
                    tracing::trace!("decrementing all windows; dec={}", dec);

                    let mut total_reclaimed = 0;
                    store.try_for_each(|mut stream| {
                        let stream = &mut *stream;

                        // Streams that can no longer send (and hold no
                        // buffered data) don't need their window adjusted.
                        if stream.state.is_send_closed() && stream.buffered_send_data == 0 {
                            tracing::trace!(
                                "skipping send-closed stream; id={:?}; flow={:?}",
                                stream.id,
                                stream.send_flow
                            );

                            return Ok(());
                        }

                        tracing::trace!(
                            "decrementing stream window; id={:?}; decr={}; flow={:?}",
                            stream.id,
                            dec,
                            stream.send_flow
                        );

                        // TODO: this decrement can underflow based on received frames!
                        stream
                            .send_flow
                            .dec_send_window(dec)
                            .map_err(proto::Error::library_go_away)?;

                        // It's possible that decreasing the window causes
                        // `window_size` (the stream-specific window) to fall below
                        // `available` (the portion of the connection-level window
                        // that we have allocated to the stream).
                        // In this case, we should take that excess allocation away
                        // and reassign it to other streams.
                        let window_size = stream.send_flow.window_size();
                        let available = stream.send_flow.available().as_size();
                        let reclaimed = if available > window_size {
                            // Drop down to `window_size`.
                            let reclaim = available - window_size;
                            stream
                                .send_flow
                                .claim_capacity(reclaim)
                                .map_err(proto::Error::library_go_away)?;
                            total_reclaimed += reclaim;
                            reclaim
                        } else {
                            0
                        };

                        tracing::trace!(
                            "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}",
                            stream.id,
                            dec,
                            reclaimed,
                            stream.send_flow
                        );

                        // TODO: Should this notify the producer when the capacity
                        // of a stream is reduced? Maybe it should if the capacity
                        // is reduced to zero, allowing the producer to stop work.

                        Ok::<_, proto::Error>(())
                    })?;

                    // Hand the reclaimed per-stream excess back to the
                    // connection-level pool for redistribution.
                    self.prioritize
                        .assign_connection_capacity(total_reclaimed, store, counts);
                }
                Ordering::Greater => {
                    let inc = val - old_val;

                    // Growing the window is equivalent to each stream
                    // receiving a WINDOW_UPDATE of `inc`.
                    store.try_for_each(|mut stream| {
                        self.recv_stream_window_update(inc, buffer, &mut stream, counts, task)
                            .map_err(Error::library_go_away)
                    })?;
                }
                Ordering::Equal => (),
            }
        }

        if let Some(val) = settings.is_push_enabled() {
            self.is_push_enabled = val
        }

        Ok(())
    }

    /// Drops every pending-capacity / pending-send / pending-open queue
    /// entry, typically during connection teardown.
    pub fn clear_queues(&mut self, store: &mut Store, counts: &mut Counts) {
        self.prioritize.clear_pending_capacity(store, counts);
        self.prioritize.clear_pending_send(store, counts);
        self.prioritize.clear_pending_open(store, counts);
    }

    /// Errors with PROTOCOL_ERROR if `id` refers to a locally-initiated
    /// stream we have not yet opened (i.e. it is still idle on our side).
    pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> {
        if let Ok(next) = self.next_stream_id {
            if id >= next {
                return Err(Reason::PROTOCOL_ERROR);
            }
        }
        // if next_stream_id is overflowed, that's ok.

        Ok(())
    }

    /// Returns the next stream ID without advancing it, mapping exhaustion
    /// of the ID space to `UserError::OverflowedStreamId`.
    pub fn ensure_next_stream_id(&self) -> Result<StreamId, UserError> {
        self.next_stream_id
            .map_err(|_| UserError::OverflowedStreamId)
    }

    /// Whether a stream with this ID could have been created locally —
    /// i.e. `id` is below the next unused ID. If the ID counter has
    /// overflowed, conservatively answers `true`.
    pub fn may_have_created_stream(&self, id: StreamId) -> bool {
        if let Ok(next_id) = self.next_stream_id {
            // Peer::is_local_init should have been called beforehand
            debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated(),);
            id < next_id
        } else {
            true
        }
    }

    /// Bumps `next_stream_id` past `id` if `id` is at or beyond it, so a
    /// stream observed at `id` is never handed out again locally. No-op if
    /// the counter has already overflowed.
    pub(super) fn maybe_reset_next_stream_id(&mut self, id: StreamId) {
        if let Ok(next_id) = self.next_stream_id {
            // Peer::is_local_init should have been called beforehand
            debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated());
            if id >= next_id {
                self.next_stream_id = id.next_id();
            }
        }
    }

    /// Whether the peer has enabled the extended CONNECT protocol
    /// (recorded from its SETTINGS frame).
    pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool {
        self.is_extended_connect_protocol_enabled
    }
}