1use super::{
2 store, Buffer, Codec, Config, Counts, Frame, Prioritize, Prioritized, Store, Stream, StreamId,
3 StreamIdOverflow, WindowSize,
4};
5use crate::codec::UserError;
6use crate::frame::{self, Reason};
7use crate::proto::{self, Error, Initiator};
8
9use bytes::Buf;
10use tokio::io::AsyncWrite;
11
12use std::cmp::Ordering;
13use std::io;
14use std::task::{Context, Poll, Waker};
15
#[derive(Debug)]
pub(super) struct Send {
    /// Stream ID to assign to the next locally initiated stream, or the
    /// overflow error once the ID space has been exhausted (see
    /// `ensure_next_stream_id` / `open`).
    next_stream_id: Result<StreamId, StreamIdOverflow>,

    /// Highest stream ID the peer will still process. Starts at
    /// `StreamId::MAX` and is lowered to the peer's `last_stream_id` when a
    /// GOAWAY frame is received (see `recv_go_away`).
    max_stream_id: StreamId,

    /// Initial send-window size for new streams, as dictated by the remote
    /// peer's settings (`config.remote_init_window_sz`, later updated by
    /// `apply_remote_settings`).
    init_window_sz: WindowSize,

    /// Prioritization / frame-queueing layer; owns the pending-send queues
    /// and connection-level capacity bookkeeping.
    prioritize: Prioritize,

    /// Whether the peer allows server push (SETTINGS_ENABLE_PUSH). Defaults
    /// to `true` until a remote settings frame says otherwise.
    is_push_enabled: bool,

    /// Whether the peer advertised support for the extended CONNECT
    /// protocol (SETTINGS_ENABLE_CONNECT_PROTOCOL).
    is_extended_connect_protocol_enabled: bool,
}
42
/// Mode passed to `Send::poll_reset` (forwarded to
/// `stream.state.ensure_reason`) describing what the caller is currently
/// doing, which determines how an absent reset reason is treated.
#[derive(Debug)]
pub(crate) enum PollReset {
    /// The caller is still waiting for response headers.
    AwaitingHeaders,
    /// The caller is actively streaming the request/response body.
    Streaming,
}
49
impl Send {
    /// Creates the send-side state machine from the connection `Config`.
    ///
    /// Push is assumed enabled and extended CONNECT disabled until the
    /// remote's SETTINGS frame says otherwise (`apply_remote_settings`).
    pub fn new(config: &Config) -> Self {
        Send {
            init_window_sz: config.remote_init_window_sz,
            max_stream_id: StreamId::MAX,
            next_stream_id: Ok(config.local_next_stream_id),
            prioritize: Prioritize::new(config),
            is_push_enabled: true,
            is_extended_connect_protocol_enabled: false,
        }
    }

    /// Returns the current initial send-window size for new streams.
    pub fn init_window_sz(&self) -> WindowSize {
        self.init_window_sz
    }

    /// Allocates the next locally initiated stream ID and advances the
    /// counter. Fails with `OverflowedStreamId` once the ID space is spent.
    pub fn open(&mut self) -> Result<StreamId, UserError> {
        let stream_id = self.ensure_next_stream_id()?;
        self.next_stream_id = stream_id.next_id();
        Ok(stream_id)
    }

    /// Reserves a locally initiated stream ID.
    ///
    /// NOTE(review): currently identical to `open`; kept as a separate entry
    /// point — presumably for reserving a stream for a local PUSH_PROMISE.
    /// Confirm against callers before merging the two.
    pub fn reserve_local(&mut self) -> Result<StreamId, UserError> {
        let stream_id = self.ensure_next_stream_id()?;
        self.next_stream_id = stream_id.next_id();
        Ok(stream_id)
    }

    /// Rejects connection-specific header fields, which are forbidden in
    /// HTTP/2 (RFC 9113 §8.2.2): `connection`, `transfer-encoding`,
    /// `upgrade`, `keep-alive`, `proxy-connection`, and any `te` value other
    /// than `"trailers"`.
    fn check_headers(fields: &http::HeaderMap) -> Result<(), UserError> {
        if fields.contains_key(http::header::CONNECTION)
            || fields.contains_key(http::header::TRANSFER_ENCODING)
            || fields.contains_key(http::header::UPGRADE)
            || fields.contains_key("keep-alive")
            || fields.contains_key("proxy-connection")
        {
            tracing::debug!("illegal connection-specific headers found");
            return Err(UserError::MalformedHeaders);
        } else if let Some(te) = fields.get(http::header::TE) {
            if te != "trailers" {
                tracing::debug!("illegal connection-specific headers found");
                return Err(UserError::MalformedHeaders);
            }
        }
        Ok(())
    }

    /// Queues a PUSH_PROMISE frame on `stream`.
    ///
    /// Fails if the peer has disabled server push or if the promised
    /// request carries connection-specific headers.
    pub fn send_push_promise<B>(
        &mut self,
        frame: frame::PushPromise,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        if !self.is_push_enabled {
            return Err(UserError::PeerDisabledServerPush);
        }

        tracing::trace!(
            "send_push_promise; frame={:?}; init_window={:?}",
            frame,
            self.init_window_sz
        );

        Self::check_headers(frame.fields())?;

        // Queue the frame for sending
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);

        Ok(())
    }

    /// Queues a HEADERS frame, transitioning the stream's send state to
    /// open (or half-closed if `end_stream` is set).
    pub fn send_headers<B>(
        &mut self,
        frame: frame::Headers,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        tracing::trace!(
            "send_headers; frame={:?}; init_window={:?}",
            frame,
            self.init_window_sz
        );

        Self::check_headers(frame.fields())?;

        let end_stream = frame.is_end_stream();

        // Update the state — errors here (e.g. headers on a closed stream)
        // propagate as a `UserError` before anything is queued.
        stream.state.send_open(end_stream)?;

        // Locally initiated streams (that aren't pending pushes) must go
        // through the pending-open queue so the concurrency limit is
        // respected before the frame hits the wire.
        let mut pending_open = false;
        if counts.peer().is_local_init(frame.stream_id()) && !stream.is_pending_push {
            self.prioritize.queue_open(stream);
            pending_open = true;
        }

        // Queue the frame for sending
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);

        // If the stream is waiting in the pending-open queue, wake the
        // connection task so it can make progress on opening it.
        if pending_open {
            if let Some(task) = task.take() {
                task.wake();
            }
        }

        Ok(())
    }

    /// Queues an informational (1xx) HEADERS frame.
    ///
    /// Unlike `send_headers`, this does NOT advance the stream's send state:
    /// interim responses precede the final response headers. Callers are
    /// expected to have validated the 1xx status and the absence of
    /// `end_stream` already (see the debug_asserts below).
    pub fn send_interim_informational_headers<B>(
        &mut self,
        frame: frame::Headers,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        _counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        tracing::trace!(
            "send_interim_informational_headers; frame={:?}; stream_id={:?}",
            frame,
            frame.stream_id()
        );

        Self::check_headers(frame.fields())?;

        debug_assert!(frame.is_informational(),
            "Frame must be informational (1xx status code) at this point. Validation should happen at the public API boundary.");
        debug_assert!(!frame.is_end_stream(),
            "Informational frames must not have end_stream flag set. Validation should happen at the internal send informational header streams.");

        // Queue the frame for sending
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);

        Ok(())
    }

    /// Transitions the stream to the reset state and, when necessary,
    /// queues an explicit RST_STREAM frame.
    ///
    /// No frame is sent if the stream is already reset, or if it is closed
    /// with an empty send queue (the peer will already see it as done).
    /// Otherwise any pending frames are dropped and all flow-control
    /// capacity held by the stream is reclaimed for the connection.
    pub fn send_reset<B>(
        &mut self,
        reason: Reason,
        initiator: Initiator,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) {
        // Snapshot the state BEFORE `set_reset` mutates it below.
        let is_reset = stream.state.is_reset();
        let is_closed = stream.state.is_closed();
        let is_empty = stream.pending_send.is_empty();
        let stream_id = stream.id;

        tracing::trace!(
            "send_reset(..., reason={:?}, initiator={:?}, stream={:?}, ..., \
             is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \
             state={:?} \
             ",
            reason,
            initiator,
            stream_id,
            is_reset,
            is_closed,
            is_empty,
            stream.state
        );

        if is_reset {
            // Don't double-reset: the first reason wins.
            tracing::trace!(
                " -> not sending RST_STREAM ({:?} is already reset)",
                stream_id
            );
            return;
        }

        // Transition the state to reset no matter what.
        stream.set_reset(reason, initiator);

        // If the stream is already closed and there is nothing left to
        // flush, the peer already considers the stream finished — sending
        // RST_STREAM now would be redundant.
        if is_closed && is_empty {
            tracing::trace!(
                " -> not sending explicit RST_STREAM ({:?} was closed \
                 and send queue was flushed)",
                stream_id
            );
            return;
        }

        // Discard any frames still queued for this stream; they must not be
        // sent after the RST_STREAM.
        self.prioritize.clear_queue(buffer, stream);

        let frame = frame::Reset::new(stream.id, reason);

        tracing::trace!("send_reset -- queueing; frame={:?}", frame);
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);
        // Return the stream's flow-control capacity to the connection.
        self.prioritize.reclaim_all_capacity(stream, counts);
    }

    /// Marks the stream for an implicit (library-scheduled) reset.
    ///
    /// The reset is recorded in the state and the stream is scheduled for
    /// sending; no-op if the stream is already closed. Only reserved
    /// capacity is reclaimed here (contrast with `send_reset`).
    pub fn schedule_implicit_reset(
        &mut self,
        stream: &mut store::Ptr,
        reason: Reason,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) {
        if stream.state.is_closed() {
            // Stream is already fully closed; nothing to reset.
            return;
        }

        stream.state.set_scheduled_reset(reason);

        self.prioritize.reclaim_reserved_capacity(stream, counts);
        self.prioritize.schedule_send(stream, task);
    }

    /// Queues a DATA frame; flow-control accounting is handled entirely by
    /// the prioritization layer.
    pub fn send_data<B>(
        &mut self,
        frame: frame::Data<B>,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError>
    where
        B: Buf,
    {
        self.prioritize
            .send_data(frame, buffer, stream, counts, task)
    }

    /// Queues a trailers HEADERS frame and closes the send half of the
    /// stream. Errors if the stream is not currently streaming data.
    pub fn send_trailers<B>(
        &mut self,
        frame: frame::Headers,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        // Trailers are only valid while the send half is streaming.
        if !stream.state.is_send_streaming() {
            return Err(UserError::UnexpectedFrameType);
        }

        stream.state.send_close();

        tracing::trace!("send_trailers -- queuing; frame={:?}", frame);
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);

        // Release any remaining reserved capacity — nothing further will be
        // sent on this stream.
        self.prioritize.reserve_capacity(0, stream, counts);

        Ok(())
    }

    /// Drives the send side: flushes queued frames into the codec.
    /// Delegates to the prioritization layer.
    pub fn poll_complete<T, B>(
        &mut self,
        cx: &mut Context,
        buffer: &mut Buffer<Frame<B>>,
        store: &mut Store,
        counts: &mut Counts,
        dst: &mut Codec<T, Prioritized<B>>,
    ) -> Poll<io::Result<()>>
    where
        T: AsyncWrite + Unpin,
        B: Buf,
    {
        self.prioritize
            .poll_complete(cx, buffer, store, counts, dst)
    }

    /// Requests `capacity` bytes of send capacity for `stream`.
    pub fn reserve_capacity(
        &mut self,
        capacity: WindowSize,
        stream: &mut store::Ptr,
        counts: &mut Counts,
    ) {
        self.prioritize.reserve_capacity(capacity, stream, counts)
    }

    /// Polls for newly assigned send capacity on `stream`.
    ///
    /// Returns `Ready(None)` once the send half is no longer streaming,
    /// `Pending` (registering the waker) until capacity is incremented, and
    /// `Ready(Some(Ok(capacity)))` when new capacity is available. The
    /// `send_capacity_inc` flag is consumed (reset to false) on success.
    pub fn poll_capacity(
        &mut self,
        cx: &Context,
        stream: &mut store::Ptr,
    ) -> Poll<Option<Result<WindowSize, UserError>>> {
        if !stream.state.is_send_streaming() {
            return Poll::Ready(None);
        }

        if !stream.send_capacity_inc {
            stream.wait_send(cx);
            return Poll::Pending;
        }

        stream.send_capacity_inc = false;

        Poll::Ready(Some(Ok(self.capacity(stream))))
    }

    /// Current effective send capacity of `stream`, bounded by the
    /// prioritizer's max buffer size.
    pub fn capacity(&self, stream: &mut store::Ptr) -> WindowSize {
        stream.capacity(self.prioritize.max_buffer_size())
    }

    /// Polls for the stream's reset reason.
    ///
    /// `mode` tells the state machine whether the caller is awaiting
    /// headers or streaming (see [`PollReset`]); `ensure_reason` may turn
    /// the situation into an error. Registers the waker and returns
    /// `Pending` while no reason is available yet.
    pub fn poll_reset(
        &self,
        cx: &Context,
        stream: &mut Stream,
        mode: PollReset,
    ) -> Poll<Result<Reason, crate::Error>> {
        match stream.state.ensure_reason(mode)? {
            Some(reason) => Poll::Ready(Ok(reason)),
            None => {
                stream.wait_send(cx);
                Poll::Pending
            }
        }
    }

    /// Applies a connection-level WINDOW_UPDATE frame.
    pub fn recv_connection_window_update(
        &mut self,
        frame: frame::WindowUpdate,
        store: &mut Store,
        counts: &mut Counts,
    ) -> Result<(), Reason> {
        self.prioritize
            .recv_connection_window_update(frame.size_increment(), store, counts)
    }

    /// Applies a stream-level window increment of `sz` bytes.
    ///
    /// On overflow (flow-control violation) the stream is reset with
    /// FLOW_CONTROL_ERROR before the error is propagated to the caller.
    pub fn recv_stream_window_update<B>(
        &mut self,
        sz: WindowSize,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), Reason> {
        if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) {
            tracing::debug!("recv_stream_window_update !!; err={:?}", e);

            self.send_reset(
                Reason::FLOW_CONTROL_ERROR,
                Initiator::Library,
                buffer,
                stream,
                counts,
                task,
            );

            return Err(e);
        }

        Ok(())
    }

    /// Handles a received GOAWAY frame, recording the peer's
    /// `last_stream_id` as the new upper bound for streams we may use.
    ///
    /// A GOAWAY whose `last_stream_id` is HIGHER than a previously received
    /// one is a protocol error — the peer may only shrink the window of
    /// streams it will process, never grow it.
    pub(super) fn recv_go_away(&mut self, last_stream_id: StreamId) -> Result<(), Error> {
        if last_stream_id > self.max_stream_id {
            proto_err!(conn:
                "recv_go_away: last_stream_id ({:?}) > max_stream_id ({:?})",
                last_stream_id, self.max_stream_id,
            );
            return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
        }

        self.max_stream_id = last_stream_id;
        Ok(())
    }

    /// Cleans up a stream after a connection-level error: drops its queued
    /// frames and returns its flow-control capacity to the connection.
    pub fn handle_error<B>(
        &mut self,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
    ) {
        self.prioritize.clear_queue(buffer, stream);
        self.prioritize.reclaim_all_capacity(stream, counts);
    }

    /// Applies a remote SETTINGS frame to the send side.
    ///
    /// Handles ENABLE_CONNECT_PROTOCOL, INITIAL_WINDOW_SIZE and
    /// ENABLE_PUSH. A changed initial window size must be applied to every
    /// existing stream (RFC 9113 §6.9.2):
    /// - a DECREASE shrinks each stream's send window, reclaiming any
    ///   capacity assigned beyond the new window back to the connection;
    /// - an INCREASE is delivered to each stream as if it were a
    ///   WINDOW_UPDATE of the difference.
    pub fn apply_remote_settings<B>(
        &mut self,
        settings: &frame::Settings,
        buffer: &mut Buffer<Frame<B>>,
        store: &mut Store,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), Error> {
        if let Some(val) = settings.is_extended_connect_protocol_enabled() {
            self.is_extended_connect_protocol_enabled = val;
        }

        if let Some(val) = settings.initial_window_size() {
            let old_val = self.init_window_sz;
            self.init_window_sz = val;

            match val.cmp(&old_val) {
                Ordering::Less => {
                    // The window shrank by `dec` for every stream.
                    let dec = old_val - val;
                    tracing::trace!("decrementing all windows; dec={}", dec);

                    let mut total_reclaimed = 0;
                    store.try_for_each(|mut stream| {
                        let stream = &mut *stream;

                        // Streams that are done sending and hold no
                        // buffered data have nothing left to account for.
                        if stream.state.is_send_closed() && stream.buffered_send_data == 0 {
                            tracing::trace!(
                                "skipping send-closed stream; id={:?}; flow={:?}",
                                stream.id,
                                stream.send_flow
                            );

                            return Ok(());
                        }

                        tracing::trace!(
                            "decrementing stream window; id={:?}; decr={}; flow={:?}",
                            stream.id,
                            dec,
                            stream.send_flow
                        );

                        // Shrinking may underflow the window — treated as a
                        // connection error (library GOAWAY).
                        stream
                            .send_flow
                            .dec_send_window(dec)
                            .map_err(proto::Error::library_go_away)?;

                        // If the stream had been ASSIGNED more capacity than
                        // the (now smaller) window allows, take the excess
                        // back so it can be redistributed at the connection
                        // level below.
                        let window_size = stream.send_flow.window_size();
                        let available = stream.send_flow.available().as_size();
                        let reclaimed = if available > window_size {
                            let reclaim = available - window_size;
                            stream
                                .send_flow
                                .claim_capacity(reclaim)
                                .map_err(proto::Error::library_go_away)?;
                            total_reclaimed += reclaim;
                            reclaim
                        } else {
                            0
                        };

                        tracing::trace!(
                            "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}",
                            stream.id,
                            dec,
                            reclaimed,
                            stream.send_flow
                        );

                        Ok::<_, proto::Error>(())
                    })?;

                    // Hand the reclaimed capacity back to the connection
                    // pool for reassignment.
                    self.prioritize
                        .assign_connection_capacity(total_reclaimed, store, counts);
                }
                Ordering::Greater => {
                    let inc = val - old_val;

                    // Grow every stream's window as if a per-stream
                    // WINDOW_UPDATE of `inc` had been received.
                    store.try_for_each(|mut stream| {
                        self.recv_stream_window_update(inc, buffer, &mut stream, counts, task)
                            .map_err(Error::library_go_away)
                    })?;
                }
                Ordering::Equal => (),
            }
        }

        if let Some(val) = settings.is_push_enabled() {
            self.is_push_enabled = val
        }

        Ok(())
    }

    /// Clears all pending send queues (capacity, send, open) — used during
    /// connection teardown.
    pub fn clear_queues(&mut self, store: &mut Store, counts: &mut Counts) {
        self.prioritize.clear_pending_capacity(store, counts);
        self.prioritize.clear_pending_send(store, counts);
        self.prioritize.clear_pending_open(store, counts);
    }

    /// Verifies that `id` refers to a stream that has actually been opened
    /// locally (i.e. is below the next ID to be assigned). Referencing a
    /// still-idle local stream is a PROTOCOL_ERROR. If the ID space has
    /// overflowed, every ID is treated as potentially used.
    pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> {
        if let Ok(next) = self.next_stream_id {
            if id >= next {
                return Err(Reason::PROTOCOL_ERROR);
            }
        }
        Ok(())
    }

    /// Returns the next local stream ID, mapping stream-ID-space exhaustion
    /// to `UserError::OverflowedStreamId`.
    pub fn ensure_next_stream_id(&self) -> Result<StreamId, UserError> {
        self.next_stream_id
            .map_err(|_| UserError::OverflowedStreamId)
    }

    /// Whether `id` could have been created by this endpoint: true for any
    /// ID below the next one to be assigned (or always true once the ID
    /// space has overflowed). `id` must be on our side of the ID space
    /// (same parity), as asserted.
    pub fn may_have_created_stream(&self, id: StreamId) -> bool {
        if let Ok(next_id) = self.next_stream_id {
            // Peer::is_local_init should have been called beforehand
            debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated(),);
            id < next_id
        } else {
            true
        }
    }

    /// Bumps `next_stream_id` past `id` if `id` is at or beyond it —
    /// presumably used when the peer references one of our stream IDs
    /// (e.g. via PUSH_PROMISE) so we never reuse it. No-op after overflow.
    pub(super) fn maybe_reset_next_stream_id(&mut self, id: StreamId) {
        if let Ok(next_id) = self.next_stream_id {
            // !Peer::is_local_init should have been called beforehand
            debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated());
            if id >= next_id {
                self.next_stream_id = id.next_id();
            }
        }
    }

    /// Whether the peer advertised SETTINGS_ENABLE_CONNECT_PROTOCOL.
    pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool {
        self.is_extended_connect_protocol_enabled
    }
}