quinn_proto/config/transport.rs
1use std::{fmt, sync::Arc};
2
3use crate::{Duration, INITIAL_MTU, MAX_UDP_PAYLOAD, VarInt, VarIntBoundsExceeded, congestion};
4
/// Parameters governing the core QUIC state machine
///
/// Default values should be suitable for most internet applications. Applications protocols which
/// forbid remotely-initiated streams should set `max_concurrent_bidi_streams` and
/// `max_concurrent_uni_streams` to zero.
///
/// In some cases, performance or resource requirements can be improved by tuning these values to
/// suit a particular application and/or network connection. In particular, data window sizes can be
/// tuned for a particular expected round trip time, link capacity, and memory availability. Tuning
/// for higher bandwidths and latencies increases worst-case memory consumption, but does not impair
/// performance at lower bandwidths and latencies. The default configuration is tuned for a 100Mbps
/// link with a 100ms round trip time.
pub struct TransportConfig {
    // Stream concurrency and flow-control limits; see the same-named setters for semantics
    pub(crate) max_concurrent_bidi_streams: VarInt,
    pub(crate) max_concurrent_uni_streams: VarInt,
    // Idle timeout in `VarInt`-encoded milliseconds; `None` means never time out
    pub(crate) max_idle_timeout: Option<VarInt>,
    pub(crate) stream_receive_window: VarInt,
    pub(crate) receive_window: VarInt,
    pub(crate) send_window: u64,
    pub(crate) send_fairness: bool,

    // Loss detection and path MTU parameters
    pub(crate) packet_threshold: u32,
    pub(crate) time_threshold: f32,
    pub(crate) initial_rtt: Duration,
    pub(crate) initial_mtu: u16,
    pub(crate) min_mtu: u16,
    pub(crate) mtu_discovery_config: Option<MtuDiscoveryConfig>,
    pub(crate) ack_frequency_config: Option<AckFrequencyConfig>,

    // Miscellaneous connection behavior
    pub(crate) persistent_congestion_threshold: u32,
    pub(crate) keep_alive_interval: Option<Duration>,
    pub(crate) crypto_buffer_size: usize,
    pub(crate) allow_spin: bool,
    pub(crate) datagram_receive_buffer_size: Option<usize>,
    pub(crate) datagram_send_buffer_size: usize,
    // Test-only: forces every packet number to be used (no random skipping), for reproducibility
    #[cfg(test)]
    pub(crate) deterministic_packet_numbers: bool,

    pub(crate) congestion_controller_factory: Arc<dyn congestion::ControllerFactory + Send + Sync>,

    pub(crate) enable_segmentation_offload: bool,
}
47
impl TransportConfig {
    /// Maximum number of incoming bidirectional streams that may be open concurrently
    ///
    /// Must be nonzero for the peer to open any bidirectional streams.
    ///
    /// Worst-case memory use is directly proportional to `max_concurrent_bidi_streams *
    /// stream_receive_window`, with an upper bound proportional to `receive_window`.
    pub fn max_concurrent_bidi_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_bidi_streams = value;
        self
    }

    /// Variant of `max_concurrent_bidi_streams` affecting unidirectional streams
    pub fn max_concurrent_uni_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_uni_streams = value;
        self
    }

    /// Maximum duration of inactivity to accept before timing out the connection.
    ///
    /// The true idle timeout is the minimum of this and the peer's own max idle timeout. `None`
    /// represents an infinite timeout. Defaults to 30 seconds.
    ///
    /// **WARNING**: If a peer or its network path malfunctions or acts maliciously, an infinite
    /// idle timeout can result in permanently hung futures!
    ///
    /// ```
    /// # use std::{convert::TryInto, time::Duration};
    /// # use quinn_proto::{TransportConfig, VarInt, VarIntBoundsExceeded};
    /// # fn main() -> Result<(), VarIntBoundsExceeded> {
    /// let mut config = TransportConfig::default();
    ///
    /// // Set the idle timeout as `VarInt`-encoded milliseconds
    /// config.max_idle_timeout(Some(VarInt::from_u32(10_000).into()));
    ///
    /// // Set the idle timeout as a `Duration`
    /// config.max_idle_timeout(Some(Duration::from_secs(10).try_into()?));
    /// # Ok(())
    /// # }
    /// ```
    pub fn max_idle_timeout(&mut self, value: Option<IdleTimeout>) -> &mut Self {
        // Store the raw `VarInt` milliseconds; `IdleTimeout` exists only to give callers
        // convenient conversions from `VarInt` and `Duration`.
        self.max_idle_timeout = value.map(|t| t.0);
        self
    }

    /// Maximum number of bytes the peer may transmit without acknowledgement on any one stream
    /// before becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Setting this smaller than `receive_window` helps ensure that a single
    /// stream doesn't monopolize receive buffers, which may otherwise occur if the application
    /// chooses not to read from a large stream for a time while still requiring data on other
    /// streams.
    pub fn stream_receive_window(&mut self, value: VarInt) -> &mut Self {
        self.stream_receive_window = value;
        self
    }

    /// Maximum number of bytes the peer may transmit across all streams of a connection before
    /// becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Larger values can be useful to allow maximum throughput within a
    /// stream while another is blocked.
    pub fn receive_window(&mut self, value: VarInt) -> &mut Self {
        self.receive_window = value;
        self
    }

    /// Maximum number of bytes to transmit to a peer without acknowledgment
    ///
    /// Provides an upper bound on memory when communicating with peers that issue large amounts of
    /// flow control credit. Endpoints that wish to handle large numbers of connections robustly
    /// should take care to set this low enough to guarantee memory exhaustion does not occur if
    /// every connection uses the entire window.
    pub fn send_window(&mut self, value: u64) -> &mut Self {
        self.send_window = value;
        self
    }

    /// Whether to implement fair queuing for send streams having the same priority.
    ///
    /// When enabled, connections schedule data from outgoing streams having the same priority in a
    /// round-robin fashion. When disabled, streams are scheduled in the order they are written to.
    ///
    /// Note that this only affects streams with the same priority. Higher priority streams always
    /// take precedence over lower priority streams.
    ///
    /// Disabling fairness can reduce fragmentation and protocol overhead for workloads that use
    /// many small streams.
    pub fn send_fairness(&mut self, value: bool) -> &mut Self {
        self.send_fairness = value;
        self
    }

    /// Maximum reordering in packet number space before FACK style loss detection considers a
    /// packet lost. Should not be less than 3, per RFC5681.
    pub fn packet_threshold(&mut self, value: u32) -> &mut Self {
        self.packet_threshold = value;
        self
    }

    /// Maximum reordering in time space before time based loss detection considers a packet lost,
    /// as a factor of RTT
    pub fn time_threshold(&mut self, value: f32) -> &mut Self {
        self.time_threshold = value;
        self
    }

    /// The RTT used before an RTT sample is taken
    pub fn initial_rtt(&mut self, value: Duration) -> &mut Self {
        self.initial_rtt = value;
        self
    }

    /// The initial value to be used as the maximum UDP payload size before running MTU discovery
    /// (see [`TransportConfig::mtu_discovery_config`]).
    ///
    /// Must be at least 1200, which is the default, and known to be safe for typical internet
    /// applications. Larger values are more efficient, but increase the risk of packet loss due to
    /// exceeding the network path's IP MTU. If the provided value is higher than what the network
    /// path actually supports, packet loss will eventually trigger black hole detection and bring
    /// it down to [`TransportConfig::min_mtu`].
    pub fn initial_mtu(&mut self, value: u16) -> &mut Self {
        // Silently clamp to the protocol-minimum payload size rather than erroring
        self.initial_mtu = value.max(INITIAL_MTU);
        self
    }

    // Effective starting MTU: `initial_mtu` and `min_mtu` are set independently, so ensure
    // the starting value never falls below the guaranteed floor.
    pub(crate) fn get_initial_mtu(&self) -> u16 {
        self.initial_mtu.max(self.min_mtu)
    }

    /// The maximum UDP payload size guaranteed to be supported by the network.
    ///
    /// Must be at least 1200, which is the default, and lower than or equal to
    /// [`TransportConfig::initial_mtu`].
    ///
    /// Real-world MTUs can vary according to ISP, VPN, and properties of intermediate network links
    /// outside of either endpoint's control. Extreme care should be used when raising this value
    /// outside of private networks where these factors are fully controlled. If the provided value
    /// is higher than what the network path actually supports, the result will be unpredictable and
    /// catastrophic packet loss, without a possibility of repair. Prefer
    /// [`TransportConfig::initial_mtu`] together with
    /// [`TransportConfig::mtu_discovery_config`] to set a maximum UDP payload size that robustly
    /// adapts to the network.
    pub fn min_mtu(&mut self, value: u16) -> &mut Self {
        // Clamp to the protocol-minimum payload size; the `<= initial_mtu` relationship is
        // reconciled lazily by `get_initial_mtu`.
        self.min_mtu = value.max(INITIAL_MTU);
        self
    }

    /// Specifies the MTU discovery config (see [`MtuDiscoveryConfig`] for details).
    ///
    /// Enabled by default.
    pub fn mtu_discovery_config(&mut self, value: Option<MtuDiscoveryConfig>) -> &mut Self {
        self.mtu_discovery_config = value;
        self
    }

    /// Specifies the ACK frequency config (see [`AckFrequencyConfig`] for details)
    ///
    /// The provided configuration will be ignored if the peer does not support the acknowledgement
    /// frequency QUIC extension.
    ///
    /// Defaults to `None`, which disables controlling the peer's acknowledgement frequency. Even
    /// if set to `None`, the local side still supports the acknowledgement frequency QUIC
    /// extension and may use it in other ways.
    pub fn ack_frequency_config(&mut self, value: Option<AckFrequencyConfig>) -> &mut Self {
        self.ack_frequency_config = value;
        self
    }

    /// Number of consecutive PTOs after which network is considered to be experiencing persistent congestion.
    pub fn persistent_congestion_threshold(&mut self, value: u32) -> &mut Self {
        self.persistent_congestion_threshold = value;
        self
    }

    /// Period of inactivity before sending a keep-alive packet
    ///
    /// Keep-alive packets prevent an inactive but otherwise healthy connection from timing out.
    ///
    /// `None` to disable, which is the default. Only one side of any given connection needs keep-alive
    /// enabled for the connection to be preserved. Must be set lower than the idle_timeout of both
    /// peers to be effective.
    pub fn keep_alive_interval(&mut self, value: Option<Duration>) -> &mut Self {
        self.keep_alive_interval = value;
        self
    }

    /// Maximum quantity of out-of-order crypto layer data to buffer
    pub fn crypto_buffer_size(&mut self, value: usize) -> &mut Self {
        self.crypto_buffer_size = value;
        self
    }

    /// Whether the implementation is permitted to set the spin bit on this connection
    ///
    /// This allows passive observers to easily judge the round trip time of a connection, which can
    /// be useful for network administration but sacrifices a small amount of privacy.
    pub fn allow_spin(&mut self, value: bool) -> &mut Self {
        self.allow_spin = value;
        self
    }

    /// Maximum number of incoming application datagram bytes to buffer, or None to disable
    /// incoming datagrams
    ///
    /// The peer is forbidden to send single datagrams larger than this size. If the aggregate size
    /// of all datagrams that have been received from the peer but not consumed by the application
    /// exceeds this value, old datagrams are dropped until it is no longer exceeded.
    pub fn datagram_receive_buffer_size(&mut self, value: Option<usize>) -> &mut Self {
        self.datagram_receive_buffer_size = value;
        self
    }

    /// Maximum number of outgoing application datagram bytes to buffer
    ///
    /// While datagrams are sent ASAP, it is possible for an application to generate data faster
    /// than the link, or even the underlying hardware, can transmit them. This limits the amount of
    /// memory that may be consumed in that case. When the send buffer is full and a new datagram is
    /// sent, older datagrams are dropped until sufficient space is available.
    pub fn datagram_send_buffer_size(&mut self, value: usize) -> &mut Self {
        self.datagram_send_buffer_size = value;
        self
    }

    /// Whether to force every packet number to be used
    ///
    /// By default, packet numbers are occasionally skipped to ensure peers aren't ACKing packets
    /// before they see them.
    #[cfg(test)]
    pub(crate) fn deterministic_packet_numbers(&mut self, enabled: bool) -> &mut Self {
        self.deterministic_packet_numbers = enabled;
        self
    }

    /// How to construct new `congestion::Controller`s
    ///
    /// Typically the refcounted configuration of a `congestion::Controller`,
    /// e.g. a `congestion::NewRenoConfig`.
    ///
    /// # Example
    /// ```
    /// # use quinn_proto::*; use std::sync::Arc;
    /// let mut config = TransportConfig::default();
    /// config.congestion_controller_factory(Arc::new(congestion::NewRenoConfig::default()));
    /// ```
    pub fn congestion_controller_factory(
        &mut self,
        factory: Arc<dyn congestion::ControllerFactory + Send + Sync + 'static>,
    ) -> &mut Self {
        self.congestion_controller_factory = factory;
        self
    }

    /// Whether to use "Generic Segmentation Offload" to accelerate transmits, when supported by the
    /// environment
    ///
    /// Defaults to `true`.
    ///
    /// GSO dramatically reduces CPU consumption when sending large numbers of packets with the same
    /// headers, such as when transmitting bulk data on a connection. However, it is not supported
    /// by all network interface drivers or packet inspection tools. `quinn-udp` will attempt to
    /// disable GSO automatically when unavailable, but this can lead to spurious packet loss at
    /// startup, temporarily degrading performance.
    pub fn enable_segmentation_offload(&mut self, enabled: bool) -> &mut Self {
        self.enable_segmentation_offload = enabled;
        self
    }
}
318
319impl Default for TransportConfig {
320 fn default() -> Self {
321 const EXPECTED_RTT: u32 = 100; // ms
322 const MAX_STREAM_BANDWIDTH: u32 = 12500 * 1000; // bytes/s
323 // Window size needed to avoid pipeline
324 // stalls
325 const STREAM_RWND: u32 = MAX_STREAM_BANDWIDTH / 1000 * EXPECTED_RTT;
326
327 Self {
328 max_concurrent_bidi_streams: 100u32.into(),
329 max_concurrent_uni_streams: 100u32.into(),
330 // 30 second default recommended by RFC 9308 ยง 3.2
331 max_idle_timeout: Some(VarInt(30_000)),
332 stream_receive_window: STREAM_RWND.into(),
333 receive_window: VarInt::MAX,
334 send_window: (8 * STREAM_RWND).into(),
335 send_fairness: true,
336
337 packet_threshold: 3,
338 time_threshold: 9.0 / 8.0,
339 initial_rtt: Duration::from_millis(333), // per spec, intentionally distinct from EXPECTED_RTT
340 initial_mtu: INITIAL_MTU,
341 min_mtu: INITIAL_MTU,
342 mtu_discovery_config: Some(MtuDiscoveryConfig::default()),
343 ack_frequency_config: None,
344
345 persistent_congestion_threshold: 3,
346 keep_alive_interval: None,
347 crypto_buffer_size: 16 * 1024,
348 allow_spin: true,
349 datagram_receive_buffer_size: Some(STREAM_RWND as usize),
350 datagram_send_buffer_size: 1024 * 1024,
351 #[cfg(test)]
352 deterministic_packet_numbers: false,
353
354 congestion_controller_factory: Arc::new(congestion::CubicConfig::default()),
355
356 enable_segmentation_offload: true,
357 }
358 }
359}
360
impl fmt::Debug for TransportConfig {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Exhaustive destructuring (no `..`) makes this impl fail to compile whenever a
        // field is added, so the Debug output must be updated deliberately.
        let Self {
            max_concurrent_bidi_streams,
            max_concurrent_uni_streams,
            max_idle_timeout,
            stream_receive_window,
            receive_window,
            send_window,
            send_fairness,
            packet_threshold,
            time_threshold,
            initial_rtt,
            initial_mtu,
            min_mtu,
            mtu_discovery_config,
            ack_frequency_config,
            persistent_congestion_threshold,
            keep_alive_interval,
            crypto_buffer_size,
            allow_spin,
            datagram_receive_buffer_size,
            datagram_send_buffer_size,
            // Test-only knob; deliberately omitted from the output
            #[cfg(test)]
            deterministic_packet_numbers: _,
            congestion_controller_factory: _,
            enable_segmentation_offload,
        } = self;
        fmt.debug_struct("TransportConfig")
            .field("max_concurrent_bidi_streams", max_concurrent_bidi_streams)
            .field("max_concurrent_uni_streams", max_concurrent_uni_streams)
            .field("max_idle_timeout", max_idle_timeout)
            .field("stream_receive_window", stream_receive_window)
            .field("receive_window", receive_window)
            .field("send_window", send_window)
            .field("send_fairness", send_fairness)
            .field("packet_threshold", packet_threshold)
            .field("time_threshold", time_threshold)
            .field("initial_rtt", initial_rtt)
            .field("initial_mtu", initial_mtu)
            .field("min_mtu", min_mtu)
            .field("mtu_discovery_config", mtu_discovery_config)
            .field("ack_frequency_config", ack_frequency_config)
            .field(
                "persistent_congestion_threshold",
                persistent_congestion_threshold,
            )
            .field("keep_alive_interval", keep_alive_interval)
            .field("crypto_buffer_size", crypto_buffer_size)
            .field("allow_spin", allow_spin)
            .field("datagram_receive_buffer_size", datagram_receive_buffer_size)
            .field("datagram_send_buffer_size", datagram_send_buffer_size)
            // congestion_controller_factory is a trait object with no Debug bound, so it is
            // skipped; `finish_non_exhaustive` signals the omission with a trailing `..`
            .field("enable_segmentation_offload", enable_segmentation_offload)
            .finish_non_exhaustive()
    }
}
418
/// Parameters for controlling the peer's acknowledgement frequency
///
/// The parameters provided in this config will be sent to the peer at the beginning of the
/// connection, so it can take them into account when sending acknowledgements (see each parameter's
/// description for details on how it influences acknowledgement frequency).
///
/// Quinn's implementation follows the fourth draft of the
/// [QUIC Acknowledgement Frequency extension](https://datatracker.ietf.org/doc/html/draft-ietf-quic-ack-frequency-04).
/// The defaults produce behavior slightly different than the behavior without this extension,
/// because they change the way reordered packets are handled (see
/// [`AckFrequencyConfig::reordering_threshold`] for details).
#[derive(Clone, Debug)]
pub struct AckFrequencyConfig {
    // Number of ack-eliciting packets the peer may receive before an ACK is required
    pub(crate) ack_eliciting_threshold: VarInt,
    // Maximum ACK delay requested of the peer; `None` keeps the peer's own value
    pub(crate) max_ack_delay: Option<Duration>,
    // Out-of-order packet count that triggers an immediate ACK from the peer
    pub(crate) reordering_threshold: VarInt,
}
436
437impl AckFrequencyConfig {
438 /// The ack-eliciting threshold we will request the peer to use
439 ///
440 /// This threshold represents the number of ack-eliciting packets an endpoint may receive
441 /// without immediately sending an ACK.
442 ///
443 /// The remote peer should send at least one ACK frame when more than this number of
444 /// ack-eliciting packets have been received. A value of 0 results in a receiver immediately
445 /// acknowledging every ack-eliciting packet.
446 ///
447 /// Defaults to 1, which sends ACK frames for every other ack-eliciting packet.
448 pub fn ack_eliciting_threshold(&mut self, value: VarInt) -> &mut Self {
449 self.ack_eliciting_threshold = value;
450 self
451 }
452
453 /// The `max_ack_delay` we will request the peer to use
454 ///
455 /// This parameter represents the maximum amount of time that an endpoint waits before sending
456 /// an ACK when the ack-eliciting threshold hasn't been reached.
457 ///
458 /// The effective `max_ack_delay` will be clamped to be at least the peer's `min_ack_delay`
459 /// transport parameter, and at most the greater of the current path RTT or 25ms.
460 ///
461 /// Defaults to `None`, in which case the peer's original `max_ack_delay` will be used, as
462 /// obtained from its transport parameters.
463 pub fn max_ack_delay(&mut self, value: Option<Duration>) -> &mut Self {
464 self.max_ack_delay = value;
465 self
466 }
467
468 /// The reordering threshold we will request the peer to use
469 ///
470 /// This threshold represents the amount of out-of-order packets that will trigger an endpoint
471 /// to send an ACK, without waiting for `ack_eliciting_threshold` to be exceeded or for
472 /// `max_ack_delay` to be elapsed.
473 ///
474 /// A value of 0 indicates out-of-order packets do not elicit an immediate ACK. A value of 1
475 /// immediately acknowledges any packets that are received out of order (this is also the
476 /// behavior when the extension is disabled).
477 ///
478 /// It is recommended to set this value to [`TransportConfig::packet_threshold`] minus one.
479 /// Since the default value for [`TransportConfig::packet_threshold`] is 3, this value defaults
480 /// to 2.
481 pub fn reordering_threshold(&mut self, value: VarInt) -> &mut Self {
482 self.reordering_threshold = value;
483 self
484 }
485}
486
impl Default for AckFrequencyConfig {
    fn default() -> Self {
        Self {
            // ACK every other ack-eliciting packet
            ack_eliciting_threshold: VarInt(1),
            // Defer to the peer's own `max_ack_delay` transport parameter
            max_ack_delay: None,
            // `TransportConfig::packet_threshold`'s default (3) minus one
            reordering_threshold: VarInt(2),
        }
    }
}
496
/// Parameters governing MTU discovery.
///
/// # The why of MTU discovery
///
/// By design, QUIC ensures during the handshake that the network path between the client and the
/// server is able to transmit unfragmented UDP packets with a body of 1200 bytes. In other words,
/// once the connection is established, we know that the network path's maximum transmission unit
/// (MTU) is of at least 1200 bytes (plus IP and UDP headers). Because of this, a QUIC endpoint can
/// split outgoing data in packets of 1200 bytes, with confidence that the network will be able to
/// deliver them (if the endpoint were to send bigger packets, they could prove too big and end up
/// being dropped).
///
/// There is, however, a significant overhead associated to sending a packet. If the same
/// information can be sent in fewer packets, that results in higher throughput. The amount of
/// packets that need to be sent is inversely proportional to the MTU: the higher the MTU, the
/// bigger the packets that can be sent, and the fewer packets that are needed to transmit a given
/// amount of bytes.
///
/// Most networks have an MTU higher than 1200. Through MTU discovery, endpoints can detect the
/// path's MTU and, if it turns out to be higher, start sending bigger packets.
///
/// # MTU discovery internals
///
/// Quinn implements MTU discovery through DPLPMTUD (Datagram Packetization Layer Path MTU
/// Discovery), described in [section 14.3 of RFC
/// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-14.3). This method consists of sending
/// QUIC packets padded to a particular size (called PMTU probes), and waiting to see if the remote
/// peer responds with an ACK. If an ACK is received, that means the probe arrived at the remote
/// peer, which in turn means that the network path's MTU is of at least the packet's size. If the
/// probe is lost, it is sent another 2 times before concluding that the MTU is lower than the
/// packet's size.
///
/// MTU discovery runs on a schedule (e.g. every 600 seconds) specified through
/// [`MtuDiscoveryConfig::interval`]. The first run happens right after the handshake, and
/// subsequent discoveries are scheduled to run when the interval has elapsed, starting from the
/// last time when MTU discovery completed.
///
/// Since the search space for MTUs is quite big (the smallest possible MTU is 1200, and the highest
/// is 65527), Quinn performs a binary search to keep the number of probes as low as possible. The
/// lower bound of the search is equal to [`TransportConfig::initial_mtu`] in the
/// initial MTU discovery run, and is equal to the currently discovered MTU in subsequent runs. The
/// upper bound is determined by the minimum of [`MtuDiscoveryConfig::upper_bound`] and the
/// `max_udp_payload_size` transport parameter received from the peer during the handshake.
///
/// # Black hole detection
///
/// If, at some point, the network path no longer accepts packets of the detected size, packet loss
/// will eventually trigger black hole detection and reset the detected MTU to 1200. In that case,
/// MTU discovery will be triggered after [`MtuDiscoveryConfig::black_hole_cooldown`] (ignoring the
/// timer that was set based on [`MtuDiscoveryConfig::interval`]).
///
/// # Interaction between peers
///
/// There is no guarantee that the MTU on the path between A and B is the same as the MTU of the
/// path between B and A. Therefore, each peer in the connection needs to run MTU discovery
/// independently in order to discover the path's MTU.
#[derive(Clone, Debug)]
pub struct MtuDiscoveryConfig {
    // Time between the end of one discovery run and the start of the next
    pub(crate) interval: Duration,
    // Largest UDP payload size the binary search will probe for
    pub(crate) upper_bound: u16,
    // Search terminates once the remaining refinement is smaller than this
    pub(crate) minimum_change: u16,
    // Pause after black hole detection before probing again
    pub(crate) black_hole_cooldown: Duration,
}
560
561impl MtuDiscoveryConfig {
562 /// Specifies the time to wait after completing MTU discovery before starting a new MTU
563 /// discovery run.
564 ///
565 /// Defaults to 600 seconds, as recommended by [RFC
566 /// 8899](https://www.rfc-editor.org/rfc/rfc8899).
567 pub fn interval(&mut self, value: Duration) -> &mut Self {
568 self.interval = value;
569 self
570 }
571
572 /// Specifies the upper bound to the max UDP payload size that MTU discovery will search for.
573 ///
574 /// Defaults to 1452, to stay within Ethernet's MTU when using IPv4 and IPv6. The highest
575 /// allowed value is 65527, which corresponds to the maximum permitted UDP payload on IPv6.
576 ///
577 /// It is safe to use an arbitrarily high upper bound, regardless of the network path's MTU. The
578 /// only drawback is that MTU discovery might take more time to finish.
579 pub fn upper_bound(&mut self, value: u16) -> &mut Self {
580 self.upper_bound = value.min(MAX_UDP_PAYLOAD);
581 self
582 }
583
584 /// Specifies the amount of time that MTU discovery should wait after a black hole was detected
585 /// before running again. Defaults to one minute.
586 ///
587 /// Black hole detection can be spuriously triggered in case of congestion, so it makes sense to
588 /// try MTU discovery again after a short period of time.
589 pub fn black_hole_cooldown(&mut self, value: Duration) -> &mut Self {
590 self.black_hole_cooldown = value;
591 self
592 }
593
594 /// Specifies the minimum MTU change to stop the MTU discovery phase.
595 /// Defaults to 20.
596 pub fn minimum_change(&mut self, value: u16) -> &mut Self {
597 self.minimum_change = value;
598 self
599 }
600}
601
602impl Default for MtuDiscoveryConfig {
603 fn default() -> Self {
604 Self {
605 interval: Duration::from_secs(600),
606 upper_bound: 1452,
607 black_hole_cooldown: Duration::from_secs(60),
608 minimum_change: 20,
609 }
610 }
611}
612
/// Maximum duration of inactivity to accept before timing out the connection
///
/// This wraps an underlying [`VarInt`], representing the duration in milliseconds. Values can be
/// constructed by converting directly from `VarInt`, or using `TryFrom<Duration>`.
///
/// ```
/// # use std::{convert::TryFrom, time::Duration};
/// # use quinn_proto::{IdleTimeout, VarIntBoundsExceeded, VarInt};
/// # fn main() -> Result<(), VarIntBoundsExceeded> {
/// // A `VarInt`-encoded value in milliseconds
/// let timeout = IdleTimeout::from(VarInt::from_u32(10_000));
///
/// // Try to convert a `Duration` into a `VarInt`-encoded timeout
/// let timeout = IdleTimeout::try_from(Duration::from_secs(10))?;
/// # Ok(())
/// # }
/// ```
#[derive(Default, Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct IdleTimeout(VarInt); // wrapped value is a duration in milliseconds
632
633impl From<VarInt> for IdleTimeout {
634 fn from(inner: VarInt) -> Self {
635 Self(inner)
636 }
637}
638
639impl std::convert::TryFrom<Duration> for IdleTimeout {
640 type Error = VarIntBoundsExceeded;
641
642 fn try_from(timeout: Duration) -> Result<Self, Self::Error> {
643 let inner = VarInt::try_from(timeout.as_millis())?;
644 Ok(Self(inner))
645 }
646}
647
648impl fmt::Debug for IdleTimeout {
649 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
650 self.0.fmt(f)
651 }
652}