quic: correctly extend peer's flow control window after MAX_DATA
When sending the peer a connection-level flow control update in
a MAX_DATA frame, we weren't recording the updated limit locally.
When the peer wrote data past the old limit, we would incorrectly
close the connection with a FLOW_CONTROL_ERROR.
For golang/go#58547
Change-Id: I6879c0cccc3cfdc673b613a07b038138d9e285ff
Reviewed-on: https://go-review.googlesource.com/c/net/+/530075
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Jonathan Amsterdam <jba@google.com>
diff --git a/internal/quic/conn_flow.go b/internal/quic/conn_flow.go
index 281c708..4f1ab6e 100644
--- a/internal/quic/conn_flow.go
+++ b/internal/quic/conn_flow.go
@@ -106,6 +106,7 @@
if !w.appendMaxDataFrame(c.streams.inflow.newLimit) {
return false
}
+ c.streams.inflow.sentLimit = c.streams.inflow.newLimit
c.streams.inflow.sent.setSent(pnum)
}
return true
diff --git a/internal/quic/conn_flow_test.go b/internal/quic/conn_flow_test.go
index 45c82f6..d5ee74e 100644
--- a/internal/quic/conn_flow_test.go
+++ b/internal/quic/conn_flow_test.go
@@ -35,6 +35,16 @@
packetType1RTT, debugFrameMaxData{
max: 128,
})
+ // Peer can write up to the new limit.
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 64,
+ data: make([]byte, 64),
+ })
+ tc.wantIdle("connection is idle")
+ if n, err := s.ReadContext(ctx, make([]byte, 64)); n != 64 || err != nil {
+ t.Fatalf("offset 64: s.Read() = %v, %v; want %v, nil", n, err, 64)
+ }
}
func TestConnInflowReturnOnRacingReads(t *testing.T) {