http2: document that RFC 7540 prioritization does not work with small payloads

This change demonstrates that golang/go#75936 applies to the RFC 7540
write scheduler.
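
The test opts into the RFC 7540 priority scheduler by overriding the
server's write scheduler constructor; a minimal sketch of the setup used
in the test below:

	s.NewWriteScheduler = func() WriteScheduler {
		return NewPriorityWriteScheduler(nil)
	}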

A similar test will be added for the RFC 9218 write scheduler once support
for it is incorporated into http2/server.go.

For golang/go#75936

Change-Id: I4e05dbeb0aab71942eb699b67383ef5b52c3ef4d
Reviewed-on: https://go-review.googlesource.com/c/net/+/714741
Reviewed-by: Nicholas Husin <husin@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Damien Neil <dneil@google.com>
diff --git a/http2/server_test.go b/http2/server_test.go
index 71287d1..02c96a0 100644
--- a/http2/server_test.go
+++ b/http2/server_test.go
@@ -5124,3 +5124,79 @@
 	})
 	st.wantIdle()
 }
+
+// This test documents current behavior rather than the behavior we would
+// ideally like to see. Refer to go.dev/issues/75936 for details.
+func TestServerRFC7540PrioritySmallPayload(t *testing.T) {
+	synctestTest(t, testServerRFC7540PrioritySmallPayload)
+}
+func testServerRFC7540PrioritySmallPayload(t testing.TB) {
+	endTest := false
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
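+		// Write and flush one byte at a time so that each response is sent as
+		// a stream of tiny DATA frames (the small-payload case under test).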
+		for !endTest {
+			w.Write([]byte("a"))
+			if f, ok := w.(http.Flusher); ok {
+				f.Flush()
+			}
+		}
+	}, func(s *Server) {
+		s.NewWriteScheduler = func() WriteScheduler {
+			return NewPriorityWriteScheduler(nil)
+		}
+	})
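+	// Limit the client's read buffer so that the server's writes back up and
+	// the write scheduler always has multiple streams to choose between.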
+	if syncConn, ok := st.cc.(*synctestNetConn); ok {
+		syncConn.SetReadBufferSize(1)
+	} else {
+		t.Fatal("server connection is not a *synctestNetConn")
+	}
+	defer st.Close()
+	defer func() { endTest = true }()
+	st.greet()
+
+	// Create 5 streams with a weight of 1, and another 5 streams with the
+	// maximum weight of 255.
+	// Since every stream has an unbounded amount of data to write, we would
+	// expect almost all of the frames we read back to come from the streams
+	// with weight 255.
+	for i := 1; i <= 19; i += 2 {
+		weight := 1
+		if i > 10 {
+			weight = 255
+		}
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      uint32(i),
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+			Priority:      PriorityParam{StreamDep: 0, Weight: uint8(weight)},
+		})
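+		// Let the server finish processing this stream's HEADERS (and its
+		// priority) before the next stream is opened.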
+		synctest.Wait()
+	}
+
+	// In the current implementation, however, the frames we read back are
+	// distributed evenly among all of the streams, regardless of weight.
+	streamWriteCount := make(map[uint32]int)
+	totalWriteCount := 10000
+	for range totalWriteCount {
+		f := st.readFrame()
+		if f == nil {
+			break
+		}
+		streamWriteCount[f.Header().StreamID]++
+	}
+	for streamID, writeCount := range streamWriteCount {
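+		// A round-robin scheduler gives every stream an equal share of the
+		// writes; allow a 1% margin around that even split.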
+		expectedWriteCount := totalWriteCount / len(streamWriteCount)
+		errorMargin := expectedWriteCount / 100
+		if writeCount >= expectedWriteCount+errorMargin || writeCount <= expectedWriteCount-errorMargin {
+			t.Errorf("Expected stream %v to receive %v±%v writes, got %v", streamID, expectedWriteCount, errorMargin, writeCount)
+		}
+	}
+}