internal/bigquery: require SetUploadTime

Previously, bigquery.Client.Upload called SetUploadTime only
if a row implemented that method.

That resulted in a bug where analysis results did not have their
CreatedAt column populated.

Since we expect everything we write to BigQuery to be timestamped,
require the SetUploadTime method.

Change-Id: I7c499e0beff88d0d34a8bd1c95e6f8631cb9a348
Reviewed-on: https://go-review.googlesource.com/c/pkgsite-metrics/+/473777
Reviewed-by: Zvonimir Pavlinovic <zpavlinovic@google.com>
Run-TryBot: Jonathan Amsterdam <jba@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
diff --git a/internal/analysis/analysis.go b/internal/analysis/analysis.go
index 32af474..fd312e2 100644
--- a/internal/analysis/analysis.go
+++ b/internal/analysis/analysis.go
@@ -156,6 +156,8 @@
 	r.ErrorCategory = derrors.CategorizeError(err)
 }
 
+func (r *Result) SetUploadTime(t time.Time) { r.CreatedAt = t }
+
 // WorkVersion contains information that can be used to avoid duplicate work.
 // Given two WorkVersion values v1 and v2 for the same module path and version,
 // if v1 == v2 then it is not necessary to scan the module.
diff --git a/internal/bigquery/bigquery.go b/internal/bigquery/bigquery.go
index f09e0fe..e02d269 100644
--- a/internal/bigquery/bigquery.go
+++ b/internal/bigquery/bigquery.go
@@ -136,13 +136,16 @@
 	return false, err
 }
 
+// A Row is something that can be uploaded to BigQuery.
+type Row interface {
+	SetUploadTime(time.Time)
+}
+
 // Upload inserts a row into the table.
-func (c *Client) Upload(ctx context.Context, tableID string, row any) (err error) {
+func (c *Client) Upload(ctx context.Context, tableID string, row Row) (err error) {
 	defer derrors.Wrap(&err, "Upload(ctx, %q)", tableID)
 	u := c.Table(tableID).Inserter()
-	if s, ok := row.(interface{ SetUploadTime(time.Time) }); ok {
-		s.SetUploadTime(time.Now())
-	}
+	row.SetUploadTime(time.Now())
 	return u.Put(ctx, row)
 }
 
@@ -151,15 +154,13 @@
 // The chunkSize parameter limits the number of rows sent in a single request; this may
 // be necessary to avoid reaching the maximum size of a request.
 // If chunkSize is <= 0, all rows will be sent in one request.
-func UploadMany[T any](ctx context.Context, client *Client, tableID string, rows []T, chunkSize int) (err error) {
+func UploadMany[T Row](ctx context.Context, client *Client, tableID string, rows []T, chunkSize int) (err error) {
 	defer derrors.Wrap(&err, "UploadMany(%q), %d rows, chunkSize=%d", tableID, len(rows), chunkSize)
 
 	now := time.Now()
 	// Set upload time.
 	for _, r := range rows {
-		if s, ok := any(r).(interface{ SetUploadTime(time.Time) }); ok {
-			s.SetUploadTime(now)
-		}
+		r.SetUploadTime(now)
 	}
 
 	ins := client.Table(tableID).Inserter()