Skip to content

fix: accumulate agentstats until reported and fix insights DAU offset #15832

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 9 commits into from
Dec 18, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 12 additions & 5 deletions agent/stats.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package agent

import (
"context"
"maps"
"sync"
"time"

Expand Down Expand Up @@ -32,7 +33,7 @@ type statsDest interface {
// statsDest (agent API in prod)
type statsReporter struct {
*sync.Cond
networkStats *map[netlogtype.Connection]netlogtype.Counts
networkStats map[netlogtype.Connection]netlogtype.Counts
unreported bool
lastInterval time.Duration

Expand All @@ -54,8 +55,15 @@ func (s *statsReporter) callback(_, _ time.Time, virtual, _ map[netlogtype.Conne
s.L.Lock()
defer s.L.Unlock()
s.logger.Debug(context.Background(), "got stats callback")
s.networkStats = &virtual
s.unreported = true
// Accumulate stats until they've been reported.
if s.unreported && len(s.networkStats) > 0 {
for k, v := range virtual {
s.networkStats[k] = s.networkStats[k].Add(v)
}
} else {
s.networkStats = maps.Clone(virtual)
s.unreported = true
}
Copy link
Member Author

@mafredri mafredri Dec 11, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Review: If the callback was called multiple times before reporting, we lost data as each update is a snapshot since the last.

This can happen if:

  1. The interval is short (tests)
  2. Report takes a long time

I believe the assumption is that the "ConnStatsCallback" reports a realistic count for "now", however, what it actually returns is closer to an additive diff between this and the previous report. Thus, if two callbacks happen in quick succession we're effectively zeroing the actual data.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Great catch!

s.Broadcast()
}

Expand Down Expand Up @@ -96,9 +104,8 @@ func (s *statsReporter) reportLoop(ctx context.Context, dest statsDest) error {
if ctxDone {
return nil
}
networkStats := *s.networkStats
s.unreported = false
if err = s.reportLocked(ctx, dest, networkStats); err != nil {
if err = s.reportLocked(ctx, dest, s.networkStats); err != nil {
return xerrors.Errorf("report stats: %w", err)
}
}
Expand Down
18 changes: 15 additions & 3 deletions agent/stats_internal_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ func TestStatsReporter(t *testing.T) {
require.Equal(t, netStats, gotNetStats)

// while we are collecting the stats, send in two new netStats to simulate
// what happens if we don't keep up. Only the latest should be kept.
// what happens if we don't keep up. The stats should be accumulated.
netStats0 := map[netlogtype.Connection]netlogtype.Counts{
{
Proto: ipproto.TCP,
Expand Down Expand Up @@ -102,9 +102,21 @@ func TestStatsReporter(t *testing.T) {
require.Equal(t, stats, update.Stats)
testutil.RequireSendCtx(ctx, t, fDest.resps, &proto.UpdateStatsResponse{ReportInterval: durationpb.New(interval)})

// second update -- only netStats1 is reported
// second update -- netStats0 and netStats1 are accumulated and reported
wantNetStats := map[netlogtype.Connection]netlogtype.Counts{
{
Proto: ipproto.TCP,
Src: netip.MustParseAddrPort("192.168.1.33:4887"),
Dst: netip.MustParseAddrPort("192.168.2.99:9999"),
}: {
TxPackets: 21,
TxBytes: 21,
RxPackets: 21,
RxBytes: 21,
},
}
gotNetStats = testutil.RequireRecvCtx(ctx, t, fCollector.calls)
require.Equal(t, netStats1, gotNetStats)
require.Equal(t, wantNetStats, gotNetStats)
stats = &proto.Stats{SessionCountJetbrains: 66}
testutil.RequireSendCtx(ctx, t, fCollector.stats, stats)
update = testutil.RequireRecvCtx(ctx, t, fDest.reqs)
Expand Down
2 changes: 1 addition & 1 deletion coderd/insights.go
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ func (api *API) returnDAUsInternal(rw http.ResponseWriter, r *http.Request, temp
}
for _, row := range rows {
resp.Entries = append(resp.Entries, codersdk.DAUEntry{
Date: row.StartTime.Format(time.DateOnly),
Date: row.StartTime.In(loc).Format(time.DateOnly),
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Review: Drive-by fix, the date was off-by-one depending on timezone.

Amount: int(row.ActiveUsers),
})
}
Expand Down
25 changes: 22 additions & 3 deletions coderd/insights_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,16 +48,27 @@ func TestDeploymentInsights(t *testing.T) {
db, ps := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
logger := testutil.Logger(t)
rollupEvents := make(chan dbrollup.Event)
statsInterval := 500 * time.Millisecond
// Speed up the test by controlling batch size and interval.
batcher, closeBatcher, err := workspacestats.NewBatcher(context.Background(),
workspacestats.BatcherWithLogger(logger.Named("batcher").Leveled(slog.LevelDebug)),
workspacestats.BatcherWithStore(db),
workspacestats.BatcherWithBatchSize(1),
workspacestats.BatcherWithInterval(statsInterval),
)
require.NoError(t, err)
defer closeBatcher()
client := coderdtest.New(t, &coderdtest.Options{
Database: db,
Pubsub: ps,
Logger: &logger,
IncludeProvisionerDaemon: true,
AgentStatsRefreshInterval: time.Millisecond * 100,
AgentStatsRefreshInterval: statsInterval,
StatsBatcher: batcher,
DatabaseRolluper: dbrollup.New(
logger.Named("dbrollup").Leveled(slog.LevelDebug),
db,
dbrollup.WithInterval(time.Millisecond*100),
dbrollup.WithInterval(statsInterval/2),
dbrollup.WithEventChannel(rollupEvents),
),
})
Expand All @@ -76,7 +87,7 @@ func TestDeploymentInsights(t *testing.T) {
workspace := coderdtest.CreateWorkspace(t, client, template.ID)
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)

ctx := testutil.Context(t, testutil.WaitLong)
ctx := testutil.Context(t, testutil.WaitSuperLong)
Copy link
Member Author

@mafredri mafredri Dec 11, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Review: In race mode, propagating the agent connection stats can take a while.


// Pre-check, no permission issues.
daus, err := client.DeploymentDAUs(ctx, codersdk.TimezoneOffsetHour(clientTz))
Expand Down Expand Up @@ -108,6 +119,13 @@ func TestDeploymentInsights(t *testing.T) {
err = sess.Start("cat")
require.NoError(t, err)

select {
case <-ctx.Done():
require.Fail(t, "timed out waiting for initial rollup event", ctx.Err())
case ev := <-rollupEvents:
require.True(t, ev.Init, "want init event")
}

for {
select {
case <-ctx.Done():
Expand All @@ -120,6 +138,7 @@ func TestDeploymentInsights(t *testing.T) {
if len(daus.Entries) > 0 && daus.Entries[len(daus.Entries)-1].Amount > 0 {
break
}
t.Logf("waiting for deployment daus to update: %+v", daus)
}
}

Expand Down
Loading