diff --git a/state/storage.go b/state/storage.go
index eefbeac1..bf838237 100644
--- a/state/storage.go
+++ b/state/storage.go
@@ -68,6 +68,7 @@ type Storage struct {
 	DeviceDataTable *DeviceDataTable
 	ReceiptTable    *ReceiptTable
 	DB              *sqlx.DB
+	MaxTimelineLimit int
 }
 
 func NewStorage(postgresURI string) *Storage {
@@ -102,6 +103,7 @@ func NewStorageWithDB(db *sqlx.DB, addPrometheusMetrics bool) *Storage {
 		DeviceDataTable: NewDeviceDataTable(db),
 		ReceiptTable:    NewReceiptTable(db),
 		DB:              db,
+		MaxTimelineLimit: 50,
 	}
 }
 
@@ -705,12 +707,15 @@ func (s *Storage) RoomStateAfterEventPosition(ctx context.Context, roomIDs []str
 // - in the given rooms
 // - that the user has permission to see
 // - with NIDs <= `to`.
-// Up to `limit` events are chosen per room.
+// Up to `limit` events are chosen per room. This limit may itself be capped according to MaxTimelineLimit.
 func (s *Storage) LatestEventsInRooms(userID string, roomIDs []string, to int64, limit int) (map[string]*LatestEvents, error) {
 	roomIDToRange, err := s.visibleEventNIDsBetweenForRooms(userID, roomIDs, 0, to)
 	if err != nil {
 		return nil, err
 	}
+	if s.MaxTimelineLimit != 0 && limit > s.MaxTimelineLimit {
+		limit = s.MaxTimelineLimit
+	}
 	result := make(map[string]*LatestEvents, len(roomIDs))
 	err = sqlutil.WithTransaction(s.Accumulator.db, func(txn *sqlx.Tx) error {
 		for roomID, r := range roomIDToRange {
diff --git a/tests-integration/timeline_test.go b/tests-integration/timeline_test.go
index 72c7316b..051f3efd 100644
--- a/tests-integration/timeline_test.go
+++ b/tests-integration/timeline_test.go
@@ -1339,6 +1339,49 @@ func TestNumLiveBulk(t *testing.T) {
 	))
 }
 
+// Ensure that clients cannot just set timeline_limit: 99999 and DoS the server
+func TestSensibleLimitToTimelineLimit(t *testing.T) {
+	pqString := testutils.PrepareDBConnectionString()
+	// setup code
+	v2 := runTestV2Server(t)
+	v3 := runTestServer(t, v2, pqString)
+	defer v2.close()
+	defer v3.close()
+	roomID := "!a:localhost"
+
+	var hundredEvents = make([]json.RawMessage, 100)
+	for i := 0; i < 100; i++ {
+		hundredEvents[i] = testutils.NewEvent(t, "m.room.message", alice, map[string]any{
+			"msgtype": "m.text",
+			"body":    fmt.Sprintf("msg %d", i),
+		}, testutils.WithTimestamp(time.Now().Add(time.Second)))
+	}
+
+	v2.addAccount(t, alice, aliceToken)
+	v2.queueResponse(alice, sync2.SyncResponse{
+		Rooms: sync2.SyncRoomsResponse{
+			Join: v2JoinTimeline(roomEvents{
+				roomID: roomID,
+				state:  createRoomState(t, alice, time.Now()),
+				events: hundredEvents,
+			}),
+		},
+	})
+	res := v3.mustDoV3Request(t, aliceToken, sync3.Request{
+		Lists: map[string]sync3.RequestList{"a": {
+			Ranges: sync3.SliceRanges{
+				[2]int64{0, 10},
+			},
+			RoomSubscription: sync3.RoomSubscription{
+				TimelineLimit: 99999,
+			},
+		}},
+	})
+	m.MatchResponse(t, res, m.MatchList("a",
+		m.MatchV3Ops(m.MatchV3SyncOp(0, 0, []string{roomID})),
+	), m.MatchRoomSubscription(roomID, m.MatchRoomTimeline(hundredEvents[50:]))) // caps at 50
+}
+
 // Regression test for a thing which Synapse can sometimes send down sync v2.
 // See https://github.com/matrix-org/sliding-sync/issues/367
 // This would cause this room to not be processed at all, which is bad.
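
The change in LatestEventsInRooms boils down to clamping the client-requested limit to MaxTimelineLimit, treating a zero cap as "no cap". The following is a minimal standalone sketch of that behaviour; clampTimelineLimit and the example values are illustrative only and are not part of the diff above.

// clamp_sketch.go - a hypothetical, self-contained illustration of the
// timeline limit cap introduced in this diff. It is not code from the
// sliding-sync repository.
package main

import "fmt"

// clampTimelineLimit mirrors the guard added to LatestEventsInRooms:
// a maxTimelineLimit of 0 means "no cap", otherwise requests above the
// cap are reduced to it.
func clampTimelineLimit(limit, maxTimelineLimit int) int {
	if maxTimelineLimit != 0 && limit > maxTimelineLimit {
		return maxTimelineLimit
	}
	return limit
}

func main() {
	fmt.Println(clampTimelineLimit(99999, 50)) // 50: an oversized request is capped
	fmt.Println(clampTimelineLimit(10, 50))    // 10: requests under the cap pass through unchanged
	fmt.Println(clampTimelineLimit(99999, 0))  // 99999: a zero cap disables clamping
}

With the default of 50 set in NewStorageWithDB, this is why the integration test that asks for timeline_limit: 99999 only receives the last 50 of the 100 queued events.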