// relay_server/services/projects/cache/state.rs

use futures::StreamExt;
use std::fmt;
use std::sync::Arc;
use tokio::time::Instant;

use arc_swap::ArcSwap;
use relay_base_schema::project::ProjectKey;
use relay_config::Config;
use relay_quotas::CachedRateLimits;
use relay_sampling::evaluation::ReservoirCounters;
use relay_statsd::metric;

use crate::services::projects::project::{ProjectState, Revision};
use crate::services::projects::source::SourceProjectState;
use crate::statsd::RelayHistograms;
use crate::utils::{RetryBackoff, UniqueScheduledQueue};

/// The backing storage for a project cache.
///
/// Exposes the only interface to delete from [`Shared`], guaranteed by
/// requiring exclusive/mutable access to [`ProjectStore`].
///
/// [`Shared`] can be extended through [`Shared::get_or_create`], in which case
/// the private state is missing. Users of [`Shared::get_or_create`] *must* trigger
/// a fetch to create the private state and keep it updated.
/// This guarantees that eventually the project state is populated, but for an undetermined
/// time it is possible that shared state exists without the respective private state.
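///
/// A minimal sketch of the intended fetch lifecycle (illustrative only, not a
/// doctest; `project_key`, `config`, and `do_fetch` are assumptions of the
/// surrounding caller):
///
/// ```ignore
/// let mut store = ProjectStore::default();
///
/// // `None` means no fetch is necessary or one is already in flight.
/// if let Some(fetch) = store.try_begin_fetch(project_key, &config) {
///     // Execute the fetch against the upstream source ...
///     let state = do_fetch(fetch.project_key(), fetch.revision()).await;
///     // ... and feed the result back. A returned `Fetch` must be scheduled
///     // and completed again, e.g. when the fetched state is still pending.
///     let retry = store.complete_fetch(fetch.complete(state), &config);
/// }
/// ```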
#[derive(Default)]
pub struct ProjectStore {
    /// The shared state, which can be accessed concurrently.
    shared: Arc<Shared>,
    /// The private state, which requires exclusive (mutable) access, used to maintain the project state.
    private: hashbrown::HashMap<ProjectKey, PrivateProjectState>,
    /// Scheduled queue tracking all evictions.
    evictions: UniqueScheduledQueue<ProjectKey>,
}

impl ProjectStore {
    /// Retrieves a [`Shared`] handle which can be freely shared with multiple consumers.
    pub fn shared(&self) -> Arc<Shared> {
        Arc::clone(&self.shared)
    }

    /// Tries to begin a new fetch for the passed `project_key`.
    ///
    /// Returns `None` if no fetch is necessary or there is already a fetch ongoing.
    /// A returned [`Fetch`] must be scheduled and completed with [`Fetch::complete`] and
    /// [`Self::complete_fetch`].
    pub fn try_begin_fetch(&mut self, project_key: ProjectKey, config: &Config) -> Option<Fetch> {
        let fetch = self
            .get_or_create(project_key, config)
            .try_begin_fetch(config);

        // If there is a new fetch, remove the pending eviction; it will be re-scheduled once
        // the fetch is completed.
        if fetch.is_some() {
            self.evictions.remove(&project_key);
        }

        fetch
    }

    /// Completes a [`CompletedFetch`] started with [`Self::try_begin_fetch`].
    ///
    /// Returns a new [`Fetch`] if another fetch must be scheduled. This happens when the fetched
    /// [`ProjectState`] is still pending or already deemed expired.
    #[must_use = "an incomplete fetch must be retried"]
    pub fn complete_fetch(&mut self, fetch: CompletedFetch, config: &Config) -> Option<Fetch> {
        let project_key = fetch.project_key;

        // Eviction is not possible for projects which are currently being fetched.
        // Hence, if a fetch was started, the project state must still exist at this stage.
        debug_assert!(self.shared.projects.pin().get(&project_key).is_some());
        debug_assert!(self.private.get(&project_key).is_some());

        let mut project = self.get_or_create(project_key, config);
        let expiry = project.complete_fetch(fetch, config);
        // Schedule another fetch if necessary; this usually only happens when
        // the completed fetch is still pending.
        let new_fetch = project.try_begin_fetch(config);

        if let Some(ExpiryTime(when)) = expiry {
            debug_assert!(
                new_fetch.is_none(),
                "there cannot be a new fetch and a scheduled expiry"
            );
            self.evictions.schedule(when, project_key);
        }

        metric!(
            histogram(RelayHistograms::ProjectStateCacheSize) = self.shared.projects.len() as u64,
            storage = "shared"
        );
        metric!(
            histogram(RelayHistograms::ProjectStateCacheSize) = self.private.len() as u64,
            storage = "private"
        );

        new_fetch
    }

    /// Waits for the next scheduled eviction and returns an [`Eviction`] token.
    ///
    /// The returned [`Eviction`] token must be immediately turned in using [`Self::evict`].
    ///
    /// The returned future is cancellation safe.
    pub async fn next_eviction(&mut self) -> Option<Eviction> {
        if self.evictions.is_empty() {
            return None;
        }
        self.evictions.next().await.map(Eviction)
    }

    /// Evicts a project using an [`Eviction`] token returned from [`Self::next_eviction`].
    pub fn evict(&mut self, Eviction(project_key): Eviction) {
        // Remove the private part.
        let Some(private) = self.private.remove(&project_key) else {
            // Not possible if all invariants are upheld.
            debug_assert!(false, "no private state for eviction");
            return;
        };

        debug_assert!(
            matches!(private.state, FetchState::Complete { .. }),
            "private state must be completed"
        );

        // Remove the shared part.
        let shared = self.shared.projects.pin();
        let _removed = shared.remove(&project_key);
        debug_assert!(
            _removed.is_some(),
            "an expired project must exist in the shared state"
        );
    }

    /// Get a reference to the current project or create a new project.
    ///
    /// For internal use only; a created project must always be fetched immediately.
    fn get_or_create(&mut self, project_key: ProjectKey, config: &Config) -> ProjectRef<'_> {
        #[cfg(debug_assertions)]
        if self.private.contains_key(&project_key) {
            // We have exclusive access to the private part, there are no concurrent deletions
            // hence if we have a private state there must always be a shared state as well.
            //
            // The opposite is not true, the shared state may have been created concurrently
            // through the shared access.
            debug_assert!(self.shared.projects.pin().contains_key(&project_key));
        }

        let private = self
            .private
            .entry(project_key)
            .or_insert_with(|| PrivateProjectState::new(project_key, config));

        let shared = self
            .shared
            .projects
            .pin()
            .get_or_insert_with(project_key, Default::default)
            .clone();

        ProjectRef { private, shared }
    }
}

/// The shared and concurrently accessible handle to the project cache.
#[derive(Default)]
pub struct Shared {
    projects: papaya::HashMap<ProjectKey, SharedProjectState, ahash::RandomState>,
}

impl Shared {
    /// Returns the existing project state or creates a new one.
    ///
    /// The caller must ensure that the project cache is instructed to
    /// [`super::ProjectCache::Fetch`] the retrieved project.
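    ///
    /// A hedged usage sketch (not a doctest; the `project_cache` handle used
    /// to trigger the fetch is hypothetical, for illustration only):
    ///
    /// ```ignore
    /// let project = shared.get_or_create(project_key);
    /// // The returned state may still be pending; trigger a fetch so it is
    /// // eventually populated.
    /// project_cache.send(ProjectCache::Fetch(project_key));
    /// let _state = project.project_state();
    /// ```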
    pub fn get_or_create(&self, project_key: ProjectKey) -> SharedProject {
        // The fast path: we expect the project to exist.
        let projects = self.projects.pin();
        if let Some(project) = projects.get(&project_key) {
            return project.to_shared_project();
        }

        // The slow path: attempt the insert; somebody else may have been faster, but that's okay.
        match projects.try_insert(project_key, Default::default()) {
            Ok(inserted) => inserted.to_shared_project(),
            Err(occupied) => occupied.current.to_shared_project(),
        }
    }
}

/// TEST ONLY bypass to make the project cache mockable.
#[cfg(test)]
impl Shared {
    /// Updates the project state for a project.
    ///
    /// TEST ONLY!
    pub fn test_set_project_state(&self, project_key: ProjectKey, state: ProjectState) {
        self.projects
            .pin()
            .get_or_insert_with(project_key, Default::default)
            .set_project_state(state);
    }

    /// Returns `true` if there exists a shared state for the passed `project_key`.
    pub fn test_has_project_created(&self, project_key: ProjectKey) -> bool {
        self.projects.pin().contains_key(&project_key)
    }
}

impl fmt::Debug for Shared {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Shared")
            .field("num_projects", &self.projects.len())
            .finish()
    }
}

/// A single project from the [`Shared`] project cache.
pub struct SharedProject(Arc<SharedProjectStateInner>);

impl SharedProject {
    /// Returns a reference to the contained [`ProjectState`].
    pub fn project_state(&self) -> &ProjectState {
        &self.0.state
    }

    /// Returns a reference to the contained [`CachedRateLimits`].
    pub fn cached_rate_limits(&self) -> &CachedRateLimits {
        // Exposing cached rate limits may be a bad idea: it allows mutation
        // and caching of rate limits for pending projects, which may or may not be fine.
        // For now, this is acceptable.
        //
        // Read only access is easily achievable if we return only the current rate limits.
        &self.0.rate_limits
    }

    /// Returns a reference to the contained [`ReservoirCounters`].
    pub fn reservoir_counters(&self) -> &ReservoirCounters {
        &self.0.reservoir_counters
    }
}

/// TEST ONLY bypass to make the project cache mockable.
#[cfg(test)]
impl SharedProject {
    /// Creates a new [`SharedProject`] for testing only.
    pub fn for_test(state: ProjectState) -> Self {
        Self(Arc::new(SharedProjectStateInner {
            state,
            ..Default::default()
        }))
    }
}

/// Reference to a full project wrapping shared and private state.
struct ProjectRef<'a> {
    shared: SharedProjectState,
    private: &'a mut PrivateProjectState,
}

impl ProjectRef<'_> {
    fn try_begin_fetch(&mut self, config: &Config) -> Option<Fetch> {
        let now = Instant::now();
        self.private
            .try_begin_fetch(now, config)
            .map(|fetch| fetch.with_revision(self.shared.revision()))
    }

    fn complete_fetch(&mut self, fetch: CompletedFetch, config: &Config) -> Option<ExpiryTime> {
        let now = Instant::now();
        self.private.complete_fetch(&fetch, now);

        // Keep the old state around if the current fetch is pending.
        // It may still be useful to callers.
        match fetch.state {
            SourceProjectState::New(state) if !state.is_pending() => {
                self.shared.set_project_state(state);
            }
            _ => {}
        }

        self.private.expiry_time(config)
    }
}

/// An [`Eviction`] token.
///
/// The token must be turned in using [`ProjectStore::evict`].
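///
/// A minimal sketch of the eviction loop (not a doctest; `store` is an
/// assumed [`ProjectStore`]):
///
/// ```ignore
/// while let Some(eviction) = store.next_eviction().await {
///     store.evict(eviction);
/// }
/// ```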
#[must_use = "an eviction must be used"]
pub struct Eviction(ProjectKey);

impl Eviction {
    /// Returns the [`ProjectKey`] of the project that needs to be evicted.
    pub fn project_key(&self) -> ProjectKey {
        self.0
    }
}

/// A [`Fetch`] token.
///
/// When returned, it must be executed and completed using [`Self::complete`].
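///
/// A hedged sketch of honoring the backoff before executing the fetch
/// (assumes a tokio runtime; `source_state` stands in for the actual result):
///
/// ```ignore
/// if let Some(when) = fetch.when() {
///     // The project is in backoff, wait until the scheduled time.
///     tokio::time::sleep_until(when).await;
/// }
/// let completed = fetch.complete(source_state);
/// ```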
#[must_use = "a fetch must be executed"]
#[derive(Debug)]
pub struct Fetch {
    project_key: ProjectKey,
    when: Option<Instant>,
    revision: Revision,
}

impl Fetch {
    /// Returns the [`ProjectKey`] of the project to fetch.
    pub fn project_key(&self) -> ProjectKey {
        self.project_key
    }

    /// Returns when the fetch for the project should be scheduled.
    ///
    /// This can be now (as soon as possible, indicated by `None`) or a later point in time
    /// if the project is currently in backoff.
    pub fn when(&self) -> Option<Instant> {
        self.when
    }

    /// Returns the revision of the currently cached project.
    ///
    /// If the upstream indicates that it does not have a different version of this project,
    /// we do not need to update the local state.
    pub fn revision(&self) -> Revision {
        self.revision.clone()
    }

    /// Completes the fetch with a result and returns a [`CompletedFetch`].
    pub fn complete(self, state: SourceProjectState) -> CompletedFetch {
        CompletedFetch {
            project_key: self.project_key,
            state,
        }
    }

    fn with_revision(mut self, revision: Revision) -> Self {
        self.revision = revision;
        self
    }
}

/// The result of an executed [`Fetch`].
#[must_use = "a completed fetch must be acted upon"]
#[derive(Debug)]
pub struct CompletedFetch {
    project_key: ProjectKey,
    state: SourceProjectState,
}

impl CompletedFetch {
    /// Returns the [`ProjectKey`] of the project which was fetched.
    pub fn project_key(&self) -> ProjectKey {
        self.project_key
    }

    /// Returns `true` if the fetch completed with a pending status.
    fn is_pending(&self) -> bool {
        match &self.state {
            SourceProjectState::New(state) => state.is_pending(),
            SourceProjectState::NotModified => false,
        }
    }
}

/// The state of a project contained in the [`Shared`] project cache.
///
/// This state is interior mutable and allows updates to the project.
#[derive(Debug, Default, Clone)]
struct SharedProjectState(Arc<ArcSwap<SharedProjectStateInner>>);

impl SharedProjectState {
    /// Updates the project state.
    fn set_project_state(&self, state: ProjectState) {
        let prev = self.0.rcu(|stored| SharedProjectStateInner {
            state: state.clone(),
            rate_limits: Arc::clone(&stored.rate_limits),
            reservoir_counters: Arc::clone(&stored.reservoir_counters),
        });

        // Try to clean up expired reservoir counters.
        //
        // We do it after the `rcu`, to not re-run this more often than necessary.
        if let Some(state) = state.enabled() {
            let config = state.config.sampling.as_ref();
            if let Some(config) = config.and_then(|eb| eb.as_ref().ok()) {
                // We can safely use previous here, the `rcu` just replaced the state, the
                // reservoir counters did not change.
                //
                // `try_lock` to not potentially block, it's a best effort cleanup.
                if let Ok(mut counters) = prev.reservoir_counters.try_lock() {
                    counters.retain(|key, _| config.rules.iter().any(|rule| rule.id == *key));
                }
            }
        }
    }

    /// Extracts and clones the revision from the contained project state.
    fn revision(&self) -> Revision {
        self.0.as_ref().load().state.revision().clone()
    }

    /// Transforms this interior mutable handle to an immutable [`SharedProject`].
    fn to_shared_project(&self) -> SharedProject {
        SharedProject(self.0.as_ref().load_full())
    }
}

/// The data contained in a [`SharedProjectState`].
///
/// All fields must be cheap to clone and are ideally just a single `Arc`.
/// Partial updates to [`SharedProjectState`] are performed using `rcu`, cloning all fields.
#[derive(Debug, Default)]
struct SharedProjectStateInner {
    state: ProjectState,
    rate_limits: Arc<CachedRateLimits>,
    reservoir_counters: ReservoirCounters,
}

/// Current fetch state for a project.
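///
/// An informal sketch of the transitions, as driven by
/// [`PrivateProjectState::try_begin_fetch`] and
/// [`PrivateProjectState::complete_fetch`]:
///
/// ```text
/// Pending --try_begin_fetch--> InProgress
/// InProgress --complete_fetch (pending)--> Pending (with backoff)
/// InProgress --complete_fetch (non-pending)--> Complete (backoff reset)
/// Complete --expired + try_begin_fetch--> InProgress
/// ```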
#[derive(Debug)]
enum FetchState {
    /// There is a fetch currently in progress.
    InProgress,
    /// A successful fetch is pending.
    ///
    /// This is essentially only the initial state; for the most part,
    /// a project should either have a fetch in progress or be in the
    /// non-pending state.
    Pending {
        /// Time when the next fetch should be attempted.
        ///
        /// `None` means as soon as possible.
        next_fetch_attempt: Option<Instant>,
    },
    /// There was a successful non-pending fetch.
    Complete {
        /// Time when the fetch was completed.
        last_fetch: LastFetch,
    },
}

/// Contains all mutable state necessary to maintain the project cache.
struct PrivateProjectState {
    /// Project key this state belongs to.
    project_key: ProjectKey,

    /// The current fetch state.
    state: FetchState,
    /// The current backoff used for calculating the next fetch attempt.
    ///
    /// The backoff is reset after a successful, non-pending fetch.
    backoff: RetryBackoff,
}

impl PrivateProjectState {
    fn new(project_key: ProjectKey, config: &Config) -> Self {
        Self {
            project_key,
            state: FetchState::Pending {
                next_fetch_attempt: None,
            },
            backoff: RetryBackoff::new(config.http_max_retry_interval()),
        }
    }

    fn expiry_time(&self, config: &Config) -> Option<ExpiryTime> {
        match &self.state {
            FetchState::Complete { last_fetch } => Some(last_fetch.expiry_time(config)),
            _ => None,
        }
    }

    fn try_begin_fetch(&mut self, now: Instant, config: &Config) -> Option<Fetch> {
        let when = match &self.state {
            FetchState::InProgress => {
                relay_log::trace!(
                    tags.project_key = self.project_key.as_str(),
                    "project fetch skipped, fetch in progress"
                );
                return None;
            }
            FetchState::Pending { next_fetch_attempt } => {
                // Schedule a new fetch; even if there is a backoff, it will just sleep for a while.
                *next_fetch_attempt
            }
            FetchState::Complete { last_fetch } => {
                if last_fetch.check_expiry(now, config).is_fresh() {
                    // The current state is up to date, no need to start another fetch.
                    relay_log::trace!(
                        tags.project_key = self.project_key.as_str(),
                        "project fetch skipped, already up to date"
                    );
                    return None;
                }
                None
            }
        };

        // Mark the fetch as in progress.
        self.state = FetchState::InProgress;

        relay_log::trace!(
            tags.project_key = &self.project_key.as_str(),
            attempts = self.backoff.attempt() + 1,
            "project state fetch scheduled in {:?}",
            when.unwrap_or(now).saturating_duration_since(now),
        );

        Some(Fetch {
            project_key: self.project_key,
            when,
            revision: Revision::default(),
        })
    }

    fn complete_fetch(&mut self, fetch: &CompletedFetch, now: Instant) {
        debug_assert!(
            matches!(self.state, FetchState::InProgress),
            "fetch completed while there was no current fetch registered"
        );

        if fetch.is_pending() {
            let next_backoff = self.backoff.next_backoff();
            let next_fetch_attempt = match next_backoff.is_zero() {
                false => now.checked_add(next_backoff),
                true => None,
            };
            self.state = FetchState::Pending { next_fetch_attempt };
            relay_log::trace!(
                tags.project_key = &self.project_key.as_str(),
                "project state fetch completed but still pending"
            );
        } else {
            relay_log::trace!(
                tags.project_key = &self.project_key.as_str(),
                "project state fetch completed with non-pending config"
            );
            self.backoff.reset();
            self.state = FetchState::Complete {
                last_fetch: LastFetch(now),
            };
        }
    }
}

/// Newtype wrapping the time of the last successful fetch as an [`Instant`].
#[derive(Debug, Copy, Clone)]
struct LastFetch(Instant);

impl LastFetch {
    /// Returns the [`Expiry`] of the last fetch in relation to `now`.
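    ///
    /// A worked example with illustrative values, `project_cache_expiry = 5s`
    /// and `project_grace_period = 5s`:
    ///
    /// ```text
    /// elapsed <  5s  => Expiry::Fresh
    /// elapsed < 10s  => Expiry::Stale
    /// elapsed >= 10s => Expiry::Expired
    /// ```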
    fn check_expiry(&self, now: Instant, config: &Config) -> Expiry {
        let expiry = config.project_cache_expiry();
        let elapsed = now.saturating_duration_since(self.0);

        if elapsed >= expiry + config.project_grace_period() {
            Expiry::Expired
        } else if elapsed >= expiry {
            Expiry::Stale
        } else {
            Expiry::Fresh
        }
    }

    /// Returns when the project is scheduled to expire, based on the current [`LastFetch`].
    fn expiry_time(&self, config: &Config) -> ExpiryTime {
        ExpiryTime(self.0 + config.project_grace_period() + config.project_cache_expiry())
    }
}

/// Expiry state of a project.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
enum Expiry {
    /// The project state is perfectly up to date.
    Fresh,
    /// The project state is outdated but events depending on this project state can still be
    /// processed. The state should be refreshed in the background though.
    Stale,
    /// The project state is completely outdated and events need to be buffered up until the new
    /// state has been fetched.
    Expired,
}

impl Expiry {
    /// Returns `true` if the project is up-to-date and does not need to be fetched.
    fn is_fresh(&self) -> bool {
        matches!(self, Self::Fresh)
    }
}

/// Instant when a project is scheduled for expiry.
#[must_use = "an expiry time must be used to schedule an eviction"]
struct ExpiryTime(Instant);

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use super::*;

    async fn collect_evicted(store: &mut ProjectStore) -> Vec<ProjectKey> {
        let mut evicted = Vec::new();
        // Use a small timeout so we only collect what is ready to be evicted right now.
        while let Ok(Some(eviction)) =
            tokio::time::timeout(Duration::from_nanos(5), store.next_eviction()).await
        {
            evicted.push(eviction.0);
            store.evict(eviction);
        }
        evicted
    }

    macro_rules! assert_state {
        ($store:ident, $project_key:ident, $state:pat) => {
            assert!(matches!(
                $store.shared().get_or_create($project_key).project_state(),
                $state
            ));
        };
    }

    #[tokio::test(start_paused = true)]
    async fn test_store_fetch() {
        let project_key = ProjectKey::parse("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").unwrap();
        let mut store = ProjectStore::default();
        let config = Default::default();

        let fetch = store.try_begin_fetch(project_key, &config).unwrap();
        assert_eq!(fetch.project_key(), project_key);
        assert_eq!(fetch.when(), None);
        assert_eq!(fetch.revision().as_str(), None);
        assert_state!(store, project_key, ProjectState::Pending);

        // Fetch already in progress, nothing to do.
        assert!(store.try_begin_fetch(project_key, &config).is_none());

        // A pending fetch should trigger a new fetch immediately.
        let fetch = fetch.complete(ProjectState::Pending.into());
        let fetch = store.complete_fetch(fetch, &config).unwrap();
        assert_eq!(fetch.project_key(), project_key);
        // The first backoff is still immediate.
        assert_eq!(fetch.when(), None);
        assert_eq!(fetch.revision().as_str(), None);
        assert_state!(store, project_key, ProjectState::Pending);

        // Pending again.
        let fetch = fetch.complete(ProjectState::Pending.into());
        let fetch = store.complete_fetch(fetch, &config).unwrap();
        assert_eq!(fetch.project_key(), project_key);
        // This time it needs to be in the future (backoff).
        assert!(fetch.when() > Some(Instant::now()));
        assert_eq!(fetch.revision().as_str(), None);
        assert_state!(store, project_key, ProjectState::Pending);

        // Now complete with disabled.
        let fetch = fetch.complete(ProjectState::Disabled.into());
        assert!(store.complete_fetch(fetch, &config).is_none());
        assert_state!(store, project_key, ProjectState::Disabled);

        // A new fetch is not yet necessary.
        assert!(store.try_begin_fetch(project_key, &config).is_none());
    }

    #[tokio::test(start_paused = true)]
    async fn test_store_fetch_pending_does_not_replace_state() {
        let project_key = ProjectKey::parse("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").unwrap();
        let mut store = ProjectStore::default();
        let config = Config::from_json_value(serde_json::json!({
            "cache": {
                "project_expiry": 5,
                "project_grace_period": 5,
            }
        }))
        .unwrap();

        let fetch = store.try_begin_fetch(project_key, &config).unwrap();
        let fetch = fetch.complete(ProjectState::Disabled.into());
        assert!(store.complete_fetch(fetch, &config).is_none());
        assert_state!(store, project_key, ProjectState::Disabled);

        tokio::time::advance(Duration::from_secs(6)).await;

        let fetch = store.try_begin_fetch(project_key, &config).unwrap();
        let fetch = fetch.complete(ProjectState::Pending.into());
        // We're returned a new fetch, because the current one completed pending.
        let fetch = store.complete_fetch(fetch, &config).unwrap();
        // The old cached state is still available and not replaced.
        assert_state!(store, project_key, ProjectState::Disabled);

        let fetch = fetch.complete(ProjectState::new_allowed().into());
        assert!(store.complete_fetch(fetch, &config).is_none());
        assert_state!(store, project_key, ProjectState::Enabled(_));
    }

    #[tokio::test(start_paused = true)]
    async fn test_store_evict_projects() {
        let project_key1 = ProjectKey::parse("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").unwrap();
        let project_key2 = ProjectKey::parse("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb").unwrap();
        let mut store = ProjectStore::default();
        let config = Config::from_json_value(serde_json::json!({
            "cache": {
                "project_expiry": 5,
                "project_grace_period": 0,
            }
        }))
        .unwrap();

        let fetch = store.try_begin_fetch(project_key1, &config).unwrap();
        let fetch = fetch.complete(ProjectState::Disabled.into());
        assert!(store.complete_fetch(fetch, &config).is_none());

        assert_eq!(collect_evicted(&mut store).await, Vec::new());
        assert_state!(store, project_key1, ProjectState::Disabled);

        // 3 seconds is not enough to expire any project.
        tokio::time::advance(Duration::from_secs(3)).await;

        assert_eq!(collect_evicted(&mut store).await, Vec::new());
        assert_state!(store, project_key1, ProjectState::Disabled);

        let fetch = store.try_begin_fetch(project_key2, &config).unwrap();
        let fetch = fetch.complete(ProjectState::Disabled.into());
        assert!(store.complete_fetch(fetch, &config).is_none());

        // A total of 6 seconds should expire the first project.
        tokio::time::advance(Duration::from_secs(3)).await;

        assert_eq!(collect_evicted(&mut store).await, vec![project_key1]);
        assert_state!(store, project_key1, ProjectState::Pending);
        assert_state!(store, project_key2, ProjectState::Disabled);
    }

    #[tokio::test(start_paused = true)]
    async fn test_store_evict_projects_pending_not_expired() {
        let project_key1 = ProjectKey::parse("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").unwrap();
        let project_key2 = ProjectKey::parse("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb").unwrap();
        let mut store = ProjectStore::default();
        let config = Config::from_json_value(serde_json::json!({
            "cache": {
                "project_expiry": 5,
                "project_grace_period": 0,
            }
        }))
        .unwrap();

        let fetch = store.try_begin_fetch(project_key1, &config).unwrap();
        // Create a new project in a pending state but never fetch it; it should never expire.
        store.shared().get_or_create(project_key2);

        tokio::time::advance(Duration::from_secs(6)).await;

        // No evictions, project is pending.
        assert_eq!(collect_evicted(&mut store).await, Vec::new());

        // Complete the project.
        let fetch = fetch.complete(ProjectState::Disabled.into());
        assert!(store.complete_fetch(fetch, &config).is_none());

        // It still should not be evicted: expiry happens 5 seconds after completion.
        assert_eq!(collect_evicted(&mut store).await, Vec::new());
        tokio::time::advance(Duration::from_secs(4)).await;
        assert_eq!(collect_evicted(&mut store).await, Vec::new());
        assert_state!(store, project_key1, ProjectState::Disabled);

        // Just enough to expire the project.
        tokio::time::advance(Duration::from_millis(1001)).await;
        assert_eq!(collect_evicted(&mut store).await, vec![project_key1]);
        assert_state!(store, project_key1, ProjectState::Pending);
        assert_state!(store, project_key2, ProjectState::Pending);
    }

    #[tokio::test(start_paused = true)]
    async fn test_store_evict_projects_stale() {
        let project_key = ProjectKey::parse("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").unwrap();
        let mut store = ProjectStore::default();
        let config = Config::from_json_value(serde_json::json!({
            "cache": {
                "project_expiry": 5,
                "project_grace_period": 5,
            }
        }))
        .unwrap();

        let fetch = store.try_begin_fetch(project_key, &config).unwrap();
        let fetch = fetch.complete(ProjectState::Disabled.into());
        assert!(store.complete_fetch(fetch, &config).is_none());

        // This is in the grace period, but not yet expired.
        tokio::time::advance(Duration::from_millis(9500)).await;

        assert_eq!(collect_evicted(&mut store).await, Vec::new());
        assert_state!(store, project_key, ProjectState::Disabled);

        // Now it's expired.
        tokio::time::advance(Duration::from_secs(1)).await;

        assert_eq!(collect_evicted(&mut store).await, vec![project_key]);
        assert_state!(store, project_key, ProjectState::Pending);
    }

    #[tokio::test(start_paused = true)]
    async fn test_store_no_eviction_during_fetch() {
        let project_key = ProjectKey::parse("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").unwrap();
        let mut store = ProjectStore::default();
        let config = Config::from_json_value(serde_json::json!({
            "cache": {
                "project_expiry": 5,
                "project_grace_period": 5,
            }
        }))
        .unwrap();

        let fetch = store.try_begin_fetch(project_key, &config).unwrap();

        // Project is expired, but there is an ongoing fetch.
        tokio::time::advance(Duration::from_millis(10500)).await;
        // No evictions, there is a fetch ongoing!
        assert_eq!(collect_evicted(&mut store).await, Vec::new());

        // Complete the project.
        let fetch = fetch.complete(ProjectState::Disabled.into());
        assert!(store.complete_fetch(fetch, &config).is_none());
        // But start a new fetch asap (after grace period).
        tokio::time::advance(Duration::from_millis(5001)).await;
        let fetch = store.try_begin_fetch(project_key, &config).unwrap();

        // Again, expire the project.
        tokio::time::advance(Duration::from_millis(10500)).await;
        // No evictions, there is a fetch ongoing!
        assert_eq!(collect_evicted(&mut store).await, Vec::new());

        // Complete the project.
        let fetch = fetch.complete(ProjectState::Disabled.into());
        assert!(store.complete_fetch(fetch, &config).is_none());

        // Not quite yet expired.
        tokio::time::advance(Duration::from_millis(9500)).await;
        assert_eq!(collect_evicted(&mut store).await, Vec::new());
        // Now it's expired.
        tokio::time::advance(Duration::from_millis(501)).await;
        assert_eq!(collect_evicted(&mut store).await, vec![project_key]);
        assert_state!(store, project_key, ProjectState::Pending);
    }
}