chore: QoL improvements to tests (#7917)
- Use mock helper functions instead of home-brew solutions.
- Disable cron jobs that are not important to run during integration tests and might even interfere with them.
- Avoid sleeping unnecessarily; if there is some requirement, sleep or retry until that requirement is met.
- Avoid trying to deliver webhooks that will always result in a failure.

Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/7917
Reviewed-by: Michael Kriese <michael.kriese@gmx.de>
Co-authored-by: Gusted <postmaster@gusted.xyz>
Co-committed-by: Gusted <postmaster@gusted.xyz>
parent 112ba66637
commit fa2a135f68
44 changed files with 155 additions and 264 deletions
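The recurring pattern in the hunks below is replacing hand-rolled poll-and-sleep loops with testify's require.Eventually / assert.Eventually, which re-evaluates a condition at a fixed tick until it returns true or an overall timeout expires. A minimal, self-contained sketch of that pattern follows; the package name, test name, and endpoint URL are placeholders for illustration, not taken from the Forgejo test suite:

package example

import (
	"net/http"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// Sketch of the retry-until-ready pattern: poll the condition every tick
// until it succeeds or the timeout expires, then fail the test with the
// given message if it never became true.
func TestWaitForHTTPService(t *testing.T) {
	url := "http://127.0.0.1:9200" // hypothetical endpoint, not from the Forgejo suite

	require.Eventually(t, func() bool {
		resp, err := http.Get(url)
		if err != nil {
			return false
		}
		defer resp.Body.Close()
		return resp.StatusCode == http.StatusOK
	}, time.Minute, 100*time.Millisecond, "service did not become ready in time")
}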
@@ -11,6 +11,8 @@ import (
 	"time"
 
 	"forgejo.org/modules/indexer/issues/internal/tests"
+
+	"github.com/stretchr/testify/require"
 )
 
 func TestElasticsearchIndexer(t *testing.T) {
@@ -26,20 +28,10 @@ func TestElasticsearchIndexer(t *testing.T) {
 	}
 
-	ok := false
-	for i := 0; i < 60; i++ {
+	require.Eventually(t, func() bool {
 		resp, err := http.Get(url)
-		if err == nil && resp.StatusCode == http.StatusOK {
-			ok = true
-			break
-		}
-		t.Logf("Waiting for elasticsearch to be up: %v", err)
-		time.Sleep(time.Second)
-	}
-	if !ok {
-		t.Fatalf("Failed to wait for elasticsearch to be up")
-		return
-	}
+		return err == nil && resp.StatusCode == http.StatusOK
+	}, time.Minute, time.Microsecond*100, "Failed to wait for elasticsearch to be up")
 
 	indexer := NewIndexer(url, fmt.Sprintf("test_elasticsearch_indexer_%d", time.Now().Unix()))
 	defer indexer.Close()
 
@@ -8,7 +8,6 @@
 package tests
 
 import (
-	"context"
 	"fmt"
 	"slices"
 	"testing"
@@ -40,7 +39,7 @@ func TestIndexer(t *testing.T, indexer internal.Indexer) {
 			data[v.ID] = v
 		}
 		require.NoError(t, indexer.Index(t.Context(), d...))
-		require.NoError(t, waitData(indexer, int64(len(data))))
+		waitData(t, indexer, int64(len(data)))
 	}
 
 	defer func() {
@@ -54,13 +53,13 @@ func TestIndexer(t *testing.T, indexer internal.Indexer) {
 				for _, v := range c.ExtraData {
 					data[v.ID] = v
 				}
-				require.NoError(t, waitData(indexer, int64(len(data))))
+				waitData(t, indexer, int64(len(data)))
 				defer func() {
 					for _, v := range c.ExtraData {
 						require.NoError(t, indexer.Delete(t.Context(), v.ID))
 						delete(data, v.ID)
 					}
-					require.NoError(t, waitData(indexer, int64(len(data))))
+					waitData(t, indexer, int64(len(data)))
 				}()
 			}
 
@@ -783,22 +782,17 @@ func countIndexerData(data map[int64]*internal.IndexerData, f func(v *internal.I
 
 // waitData waits for the indexer to index all data.
 // Some engines like Elasticsearch index data asynchronously, so we need to wait for a while.
-func waitData(indexer internal.Indexer, total int64) error {
+func waitData(t testing.TB, indexer internal.Indexer, total int64) {
 	var actual int64
-	for i := 0; i < 100; i++ {
-		result, err := indexer.Search(context.Background(), &internal.SearchOptions{
+	assert.Eventually(t, func() bool {
+		result, err := indexer.Search(t.Context(), &internal.SearchOptions{
 			Paginator: &db.ListOptions{
 				PageSize: 0,
 			},
 		})
-		if err != nil {
-			return err
-		}
+		require.NoError(t, err)
+
 		actual = result.Total
-		if actual == total {
-			return nil
-		}
-		time.Sleep(100 * time.Millisecond)
-	}
-	return fmt.Errorf("waitData: expected %d, actual %d", total, actual)
+		return actual == total
+	}, time.Second*10, time.Millisecond*100, "expected %d but got %d", total, actual)
 }
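As a usage note, the reworked waitData takes a testing.TB and fails the test itself through assert/require, so callers drop the require.NoError wrapper. A hypothetical caller sketch, assuming the same file and imports as tests.go above (indexAndWait is an illustrative name, not part of the change):

// indexAndWait indexes the given documents and blocks until the engine
// reports that all of them are searchable.
func indexAndWait(t *testing.T, indexer internal.Indexer, docs []*internal.IndexerData) {
	require.NoError(t, indexer.Index(t.Context(), docs...))
	waitData(t, indexer, int64(len(docs)))
}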
@@ -34,20 +34,10 @@ func TestMeilisearchIndexer(t *testing.T) {
 		key = os.Getenv("TEST_MEILISEARCH_KEY")
 	}
 
-	ok := false
-	for i := 0; i < 60; i++ {
+	require.Eventually(t, func() bool {
 		resp, err := http.Get(url)
-		if err == nil && resp.StatusCode == http.StatusOK {
-			ok = true
-			break
-		}
-		t.Logf("Waiting for meilisearch to be up: %v", err)
-		time.Sleep(time.Second)
-	}
-	if !ok {
-		t.Fatalf("Failed to wait for meilisearch to be up")
-		return
-	}
+		return err == nil && resp.StatusCode == http.StatusOK
+	}, time.Minute, time.Microsecond*100, "Failed to wait for meilisearch to be up")
 
 	indexer := NewIndexer(url, key, fmt.Sprintf("test_meilisearch_indexer_%d", time.Now().Unix()))
 	defer indexer.Close()