// Copyright 2017 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

//nolint:forbidigo
package integration

import (
	"bytes"
	"context"
	"fmt"
	"hash"
	"hash/fnv"
	"io"
	"net/http"
	"net/http/cookiejar"
	"net/http/httptest"
	"net/url"
	"os"
	"path/filepath"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"code.gitea.io/gitea/models/auth"
	"code.gitea.io/gitea/models/unittest"
	gitea_context "code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/json"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/testlogger"
	"code.gitea.io/gitea/modules/util"
	"code.gitea.io/gitea/modules/web"
	"code.gitea.io/gitea/routers"
	"code.gitea.io/gitea/tests"

	"github.com/PuerkitoBio/goquery"
	"github.com/stretchr/testify/assert"
	"github.com/xeipuuv/gojsonschema"
)

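// c is the web route handler that all test requests are dispatched through (see MakeRequest).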
var c *web.Route

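// NilResponseRecorder is a ResponseRecorder that discards the response body
// and only tracks how many bytes were written to it.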
type NilResponseRecorder struct {
	httptest.ResponseRecorder
	Length int
}

func (n *NilResponseRecorder) Write(b []byte) (int, error) {
	n.Length += len(b)
	return len(b), nil
}

// NewNilResponseRecorder returns an initialized NilResponseRecorder.
func NewNilResponseRecorder() *NilResponseRecorder {
	return &NilResponseRecorder{
		ResponseRecorder: *httptest.NewRecorder(),
	}
}

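// NilResponseHashSumRecorder is a ResponseRecorder that discards the response body
// but records its length and a hash of its content.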
type NilResponseHashSumRecorder struct {
	httptest.ResponseRecorder
	Hash   hash.Hash
	Length int
}

func (n *NilResponseHashSumRecorder) Write(b []byte) (int, error) {
	_, _ = n.Hash.Write(b)
	n.Length += len(b)
	return len(b), nil
}

// NewNilResponseHashSumRecorder returns an initialized NilResponseHashSumRecorder.
func NewNilResponseHashSumRecorder() *NilResponseHashSumRecorder {
	return &NilResponseHashSumRecorder{
		Hash:             fnv.New32(),
		ResponseRecorder: *httptest.NewRecorder(),
	}
}

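// TestMain sets up the test environment (router, fixtures, test loggers),
// runs the integration test suite, and cleans up indexer data afterwards.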
func TestMain(m *testing.M) {
	defer log.Close()

	managerCtx, cancel := context.WithCancel(context.Background())
	graceful.InitManager(managerCtx)
	defer cancel()

	tests.InitTest(true)
	c = routers.NormalRoutes(context.TODO())

	// integration test settings...
	if setting.CfgProvider != nil {
		testingCfg := setting.CfgProvider.Section("integration-tests")
		testlogger.SlowTest = testingCfg.Key("SLOW_TEST").MustDuration(testlogger.SlowTest)
		testlogger.SlowFlush = testingCfg.Key("SLOW_FLUSH").MustDuration(testlogger.SlowFlush)
	}

	if os.Getenv("GITEA_SLOW_TEST_TIME") != "" {
		duration, err := time.ParseDuration(os.Getenv("GITEA_SLOW_TEST_TIME"))
		if err == nil {
			testlogger.SlowTest = duration
		}
	}

	if os.Getenv("GITEA_SLOW_FLUSH_TIME") != "" {
		duration, err := time.ParseDuration(os.Getenv("GITEA_SLOW_FLUSH_TIME"))
		if err == nil {
			testlogger.SlowFlush = duration
		}
	}

	os.Unsetenv("GIT_AUTHOR_NAME")
	os.Unsetenv("GIT_AUTHOR_EMAIL")
	os.Unsetenv("GIT_AUTHOR_DATE")
	os.Unsetenv("GIT_COMMITTER_NAME")
	os.Unsetenv("GIT_COMMITTER_EMAIL")
	os.Unsetenv("GIT_COMMITTER_DATE")

	err := unittest.InitFixtures(
		unittest.FixturesOptions{
			Dir: filepath.Join(filepath.Dir(setting.AppPath), "models/fixtures/"),
		},
	)
	if err != nil {
		fmt.Printf("Error initializing test database: %v\n", err)
		os.Exit(1)
	}

	// FIXME: the console logger is deleted by mistake, so if there is any `log.Fatal`, developers won't see any error message.
	// Instead, the run only reports "No tests were found", and the last (unhelpful) log line is
	// "According to the configuration, subsequent logs will not be printed to the console".
	exitCode := m.Run()

	testlogger.WriterCloser.Reset()

	if err = util.RemoveAll(setting.Indexer.IssuePath); err != nil {
		fmt.Printf("util.RemoveAll: %v\n", err)
		os.Exit(1)
	}
	if err = util.RemoveAll(setting.Indexer.RepoPath); err != nil {
		fmt.Printf("Unable to remove repo indexer: %v\n", err)
		os.Exit(1)
	}

	os.Exit(exitCode)
}

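// TestSession holds the cookie jar of a signed-in test user so that several
// requests can be made within the same authenticated session.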
type TestSession struct {
	jar http.CookieJar
}

func (s *TestSession) GetCookie(name string) *http.Cookie {
	baseURL, err := url.Parse(setting.AppURL)
	if err != nil {
		return nil
	}

	for _, c := range s.jar.Cookies(baseURL) {
		if c.Name == name {
			return c
		}
	}
	return nil
}

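// MakeRequest sends the request with the session's cookies attached, stores any
// cookies set by the response back into the jar, and returns the response recorder.
// The NilResponse variants below behave the same but discard the response body.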
func (s *TestSession) MakeRequest(t testing.TB, req *http.Request, expectedStatus int) *httptest.ResponseRecorder {
	t.Helper()
	baseURL, err := url.Parse(setting.AppURL)
	assert.NoError(t, err)
	for _, c := range s.jar.Cookies(baseURL) {
		req.AddCookie(c)
	}
	resp := MakeRequest(t, req, expectedStatus)

	ch := http.Header{}
	ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";"))
	cr := http.Request{Header: ch}
	s.jar.SetCookies(baseURL, cr.Cookies())

	return resp
}

func (s *TestSession) MakeRequestNilResponseRecorder(t testing.TB, req *http.Request, expectedStatus int) *NilResponseRecorder {
	t.Helper()
	baseURL, err := url.Parse(setting.AppURL)
	assert.NoError(t, err)
	for _, c := range s.jar.Cookies(baseURL) {
		req.AddCookie(c)
	}
	resp := MakeRequestNilResponseRecorder(t, req, expectedStatus)

	ch := http.Header{}
	ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";"))
	cr := http.Request{Header: ch}
	s.jar.SetCookies(baseURL, cr.Cookies())

	return resp
}

func (s *TestSession) MakeRequestNilResponseHashSumRecorder(t testing.TB, req *http.Request, expectedStatus int) *NilResponseHashSumRecorder {
	t.Helper()
	baseURL, err := url.Parse(setting.AppURL)
	assert.NoError(t, err)
	for _, c := range s.jar.Cookies(baseURL) {
		req.AddCookie(c)
	}
	resp := MakeRequestNilResponseHashSumRecorder(t, req, expectedStatus)

	ch := http.Header{}
	ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";"))
	cr := http.Request{Header: ch}
	s.jar.SetCookies(baseURL, cr.Cookies())

	return resp
}

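// userPassword is the plain-text password shared by the test fixture users.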
const userPassword = "password"

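// emptyTestSession returns a TestSession with a fresh, empty cookie jar.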
func emptyTestSession(t testing.TB) *TestSession {
	t.Helper()
	jar, err := cookiejar.New(nil)
	assert.NoError(t, err)

	return &TestSession{jar: jar}
}

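// getUserToken logs the given user in and generates an access token with the
// requested scopes for them in a single step.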
func getUserToken(t testing.TB, userName string, scope ...auth.AccessTokenScope) string {
	return getTokenForLoggedInUser(t, loginUser(t, userName), scope...)
}

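// loginUser signs the given fixture user in with the default password and returns
// the resulting session. Illustrative usage (assuming the standard "user2" fixture):
//
//	session := loginUser(t, "user2")
//	req := NewRequest(t, "GET", "/user/settings")
//	resp := session.MakeRequest(t, req, http.StatusOK)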
func loginUser(t testing.TB, userName string) *TestSession {
	t.Helper()

	return loginUserWithPassword(t, userName, userPassword)
}

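// loginUserWithPassword performs the regular web login flow: it fetches the login
// page for a CSRF token, posts the credentials, and stores the returned cookies
// in a new session.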
func loginUserWithPassword(t testing.TB, userName, password string) *TestSession {
	t.Helper()
	req := NewRequest(t, "GET", "/user/login")
	resp := MakeRequest(t, req, http.StatusOK)

	doc := NewHTMLParser(t, resp.Body)
	req = NewRequestWithValues(t, "POST", "/user/login", map[string]string{
		"_csrf":     doc.GetCSRF(),
		"user_name": userName,
		"password":  password,
	})
	resp = MakeRequest(t, req, http.StatusSeeOther)

	ch := http.Header{}
	ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";"))
	cr := http.Request{Header: ch}

	session := emptyTestSession(t)

	baseURL, err := url.Parse(setting.AppURL)
	assert.NoError(t, err)
	session.jar.SetCookies(baseURL, cr.Cookies())

	return session
}

// Generated token names have to be unique; this counter takes care of that.
var tokenCounter int64

// getTokenForLoggedInUser returns a token for a logged in user.
// The scope is an optional list of snake_case strings like the frontend form fields,
// but without the "scope_" prefix.
func getTokenForLoggedInUser(t testing.TB, session *TestSession, scopes ...auth.AccessTokenScope) string {
	t.Helper()
	var token string
	req := NewRequest(t, "GET", "/user/settings/applications")
	resp := session.MakeRequest(t, req, http.StatusOK)
	var csrf string
	for _, cookie := range resp.Result().Cookies() {
		if cookie.Name != "_csrf" {
			continue
		}
		csrf = cookie.Value
		break
	}
	if csrf == "" {
		doc := NewHTMLParser(t, resp.Body)
		csrf = doc.GetCSRF()
	}
	assert.NotEmpty(t, csrf)
	urlValues := url.Values{}
	urlValues.Add("_csrf", csrf)
	urlValues.Add("name", fmt.Sprintf("api-testing-token-%d", atomic.AddInt64(&tokenCounter, 1)))
	for _, scope := range scopes {
		urlValues.Add("scope", string(scope))
	}
	req = NewRequestWithURLValues(t, "POST", "/user/settings/applications", urlValues)
	resp = session.MakeRequest(t, req, http.StatusSeeOther)

	// Log the flash values on failure
	if !assert.Equal(t, resp.Result().Header["Location"], []string{"/user/settings/applications"}) {
		for _, cookie := range resp.Result().Cookies() {
			if cookie.Name != gitea_context.CookieNameFlash {
				continue
			}
			flash, _ := url.ParseQuery(cookie.Value)
			for key, value := range flash {
				t.Logf("Flash %q: %q", key, value)
			}
		}
	}

	req = NewRequest(t, "GET", "/user/settings/applications")
	resp = session.MakeRequest(t, req, http.StatusOK)
	htmlDoc := NewHTMLParser(t, resp.Body)
	token = htmlDoc.doc.Find(".ui.info p").Text()
	assert.NotEmpty(t, token)
	return token
}

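// NewRequest creates a test request with an empty body for the given method and target URL.
// The NewRequest* variants below build requests from a format string, form values,
// a JSON payload, or an arbitrary body.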
func NewRequest(t testing.TB, method, urlStr string) *http.Request {
	t.Helper()
	return NewRequestWithBody(t, method, urlStr, nil)
}

func NewRequestf(t testing.TB, method, urlFormat string, args ...interface{}) *http.Request {
	t.Helper()
	return NewRequest(t, method, fmt.Sprintf(urlFormat, args...))
}

func NewRequestWithValues(t testing.TB, method, urlStr string, values map[string]string) *http.Request {
	t.Helper()
	urlValues := url.Values{}
	for key, value := range values {
		urlValues[key] = []string{value}
	}
	return NewRequestWithURLValues(t, method, urlStr, urlValues)
}

func NewRequestWithURLValues(t testing.TB, method, urlStr string, urlValues url.Values) *http.Request {
	t.Helper()
	req := NewRequestWithBody(t, method, urlStr, bytes.NewBufferString(urlValues.Encode()))
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	return req
}

func NewRequestWithJSON(t testing.TB, method, urlStr string, v interface{}) *http.Request {
	t.Helper()

	jsonBytes, err := json.Marshal(v)
	assert.NoError(t, err)
	req := NewRequestWithBody(t, method, urlStr, bytes.NewBuffer(jsonBytes))
	req.Header.Add("Content-Type", "application/json")
	return req
}

func NewRequestWithBody(t testing.TB, method, urlStr string, body io.Reader) *http.Request {
	t.Helper()
	if !strings.HasPrefix(urlStr, "http") && !strings.HasPrefix(urlStr, "/") {
		urlStr = "/" + urlStr
	}
	request, err := http.NewRequest(method, urlStr, body)
	assert.NoError(t, err)
	request.RequestURI = urlStr
	return request
}

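// AddBasicAuthHeader sets HTTP basic auth on the request using the shared test user password.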
func AddBasicAuthHeader(request *http.Request, username string) *http.Request {
	request.SetBasicAuth(username, userPassword)
	return request
}

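// NoExpectedStatus can be passed as the expected status to skip the status code assertion.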
const NoExpectedStatus = -1

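// MakeRequest serves the request through the test router, asserts the response status
// (unless NoExpectedStatus is given), and returns the response recorder.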
func MakeRequest(t testing.TB, req *http.Request, expectedStatus int) *httptest.ResponseRecorder {
	t.Helper()
	recorder := httptest.NewRecorder()
	if req.RemoteAddr == "" {
		req.RemoteAddr = "test-mock:12345"
	}
	c.ServeHTTP(recorder, req)
	if expectedStatus != NoExpectedStatus {
		if !assert.EqualValues(t, expectedStatus, recorder.Code, "Request: %s %s", req.Method, req.URL.String()) {
			logUnexpectedResponse(t, recorder)
		}
	}
	return recorder
}

func MakeRequestNilResponseRecorder(t testing.TB, req *http.Request, expectedStatus int) *NilResponseRecorder {
	t.Helper()
	recorder := NewNilResponseRecorder()
	c.ServeHTTP(recorder, req)
	if expectedStatus != NoExpectedStatus {
		if !assert.EqualValues(t, expectedStatus, recorder.Code,
			"Request: %s %s", req.Method, req.URL.String()) {
			logUnexpectedResponse(t, &recorder.ResponseRecorder)
		}
	}
	return recorder
}

func MakeRequestNilResponseHashSumRecorder(t testing.TB, req *http.Request, expectedStatus int) *NilResponseHashSumRecorder {
	t.Helper()
	recorder := NewNilResponseHashSumRecorder()
	c.ServeHTTP(recorder, req)
	if expectedStatus != NoExpectedStatus {
		if !assert.EqualValues(t, expectedStatus, recorder.Code,
			"Request: %s %s", req.Method, req.URL.String()) {
			logUnexpectedResponse(t, &recorder.ResponseRecorder)
		}
	}
	return recorder
}

// logUnexpectedResponse logs the contents of an unexpected response.
func logUnexpectedResponse(t testing.TB, recorder *httptest.ResponseRecorder) {
	t.Helper()
	respBytes := recorder.Body.Bytes()
	if len(respBytes) == 0 {
		return
	} else if len(respBytes) < 500 {
		// if body is short, just log the whole thing
		t.Log("Response: ", string(respBytes))
		return
	} else {
		t.Log("Response length: ", len(respBytes))
	}

	// log the "flash" error message, if one exists
	// we must create a new buffer, so that we don't "use up" resp.Body
	htmlDoc, err := goquery.NewDocumentFromReader(bytes.NewBuffer(respBytes))
	if err != nil {
		return // probably a non-HTML response
	}
	errMsg := htmlDoc.Find(".ui.negative.message").Text()
	if len(errMsg) > 0 {
		t.Log("A flash error message was found:", errMsg)
	}
}

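// DecodeJSON decodes the recorded response body into v and fails the test on error.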
func DecodeJSON(t testing.TB, resp *httptest.ResponseRecorder, v interface{}) {
	t.Helper()

	decoder := json.NewDecoder(resp.Body)
	assert.NoError(t, decoder.Decode(v))
}

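// VerifyJSONSchema validates the response body against a JSON schema file
// located in tests/integration/schemas.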
func VerifyJSONSchema(t testing.TB, resp *httptest.ResponseRecorder, schemaFile string) {
	t.Helper()

	schemaFilePath := filepath.Join(filepath.Dir(setting.AppPath), "tests", "integration", "schemas", schemaFile)
	_, schemaFileErr := os.Stat(schemaFilePath)
	assert.Nil(t, schemaFileErr)

	schema, schemaFileReadErr := os.ReadFile(schemaFilePath)
	assert.Nil(t, schemaFileReadErr)
	assert.True(t, len(schema) > 0)

	nodeinfoSchema := gojsonschema.NewStringLoader(string(schema))
	nodeinfoString := gojsonschema.NewStringLoader(resp.Body.String())
	result, schemaValidationErr := gojsonschema.Validate(nodeinfoSchema, nodeinfoString)
	assert.Nil(t, schemaValidationErr)
	assert.Empty(t, result.Errors())
	assert.True(t, result.Valid())
}

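// GetCSRF fetches the given page within the session and returns the CSRF token embedded in it.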
func GetCSRF(t testing.TB, session *TestSession, urlStr string) string {
	t.Helper()
	req := NewRequest(t, "GET", urlStr)
	resp := session.MakeRequest(t, req, http.StatusOK)
	doc := NewHTMLParser(t, resp.Body)
	return doc.GetCSRF()
}