Enable unparam linter (#31277)

Enable the [unparam](https://github.com/mvdan/unparam) linter. Often I could not tell why a parameter or result was unused, so I put `//nolint` on those cases, such as the webhook request creation functions that never use `ctx`.

---------

Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: delvh <dev.lh@web.de>
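For context, unparam reports two kinds of findings that this commit deals with: parameters that are never used (handled here by renaming them to `_`, as in the webhook request constructors) and results that can only ever hold one value (handled by dropping the result, or suppressed with `//nolint:unparam` where the signature has to stay). Below is a minimal sketch of both cases; the function names are invented for illustration and are not part of this change.

```go
package main

import (
	"context"
	"fmt"
)

// A result that can only ever be nil: unparam reports it, and every caller
// carries dead error handling. The usual fix is to drop the result; where a
// signature has to stay (e.g. to match sibling functions or an interface),
// the report is silenced with `//nolint:unparam` instead.
func buildGreeting(name string) (string, error) {
	return "hello " + name, nil
}

// A parameter that is never used would also be reported by unparam.
// Renaming it to `_` keeps the uniform signature while documenting that the
// value is intentionally ignored, which is what this commit does for `ctx`
// in the webhook request constructors.
func newGreetingRequest(_ context.Context, name string) string {
	return "GET /greet?name=" + name
}

func main() {
	msg, err := buildGreeting("gitea")
	if err != nil { // can never trigger; exactly the dead code unparam points at
		panic(err)
	}
	fmt.Println(msg)
	fmt.Println(newGreetingRequest(context.Background(), "gitea"))
}
```
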
```diff
@@ -22,6 +22,7 @@ linters:
     - typecheck
     - unconvert
     - unused
+    - unparam
     - wastedassign
 
 run:
```

```diff
@@ -215,16 +215,15 @@ func fileTimestampToTime(timestamp int64) time.Time {
 	return time.UnixMicro(timestamp)
 }
 
-func (f *file) loadMetaByPath() (*dbfsMeta, error) {
+func (f *file) loadMetaByPath() error {
 	var fileMeta dbfsMeta
 	if ok, err := db.GetEngine(f.ctx).Where("full_path = ?", f.fullPath).Get(&fileMeta); err != nil {
-		return nil, err
+		return err
 	} else if ok {
 		f.metaID = fileMeta.ID
 		f.blockSize = fileMeta.BlockSize
-		return &fileMeta, nil
 	}
-	return nil, nil
+	return nil
 }
 
 func (f *file) open(flag int) (err error) {
@@ -288,10 +287,7 @@ func (f *file) createEmpty() error {
 	if err != nil {
 		return err
 	}
-	if _, err = f.loadMetaByPath(); err != nil {
-		return err
-	}
-	return nil
+	return f.loadMetaByPath()
 }
 
 func (f *file) truncate() error {
@@ -368,8 +364,5 @@ func buildPath(path string) string {
 func newDbFile(ctx context.Context, path string) (*file, error) {
 	path = buildPath(path)
 	f := &file{ctx: ctx, fullPath: path, blockSize: defaultFileBlockSize}
-	if _, err := f.loadMetaByPath(); err != nil {
-		return nil, err
-	}
-	return f, nil
+	return f, f.loadMetaByPath()
 }
```

```diff
@@ -99,9 +99,9 @@ func applySorts(sess *xorm.Session, sortType string, priorityRepoID int64) {
 	}
 }
 
-func applyLimit(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyLimit(sess *xorm.Session, opts *IssuesOptions) {
 	if opts.Paginator == nil || opts.Paginator.IsListAll() {
-		return sess
+		return
 	}
 
 	start := 0
@@ -109,11 +109,9 @@ func applyLimit(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
 		start = (opts.Paginator.Page - 1) * opts.Paginator.PageSize
 	}
 	sess.Limit(opts.Paginator.PageSize, start)
-
-	return sess
 }
 
-func applyLabelsCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyLabelsCondition(sess *xorm.Session, opts *IssuesOptions) {
 	if len(opts.LabelIDs) > 0 {
 		if opts.LabelIDs[0] == 0 {
 			sess.Where("issue.id NOT IN (SELECT issue_id FROM issue_label)")
@@ -136,11 +134,9 @@ func applyLabelsCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session
 	if len(opts.ExcludedLabelNames) > 0 {
 		sess.And(builder.NotIn("issue.id", BuildLabelNamesIssueIDsCondition(opts.ExcludedLabelNames)))
 	}
-
-	return sess
 }
 
-func applyMilestoneCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyMilestoneCondition(sess *xorm.Session, opts *IssuesOptions) {
 	if len(opts.MilestoneIDs) == 1 && opts.MilestoneIDs[0] == db.NoConditionID {
 		sess.And("issue.milestone_id = 0")
 	} else if len(opts.MilestoneIDs) > 0 {
@@ -153,11 +149,9 @@ func applyMilestoneCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Sess
 				From("milestone").
 				Where(builder.In("name", opts.IncludeMilestones)))
 	}
-
-	return sess
 }
 
-func applyProjectCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyProjectCondition(sess *xorm.Session, opts *IssuesOptions) {
 	if opts.ProjectID > 0 { // specific project
 		sess.Join("INNER", "project_issue", "issue.id = project_issue.issue_id").
 			And("project_issue.project_id=?", opts.ProjectID)
@@ -166,10 +160,9 @@ func applyProjectCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Sessio
 	}
 	// opts.ProjectID == 0 means all projects,
 	// do not need to apply any condition
-	return sess
 }
 
-func applyProjectColumnCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyProjectColumnCondition(sess *xorm.Session, opts *IssuesOptions) {
 	// opts.ProjectColumnID == 0 means all project columns,
 	// do not need to apply any condition
 	if opts.ProjectColumnID > 0 {
@@ -177,10 +170,9 @@ func applyProjectColumnCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.
 	} else if opts.ProjectColumnID == db.NoConditionID {
 		sess.In("issue.id", builder.Select("issue_id").From("project_issue").Where(builder.Eq{"project_board_id": 0}))
 	}
-	return sess
 }
 
-func applyRepoConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyRepoConditions(sess *xorm.Session, opts *IssuesOptions) {
 	if len(opts.RepoIDs) == 1 {
 		opts.RepoCond = builder.Eq{"issue.repo_id": opts.RepoIDs[0]}
 	} else if len(opts.RepoIDs) > 1 {
@@ -195,10 +187,9 @@ func applyRepoConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session
 	if opts.RepoCond != nil {
 		sess.And(opts.RepoCond)
 	}
-	return sess
 }
 
-func applyConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
+func applyConditions(sess *xorm.Session, opts *IssuesOptions) {
 	if len(opts.IssueIDs) > 0 {
 		sess.In("issue.id", opts.IssueIDs)
 	}
@@ -261,8 +252,6 @@ func applyConditions(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
 	if opts.User != nil {
 		sess.And(issuePullAccessibleRepoCond("issue.repo_id", opts.User.ID, opts.Org, opts.Team, opts.IsPull.Value()))
 	}
-
-	return sess
 }
 
 // teamUnitsRepoCond returns query condition for those repo id in the special org team with special units access
@@ -339,22 +328,22 @@ func issuePullAccessibleRepoCond(repoIDstr string, userID int64, org *organizati
 	return cond
 }
 
-func applyAssigneeCondition(sess *xorm.Session, assigneeID int64) *xorm.Session {
-	return sess.Join("INNER", "issue_assignees", "issue.id = issue_assignees.issue_id").
+func applyAssigneeCondition(sess *xorm.Session, assigneeID int64) {
+	sess.Join("INNER", "issue_assignees", "issue.id = issue_assignees.issue_id").
 		And("issue_assignees.assignee_id = ?", assigneeID)
 }
 
-func applyPosterCondition(sess *xorm.Session, posterID int64) *xorm.Session {
-	return sess.And("issue.poster_id=?", posterID)
+func applyPosterCondition(sess *xorm.Session, posterID int64) {
+	sess.And("issue.poster_id=?", posterID)
 }
 
-func applyMentionedCondition(sess *xorm.Session, mentionedID int64) *xorm.Session {
-	return sess.Join("INNER", "issue_user", "issue.id = issue_user.issue_id").
+func applyMentionedCondition(sess *xorm.Session, mentionedID int64) {
+	sess.Join("INNER", "issue_user", "issue.id = issue_user.issue_id").
 		And("issue_user.is_mentioned = ?", true).
 		And("issue_user.uid = ?", mentionedID)
 }
 
-func applyReviewRequestedCondition(sess *xorm.Session, reviewRequestedID int64) *xorm.Session {
+func applyReviewRequestedCondition(sess *xorm.Session, reviewRequestedID int64) {
 	existInTeamQuery := builder.Select("team_user.team_id").
 		From("team_user").
 		Where(builder.Eq{"team_user.uid": reviewRequestedID})
@@ -375,11 +364,11 @@ func applyReviewRequestedCondition(sess *xorm.Session, reviewRequestedID int64)
 			),
 			builder.In("review.id", maxReview),
 		))
-	return sess.Where("issue.poster_id <> ?", reviewRequestedID).
+	sess.Where("issue.poster_id <> ?", reviewRequestedID).
 		And(builder.In("issue.id", subQuery))
 }
 
-func applyReviewedCondition(sess *xorm.Session, reviewedID int64) *xorm.Session {
+func applyReviewedCondition(sess *xorm.Session, reviewedID int64) {
 	// Query for pull requests where you are a reviewer or commenter, excluding
 	// any pull requests already returned by the review requested filter.
 	notPoster := builder.Neq{"issue.poster_id": reviewedID}
@@ -406,11 +395,11 @@ func applyReviewedCondition(sess *xorm.Session, reviewedID int64) *xorm.Session
 			builder.In("type", CommentTypeComment, CommentTypeCode, CommentTypeReview),
 		)),
 	)
-	return sess.And(notPoster, builder.Or(reviewed, commented))
+	sess.And(notPoster, builder.Or(reviewed, commented))
 }
 
-func applySubscribedCondition(sess *xorm.Session, subscriberID int64) *xorm.Session {
-	return sess.And(
+func applySubscribedCondition(sess *xorm.Session, subscriberID int64) {
+	sess.And(
 		builder.
 			NotIn("issue.id",
 				builder.Select("issue_id").
```

```diff
@@ -28,7 +28,7 @@ type PullRequestsOptions struct {
 	MilestoneID int64
 }
 
-func listPullRequestStatement(ctx context.Context, baseRepoID int64, opts *PullRequestsOptions) (*xorm.Session, error) {
+func listPullRequestStatement(ctx context.Context, baseRepoID int64, opts *PullRequestsOptions) *xorm.Session {
 	sess := db.GetEngine(ctx).Where("pull_request.base_repo_id=?", baseRepoID)
 
 	sess.Join("INNER", "issue", "pull_request.issue_id = issue.id")
@@ -46,7 +46,7 @@ func listPullRequestStatement(ctx context.Context, baseRepoID int64, opts *PullR
 		sess.And("issue.milestone_id=?", opts.MilestoneID)
 	}
 
-	return sess, nil
+	return sess
 }
 
 // GetUnmergedPullRequestsByHeadInfo returns all pull requests that are open and has not been merged
@@ -130,23 +130,15 @@ func PullRequests(ctx context.Context, baseRepoID int64, opts *PullRequestsOptio
 		opts.Page = 1
 	}
 
-	countSession, err := listPullRequestStatement(ctx, baseRepoID, opts)
-	if err != nil {
-		log.Error("listPullRequestStatement: %v", err)
-		return nil, 0, err
-	}
+	countSession := listPullRequestStatement(ctx, baseRepoID, opts)
 	maxResults, err := countSession.Count(new(PullRequest))
 	if err != nil {
 		log.Error("Count PRs: %v", err)
 		return nil, maxResults, err
 	}
 
-	findSession, err := listPullRequestStatement(ctx, baseRepoID, opts)
+	findSession := listPullRequestStatement(ctx, baseRepoID, opts)
 	applySorts(findSession, opts.SortType, 0)
-	if err != nil {
-		log.Error("listPullRequestStatement: %v", err)
-		return nil, maxResults, err
-	}
 	findSession = db.SetSessionPagination(findSession, opts)
 	prs := make([]*PullRequest, 0, opts.PageSize)
 	return prs, maxResults, findSession.Find(&prs)
```

```diff
@@ -18,7 +18,7 @@ func parseIntParam(value, param, algorithmName, config string, previousErr error
 	return parsed, previousErr // <- Keep the previous error as this function should still return an error once everything has been checked if any call failed
 }
 
-func parseUIntParam(value, param, algorithmName, config string, previousErr error) (uint64, error) {
+func parseUIntParam(value, param, algorithmName, config string, previousErr error) (uint64, error) { //nolint:unparam
 	parsed, err := strconv.ParseUint(value, 10, 64)
 	if err != nil {
 		log.Error("invalid integer for %s representation in %s hash spec %s", param, algorithmName, config)
```

```diff
@@ -185,8 +185,6 @@ func ParseDescription(r io.Reader) (*Package, error) {
 }
 
 func setField(p *Package, data string) error {
-	const listDelimiter = ", "
-
 	if data == "" {
 		return nil
 	}
@@ -215,19 +213,19 @@ func setField(p *Package, data string) error {
 	case "Description":
 		p.Metadata.Description = value
 	case "URL":
-		p.Metadata.ProjectURL = splitAndTrim(value, listDelimiter)
+		p.Metadata.ProjectURL = splitAndTrim(value)
 	case "License":
 		p.Metadata.License = value
 	case "Author":
-		p.Metadata.Authors = splitAndTrim(authorReplacePattern.ReplaceAllString(value, ""), listDelimiter)
+		p.Metadata.Authors = splitAndTrim(authorReplacePattern.ReplaceAllString(value, ""))
 	case "Depends":
-		p.Metadata.Depends = splitAndTrim(value, listDelimiter)
+		p.Metadata.Depends = splitAndTrim(value)
 	case "Imports":
-		p.Metadata.Imports = splitAndTrim(value, listDelimiter)
+		p.Metadata.Imports = splitAndTrim(value)
 	case "Suggests":
-		p.Metadata.Suggests = splitAndTrim(value, listDelimiter)
+		p.Metadata.Suggests = splitAndTrim(value)
 	case "LinkingTo":
-		p.Metadata.LinkingTo = splitAndTrim(value, listDelimiter)
+		p.Metadata.LinkingTo = splitAndTrim(value)
 	case "NeedsCompilation":
 		p.Metadata.NeedsCompilation = value == "yes"
 	}
@@ -235,8 +233,8 @@ func setField(p *Package, data string) error {
 	return nil
 }
 
-func splitAndTrim(s, sep string) []string {
-	items := strings.Split(s, sep)
+func splitAndTrim(s string) []string {
+	items := strings.Split(s, ", ")
 	for i := range items {
 		items[i] = strings.TrimSpace(items[i])
 	}
```

```diff
@@ -97,7 +97,7 @@ func decodeEnvSectionKey(encoded string) (ok bool, section, key string) {
 
 // decodeEnvironmentKey decode the environment key to section and key
 // The environment key is in the form of GITEA__SECTION__KEY or GITEA__SECTION__KEY__FILE
-func decodeEnvironmentKey(prefixGitea, suffixFile, envKey string) (ok bool, section, key string, useFileValue bool) {
+func decodeEnvironmentKey(prefixGitea, suffixFile, envKey string) (ok bool, section, key string, useFileValue bool) { //nolint:unparam
 	if !strings.HasPrefix(envKey, prefixGitea) {
 		return false, "", "", false
 	}
```

```diff
@@ -161,7 +161,7 @@ const (
 	targetSecIsSec                                  // target section is from the name seciont [name]
 )
 
-func getStorageSectionByType(rootCfg ConfigProvider, typ string) (ConfigSection, targetSecType, error) {
+func getStorageSectionByType(rootCfg ConfigProvider, typ string) (ConfigSection, targetSecType, error) { //nolint:unparam
 	targetSec, err := rootCfg.GetSection(storageSectionName + "." + typ)
 	if err != nil {
 		if !IsValidStorageType(StorageType(typ)) {
```

```diff
@@ -163,10 +163,7 @@ func (a *AzureBlobStorage) getObjectNameFromPath(path string) string {
 
 // Open opens a file
 func (a *AzureBlobStorage) Open(path string) (Object, error) {
-	blobClient, err := a.getBlobClient(path)
-	if err != nil {
-		return nil, convertAzureBlobErr(err)
-	}
+	blobClient := a.getBlobClient(path)
 	res, err := blobClient.GetProperties(a.ctx, &blob.GetPropertiesOptions{})
 	if err != nil {
 		return nil, convertAzureBlobErr(err)
@@ -229,10 +226,7 @@ func (a azureBlobFileInfo) Sys() any {
 
 // Stat returns the stat information of the object
 func (a *AzureBlobStorage) Stat(path string) (os.FileInfo, error) {
-	blobClient, err := a.getBlobClient(path)
-	if err != nil {
-		return nil, convertAzureBlobErr(err)
-	}
+	blobClient := a.getBlobClient(path)
 	res, err := blobClient.GetProperties(a.ctx, &blob.GetPropertiesOptions{})
 	if err != nil {
 		return nil, convertAzureBlobErr(err)
@@ -247,20 +241,14 @@ func (a *AzureBlobStorage) Stat(path string) (os.FileInfo, error) {
 
 // Delete delete a file
 func (a *AzureBlobStorage) Delete(path string) error {
-	blobClient, err := a.getBlobClient(path)
-	if err != nil {
-		return convertAzureBlobErr(err)
-	}
-	_, err = blobClient.Delete(a.ctx, nil)
+	blobClient := a.getBlobClient(path)
+	_, err := blobClient.Delete(a.ctx, nil)
 	return convertAzureBlobErr(err)
 }
 
 // URL gets the redirect URL to a file. The presigned link is valid for 5 minutes.
 func (a *AzureBlobStorage) URL(path, name string) (*url.URL, error) {
-	blobClient, err := a.getBlobClient(path)
-	if err != nil {
-		return nil, convertAzureBlobErr(err)
-	}
+	blobClient := a.getBlobClient(path)
 
 	startTime := time.Now()
 	u, err := blobClient.GetSASURL(sas.BlobPermissions{
@@ -290,10 +278,7 @@ func (a *AzureBlobStorage) IterateObjects(dirName string, fn func(path string, o
 			return convertAzureBlobErr(err)
 		}
 		for _, object := range resp.Segment.BlobItems {
-			blobClient, err := a.getBlobClient(*object.Name)
-			if err != nil {
-				return convertAzureBlobErr(err)
-			}
+			blobClient := a.getBlobClient(*object.Name)
 			object := &azureBlobObject{
 				Context:    a.ctx,
 				blobClient: blobClient,
@@ -313,8 +298,8 @@ func (a *AzureBlobStorage) IterateObjects(dirName string, fn func(path string, o
 }
 
 // Delete delete a file
-func (a *AzureBlobStorage) getBlobClient(path string) (*blob.Client, error) {
-	return a.client.ServiceClient().NewContainerClient(a.cfg.Container).NewBlobClient(a.buildAzureBlobPath(path)), nil
+func (a *AzureBlobStorage) getBlobClient(path string) *blob.Client {
+	return a.client.ServiceClient().NewContainerClient(a.cfg.Container).NewBlobClient(a.buildAzureBlobPath(path))
 }
 
 func init() {
```

```diff
@@ -15,10 +15,7 @@ import (
 // GenerateKeyPair generates a public and private keypair
 func GenerateKeyPair(bits int) (string, string, error) {
 	priv, _ := rsa.GenerateKey(rand.Reader, bits)
-	privPem, err := pemBlockForPriv(priv)
-	if err != nil {
-		return "", "", err
-	}
+	privPem := pemBlockForPriv(priv)
 	pubPem, err := pemBlockForPub(&priv.PublicKey)
 	if err != nil {
 		return "", "", err
@@ -26,12 +23,12 @@ func GenerateKeyPair(bits int) (string, string, error) {
 	return privPem, pubPem, nil
 }
 
-func pemBlockForPriv(priv *rsa.PrivateKey) (string, error) {
+func pemBlockForPriv(priv *rsa.PrivateKey) string {
 	privBytes := pem.EncodeToMemory(&pem.Block{
 		Type:  "RSA PRIVATE KEY",
 		Bytes: x509.MarshalPKCS1PrivateKey(priv),
 	})
-	return string(privBytes), nil
+	return string(privBytes)
 }
 
 func pemBlockForPub(pub *rsa.PublicKey) (string, error) {
```

```diff
@@ -242,16 +242,12 @@ func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
 	}
 
 	// get upload file size
-	fileRealTotalSize, contentLength, err := getUploadFileSize(ctx)
-	if err != nil {
-		log.Error("Error get upload file size: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error get upload file size")
-		return
-	}
+	fileRealTotalSize, contentLength := getUploadFileSize(ctx)
 
 	// get artifact retention days
 	expiredDays := setting.Actions.ArtifactRetentionDays
 	if queryRetentionDays := ctx.Req.URL.Query().Get("retentionDays"); queryRetentionDays != "" {
+		var err error
 		expiredDays, err = strconv.ParseInt(queryRetentionDays, 10, 64)
 		if err != nil {
 			log.Error("Error parse retention days: %v", err)
```

```diff
@@ -43,7 +43,7 @@ func validateRunID(ctx *ArtifactContext) (*actions.ActionTask, int64, bool) {
 	return task, runID, true
 }
 
-func validateRunIDV4(ctx *ArtifactContext, rawRunID string) (*actions.ActionTask, int64, bool) {
+func validateRunIDV4(ctx *ArtifactContext, rawRunID string) (*actions.ActionTask, int64, bool) { //nolint:unparam
 	task := ctx.ActionTask
 	runID, err := strconv.ParseInt(rawRunID, 10, 64)
 	if err != nil || task.Job.RunID != runID {
@@ -84,11 +84,11 @@ func parseArtifactItemPath(ctx *ArtifactContext) (string, string, bool) {
 
 // getUploadFileSize returns the size of the file to be uploaded.
 // The raw size is the size of the file as reported by the header X-TFS-FileLength.
-func getUploadFileSize(ctx *ArtifactContext) (int64, int64, error) {
+func getUploadFileSize(ctx *ArtifactContext) (int64, int64) {
 	contentLength := ctx.Req.ContentLength
 	xTfsLength, _ := strconv.ParseInt(ctx.Req.Header.Get(artifactXTfsFileLengthHeader), 10, 64)
 	if xTfsLength > 0 {
-		return xTfsLength, contentLength, nil
+		return xTfsLength, contentLength
 	}
-	return contentLength, contentLength, nil
+	return contentLength, contentLength
 }
```

```diff
@@ -26,7 +26,7 @@ var uploadVersionMutex sync.Mutex
 
 // saveAsPackageBlob creates a package blob from an upload
 // The uploaded blob gets stored in a special upload version to link them to the package/image
-func saveAsPackageBlob(ctx context.Context, hsr packages_module.HashedSizeReader, pci *packages_service.PackageCreationInfo) (*packages_model.PackageBlob, error) {
+func saveAsPackageBlob(ctx context.Context, hsr packages_module.HashedSizeReader, pci *packages_service.PackageCreationInfo) (*packages_model.PackageBlob, error) { //nolint:unparam
 	pb := packages_service.NewPackageBlob(hsr)
 
 	exists := false
```

```diff
@@ -36,7 +36,7 @@ func apiError(ctx *context.Context, status int, obj any) {
 	})
 }
 
-func xmlResponse(ctx *context.Context, status int, obj any) {
+func xmlResponse(ctx *context.Context, status int, obj any) { //nolint:unparam
 	ctx.Resp.Header().Set("Content-Type", "application/atom+xml; charset=utf-8")
 	ctx.Resp.WriteHeader(status)
 	if _, err := ctx.Resp.Write([]byte(xml.Header)); err != nil {
```

```diff
@@ -64,7 +64,7 @@ func CompareDiff(ctx *context.APIContext) {
 		}
 	}
 
-	_, _, headGitRepo, ci, _, _ := parseCompareInfo(ctx, api.CreatePullRequestOption{
+	_, headGitRepo, ci, _, _ := parseCompareInfo(ctx, api.CreatePullRequestOption{
 		Base: infos[0],
 		Head: infos[1],
 	})
```

```diff
@@ -408,7 +408,7 @@ func CreatePullRequest(ctx *context.APIContext) {
 	)
 
 	// Get repo/branch information
-	_, headRepo, headGitRepo, compareInfo, baseBranch, headBranch := parseCompareInfo(ctx, form)
+	headRepo, headGitRepo, compareInfo, baseBranch, headBranch := parseCompareInfo(ctx, form)
 	if ctx.Written() {
 		return
 	}
@@ -1054,7 +1054,7 @@ func MergePullRequest(ctx *context.APIContext) {
 	ctx.Status(http.StatusOK)
 }
 
-func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption) (*user_model.User, *repo_model.Repository, *git.Repository, *git.CompareInfo, string, string) {
+func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption) (*repo_model.Repository, *git.Repository, *git.CompareInfo, string, string) {
 	baseRepo := ctx.Repo.Repository
 
 	// Get compared branches information
@@ -1087,14 +1087,14 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
 			} else {
 				ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
 			}
-			return nil, nil, nil, nil, "", ""
+			return nil, nil, nil, "", ""
 		}
 		headBranch = headInfos[1]
 		// The head repository can also point to the same repo
 		isSameRepo = ctx.Repo.Owner.ID == headUser.ID
 	} else {
 		ctx.NotFound()
-		return nil, nil, nil, nil, "", ""
+		return nil, nil, nil, "", ""
 	}
 
 	ctx.Repo.PullRequest.SameRepo = isSameRepo
@@ -1102,7 +1102,7 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
 	// Check if base branch is valid.
 	if !ctx.Repo.GitRepo.IsBranchExist(baseBranch) && !ctx.Repo.GitRepo.IsTagExist(baseBranch) {
 		ctx.NotFound("BaseNotExist")
-		return nil, nil, nil, nil, "", ""
+		return nil, nil, nil, "", ""
 	}
 
 	// Check if current user has fork of repository or in the same repository.
@@ -1110,7 +1110,7 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
 	if headRepo == nil && !isSameRepo {
 		log.Trace("parseCompareInfo[%d]: does not have fork or in same repository", baseRepo.ID)
 		ctx.NotFound("GetForkedRepo")
-		return nil, nil, nil, nil, "", ""
+		return nil, nil, nil, "", ""
 	}
 
 	var headGitRepo *git.Repository
@@ -1121,7 +1121,7 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
 		headGitRepo, err = gitrepo.OpenRepository(ctx, headRepo)
 		if err != nil {
 			ctx.Error(http.StatusInternalServerError, "OpenRepository", err)
-			return nil, nil, nil, nil, "", ""
+			return nil, nil, nil, "", ""
 		}
 	}
 
@@ -1130,7 +1130,7 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
 	if err != nil {
 		headGitRepo.Close()
 		ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
-		return nil, nil, nil, nil, "", ""
+		return nil, nil, nil, "", ""
 	}
 	if !permBase.CanReadIssuesOrPulls(true) || !permBase.CanRead(unit.TypeCode) {
 		if log.IsTrace() {
@@ -1141,7 +1141,7 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
 		}
 		headGitRepo.Close()
 		ctx.NotFound("Can't read pulls or can't read UnitTypeCode")
-		return nil, nil, nil, nil, "", ""
+		return nil, nil, nil, "", ""
 	}
 
 	// user should have permission to read headrepo's codes
@@ -1149,7 +1149,7 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
 	if err != nil {
 		headGitRepo.Close()
 		ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
-		return nil, nil, nil, nil, "", ""
+		return nil, nil, nil, "", ""
 	}
 	if !permHead.CanRead(unit.TypeCode) {
 		if log.IsTrace() {
@@ -1160,24 +1160,24 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
 		}
 		headGitRepo.Close()
 		ctx.NotFound("Can't read headRepo UnitTypeCode")
-		return nil, nil, nil, nil, "", ""
+		return nil, nil, nil, "", ""
 	}
 
 	// Check if head branch is valid.
 	if !headGitRepo.IsBranchExist(headBranch) && !headGitRepo.IsTagExist(headBranch) {
 		headGitRepo.Close()
 		ctx.NotFound()
-		return nil, nil, nil, nil, "", ""
+		return nil, nil, nil, "", ""
 	}
 
 	compareInfo, err := headGitRepo.GetCompareInfo(repo_model.RepoPath(baseRepo.Owner.Name, baseRepo.Name), baseBranch, headBranch, false, false)
 	if err != nil {
 		headGitRepo.Close()
 		ctx.Error(http.StatusInternalServerError, "GetCompareInfo", err)
-		return nil, nil, nil, nil, "", ""
+		return nil, nil, nil, "", ""
 	}
 
-	return headUser, headRepo, headGitRepo, compareInfo, baseBranch, headBranch
+	return headRepo, headGitRepo, compareInfo, baseBranch, headBranch
 }
 
 // UpdatePullRequest merge PR's baseBranch into headBranch
```

```diff
@@ -183,7 +183,7 @@ func ChangeConfig(ctx *context.Context) {
 	value := ctx.FormString("value")
 	cfg := setting.Config()
 
-	marshalBool := func(v string) (string, error) {
+	marshalBool := func(v string) (string, error) { //nolint:unparam
 		if b, _ := strconv.ParseBool(v); b {
 			return "true", nil
 		}
```

```diff
@@ -246,7 +246,7 @@ func Merge(ctx context.Context, pr *issues_model.PullRequest, doer *user_model.U
 }
 
 // doMergeAndPush performs the merge operation without changing any pull information in database and pushes it up to the base repository
-func doMergeAndPush(ctx context.Context, pr *issues_model.PullRequest, doer *user_model.User, mergeStyle repo_model.MergeStyle, expectedHeadCommitID, message string, pushTrigger repo_module.PushTrigger) (string, error) {
+func doMergeAndPush(ctx context.Context, pr *issues_model.PullRequest, doer *user_model.User, mergeStyle repo_model.MergeStyle, expectedHeadCommitID, message string, pushTrigger repo_module.PushTrigger) (string, error) { //nolint:unparam
 	// Clone base repo.
 	mergeCtx, cancel, err := createTemporaryRepoForMerge(ctx, pr, doer, expectedHeadCommitID)
 	if err != nil {
```

```diff
@@ -190,6 +190,6 @@ type dingtalkConvertor struct{}
 
 var _ payloadConvertor[DingtalkPayload] = dingtalkConvertor{}
 
-func newDingtalkRequest(ctx context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
+func newDingtalkRequest(_ context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
 	return newJSONRequest(dingtalkConvertor{}, w, t, true)
 }
```

```diff
@@ -260,7 +260,7 @@ type discordConvertor struct {
 
 var _ payloadConvertor[DiscordPayload] = discordConvertor{}
 
-func newDiscordRequest(ctx context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
+func newDiscordRequest(_ context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
 	meta := &DiscordMeta{}
 	if err := json.Unmarshal([]byte(w.Meta), meta); err != nil {
 		return nil, nil, fmt.Errorf("newDiscordRequest meta json: %w", err)
```

```diff
@@ -168,6 +168,6 @@ type feishuConvertor struct{}
 
 var _ payloadConvertor[FeishuPayload] = feishuConvertor{}
 
-func newFeishuRequest(ctx context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
+func newFeishuRequest(_ context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
 	return newJSONRequest(feishuConvertor{}, w, t, true)
 }
```

```diff
@@ -24,7 +24,7 @@ import (
 	webhook_module "code.gitea.io/gitea/modules/webhook"
 )
 
-func newMatrixRequest(ctx context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
+func newMatrixRequest(_ context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
 	meta := &MatrixMeta{}
 	if err := json.Unmarshal([]byte(w.Meta), meta); err != nil {
 		return nil, nil, fmt.Errorf("GetMatrixPayload meta json: %w", err)
```

```diff
@@ -347,6 +347,6 @@ type msteamsConvertor struct{}
 
 var _ payloadConvertor[MSTeamsPayload] = msteamsConvertor{}
 
-func newMSTeamsRequest(ctx context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
+func newMSTeamsRequest(_ context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
 	return newJSONRequest(msteamsConvertor{}, w, t, true)
 }
```

```diff
@@ -112,7 +112,7 @@ type packagistConvertor struct {
 
 var _ payloadConvertor[PackagistPayload] = packagistConvertor{}
 
-func newPackagistRequest(ctx context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
+func newPackagistRequest(_ context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
 	meta := &PackagistMeta{}
 	if err := json.Unmarshal([]byte(w.Meta), meta); err != nil {
 		return nil, nil, fmt.Errorf("newpackagistRequest meta json: %w", err)
```

```diff
@@ -283,7 +283,7 @@ type slackConvertor struct {
 
 var _ payloadConvertor[SlackPayload] = slackConvertor{}
 
-func newSlackRequest(ctx context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
+func newSlackRequest(_ context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
 	meta := &SlackMeta{}
 	if err := json.Unmarshal([]byte(w.Meta), meta); err != nil {
 		return nil, nil, fmt.Errorf("newSlackRequest meta json: %w", err)
```

```diff
@@ -191,6 +191,6 @@ type telegramConvertor struct{}
 
 var _ payloadConvertor[TelegramPayload] = telegramConvertor{}
 
-func newTelegramRequest(ctx context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
+func newTelegramRequest(_ context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
 	return newJSONRequest(telegramConvertor{}, w, t, true)
 }
```

```diff
@@ -177,6 +177,6 @@ type wechatworkConvertor struct{}
 
 var _ payloadConvertor[WechatworkPayload] = wechatworkConvertor{}
 
-func newWechatworkRequest(ctx context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
+func newWechatworkRequest(_ context.Context, w *webhook_model.Webhook, t *webhook_model.HookTask) (*http.Request, []byte, error) {
 	return newJSONRequest(wechatworkConvertor{}, w, t, true)
 }
```