Add latest changes from gitlab-org/gitlab@master

parent 12c5065f1f
commit da9254b89f

7 changed files with 76 additions and 95 deletions

@@ -35,7 +35,8 @@ module Resolvers
     def preloads
       {
         alert_management_alert: [:alert_management_alert],
-        labels: [:labels]
+        labels: [:labels],
+        assignees: [:assignees]
       }
     end
 
@@ -38,8 +38,7 @@ module Types
           description: 'User that created the issue',
           resolve: -> (obj, _args, _ctx) { Gitlab::Graphql::Loaders::BatchModelLoader.new(User, obj.author_id).find }
 
-    # Remove complexity when BatchLoader is used
-    field :assignees, Types::UserType.connection_type, null: true, complexity: 5,
+    field :assignees, Types::UserType.connection_type, null: true,
           description: 'Assignees of the issue'
 
     field :labels, Types::LabelType.connection_type, null: true,

changelogs/unreleased/11496-issue-type-nplus1.yml (new file, +5)

@@ -0,0 +1,5 @@
+---
+title: Graphql Issues - Fix N+1 for Assignees
+merge_request: 41233
+author:
+type: performance

@@ -28,9 +28,8 @@ Each guideline is described in more detail in the sections below:
 
 > Introduced in GitLab Runner 8.9.
 
-GitLab and GitLab Runner always perform a full clone by default.
-While it means that all changes from GitLab are received,
-it often results in receiving extra commit logs.
+GitLab and GitLab Runner perform a [shallow clone](../pipelines/settings.md#git-shallow-clone)
+by default.
 
 Ideally, you should always use `GIT_DEPTH` with a small number
 like 10. This will instruct GitLab Runner to perform shallow clones.

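For reference, `GIT_DEPTH` in the passage above is set as an ordinary CI/CD variable. A minimal `.gitlab-ci.yml` sketch, assuming you want the recommended depth of 10 applied to every job:

```yaml
# Sketch: request a shallow clone of depth 10 for all jobs in the pipeline.
variables:
  GIT_DEPTH: "10"
```
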
@@ -96,32 +96,7 @@ That's needed when one totally relies on [custom analyzers](#custom-analyzers).
 
 ## Custom Analyzers
 
-### Custom analyzers with Docker-in-Docker
-
-When Docker-in-Docker for SAST is enabled,
-you can provide your own analyzers as a comma-separated list of Docker images.
-Here's how to add `analyzers/csharp` and `analyzers/perl` to the default images:
-In `.gitlab-ci.yml` define:
-
-```yaml
-include:
-  - template: SAST.gitlab-ci.yml
-
-variables:
-  SAST_ANALYZER_IMAGES: "my-docker-registry/analyzers/csharp,my-docker-registry/analyzers/perl"
-```
-
-The values must be the full path to the container registry images,
-like what you would feed to the `docker pull` command.
-
-NOTE: **Note:**
-This configuration doesn't benefit from the integrated detection step.
-SAST has to fetch and spawn each Docker image to establish whether the
-custom analyzer can scan the source code.
-
-### Custom analyzers without Docker-in-Docker
-
-When Docker-in-Docker for SAST is disabled, you can provide your own analyzers by
+You can provide your own analyzers by
 defining CI jobs in your CI configuration. For consistency, you should suffix your custom
 SAST jobs with `-sast`. Here's how to add a scanning job that's based on the
 Docker image `my-docker-registry/analyzers/csharp` and generates a SAST report

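The hunk above ends just before the docs' own example job. A minimal sketch of such a `-sast` job, assuming the `my-docker-registry/analyzers/csharp` image named in the text; the entrypoint and report filename are illustrative, not taken from this commit:

```yaml
csharp-sast:
  image: my-docker-registry/analyzers/csharp  # custom analyzer image named in the docs text
  script:
    - /analyzer run                           # assumed entrypoint; replace with your analyzer's command
  artifacts:
    reports:
      sast: gl-sast-report.json               # file reported to GitLab as a SAST artifact
```
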
@@ -48,8 +48,6 @@ To run SAST jobs, by default, you need a GitLab Runner with the
 [`kubernetes`](https://docs.gitlab.com/runner/install/kubernetes.html) executor.
 If you're using the shared Runners on GitLab.com, this is enabled by default.
 
-Beginning with GitLab 13.0, Docker privileged mode is necessary only if you've [enabled Docker-in-Docker for SAST](#enabling-docker-in-docker).
-
 CAUTION: **Caution:**
 Our SAST jobs require a Linux container type. Windows containers are not yet supported.
 
@@ -95,9 +93,6 @@ All open source (OSS) analyzers have been moved to the GitLab Core tier. Progress is
 tracked in the corresponding
 [epic](https://gitlab.com/groups/gitlab-org/-/epics/2098).
 
-Please note that support for [Docker-in-Docker](#enabling-docker-in-docker)
-will not be extended to the GitLab Core tier.
-
 #### Summary of features per tier
 
 Different features are available in different [GitLab tiers](https://about.gitlab.com/pricing/),

@@ -217,25 +212,6 @@ you can use the `MAVEN_CLI_OPTS` environment variable.
 
 Read more on [how to use private Maven repositories](../index.md#using-private-maven-repos).
 
-### Enabling Docker-in-Docker **(ULTIMATE)**
-
-If needed, you can enable Docker-in-Docker to restore the SAST behavior that existed prior to GitLab
-13.0. Follow these steps to do so:
-
-1. Configure a GitLab Runner with Docker-in-Docker in [privileged mode](https://docs.gitlab.com/runner/executors/docker.html#use-docker-in-docker-with-privileged-mode).
-1. Set the variable `SAST_DISABLE_DIND` to `false`:
-
-   ```yaml
-   include:
-     - template: SAST.gitlab-ci.yml
-
-   variables:
-     SAST_DISABLE_DIND: "false"
-   ```
-
-This creates a single `sast` job in your CI/CD pipeline instead of multiple `<analyzer-name>-sast`
-jobs.
-
 #### Enabling Kubesec analyzer
 
 > [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/12752) in GitLab Ultimate 12.6.

@@ -329,7 +305,6 @@ The following are Docker image-related variables.
 | `SECURE_ANALYZERS_PREFIX` | Override the name of the Docker registry providing the default images (proxy). Read more about [customizing analyzers](analyzers.md). |
 | `SAST_ANALYZER_IMAGE_TAG` | **DEPRECATED:** Override the Docker tag of the default images. Read more about [customizing analyzers](analyzers.md). |
 | `SAST_DEFAULT_ANALYZERS` | Override the names of default images. Read more about [customizing analyzers](analyzers.md). |
-| `SAST_DISABLE_DIND` | Disable Docker-in-Docker and run analyzers [individually](#enabling-docker-in-docker). This variable is `true` by default. |
 
 #### Vulnerability filters
 
@@ -344,18 +319,6 @@ Some analyzers make it possible to filter out vulnerabilities under a given threshold.
 | `SAST_FLAWFINDER_LEVEL` | 1 | Ignore Flawfinder vulnerabilities under given risk level. Integer, 0=No risk, 5=High risk. |
 | `SAST_GOSEC_LEVEL` | 0 | Ignore Gosec vulnerabilities under given confidence level. Integer, 0=Undefined, 1=Low, 2=Medium, 3=High. |
 
-#### Docker-in-Docker orchestrator
-
-The following variables configure the Docker-in-Docker orchestrator, and therefore are only used when the Docker-in-Docker mode is [enabled](#enabling-docker-in-docker).
-
-| Environment variable | Default value | Description |
-|------------------------------------------|---------------|-------------|
-| `SAST_ANALYZER_IMAGES` | | Comma-separated list of custom images. Default images are still enabled. Read more about [customizing analyzers](analyzers.md). |
-| `SAST_PULL_ANALYZER_IMAGES` | 1 | Pull the images from the Docker registry (set to 0 to disable). Read more about [customizing analyzers](analyzers.md). |
-| `SAST_DOCKER_CLIENT_NEGOTIATION_TIMEOUT` | 2m | Time limit for Docker client negotiation. Timeouts are parsed using Go's [`ParseDuration`](https://golang.org/pkg/time/#ParseDuration). Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. For example, `300ms`, `1.5h` or `2h45m`. |
-| `SAST_PULL_ANALYZER_IMAGE_TIMEOUT` | 5m | Time limit when pulling the image of an analyzer. Timeouts are parsed using Go's [`ParseDuration`](https://golang.org/pkg/time/#ParseDuration). Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. For example, `300ms`, `1.5h` or `2h45m`. |
-| `SAST_RUN_ANALYZER_TIMEOUT` | 20m | Time limit when running an analyzer. Timeouts are parsed using Go's [`ParseDuration`](https://golang.org/pkg/time/#ParseDuration). Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. For example, `300ms`, `1.5h` or `2h45m`. |
-
 #### Analyzer settings
 
 Some analyzers can be customized with environment variables.

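The filter variables kept above are set like any other CI/CD variable. A minimal sketch, assuming the SAST template include used elsewhere in these docs:

```yaml
include:
  - template: SAST.gitlab-ci.yml

variables:
  SAST_FLAWFINDER_LEVEL: "3"  # illustrative value: ignore Flawfinder findings below risk level 3
```
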
@@ -512,7 +475,6 @@ run successfully. For more information, see [Offline environments](../offline_de
 
 To use SAST in an offline environment, you need:
 
-- To keep Docker-In-Docker disabled (default).
 - A GitLab Runner with the [`docker` or `kubernetes` executor](#requirements).
 - A Docker Container Registry with locally available copies of SAST [analyzer](https://gitlab.com/gitlab-org/security-products/analyzers) images.
 - Configure certificate checking of packages (optional).

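For the offline requirements above, the local registry is typically wired in through the `SECURE_ANALYZERS_PREFIX` variable from the table earlier in this diff. A minimal sketch with an illustrative registry host:

```yaml
include:
  - template: SAST.gitlab-ci.yml

variables:
  # Illustrative host: a registry on the offline network that mirrors the SAST analyzer images.
  SECURE_ANALYZERS_PREFIX: "docker-registry.example.com/analyzers"
```
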
@@ -5,10 +5,11 @@ require 'spec_helper'
 RSpec.describe 'getting an issue list for a project' do
   include GraphqlHelpers
 
-  let(:project) { create(:project, :repository, :public) }
-  let(:current_user) { create(:user) }
   let(:issues_data) { graphql_data['project']['issues']['edges'] }
-  let!(:issues) do
+
+  let_it_be(:project) { create(:project, :repository, :public) }
+  let_it_be(:current_user) { create(:user) }
+  let_it_be(:issues, reload: true) do
     [create(:issue, project: project, discussion_locked: true),
      create(:issue, :with_alert, project: project)]
   end

@@ -85,7 +86,7 @@ RSpec.describe 'getting an issue list for a project' do
   end
 
   context 'when there is a confidential issue' do
-    let!(:confidential_issue) do
+    let_it_be(:confidential_issue) do
       create(:issue, :confidential, project: project)
     end
 
@@ -309,23 +310,12 @@ RSpec.describe 'getting an issue list for a project' do
       QUERY
     end
 
-    let(:query) do
-      graphql_query_for(
-        'project',
-        { 'fullPath' => project.full_path },
-        query_graphql_field('issues', {}, fields)
-      )
-    end
-
-    let_it_be(:project) { create(:project, :public) }
-    let_it_be(:label1) { create(:label, project: project) }
-    let_it_be(:label2) { create(:label, project: project) }
-    let_it_be(:issue1) { create(:issue, project: project, labels: [label1]) }
-    let_it_be(:issue2) { create(:issue, project: project, labels: [label2]) }
-    let_it_be(:issues) { [issue1, issue2] }
-
     before do
-      stub_feature_flags(graphql_lookahead_support: true)
+      issues.each do |issue|
+        # create a label for each issue we have to properly test N+1
+        label = create(:label, project: project)
+        issue.update!(labels: [label])
+      end
     end
 
     def response_label_ids(response_data)

@@ -343,14 +333,64 @@ RSpec.describe 'getting an issue list for a project' do
       expect(issues_data.count).to eq(2)
       expect(response_label_ids(issues_data)).to match_array(labels_as_global_ids(issues))
 
-      issues.append create(:issue, project: project, labels: [create(:label, project: project)])
+      new_issues = issues + [create(:issue, project: project, labels: [create(:label, project: project)])]
 
       expect { post_graphql(query, current_user: current_user) }.not_to exceed_query_limit(control)
-      # graphql_data is memoized (see spec/support/helpers/graphql_helpers.rb:271)
+      # graphql_data is memoized (see spec/support/helpers/graphql_helpers.rb)
       # so we have to parse the body ourselves the second time
-      response_data = Gitlab::Json.parse(response.body)['data']['project']['issues']['edges']
-      expect(response_data.count).to eq(3)
-      expect(response_label_ids(response_data)).to match_array(labels_as_global_ids(issues))
+      issues_data = Gitlab::Json.parse(response.body)['data']['project']['issues']['edges']
+      expect(issues_data.count).to eq(3)
+      expect(response_label_ids(issues_data)).to match_array(labels_as_global_ids(new_issues))
     end
   end
+
+  context 'fetching assignees' do
+    let(:fields) do
+      <<~QUERY
+        edges {
+          node {
+            id
+            assignees {
+              nodes {
+                id
+              }
+            }
+          }
+        }
+      QUERY
+    end
+
+    before do
+      issues.each do |issue|
+        # create an assignee for each issue we have to properly test N+1
+        assignee = create(:user)
+        issue.update!(assignees: [assignee])
+      end
+    end
+
+    def response_assignee_ids(response_data)
+      response_data.map do |edge|
+        edge['node']['assignees']['nodes'].map { |node| node['id'] }
+      end.flatten
+    end
+
+    def assignees_as_global_ids(issues)
+      issues.map(&:assignees).flatten.map(&:to_global_id).map(&:to_s)
+    end
+
+    it 'avoids N+1 queries', :aggregate_failures do
+      control = ActiveRecord::QueryRecorder.new { post_graphql(query, current_user: current_user) }
+      expect(issues_data.count).to eq(2)
+      expect(response_assignee_ids(issues_data)).to match_array(assignees_as_global_ids(issues))
+
+      new_issues = issues + [create(:issue, project: project, assignees: [create(:user)])]
+
+      expect { post_graphql(query, current_user: current_user) }.not_to exceed_query_limit(control)
+      # graphql_data is memoized (see spec/support/helpers/graphql_helpers.rb)
+      # so we have to parse the body ourselves the second time
+      issues_data = Gitlab::Json.parse(response.body)['data']['project']['issues']['edges']
+      expect(issues_data.count).to eq(3)
+      expect(response_assignee_ids(issues_data)).to match_array(assignees_as_global_ids(new_issues))
+    end
+  end
 end