Add latest changes from gitlab-org/gitlab@master
parent 1f563de514
commit 83cd5db435

23 changed files with 169 additions and 128 deletions
@@ -205,7 +205,12 @@ export default {
       </gl-form-group>
     </template>

-    <agent-token v-else :agent-token="agentToken" :modal-id="$options.modalId" />
+    <agent-token
+      v-else
+      :agent-name="agentName"
+      :agent-token="agentToken"
+      :modal-id="$options.modalId"
+    />

     <template #modal-footer>
       <gl-button
@@ -1,12 +1,12 @@
-export function generateAgentRegistrationCommand(agentToken, kasAddress, kasVersion) {
+export function generateAgentRegistrationCommand({ name, token, version, address }) {
   return `helm repo add gitlab https://charts.gitlab.io
 helm repo update
-helm upgrade --install gitlab-agent gitlab/gitlab-agent \\
+helm upgrade --install ${name} gitlab/gitlab-agent \\
     --namespace gitlab-agent \\
     --create-namespace \\
-    --set image.tag=v${kasVersion} \\
-    --set config.token=${agentToken} \\
-    --set config.kasAddress=${kasAddress}`;
+    --set image.tag=v${version} \\
+    --set config.token=${token} \\
+    --set config.kasAddress=${address}`;
 }

 export function getAgentConfigPath(clusterAgentName) {
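For reference, a minimal sketch (not part of the commit) of how a caller moves from the old positional signature to the new options object. The sample values and the commented-out import path are assumptions for illustration only.

```javascript
// import { generateAgentRegistrationCommand } from '...'; // module path not shown in this diff

// Hypothetical sample values.
const agentName = 'my-agent';
const agentToken = 'agent-token';
const kasAddress = 'kas.example.com';
const kasVersion = '15.0.0';

// Before: positional arguments.
// generateAgentRegistrationCommand(agentToken, kasAddress, kasVersion);

// After: a single options object, which also carries the agent name
// used as the Helm release name (`helm upgrade --install <name> ...`).
const command = generateAgentRegistrationCommand({
  name: agentName,
  token: agentToken,
  version: kasVersion,
  address: kasAddress,
});
```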
@@ -21,6 +21,10 @@ export default {
   },
   inject: ['kasAddress', 'kasVersion'],
   props: {
+    agentName: {
+      required: true,
+      type: String,
+    },
     agentToken: {
       required: true,
       type: String,
@@ -32,7 +36,12 @@ export default {
   },
   computed: {
     agentRegistrationCommand() {
-      return generateAgentRegistrationCommand(this.agentToken, this.kasAddress, this.kasVersion);
+      return generateAgentRegistrationCommand({
+        name: this.agentName,
+        token: this.agentToken,
+        version: this.kasVersion,
+        address: this.kasAddress,
+      });
     },
   },
 };
@@ -268,7 +268,12 @@ export default {
         </p>
       </template>

-      <agent-token v-else :agent-token="agentToken" :modal-id="$options.modalId" />
+      <agent-token
+        v-else
+        :agent-name="agentName"
+        :agent-token="agentToken"
+        :modal-id="$options.modalId"
+      />
     </template>

     <template v-else>
@@ -16,7 +16,7 @@ module Mutations
        description: 'SAST CI configuration for the project.'

      def configure_analyzer(project, **args)
-       ::Security::CiConfiguration::SastCreateService.new(project, current_user, args[:configuration]).execute
+       ::Security::CiConfiguration::SastCreateService.new(project, current_user, args[:configuration].to_h).execute
      end
    end
  end
@@ -16,6 +16,7 @@ second: '(?:\b[A-Z][a-z]+ )+\(([A-Z]{3,5})\)'
 exceptions:
   - AJAX
   - ANSI
+  - APAC
   - API
   - APM
   - ARM
@@ -40,6 +41,7 @@ exceptions:
   - CSRF
   - CSS
   - CSV
+  - CTE
   - CWE
   - CVE
   - CVS
@@ -211,6 +213,7 @@ exceptions:
   - TODO
   - TOML
   - TOTP
+  - TPS
   - TTL
   - UBI
   - UDP
@@ -6,7 +6,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w

 # Configuring PostgreSQL for scaling **(FREE SELF)**

-In this section, you'll be guided through configuring a PostgreSQL database to
+In this section, you are guided through configuring a PostgreSQL database to
 be used with GitLab in one of our [reference architectures](../reference_architectures/index.md).
 There are essentially three setups to choose from.

@@ -15,7 +15,7 @@ requiring downtime.
 ## Dropping Columns

 Removing columns is tricky because running GitLab processes may still be using
-the columns. To work around this safely, you will need three steps in three releases:
+the columns. To work around this safely, you need three steps in three releases:

 1. Ignoring the column (release M)
 1. Dropping the column (release M+1)
@@ -174,7 +174,7 @@ This will take care of renaming the column, ensuring data stays in sync, and
 copying over indexes and foreign keys.

 If a column contains one or more indexes that don't contain the name of the
-original column, the previously described procedure will fail. In that case,
+original column, the previously described procedure fails. In that case,
 you need to rename these indexes.

 ### Step 2: Add A Post-Deployment Migration
@@ -291,8 +291,9 @@ They can also produce a lot of pressure on the database due to it rapidly
 updating many rows in sequence.

 To reduce database pressure you should instead use a background migration
-when migrating a column in a large table (for example, `issues`). This will
-spread the work / load over a longer time period, without slowing down deployments.
+when migrating a column in a large table (for example, `issues`). Background
+migrations spread the work / load over a longer time period, without slowing
+down deployments.

 For more information, see [the documentation on cleaning up background
 migrations](background_migrations.md#cleaning-up).
@@ -533,7 +534,7 @@ step approach:

 Usually this works, but not always. For example, if a field's format is to be
 changed from JSON to something else we have a bit of a problem. If we were to
-change existing data before deploying application code we'll most likely run
+change existing data before deploying application code we would most likely run
 into errors. On the other hand, if we were to migrate after deploying the
 application code we could run into the same problems.

@@ -47,7 +47,7 @@ As a database reviewer, join the internal `#database` Slack channel and ask ques
 database related issues with other database reviewers and maintainers.

 There is also an optional database office hours call held bi-weekly, alternating between
-European/US and APAC friendly hours. You can join the office hours call and bring topics
+European/US and Asia-Pacific (APAC) friendly hours. You can join the office hours call and bring topics
 that require a more in-depth discussion between the database reviewers and maintainers:

 - [Database Office Hours Agenda](https://docs.google.com/document/d/1wgfmVL30F8SdMg-9yY6Y8djPSxWNvKmhR5XmsvYX1EI/edit).
@@ -163,7 +163,7 @@ The technique can only optimize `IN` queries that satisfy the following requirem
 (the combination of the columns uniquely identifies one particular column in the table).

 WARNING:
-This technique will not improve the performance of the `COUNT(*)` queries.
+This technique does not improve the performance of the `COUNT(*)` queries.

 ## The `InOperatorOptimization` module

@@ -183,7 +183,7 @@ in `Gitlab::Pagination::Keyset::InOperatorOptimization`.

 ### Basic usage of `QueryBuilder`

-To illustrate a basic usage, we will build a query that
+To illustrate a basic usage, we build a query that
 fetches 20 issues with the oldest `created_at` from the group `gitlab-org`.

 The following ActiveRecord query would produce a query similar to
@@ -226,10 +226,10 @@ Gitlab::Pagination::Keyset::InOperatorOptimization::QueryBuilder.new(
 the order by column expressions is available for locating the record. In this example, the
 yielded values are `created_at` and `id` SQL expressions. Finding a record is very fast via the
 primary key, so we don't use the `created_at` value. Providing the `finder_query` lambda is optional.
-If it's not given, the IN operator optimization will only make the ORDER BY columns available to
+If it's not given, the `IN` operator optimization only makes the `ORDER BY` columns available to
 the end-user and not the full database row.

-If it's not given, the IN operator optimization will only make the ORDER BY columns available to
+If it's not given, the `IN` operator optimization only makes the `ORDER BY` columns available to
 the end-user and not the full database row.

 The following database index on the `issues` table must be present
@@ -416,7 +416,7 @@ scope = Issue
   .limit(20)
 ```

-To construct the array scope, we'll need to take the Cartesian product of the `project_id IN` and
+To construct the array scope, we need to take the Cartesian product of the `project_id IN` and
 the `issue_type IN` queries. `issue_type` is an ActiveRecord enum, so we need to
 construct the following table:

@@ -589,7 +589,7 @@ LIMIT 20
 NOTE:
 To make the query efficient, the following columns need to be covered with an index: `project_id`, `issue_type`, `created_at`, and `id`.

-#### Using calculated ORDER BY expression
+#### Using calculated `ORDER BY` expression

 The following example orders epic records by the duration between the creation time and closed
 time. It is calculated with the following formula:
@@ -766,7 +766,7 @@ using the generalized `IN` optimization technique.

 ### Array CTE

-As the first step, we use a common table expression (CTE) for collecting the `projects.id` values.
+As the first step, we use a Common Table Expression (CTE) for collecting the `projects.id` values.
 This is done by wrapping the incoming `array_scope` ActiveRecord relation parameter with a CTE.

 ```sql
@@ -792,7 +792,7 @@ This query produces the following result set with only one column (`projects.id`)
 ### Array mapping

 For each project (that is, each record storing a project ID in `array_cte`),
-we will fetch the cursor value identifying the first issue respecting the `ORDER BY` clause.
+we fetch the cursor value identifying the first issue respecting the `ORDER BY` clause.

 As an example, let's pick the first record `ID=9` from `array_cte`.
 The following query should fetch the cursor value `(created_at, id)` identifying
@@ -805,7 +805,7 @@ ORDER BY "issues"."created_at" ASC, "issues"."id" ASC
 LIMIT 1;
 ```

-We will use `LATERAL JOIN` to loop over the records in the `array_cte` and find the
+We use `LATERAL JOIN` to loop over the records in the `array_cte` and find the
 cursor value for each project. The query would be built using the `array_mapping_scope` lambda
 function.

@@ -854,11 +854,11 @@ The table shows the cursor values (`created_at, id`) of the first record for eac
 respecting the `ORDER BY` clause.

 At this point, we have the initial data. To start collecting the actual records from the database,
-we'll use a recursive CTE query where each recursion locates one row until
+we use a recursive CTE query where each recursion locates one row until
 the `LIMIT` is reached or no more data can be found.

-Here's an outline of the steps we will take in the recursive CTE query
-(expressing the steps in SQL is non-trivial but will be explained next):
+Here's an outline of the steps we take in the recursive CTE query
+(expressing the steps in SQL is non-trivial but is explained next):

 1. Sort the initial resultset according to the `ORDER BY` clause.
 1. Pick the top cursor to fetch the record, this is our first record. In the example,
@@ -994,7 +994,7 @@ After this, the recursion starts again by finding the next lowest cursor value.

 ### Finalizing the query

-For producing the final `issues` rows, we're going to wrap the query with another `SELECT` statement:
+For producing the final `issues` rows, we wrap the query with another `SELECT` statement:

 ```sql
 SELECT "issues".*
@@ -1034,14 +1034,14 @@ The group and project queries are not using sorting, the necessary columns are r
 indexes. These values are accessed frequently so it's very likely that most of the data will be
 in the PostgreSQL's buffer cache.

-The optimized `IN` query will read maximum 519 entries (cursor values) from the index:
+The optimized `IN` query reads maximum 519 entries (cursor values) from the index:

 - 500 index-only scans for populating the arrays for each project. The cursor values of the first
-  record will be here.
+  record is here.
 - Maximum 19 additional index-only scans for the consecutive records.

-The optimized `IN` query will sort the array (cursor values per project array) 20 times, which
-means we'll sort 20 x 500 rows. However, this might be a less memory-intensive task than
+The optimized `IN` query sorts the array (cursor values per project array) 20 times, which
+means we sort 20 x 500 rows. However, this might be a less memory-intensive task than
 sorting 10 000 rows at once.

 Performance comparison for the `gitlab-org` group:
@@ -1053,5 +1053,5 @@ Performance comparison for the `gitlab-org` group:

 NOTE:
 Before taking measurements, the group lookup query was executed separately in order to make
-the group data available in the buffer cache. Since it's a frequently called query, it's going to
-hit many shared buffers during the query execution in the production environment.
+the group data available in the buffer cache. Since it's a frequently called query, it
+hits many shared buffers during the query execution in the production environment.
@@ -99,8 +99,8 @@ such records, so we would follow the same process either way.
 We first add the `NOT NULL` constraint with a `NOT VALID` parameter, which enforces consistency
 when new records are inserted or current records are updated.

-In the example above, the existing epics with a `NULL` description will not be affected and you'll
-still be able to update records in the `epics` table. However, when you try to update or insert
+In the example above, the existing epics with a `NULL` description are not affected and you are
+still able to update records in the `epics` table. However, when you try to update or insert
 an epic without providing a description, the constraint causes a database error.

 Adding or removing a `NOT NULL` clause requires that any application changes are deployed _first_.
@@ -129,7 +129,7 @@ end
 #### Data migration to fix existing records (current release)

 The approach here depends on the data volume and the cleanup strategy. The number of records that
-must be fixed on GitLab.com is a nice indicator that will help us decide whether to use a
+must be fixed on GitLab.com is a nice indicator that helps us decide whether to use a
 post-deployment migration or a background data migration:

 - If the data volume is less than `1000` records, then the data migration can be executed within the post-migration.
@@ -138,7 +138,7 @@ post-deployment migration or a background data migration:
 When unsure about which option to use, please contact the Database team for advice.

 Back to our example, the epics table is not considerably large nor frequently accessed,
-so we are going to add a post-deployment migration for the 13.0 milestone (current),
+so we add a post-deployment migration for the 13.0 milestone (current),
 `db/post_migrate/20200501000002_cleanup_epics_with_null_description.rb`:

 ```ruby
@@ -173,7 +173,7 @@ end

 #### Validate the `NOT NULL` constraint (next release)

-Validating the `NOT NULL` constraint will scan the whole table and make sure that each record is correct.
+Validating the `NOT NULL` constraint scans the whole table and make sure that each record is correct.

 Still in our example, for the 13.1 milestone (next), we run the `validate_not_null_constraint`
 migration helper in a final post-deployment migration,
@@ -196,11 +196,11 @@ end
 ## `NOT NULL` constraints on large tables

 If you have to clean up a nullable column for a [high-traffic table](../migration_style_guide.md#high-traffic-tables)
-(for example, the `artifacts` in `ci_builds`), your background migration will go on for a while and
-it will need an additional [background migration cleaning up](background_migrations.md#cleaning-up)
+(for example, the `artifacts` in `ci_builds`), your background migration goes on for a while and
+it needs an additional [background migration cleaning up](background_migrations.md#cleaning-up)
 in the release after adding the data migration.

-In that rare case you will need 3 releases end-to-end:
+In that rare case you need 3 releases end-to-end:

 1. Release `N.M` - Add the `NOT NULL` constraint and the background-migration to fix the existing records.
 1. Release `N.M+1` - Cleanup the background migration.
@@ -36,19 +36,19 @@ before attempting to leverage this feature.

 While partitioning can be very useful when properly applied, it's
 imperative to identify if the data and workload of a table naturally fit a
-partitioning scheme. There are a few details you'll have to understand
+partitioning scheme. There are a few details you have to understand
 in order to decide if partitioning is a good fit for your particular
 problem.

 First, a table is partitioned on a partition key, which is a column or
-set of columns which determine how the data will be split across the
+set of columns which determine how the data is split across the
 partitions. The partition key is used by the database when reading or
 writing data, to decide which partitions need to be accessed. The
 partition key should be a column that would be included in a `WHERE`
 clause on almost all queries accessing that table.

-Second, it's necessary to understand the strategy the database will
-use to split the data across the partitions. The scheme supported by the
+Second, it's necessary to understand the strategy the database uses
+to split the data across the partitions. The scheme supported by the
 GitLab migration helpers is date-range partitioning, where each partition
 in the table contains data for a single month. In this case, the partitioning
 key would need to be a timestamp or date column. In order for this type of
@@ -117,7 +117,7 @@ partition key falls in the specified range. For example, the partition
 greater than or equal to `2020-01-01` and less than `2020-02-01`.

 Now, if we look at the previous example query again, the database can
-use the `WHERE` to recognize that all matching rows will be in the
+use the `WHERE` to recognize that all matching rows are in the
 `audit_events_202001` partition. Rather than searching all of the data
 in all of the partitions, it can search only the single month's worth
 of data in the appropriate partition. In a large table, this can
@@ -164,9 +164,9 @@ be updated to match.
 ### Step 1: Creating the partitioned copy (Release N)

 The first step is to add a migration to create the partitioned copy of
-the original table. This migration will also create the appropriate
+the original table. This migration creates the appropriate
 partitions based on the data in the original table, and install a
-trigger that will sync writes from the original table into the
+trigger that syncs writes from the original table into the
 partitioned copy.

 An example migration of partitioning the `audit_events` table by its
@@ -186,15 +186,15 @@ class PartitionAuditEvents < Gitlab::Database::Migration[1.0]
 end
 ```

-Once this has executed, any inserts, updates or deletes in the
-original table will also be duplicated in the new table. For updates and
-deletes, the operation will only have an effect if the corresponding row
+After this has executed, any inserts, updates, or deletes in the
+original table are also duplicated in the new table. For updates and
+deletes, the operation only has an effect if the corresponding row
 exists in the partitioned table.

 ### Step 2: Backfill the partitioned copy (Release N)

-The second step is to add a post-deployment migration that will schedule
-the background jobs that will backfill existing data from the original table
+The second step is to add a post-deployment migration that schedules
+the background jobs that backfill existing data from the original table
 into the partitioned copy.

 Continuing the above example, the migration would look like:
@@ -225,7 +225,7 @@ partitioning migration.
 The third step must occur at least one release after the release that
 includes the background migration. This gives time for the background
 migration to execute properly in self-managed installations. In this step,
-add another post-deployment migration that will cleanup after the
+add another post-deployment migration that cleans up after the
 background migration. This includes forcing any remaining jobs to
 execute, and copying data that may have been missed, due to dropped or
 failed jobs.
@@ -248,8 +248,7 @@ end

 After this migration has completed, the original table and partitioned
 table should contain identical data. The trigger installed on the
-original table guarantees that the data will remain in sync going
-forward.
+original table guarantees that the data remains in sync going forward.

 ### Step 4: Swap the partitioned and non-partitioned tables (Release N+1)

@@ -9,7 +9,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
 This document describes various guidelines to follow when writing SQL queries,
 either using ActiveRecord/Arel or raw SQL queries.

-## Using LIKE Statements
+## Using `LIKE` Statements

 The most common way to search for data is using the `LIKE` statement. For
 example, to get all issues with a title starting with "Draft:" you'd write the
@@ -56,10 +56,10 @@ FROM issues
 WHERE (title ILIKE 'Draft:%' OR foo ILIKE 'Draft:%')
 ```

-## LIKE & Indexes
+## `LIKE` & Indexes

-PostgreSQL won't use any indexes when using `LIKE` / `ILIKE` with a wildcard at
-the start. For example, this will not use any indexes:
+PostgreSQL does not use any indexes when using `LIKE` / `ILIKE` with a wildcard at
+the start. For example, this does not use any indexes:

 ```sql
 SELECT *
@@ -145,7 +145,7 @@ The query:
 Project.select("path, user_id").joins(:merge_requests) # SELECT path, user_id FROM "projects" ...
 ```

-Later on, a new feature adds an extra column to the `projects` table: `user_id`. During deployment there might be a short time window where the database migration is already executed, but the new version of the application code is not deployed yet. When the query mentioned above executes during this period, the query will fail with the following error message: `PG::AmbiguousColumn: ERROR: column reference "user_id" is ambiguous`
+Later on, a new feature adds an extra column to the `projects` table: `user_id`. During deployment there might be a short time window where the database migration is already executed, but the new version of the application code is not deployed yet. When the query mentioned above executes during this period, the query fails with the following error message: `PG::AmbiguousColumn: ERROR: column reference "user_id" is ambiguous`

 The problem is caused by the way the attributes are selected from the database. The `user_id` column is present in both the `users` and `merge_requests` tables. The query planner cannot decide which table to use when looking up the `user_id` column.

@@ -210,7 +210,7 @@ Project.select(:path, :user_id).joins(:merge_requests)
 # SELECT "projects"."path", "user_id" FROM "projects" ...
 ```

-When a column list is given, ActiveRecord tries to match the arguments against the columns defined in the `projects` table and prepend the table name automatically. In this case, the `id` column is not going to be a problem, but the `user_id` column could return unexpected data:
+When a column list is given, ActiveRecord tries to match the arguments against the columns defined in the `projects` table and prepend the table name automatically. In this case, the `id` column is not a problem, but the `user_id` column could return unexpected data:

 ```ruby
 Project.select(:id, :user_id).joins(:merge_requests)
@@ -225,7 +225,7 @@ Project.select(:id, :user_id).joins(:merge_requests)
 ## Plucking IDs

 Never use ActiveRecord's `pluck` to pluck a set of values into memory only to
-use them as an argument for another query. For example, this will execute an
+use them as an argument for another query. For example, this executes an
 extra unnecessary database query and load a lot of unnecessary data into memory:

 ```ruby
@@ -314,10 +314,10 @@ union = Gitlab::SQL::Union.new([projects, more_projects, ...])
 Project.from("(#{union.to_sql}) projects")
 ```

-### Uneven columns in the UNION sub-queries
+### Uneven columns in the `UNION` sub-queries

-When the UNION query has uneven columns in the SELECT clauses, the database returns an error.
-Consider the following UNION query:
+When the `UNION` query has uneven columns in the `SELECT` clauses, the database returns an error.
+Consider the following `UNION` query:

 ```sql
 SELECT id FROM users WHERE id = 1
@@ -333,7 +333,7 @@ each UNION query must have the same number of columns
 ```

 This problem is apparent and it can be easily fixed during development. One edge-case is when
-UNION queries are combined with explicit column listing where the list comes from the
+`UNION` queries are combined with explicit column listing where the list comes from the
 `ActiveRecord` schema cache.

 Example (bad, avoid it):
@@ -387,17 +387,17 @@ User.connection.execute(Gitlab::SQL::Union.new([scope1, scope2]).to_sql)

 When ordering records based on the time they were created, you can order
 by the `id` column instead of ordering by `created_at`. Because IDs are always
-unique and incremented in the order that rows are created, doing so will produce the
+unique and incremented in the order that rows are created, doing so produces the
 exact same results. This also means there's no need to add an index on
 `created_at` to ensure consistent performance as `id` is already indexed by
 default.

-## Use WHERE EXISTS instead of WHERE IN
+## Use `WHERE EXISTS` instead of `WHERE IN`

 While `WHERE IN` and `WHERE EXISTS` can be used to produce the same data it is
 recommended to use `WHERE EXISTS` whenever possible. While in many cases
 PostgreSQL can optimise `WHERE IN` quite well there are also many cases where
-`WHERE EXISTS` will perform (much) better.
+`WHERE EXISTS` performs (much) better.

 In Rails you have to use this by creating SQL fragments:

@@ -446,7 +446,7 @@ method. This method differs from our `.safe_find_or_create_by` methods
 because it performs the `INSERT`, and then performs the `SELECT` commands only if that call
 fails.

-If the `INSERT` fails, it will leave a dead tuple around and
+If the `INSERT` fails, it leaves a dead tuple around and
 increment the primary key sequence (if any), among [other downsides](https://api.rubyonrails.org/classes/ActiveRecord/Relation.html#method-i-create_or_find_by).

 We prefer `.safe_find_or_create_by` if the common path is that we
@@ -13,16 +13,16 @@ module Security
       private

       def variables(params)
-        collect_values(params, 'value')
+        collect_values(params, :value)
       end

       def default_sast_values(params)
-        collect_values(params, 'defaultValue')
+        collect_values(params, :default_value)
       end

       def collect_values(config, key)
-        global_variables = config['global']&.to_h { |k| [k['field'], k[key]] } || {}
-        pipeline_variables = config['pipeline']&.to_h { |k| [k['field'], k[key]] } || {}
+        global_variables = config[:global]&.to_h { |k| [k[:field], k[key]] } || {}
+        pipeline_variables = config[:pipeline]&.to_h { |k| [k[:field], k[key]] } || {}

         analyzer_variables = collect_analyzer_values(config, key)

@@ -31,10 +31,10 @@ module Security

       def collect_analyzer_values(config, key)
         analyzer_variables = analyzer_variables_for(config, key)
-        analyzer_variables['SAST_EXCLUDED_ANALYZERS'] = if key == 'value'
-                                                          config['analyzers']
-                                                            &.reject {|a| a['enabled'] }
-                                                            &.collect {|a| a['name'] }
+        analyzer_variables['SAST_EXCLUDED_ANALYZERS'] = if key == :value
+                                                          config[:analyzers]
+                                                            &.reject {|a| a[:enabled] }
+                                                            &.collect {|a| a[:name] }
                                                             &.sort
                                                             &.join(', ')
                                                         else
@@ -45,10 +45,10 @@ module Security
       end

       def analyzer_variables_for(config, key)
-        config['analyzers']
-          &.select {|a| a['enabled'] && a['variables'] }
-          &.flat_map {|a| a['variables'] }
-          &.collect {|v| [v['field'], v[key]] }.to_h
+        config[:analyzers]
+          &.select {|a| a[:enabled] && a[:variables] }
+          &.flat_map {|a| a[:variables] }
+          &.collect {|v| [v[:field], v[key]] }.to_h
       end

       def update_existing_content!
@@ -35,6 +35,7 @@ RSpec.describe 'Cluster agent registration', :js do
       expect(page).to have_content('You cannot see this token again after you close this window.')
       expect(page).to have_content('example-agent-token')
       expect(page).to have_content('helm upgrade --install')
+      expect(page).to have_content('example-agent-2')

       within find('.modal-footer') do
         click_button('Close')
@@ -11,6 +11,7 @@ import {
   TOKEN_NAME_LIMIT,
   TOKEN_STATUS_ACTIVE,
   MAX_LIST_COUNT,
+  CREATE_TOKEN_MODAL,
 } from '~/clusters/agents/constants';
 import createNewAgentToken from '~/clusters/agents/graphql/mutations/create_new_agent_token.mutation.graphql';
 import getClusterAgentQuery from '~/clusters/agents/graphql/queries/get_cluster_agent.query.graphql';
@@ -231,7 +232,11 @@ describe('CreateTokenButton', () => {
     });

     it('shows agent instructions', () => {
-      expect(findAgentInstructions().exists()).toBe(true);
+      expect(findAgentInstructions().props()).toMatchObject({
+        agentName,
+        agentToken: 'token-secret',
+        modalId: CREATE_TOKEN_MODAL,
+      });
     });

     it('renders a close button', () => {
@@ -7,6 +7,7 @@ import CodeBlock from '~/vue_shared/components/code_block.vue';
 import ModalCopyButton from '~/vue_shared/components/modal_copy_button.vue';

 const kasAddress = 'kas.example.com';
+const agentName = 'my-agent';
 const agentToken = 'agent-token';
 const kasVersion = '15.0.0';
 const modalId = INSTALL_AGENT_MODAL_ID;
@@ -26,6 +27,7 @@ describe('InstallAgentModal', () => {
   };

   const propsData = {
+    agentName,
     agentToken,
     modalId,
   };
@@ -61,7 +63,12 @@ describe('InstallAgentModal', () => {
   it('renders a copy button', () => {
     expect(findCopyButton().props()).toMatchObject({
       title: 'Copy command',
-      text: generateAgentRegistrationCommand(agentToken, kasAddress, kasVersion),
+      text: generateAgentRegistrationCommand({
+        name: agentName,
+        token: agentToken,
+        version: kasVersion,
+        address: kasAddress,
+      }),
       modalId,
     });
   });
@@ -71,6 +78,7 @@ describe('InstallAgentModal', () => {
   });

   it('shows code block with agent installation command', () => {
+    expect(findCodeBlock().props('code')).toContain(`helm upgrade --install ${agentName}`);
     expect(findCodeBlock().props('code')).toContain(`--set config.token=${agentToken}`);
     expect(findCodeBlock().props('code')).toContain(`--set config.kasAddress=${kasAddress}`);
     expect(findCodeBlock().props('code')).toContain(`--set image.tag=v${kasVersion}`);
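To make these assertions concrete, here is a rough sketch (not part of the commit) of the string the updated generator returns for this spec's sample constants. The import is omitted and the whitespace of the continuation lines is approximate.

```javascript
// Assumes generateAgentRegistrationCommand is imported from the module changed above.
const command = generateAgentRegistrationCommand({
  name: 'my-agent',
  token: 'agent-token',
  version: '15.0.0',
  address: 'kas.example.com',
});

// Roughly:
// helm repo add gitlab https://charts.gitlab.io
// helm repo update
// helm upgrade --install my-agent gitlab/gitlab-agent \
//     --namespace gitlab-agent \
//     --create-namespace \
//     --set image.tag=v15.0.0 \
//     --set config.token=agent-token \
//     --set config.kasAddress=kas.example.com
```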
@@ -15,6 +15,7 @@ import {
   EVENT_ACTIONS_SELECT,
   MODAL_TYPE_EMPTY,
   MODAL_TYPE_REGISTER,
+  INSTALL_AGENT_MODAL_ID,
 } from '~/clusters_list/constants';
 import getAgentsQuery from '~/clusters_list/graphql/queries/get_agents.query.graphql';
 import getAgentConfigurations from '~/clusters_list/graphql/queries/agent_configurations.query.graphql';
@@ -222,7 +223,11 @@ describe('InstallAgentModal', () => {
     });

     it('shows agent instructions', () => {
-      expect(findAgentInstructions().exists()).toBe(true);
+      expect(findAgentInstructions().props()).toMatchObject({
+        agentName: 'agent-name',
+        agentToken: 'mock-agent-token',
+        modalId: INSTALL_AGENT_MODAL_ID,
+      });
     });

     describe('error creating agent', () => {
@@ -4,54 +4,54 @@ require 'spec_helper'

 RSpec.describe Security::CiConfiguration::SastBuildAction do
   let(:default_sast_values) do
-    { 'global' =>
+    { global:
       [
-        { 'field' => 'SECURE_ANALYZERS_PREFIX', 'defaultValue' => 'registry.gitlab.com/security-products', 'value' => 'registry.gitlab.com/security-products' }
+        { field: 'SECURE_ANALYZERS_PREFIX', default_value: 'registry.gitlab.com/security-products', value: 'registry.gitlab.com/security-products' }
       ],
-      'pipeline' =>
+      pipeline:
       [
-        { 'field' => 'stage', 'defaultValue' => 'test', 'value' => 'test' },
-        { 'field' => 'SEARCH_MAX_DEPTH', 'defaultValue' => 4, 'value' => 4 },
-        { 'field' => 'SAST_EXCLUDED_PATHS', 'defaultValue' => 'spec, test, tests, tmp', 'value' => 'spec, test, tests, tmp' }
+        { field: 'stage', default_value: 'test', value: 'test' },
+        { field: 'SEARCH_MAX_DEPTH', default_value: 4, value: 4 },
+        { field: 'SAST_EXCLUDED_PATHS', default_value: 'spec, test, tests, tmp', value: 'spec, test, tests, tmp' }
       ] }
   end

   let(:params) do
-    { 'global' =>
+    { global:
       [
-        { 'field' => 'SECURE_ANALYZERS_PREFIX', 'defaultValue' => 'registry.gitlab.com/security-products', 'value' => 'new_registry' }
+        { field: 'SECURE_ANALYZERS_PREFIX', default_value: 'registry.gitlab.com/security-products', value: 'new_registry' }
       ],
-      'pipeline' =>
+      pipeline:
       [
-        { 'field' => 'stage', 'defaultValue' => 'test', 'value' => 'security' },
-        { 'field' => 'SEARCH_MAX_DEPTH', 'defaultValue' => 4, 'value' => 1 },
-        { 'field' => 'SAST_EXCLUDED_PATHS', 'defaultValue' => 'spec, test, tests, tmp', 'value' => 'spec,docs' }
+        { field: 'stage', default_value: 'test', value: 'security' },
+        { field: 'SEARCH_MAX_DEPTH', default_value: 4, value: 1 },
+        { field: 'SAST_EXCLUDED_PATHS', default_value: 'spec, test, tests, tmp', value: 'spec,docs' }
       ] }
   end

   let(:params_with_analyzer_info) do
-    params.merge( { 'analyzers' =>
+    params.merge( { analyzers:
       [
         {
-          'name' => "bandit",
-          'enabled' => false
+          name: "bandit",
+          enabled: false
         },
         {
-          'name' => "brakeman",
-          'enabled' => true,
-          'variables' => [
-            { 'field' => "SAST_BRAKEMAN_LEVEL",
-              'defaultValue' => "1",
-              'value' => "2" }
+          name: "brakeman",
+          enabled: true,
+          variables: [
+            { field: "SAST_BRAKEMAN_LEVEL",
+              default_value: "1",
+              value: "2" }
           ]
         },
         {
-          'name' => "flawfinder",
-          'enabled' => true,
-          'variables' => [
-            { 'field' => "SAST_FLAWFINDER_LEVEL",
-              'defaultValue' => "1",
-              'value' => "1" }
+          name: "flawfinder",
+          enabled: true,
+          variables: [
+            { field: "SAST_FLAWFINDER_LEVEL",
+              default_value: "1",
+              value: "1" }
           ]
         }
       ] }
@@ -59,15 +59,15 @@ RSpec.describe Security::CiConfiguration::SastBuildAction do
   end

   let(:params_with_all_analyzers_enabled) do
-    params.merge( { 'analyzers' =>
+    params.merge( { analyzers:
       [
         {
-          'name' => "flawfinder",
-          'enabled' => true
+          name: "flawfinder",
+          enabled: true
         },
         {
-          'name' => "brakeman",
-          'enabled' => true
+          name: "brakeman",
+          enabled: true
         }
       ] }
     )
@@ -162,15 +162,15 @@ RSpec.describe Security::CiConfiguration::SastBuildAction do

   context 'with update stage and SEARCH_MAX_DEPTH and set SECURE_ANALYZERS_PREFIX to default' do
     let(:params) do
-      { 'global' =>
+      { global:
         [
-          { 'field' => 'SECURE_ANALYZERS_PREFIX', 'defaultValue' => 'registry.gitlab.com/security-products', 'value' => 'registry.gitlab.com/security-products' }
+          { field: 'SECURE_ANALYZERS_PREFIX', default_value: 'registry.gitlab.com/security-products', value: 'registry.gitlab.com/security-products' }
         ],
-        'pipeline' =>
+        pipeline:
         [
-          { 'field' => 'stage', 'defaultValue' => 'test', 'value' => 'brand_new_stage' },
-          { 'field' => 'SEARCH_MAX_DEPTH', 'defaultValue' => 4, 'value' => 5 },
-          { 'field' => 'SAST_EXCLUDED_PATHS', 'defaultValue' => 'spec, test, tests, tmp', 'value' => 'spec,docs' }
+          { field: 'stage', default_value: 'test', value: 'brand_new_stage' },
+          { field: 'SEARCH_MAX_DEPTH', default_value: 4, value: 5 },
+          { field: 'SAST_EXCLUDED_PATHS', default_value: 'spec, test, tests, tmp', value: 'spec,docs' }
         ] }
     end

@@ -273,9 +273,9 @@ RSpec.describe Security::CiConfiguration::SastBuildAction do

   context 'with one empty parameter' do
     let(:params) do
-      { 'global' =>
+      { global:
         [
-          { 'field' => 'SECURE_ANALYZERS_PREFIX', 'defaultValue' => 'registry.gitlab.com/security-products', 'value' => '' }
+          { field: 'SECURE_ANALYZERS_PREFIX', default_value: 'registry.gitlab.com/security-products', value: '' }
         ] }
     end

BIN vendor/project_templates/cluster_management.tar.gz (vendored, binary file not shown)
BIN vendor/project_templates/middleman.tar.gz (vendored, binary file not shown)