Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2020-09-15 03:09:24 +00:00
parent 09bd62c59d
commit 5fe82ba3d3
45 changed files with 787 additions and 138 deletions

View File

@ -12,6 +12,7 @@ module AlertManagement
include Gitlab::SQL::Pattern
include Presentable
include Gitlab::Utils::StrongMemoize
include Referable
STATUSES = {
triggered: 0,
@ -170,6 +171,25 @@ module AlertManagement
with_prometheus_alert.where(id: ids)
end
def self.reference_prefix
'^alert#'
end
def self.reference_pattern
@reference_pattern ||= %r{
(#{Project.reference_pattern})?
#{Regexp.escape(reference_prefix)}(?<alert>\d+)
}x
end
def self.link_reference_pattern
@link_reference_pattern ||= super("alert_management", /(?<alert>\d+)\/details(\#)?/)
end
def self.reference_valid?(reference)
reference.to_i > 0 && reference.to_i <= Gitlab::Database::MAX_INT_VALUE
end
def prometheus?
monitoring_tool == Gitlab::AlertManagement::AlertParams::MONITORING_TOOLS[:prometheus]
end
@ -178,10 +198,10 @@ module AlertManagement
increment!(:events)
end
# required for todos (typically contains an identifier like issue iid)
# no-op; we could use iid, but we don't have a reference prefix
def to_reference(_from = nil, full: false)
''
def to_reference(from = nil, full: false)
reference = "#{self.class.reference_prefix}#{iid}"
"#{project.to_reference_base(from, full: full)}#{reference}"
end
def execute_services
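For context, a minimal sketch of the reference formats these methods produce, assuming a hypothetical alert with IID 123 in project `my-group/my-project`:

```ruby
alert.to_reference              # => "^alert#123"
alert.to_reference(full: true)  # => "my-group/my-project^alert#123"

# The class-level patterns let Banzai recognize both the GFM form and the
# details-page URL in rendered text:
AlertManagement::Alert.reference_pattern.match?('my-group/my-project^alert#123')
# => true
AlertManagement::Alert.link_reference_pattern.match?(
  "#{Gitlab.config.gitlab.url}/my-group/my-project/-/alert_management/123/details"
)
# => true
```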

View File

@ -0,0 +1,12 @@
# frozen_string_literal: true
class AuthenticationEvent < ApplicationRecord
belongs_to :user, optional: true
validates :provider, :user_name, :result, presence: true
enum result: {
failed: 0,
success: 1
}
end
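A hypothetical usage sketch (the calling context and request object are assumptions; only the model itself is part of this commit):

```ruby
# Assumed to run inside a sign-in hook at the controller level.
AuthenticationEvent.create!(
  user: current_user,            # optional: stays nil for unknown users
  provider: 'google_oauth2',     # hypothetical provider name
  user_name: 'jane.doe',
  ip_address: request.remote_ip, # persisted in the inet column
  result: :success               # enum: failed (0) or success (1)
)

# The enum also provides scopes:
AuthenticationEvent.failed.where(provider: 'google_oauth2').count
```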

View File

@ -41,7 +41,7 @@ module Packages
}
end
::Gitlab::Database.bulk_insert(::Packages::Nuget::DependencyLinkMetadatum.table_name, rows.compact)
::Gitlab::Database.bulk_insert(::Packages::Nuget::DependencyLinkMetadatum.table_name, rows.compact) # rubocop:disable Gitlab/BulkInsert
end
def raw_dependency_for(dependency)

View File

@ -15,7 +15,7 @@ module Packages
tags_to_create = @tags - existing_tags
@package.tags.with_name(tags_to_destroy).delete_all if tags_to_destroy.any?
::Gitlab::Database.bulk_insert(Packages::Tag.table_name, rows(tags_to_create)) if tags_to_create.any?
::Gitlab::Database.bulk_insert(Packages::Tag.table_name, rows(tags_to_create)) if tags_to_create.any? # rubocop:disable Gitlab/BulkInsert
end
private

View File

@ -0,0 +1,5 @@
---
title: Add issues and merge_requests filtering by state for search API
merge_request: 41989
author:
type: added

View File

@ -0,0 +1,5 @@
---
title: Add AuthenticationEvent to store sign-in events
merge_request: 39652
author:
type: added

View File

@ -0,0 +1,5 @@
---
title: Add GFM reference format for alerts
merge_request: 40922
author:
type: added

View File

@ -0,0 +1,33 @@
# frozen_string_literal: true
class CreateAuthenticationEvents < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
def up
unless table_exists?(:authentication_events)
with_lock_retries do
create_table :authentication_events do |t|
t.datetime_with_timezone :created_at, null: false
t.references :user, foreign_key: { on_delete: :nullify }, index: true
t.integer :result, limit: 2, null: false
t.inet :ip_address
t.text :provider, null: false, index: true
t.text :user_name, null: false
end
end
end
add_text_limit :authentication_events, :provider, 64
add_text_limit :authentication_events, :user_name, 255
end
def down
with_lock_retries do
drop_table :authentication_events
end
end
end

View File

@ -0,0 +1 @@
5642f7d91bbbd20d1e3a964b6a06a4da14474db58f47e3ee0ce3273f7cd7a9e8

View File

@ -9539,6 +9539,27 @@ CREATE SEQUENCE public.audit_events_id_seq
ALTER SEQUENCE public.audit_events_id_seq OWNED BY public.audit_events.id;
CREATE TABLE public.authentication_events (
id bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
user_id bigint,
result smallint NOT NULL,
ip_address inet,
provider text NOT NULL,
user_name text NOT NULL,
CONSTRAINT check_45a6cc4e80 CHECK ((char_length(user_name) <= 255)),
CONSTRAINT check_c64f424630 CHECK ((char_length(provider) <= 64))
);
CREATE SEQUENCE public.authentication_events_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE public.authentication_events_id_seq OWNED BY public.authentication_events.id;
CREATE TABLE public.award_emoji (
id integer NOT NULL,
name character varying,
@ -16965,6 +16986,8 @@ ALTER TABLE ONLY public.atlassian_identities ALTER COLUMN user_id SET DEFAULT ne
ALTER TABLE ONLY public.audit_events ALTER COLUMN id SET DEFAULT nextval('public.audit_events_id_seq'::regclass);
ALTER TABLE ONLY public.authentication_events ALTER COLUMN id SET DEFAULT nextval('public.authentication_events_id_seq'::regclass);
ALTER TABLE ONLY public.award_emoji ALTER COLUMN id SET DEFAULT nextval('public.award_emoji_id_seq'::regclass);
ALTER TABLE ONLY public.background_migration_jobs ALTER COLUMN id SET DEFAULT nextval('public.background_migration_jobs_id_seq'::regclass);
@ -17896,6 +17919,9 @@ ALTER TABLE ONLY public.audit_events_part_5fc467ac26
ALTER TABLE ONLY public.audit_events
ADD CONSTRAINT audit_events_pkey PRIMARY KEY (id);
ALTER TABLE ONLY public.authentication_events
ADD CONSTRAINT authentication_events_pkey PRIMARY KEY (id);
ALTER TABLE ONLY public.award_emoji
ADD CONSTRAINT award_emoji_pkey PRIMARY KEY (id);
@ -19333,6 +19359,10 @@ CREATE INDEX index_approvers_on_user_id ON public.approvers USING btree (user_id
CREATE UNIQUE INDEX index_atlassian_identities_on_extern_uid ON public.atlassian_identities USING btree (extern_uid);
CREATE INDEX index_authentication_events_on_provider ON public.authentication_events USING btree (provider);
CREATE INDEX index_authentication_events_on_user_id ON public.authentication_events USING btree (user_id);
CREATE INDEX index_award_emoji_on_awardable_type_and_awardable_id ON public.award_emoji USING btree (awardable_type, awardable_id);
CREATE INDEX index_award_emoji_on_user_id_and_name ON public.award_emoji USING btree (user_id, name);
@ -23162,6 +23192,9 @@ ALTER TABLE ONLY public.webauthn_registrations
ALTER TABLE ONLY public.packages_build_infos
ADD CONSTRAINT fk_rails_b18868292d FOREIGN KEY (package_id) REFERENCES public.packages_packages(id) ON DELETE CASCADE;
ALTER TABLE ONLY public.authentication_events
ADD CONSTRAINT fk_rails_b204656a54 FOREIGN KEY (user_id) REFERENCES public.users(id) ON DELETE SET NULL;
ALTER TABLE ONLY public.merge_trains
ADD CONSTRAINT fk_rails_b29261ce31 FOREIGN KEY (user_id) REFERENCES public.users(id) ON DELETE CASCADE;

View File

@ -1075,7 +1075,7 @@ recovery efforts by preventing writes that may conflict with the unreplicated wr
To enable writes again, an administrator can:
1. [Check](#check-for-data-loss) for data loss.
1. Attempt to [recover](#recover-missing-data) missing data.
1. Attempt to [recover](#data-recovery) missing data.
1. Either [enable writes](#enable-writes-or-accept-data-loss) in the virtual storage or
[accept data loss](#enable-writes-or-accept-data-loss) if necessary, depending on the version of
GitLab.
@ -1169,17 +1169,6 @@ Virtual storage: default
To check a project's repository checksums across all Gitaly nodes, run the
[replicas Rake task](../raketasks/praefect.md#replica-checksums) on the main GitLab node.
### Recover missing data
The Praefect `reconcile` sub-command can be used to recover unreplicated changes from another replica.
The source must be on a later version than the target storage.
```shell
sudo /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml reconcile -virtual <virtual-storage> -reference <up-to-date-storage> -target <outdated-storage> -f
```
Refer to [Gitaly node recovery](#gitaly-node-recovery) section for more details on the `reconcile` sub-command.
### Enable writes or accept data loss
Praefect provides the following subcommands to re-enable writes:
@ -1203,31 +1192,53 @@ Praefect provides the following subcommands to re-enable writes:
CAUTION: **Caution:**
`accept-dataloss` causes permanent data loss by overwriting other versions of the repository. Data
[recovery efforts](#recover-missing-data) must be performed before using it.
[recovery efforts](#data-recovery) must be performed before using it.
## Gitaly node recovery
## Data recovery
When a secondary Gitaly node fails and is no longer able to replicate changes, it starts
to drift from the primary Gitaly node. If the failed Gitaly node eventually recovers,
it needs to be reconciled with the primary Gitaly node. The primary Gitaly node is considered
the single source of truth for the state of a shard.
If a Gitaly node fails replication jobs for any reason, it ends up hosting outdated versions of
the affected repositories. Praefect provides tools for automatically or manually reconciling
the outdated repositories in order to bring them fully up to date again.
The Praefect `reconcile` sub-command allows for the manual reconciliation between a secondary
Gitaly node and the current primary Gitaly node.
### Automatic reconciliation
Run the following command on the Praefect server after all placeholders
(`<virtual-storage>` and `<target-storage>`) have been replaced:
> [Introduced](https://gitlab.com/gitlab-org/gitaly/-/issues/2717) in GitLab 13.4.
Praefect automatically reconciles repositories that are not up to date. By default, this is done every
five minutes. For each outdated repository on a healthy Gitaly node, Praefect picks a
random, fully up-to-date replica of the repository on another healthy Gitaly node to replicate from. A
replication job is scheduled only if there are no other replication jobs pending for the target
repository.
The reconciliation frequency can be changed via the configuration. The value can be any valid
[Go duration value](https://golang.org/pkg/time/#ParseDuration). Values of 0 or below disable the feature.
Examples:
```ruby
praefect['reconciliation_scheduling_interval'] = '5m' # the default value
```
```ruby
praefect['reconciliation_scheduling_interval'] = '30s' # reconcile every 30 seconds
```
```ruby
praefect['reconciliation_scheduling_interval'] = '0' # disable the feature
```
### Manual reconciliation
The Praefect `reconcile` sub-command allows for manual reconciliation between two Gitaly nodes. The
command replicates every repository that is on a later version on the reference storage to the target storage.
```shell
sudo /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml reconcile -virtual <virtual-storage> -target <target-storage>
sudo /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml reconcile -virtual <virtual-storage> -reference <up-to-date-storage> -target <outdated-storage> -f
```
- Replace the placeholder `<virtual-storage>` with the virtual storage containing the Gitaly node storage to be checked.
- Replace the placeholder `<target-storage>` with the Gitaly storage name.
The command will return a list of repositories that were found to be
inconsistent against the current primary. Each of these inconsistencies will
also be logged with an accompanying replication job ID.
- Replace the placeholder `<up-to-date-storage>` with the Gitaly storage name containing up-to-date repositories.
- Replace the placeholder `<outdated-storage>` with the Gitaly storage name containing outdated repositories.
## Migrate existing repositories to Gitaly Cluster

View File

@ -397,6 +397,7 @@ GET /groups/:id/search
| `id` | integer/string | yes | The ID or [URL-encoded path of the group](README.md#namespaced-path-encoding) owned by the authenticated user |
| `scope` | string | yes | The scope to search in |
| `search` | string | yes | The search query |
| `state` | string | no | Filter by state. Currently only supported for issues and merge requests; ignored for other scopes. |
Search the expression within the specified scope. Currently these scopes are supported: projects, issues, merge_requests, milestones, users.
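As an illustration (a sketch, not taken from these docs; the host, token, and group ID are placeholders), the new `state` parameter can be exercised like this:

```ruby
require 'net/http'
require 'json'

uri = URI('https://gitlab.example.com/api/v4/groups/3/search')
uri.query = URI.encode_www_form(scope: 'issues', search: 'flag', state: 'opened')

request = Net::HTTP::Get.new(uri)
request['PRIVATE-TOKEN'] = '<your_access_token>'

response = Net::HTTP.start(uri.host, uri.port, use_ssl: true) do |http|
  http.request(request)
end

# Only issues in the "opened" state come back; `state` is ignored for other scopes.
puts JSON.parse(response.body).size
```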
@ -741,6 +742,7 @@ GET /projects/:id/search
| `scope` | string | yes | The scope to search in |
| `search` | string | yes | The search query |
| `ref` | string | no | The name of a repository branch or tag to search on. The project's default branch is used by default. This is only applicable for scopes: commits, blobs, and wiki_blobs. |
| `state` | string | no | Filter by state. Currently only supported for issues and merge requests; ignored for other scopes. |
Search the expression within the specified scope. Currently these scopes are supported: issues, merge_requests, milestones, notes, wiki_blobs, commits, blobs, users.

View File

@ -60,7 +60,7 @@ Caches:
- Are disabled if not defined globally or per job (using `cache:`).
- Are available for all jobs in your `.gitlab-ci.yml` if enabled globally.
- Can be used in subsequent pipelines by the same job in which the cache was created (if not defined globally).
- Are stored where the Runner is installed **and** uploaded to S3 if [distributed cache is enabled](https://docs.gitlab.com/runner/configuration/autoscale.html#distributed-runners-caching).
- Are stored where GitLab Runner is installed **and** uploaded to S3 if [distributed cache is enabled](https://docs.gitlab.com/runner/configuration/autoscale.html#distributed-runners-caching).
- If defined per job, are used:
- By the same job in a subsequent pipeline.
- By subsequent jobs in the same pipeline, if they have identical dependencies.
@ -80,33 +80,33 @@ can't link to files outside it.
## Good caching practices
We have the cache from the perspective of the developers (who consume a cache
within the job) and the cache from the perspective of the Runner. Depending on
which type of Runner you are using, cache can act differently.
within the job) and the cache from the perspective of the runner. Depending on
which type of runner you are using, cache can act differently.
From the perspective of the developer, to ensure maximum availability of the
cache, use one or a mix of the following when declaring `cache` in your jobs:
- [Tag your Runners](../runners/README.md#use-tags-to-limit-the-number-of-jobs-using-the-runner) and use the tag on jobs
- [Tag your runners](../runners/README.md#use-tags-to-limit-the-number-of-jobs-using-the-runner) and use the tag on jobs
that share their cache.
- [Use sticky Runners](../runners/README.md#prevent-a-specific-runner-from-being-enabled-for-other-projects)
- [Use sticky runners](../runners/README.md#prevent-a-specific-runner-from-being-enabled-for-other-projects)
that are only available to a particular project.
- [Use a `key`](../yaml/README.md#cachekey) that fits your workflow (for example,
different caches on each branch). For that, you can take advantage of the
[CI/CD predefined variables](../variables/README.md#predefined-environment-variables).
TIP: **Tip:**
Using the same Runner for your pipeline, is the most simple and efficient way to
Using the same runner for your pipeline is the simplest and most efficient way to
cache files in one stage or pipeline, and pass this cache to subsequent stages
or pipelines in a guaranteed manner.
From the perspective of the Runner, in order for cache to work effectively, one
From the perspective of the runner, for the cache to work effectively, one
of the following must be true:
- Use a single Runner for all your jobs.
- Use multiple Runners (in autoscale mode or not) that use
- Use a single runner for all your jobs.
- Use multiple runners (in autoscale mode or not) that use
[distributed caching](https://docs.gitlab.com/runner/configuration/autoscale.html#distributed-runners-caching),
where the cache is stored in S3 buckets (like shared Runners on GitLab.com).
- Use multiple Runners (not in autoscale mode) of the same architecture that
where the cache is stored in S3 buckets (like shared runners on GitLab.com).
- Use multiple runners (not in autoscale mode) of the same architecture that
share a common network-mounted directory (using NFS or something similar)
where the cache will be stored.
@ -364,27 +364,27 @@ be prepared to regenerate any cached files in each job that needs them.
Assuming you have properly [defined `cache` in `.gitlab-ci.yml`](../yaml/README.md#cache)
according to your workflow, the availability of the cache ultimately depends on
how the Runner has been configured (the executor type and whether different
Runners are used for passing the cache between jobs).
how the runner has been configured (the executor type and whether different
runners are used for passing the cache between jobs).
### Where the caches are stored
Since the Runner is the one responsible for storing the cache, it's essential
Since the runner is the one responsible for storing the cache, it's essential
to know **where** it's stored. All the cache paths defined under a job in
`.gitlab-ci.yml` are archived in a single `cache.zip` file and stored in the
Runner's configured cache location. By default, they are stored locally in the
machine where the Runner is installed and depends on the type of the executor.
runner's configured cache location. By default, they are stored locally on the
machine where the runner is installed, and the exact location depends on the type of the executor.
| GitLab Runner executor | Default path of the cache |
| ---------------------- | ------------------------- |
| [Shell](https://docs.gitlab.com/runner/executors/shell.html) | Locally, stored under the `gitlab-runner` user's home directory: `/home/gitlab-runner/cache/<user>/<project>/<cache-key>/cache.zip`. |
| [Docker](https://docs.gitlab.com/runner/executors/docker.html) | Locally, stored under [Docker volumes](https://docs.gitlab.com/runner/executors/docker.html#the-builds-and-cache-storage): `/var/lib/docker/volumes/<volume-id>/_data/<user>/<project>/<cache-key>/cache.zip`. |
| [Docker machine](https://docs.gitlab.com/runner/executors/docker_machine.html) (autoscale Runners) | Behaves the same as the Docker executor. |
| [Docker machine](https://docs.gitlab.com/runner/executors/docker_machine.html) (autoscale runners) | Behaves the same as the Docker executor. |
### How archiving and extracting works
In the simplest scenario, consider that you use only one machine where the
Runner is installed, and all jobs of your project run on the same host.
runner is installed, and all jobs of your project run on the same host.
Let's see the following example of two jobs that belong to two consecutive
stages:
@ -426,17 +426,17 @@ Here's what happens behind the scenes:
1. `after_script` is executed.
1. `cache` runs and the `vendor/` directory is zipped into `cache.zip`.
This file is then saved in the directory based on the
[Runner's setting](#where-the-caches-are-stored) and the `cache: key`.
[runner's setting](#where-the-caches-are-stored) and the `cache: key`.
1. `job B` runs.
1. The cache is extracted (if found).
1. `before_script` is executed.
1. `script` is executed.
1. Pipeline finishes.
By using a single Runner on a single machine, you'll not have the issue where
`job B` might execute on a Runner different from `job A`, thus guaranteeing the
By using a single runner on a single machine, you won't have the issue where
`job B` might execute on a runner different from `job A`, thus guaranteeing the
cache between stages. That will only work if the build goes from stage `build`
to `test` in the same Runner/machine, otherwise, you [might not have the cache
to `test` in the same runner/machine, otherwise, you [might not have the cache
available](#cache-mismatch).
During the caching process, there are also a couple of things to consider:
@ -448,13 +448,13 @@ During the caching process, there's also a couple of things to consider:
their cache.
- When extracting the cache from `cache.zip`, everything in the zip file is
extracted in the job's working directory (usually the repository which is
pulled down), and the Runner doesn't mind if the archive of `job A` overwrites
pulled down), and the runner doesn't mind if the archive of `job A` overwrites
things in the archive of `job B`.
The reason why it works this way is because the cache created for one Runner
It works this way because the cache created for one runner
often is not valid when used by a different one, which can run on a
**different architecture** (e.g., when the cache includes binary files). And
since the different steps might be executed by Runners running on different
since the different steps might be executed by runners running on different
machines, it is a safe default.
### Cache mismatch
@ -464,17 +464,17 @@ mismatch and a few ideas how to fix it.
| Reason of a cache mismatch | How to fix it |
| -------------------------- | ------------- |
| You use multiple standalone Runners (not in autoscale mode) attached to one project without a shared cache | Use only one Runner for your project or use multiple Runners with distributed cache enabled |
| You use Runners in autoscale mode without a distributed cache enabled | Configure the autoscale Runner to use a distributed cache |
| The machine the Runner is installed on is low on disk space or, if you've set up distributed cache, the S3 bucket where the cache is stored doesn't have enough space | Make sure you clear some space to allow new caches to be stored. Currently, there's no automatic way to do this. |
| You use multiple standalone runners (not in autoscale mode) attached to one project without a shared cache | Use only one runner for your project or use multiple runners with distributed cache enabled |
| You use runners in autoscale mode without a distributed cache enabled | Configure the autoscale runner to use a distributed cache |
| The machine the runner is installed on is low on disk space or, if you've set up distributed cache, the S3 bucket where the cache is stored doesn't have enough space | Make sure you clear some space to allow new caches to be stored. Currently, there's no automatic way to do this. |
| You use the same `key` for jobs where they cache different paths. | Use different cache keys so that the cache archive is stored in a different location and doesn't overwrite the wrong caches. |
Let's explore some examples.
#### Examples
Let's assume you have only one Runner assigned to your project, so the cache
will be stored in the Runner's machine by default. If two jobs, A and B,
Let's assume you have only one runner assigned to your project, so the cache
will be stored on the runner's machine by default. If two jobs, A and B,
have the same cache key, but they cache different paths, cache B would overwrite
cache A, even if their `paths` don't match:
@ -513,7 +513,7 @@ job B:
To fix that, use different `keys` for each job.
In another case, let's assume you have more than one Runners assigned to your
In another case, let's assume you have more than one runner assigned to your
project, but the distributed cache is not enabled. The second time the
pipeline is run, we want `job A` and `job B` to re-use their cache (which in this case
will be different):
@ -542,11 +542,11 @@ job B:
In that case, even if the `key` is different (no fear of overwriting), you
might find that the cached files "get cleaned" before each stage if the
jobs run on different Runners in the subsequent pipelines.
jobs run on different runners in the subsequent pipelines.
## Clearing the cache
GitLab Runners use [cache](../yaml/README.md#cache) to speed up the execution
Runners use [cache](../yaml/README.md#cache) to speed up the execution
of your jobs by reusing existing data. This, however, can sometimes lead to
inconsistent behavior.
@ -565,9 +565,9 @@ If you want to avoid editing `.gitlab-ci.yml`, you can easily clear the cache
via GitLab's UI:
1. Navigate to your project's **CI/CD > Pipelines** page.
1. Click on the **Clear Runner caches** button to clean up the cache.
1. Click on the **Clear runner caches** button to clean up the cache.
![Clear Runners cache](img/clear_runners_cache.png)
![Clear runner caches](img/clear_runners_cache.png)
1. On the next push, your CI/CD job will use a new cache.

View File

@ -81,6 +81,8 @@ merge request widget.
To make the Unit test report output files browsable, include them with the
[`artifacts:paths`](yaml/README.md#artifactspaths) keyword as well, as shown in the [Ruby example](#ruby-example).
To upload the report even if the job fails (for example if the tests do not pass), use the [`artifacts:when:always`](yaml/README.md#artifactswhen)
keyword.
NOTE: **Note:**
You cannot have multiple tests with the same name and class in your JUnit report format XML file.

View File

@ -377,6 +377,56 @@ Recommendations:
- Use a [feature flag](../../operations/feature_flags.md) to have a control over the impact when
adding new metrics.
##### Known events in usage data payload
All events added in [`known_events.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events.yml) are automatically added to usage data generation under the `redis_hll_counters` key. This column is stored in [version-app as JSON](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/db/schema.rb#L209).
For each event we add metrics for the weekly and monthly time frames, and totals for each where applicable:
- `#{event_name}_weekly`: Data for 7 days for daily [aggregation](#adding-new-events) events and data for the last complete week for weekly [aggregation](#adding-new-events) events.
- `#{event_name}_monthly`: Data for 28 days for daily [aggregation](#adding-new-events) events and data for the last 4 complete weeks for weekly [aggregation](#adding-new-events) events.
- `#{category}_total_unique_counts_weekly`: Total unique counts for events in the same category for the last 7 days or the last complete week, if events are in the same Redis slot and we have more than one metric.
- `#{category}_total_unique_counts_monthly`: Total unique counts for events in same category for the last 28 days or the last 4 complete weeks, if events are in the same Redis slot and we have more than one metric.
Example of `redis_hll_counters` data:
```ruby
{:redis_hll_counters=>
{"compliance"=>
{"g_compliance_dashboard_weekly"=>0,
"g_compliance_dashboard_monthly"=>0,
"g_compliance_audit_events_weekly"=>0,
"g_compliance_audit_events_monthly"=>0,
"compliance_total_unique_counts_weekly"=>0,
"compliance_total_unique_counts_monthly"=>0},
"analytics"=>
{"g_analytics_contribution_weekly"=>0,
"g_analytics_contribution_monthly"=>0,
"g_analytics_insights_weekly"=>0,
"g_analytics_insights_monthly"=>0,
"analytics_total_unique_counts_weekly"=>0,
"analytics_total_unique_counts_monthly"=>0},
"ide_edit"=>
{"g_edit_by_web_ide_weekly"=>0,
"g_edit_by_web_ide_monthly"=>0,
"g_edit_by_sfe_weekly"=>0,
"g_edit_by_sfe_monthly"=>0,
"ide_edit_total_unique_counts_weekly"=>0,
"ide_edit_total_unique_counts_monthly"=>0},
"search"=>
{"i_search_total_weekly"=>0, "i_search_total_monthly"=>0, "i_search_advanced_weekly"=>0, "i_search_advanced_monthly"=>0, "i_search_paid_weekly"=>0, "i_search_paid_monthly"=>0, "search_total_unique_counts_weekly"=>0, "search_total_unique_counts_monthly"=>0},
"source_code"=>{"wiki_action_weekly"=>0, "wiki_action_monthly"=>0}
}
```
Example usage:
```ruby

View File

@ -42,10 +42,10 @@ information directly in the merge request.
To enable Container Scanning in your pipeline, you need the following:
- [GitLab Runner](https://docs.gitlab.com/runner/) with the [Docker](https://docs.gitlab.com/runner/executors/docker.html)
or [Kubernetes](https://docs.gitlab.com/runner/install/kubernetes.html) executor.
- Docker `18.09.03` or higher installed on the same computer as the Runner. If you're using the
shared Runners on GitLab.com, then this is already the case.
- [GitLab Runner](https://docs.gitlab.com/runner/) with the [`docker`](https://docs.gitlab.com/runner/executors/docker.html)
or [`kubernetes`](https://docs.gitlab.com/runner/install/kubernetes.html) executor.
- Docker `18.09.03` or higher installed on the same computer as the runner. If you're using the
shared runners on GitLab.com, then this is already the case.
- [Build and push](../../packages/container_registry/index.md#container-registry-examples-with-gitlab-cicd)
your Docker image to your project's container registry. The name of the Docker image should use
the following [predefined environment variables](../../../ci/variables/predefined_variables.md):
@ -221,8 +221,8 @@ To use Container Scanning in an offline environment, you need:
NOTE: **Note:**
GitLab Runner has a [default `pull policy` of `always`](https://docs.gitlab.com/runner/executors/docker.html#using-the-always-pull-policy),
meaning the Runner tries to pull Docker images from the GitLab container registry even if a local
copy is available. GitLab Runner's [`pull_policy` can be set to `if-not-present`](https://docs.gitlab.com/runner/executors/docker.html#using-the-if-not-present-pull-policy)
meaning the runner tries to pull Docker images from the GitLab container registry even if a local
copy is available. The GitLab Runner [`pull_policy` can be set to `if-not-present`](https://docs.gitlab.com/runner/executors/docker.html#using-the-if-not-present-pull-policy)
in an offline environment if you prefer using only locally available Docker images. However, we
recommend keeping the pull policy setting to `always` if not in an offline environment, as this
enables the use of updated scanners in your CI/CD pipelines.
@ -421,7 +421,7 @@ Read more about the [solutions for vulnerabilities](../index.md#solutions-for-vu
### `docker: Error response from daemon: failed to copy xattrs`
When the GitLab Runner uses the Docker executor and NFS is used
When the runner uses the `docker` executor and NFS is used
(for example, `/var/lib/docker` is on an NFS mount), Container Scanning might fail with
an error like the following:
@ -430,6 +430,6 @@ docker: Error response from daemon: failed to copy xattrs: failed to set xattr "
```
This is a result of a bug in Docker which is now [fixed](https://github.com/containerd/continuity/pull/138 "fs: add WithAllowXAttrErrors CopyOpt").
To prevent the error, ensure the Docker version that the Runner is using is
To prevent the error, ensure the Docker version that the runner is using is
`18.09.03` or higher. For more information, see
[issue #10241](https://gitlab.com/gitlab-org/gitlab/-/issues/10241 "Investigate why Container Scanning is not working with NFS mounts").

View File

@ -566,8 +566,8 @@ To use DAST in an offline environment, you need:
NOTE: **Note:**
GitLab Runner has a [default `pull policy` of `always`](https://docs.gitlab.com/runner/executors/docker.html#using-the-always-pull-policy),
meaning the Runner tries to pull Docker images from the GitLab container registry even if a local
copy is available. GitLab Runner's [`pull_policy` can be set to `if-not-present`](https://docs.gitlab.com/runner/executors/docker.html#using-the-if-not-present-pull-policy)
meaning the runner tries to pull Docker images from the GitLab container registry even if a local
copy is available. The GitLab Runner [`pull_policy` can be set to `if-not-present`](https://docs.gitlab.com/runner/executors/docker.html#using-the-if-not-present-pull-policy)
in an offline environment if you prefer using only locally available Docker images. However, we
recommend keeping the pull policy setting to `always` if not in an offline environment, as this
enables the use of updated scanners in your CI/CD pipelines.

View File

@ -43,10 +43,10 @@ The results are sorted by the severity of the vulnerability:
To run Dependency Scanning jobs, by default, you need GitLab Runner with the
[`docker`](https://docs.gitlab.com/runner/executors/docker.html) or
[`kubernetes`](https://docs.gitlab.com/runner/install/kubernetes.html) executor.
If you're using the shared Runners on GitLab.com, this is enabled by default.
If you're using the shared runners on GitLab.com, this is enabled by default.
CAUTION: **Caution:**
If you use your own Runners, make sure your installed version of Docker
If you use your own runners, make sure your installed version of Docker
is **not** `19.03.0`. See [troubleshooting information](#error-response-from-daemon-error-processing-tar-file-docker-tar-relocation-error) for details.
## Supported languages and package managers
@ -362,8 +362,8 @@ Here are the requirements for using Dependency Scanning in an offline environmen
NOTE: **Note:**
GitLab Runner has a [default `pull policy` of `always`](https://docs.gitlab.com/runner/executors/docker.html#using-the-always-pull-policy),
meaning the Runner tries to pull Docker images from the GitLab container registry even if a local
copy is available. GitLab Runner's [`pull_policy` can be set to `if-not-present`](https://docs.gitlab.com/runner/executors/docker.html#using-the-if-not-present-pull-policy)
meaning the runner tries to pull Docker images from the GitLab container registry even if a local
copy is available. The GitLab Runner [`pull_policy` can be set to `if-not-present`](https://docs.gitlab.com/runner/executors/docker.html#using-the-if-not-present-pull-policy)
in an offline environment if you prefer using only locally available Docker images. However, we
recommend keeping the pull policy setting to `always` if not in an offline environment, as this
enables the use of updated scanners in your CI/CD pipelines.

View File

@ -96,7 +96,7 @@ above. You can find more information at each of the pages below:
To use many GitLab features, including
[security scans](../index.md#working-in-an-offline-environment)
and [Auto DevOps](../../../topics/autodevops/index.md), the GitLab Runner must be able to fetch the
and [Auto DevOps](../../../topics/autodevops/index.md), the runner must be able to fetch the
relevant Docker images.
The process for making these images available without direct access to the public internet
@ -124,7 +124,7 @@ The pipeline downloads the Docker images needed for the Security Scanners and sa
[job artifacts](../../../ci/pipelines/job_artifacts.md) or pushes them to the [Container Registry](../../packages/container_registry/index.md)
of the project where the pipeline is executed. These archives can be transferred to another location
and [loaded](https://docs.docker.com/engine/reference/commandline/load/) in a Docker daemon.
This method requires a GitLab Runner with access to both `gitlab.com` (including
This method requires a runner with access to both `gitlab.com` (including
`registry.gitlab.com`) and the local offline instance. This runner must run in
[privileged mode](https://docs.gitlab.com/runner/executors/docker.html#use-docker-in-docker-with-privileged-mode)
to be able to use the `docker` command inside the jobs. This runner can be installed in a DMZ or on

View File

@ -43,16 +43,16 @@ A pipeline consists of multiple jobs, including SAST and DAST scanning. If any j
## Requirements
To run SAST jobs, by default, you need a GitLab Runner with the
To run SAST jobs, by default, you need GitLab Runner with the
[`docker`](https://docs.gitlab.com/runner/executors/docker.html) or
[`kubernetes`](https://docs.gitlab.com/runner/install/kubernetes.html) executor.
If you're using the shared Runners on GitLab.com, this is enabled by default.
If you're using the shared runners on GitLab.com, this is enabled by default.
CAUTION: **Caution:**
Our SAST jobs require a Linux container type. Windows containers are not yet supported.
CAUTION: **Caution:**
If you use your own Runners, make sure the Docker version installed
If you use your own runners, make sure the Docker version installed
is **not** `19.03.0`. See [troubleshooting information](#error-response-from-daemon-error-processing-tar-file-docker-tar-relocation-error) for details.
## Supported languages and frameworks
@ -476,14 +476,14 @@ run successfully. For more information, see [Offline environments](../offline_de
To use SAST in an offline environment, you need:
- A GitLab Runner with the [`docker` or `kubernetes` executor](#requirements).
- GitLab Runner with the [`docker` or `kubernetes` executor](#requirements).
- A Docker Container Registry with locally available copies of SAST [analyzer](https://gitlab.com/gitlab-org/security-products/analyzers) images.
- Configure certificate checking of packages (optional).
NOTE: **Note:**
GitLab Runner has a [default `pull policy` of `always`](https://docs.gitlab.com/runner/executors/docker.html#using-the-always-pull-policy),
meaning the Runner tries to pull Docker images from the GitLab container registry even if a local
copy is available. GitLab Runner's [`pull_policy` can be set to `if-not-present`](https://docs.gitlab.com/runner/executors/docker.html#using-the-if-not-present-pull-policy)
meaning the runner tries to pull Docker images from the GitLab container registry even if a local
copy is available. The GitLab Runner [`pull_policy` can be set to `if-not-present`](https://docs.gitlab.com/runner/executors/docker.html#using-the-if-not-present-pull-policy)
in an offline environment if you prefer using only locally available Docker images. However, we
recommend keeping the pull policy setting to `always` if not in an offline environment, as this
enables the use of updated scanners in your CI/CD pipelines.

View File

@ -37,13 +37,13 @@ GitLab displays identified secrets visibly in a few places:
To run Secret Detection jobs, by default, you need GitLab Runner with the
[`docker`](https://docs.gitlab.com/runner/executors/docker.html) or
[`kubernetes`](https://docs.gitlab.com/runner/install/kubernetes.html) executor.
If you're using the shared Runners on GitLab.com, this is enabled by default.
If you're using the shared runners on GitLab.com, this is enabled by default.
CAUTION: **Caution:**
Our Secret Detection jobs currently expect a Linux container type. Windows containers are not yet supported.
CAUTION: **Caution:**
If you use your own Runners, make sure the Docker version installed
If you use your own runners, make sure the Docker version installed
is **not** `19.03.0`. See [troubleshooting information](../sast#error-response-from-daemon-error-processing-tar-file-docker-tar-relocation-error) for details.
### Making Secret Detection available to all GitLab tiers

View File

@ -35,7 +35,7 @@ To use the instance, group, project, or pipeline security dashboard:
the [supported reports](#supported-reports).
1. The configured jobs must use the [new `reports` syntax](../../../ci/pipelines/job_artifacts.md#artifactsreports).
1. [GitLab Runner](https://docs.gitlab.com/runner/) 11.5 or newer must be used.
If you're using the shared Runners on GitLab.com, this is already the case.
If you're using the shared runners on GitLab.com, this is already the case.
## Pipeline Security

View File

@ -616,8 +616,8 @@ To use License Compliance in an offline environment, you need:
NOTE: **Note:**
GitLab Runner has a [default `pull policy` of `always`](https://docs.gitlab.com/runner/executors/docker.html#using-the-always-pull-policy),
meaning the Runner tries to pull Docker images from the GitLab container registry even if a local
copy is available. GitLab Runner's [`pull_policy` can be set to `if-not-present`](https://docs.gitlab.com/runner/executors/docker.html#using-the-if-not-present-pull-policy)
meaning the runner tries to pull Docker images from the GitLab container registry even if a local
copy is available. The GitLab Runner [`pull_policy` can be set to `if-not-present`](https://docs.gitlab.com/runner/executors/docker.html#using-the-if-not-present-pull-policy)
in an offline environment if you prefer using only locally available Docker images. However, we
recommend keeping the pull policy setting to `always` if not in an offline environment, as this
enables the use of updated scanners in your CI/CD pipelines.

View File

@ -438,6 +438,11 @@ GFM recognizes the following:
| commit range comparison | `9ba12248...b19a04f5` | `namespace/project@9ba12248...b19a04f5` | `project@9ba12248...b19a04f5` |
| repository file references | `[README](doc/README)` | | |
| repository file line references | `[README](doc/README#L13)` | | |
| [alert](../operations/incident_management/alerts.md) | `^alert#123` | `namespace/project^alert#123` | `project^alert#123` |
For example, referencing an issue by using `#123` will format the output as a link
to issue number 123 with text `#123`. Likewise, a link to issue number 123 will be
recognized and formatted with text `#123`.
In addition to this, links to some objects are also recognized and formatted. Some examples of these are:

View File

@ -81,8 +81,8 @@ To display the Deploy Boards for a specific [environment](../../ci/environments/
[OpenShift docs](https://docs.openshift.com/container-platform/3.7/dev_guide/deployments/kubernetes_deployments.html#kubernetes-deployments-vs-deployment-configurations)
and [GitLab issue #4584](https://gitlab.com/gitlab-org/gitlab/-/issues/4584).
1. [Configure GitLab Runner](../../ci/runners/README.md) with the [Docker](https://docs.gitlab.com/runner/executors/docker.html) or
[Kubernetes](https://docs.gitlab.com/runner/executors/kubernetes.html) executor.
1. [Configure GitLab Runner](../../ci/runners/README.md) with the [`docker`](https://docs.gitlab.com/runner/executors/docker.html) or
[`kubernetes`](https://docs.gitlab.com/runner/executors/kubernetes.html) executor.
1. Configure the [Kubernetes integration](clusters/index.md) in your project for the
cluster. The Kubernetes namespace is of particular note as you will need it
for your deployment scripts (exposed by the `KUBE_NAMESPACE` env variable).

View File

@ -236,12 +236,12 @@ below.
CAUTION: **Warning:**
Interactive Web Terminals for the Web IDE is currently in **Beta**.
Shared Runners [do not yet support Interactive Web Terminals](https://gitlab.com/gitlab-org/gitlab/-/issues/24674),
so you would need to use your own private Runner(s) to make use of this feature.
Shared runners [do not yet support Interactive Web Terminals](https://gitlab.com/gitlab-org/gitlab/-/issues/24674),
so you would need to use your own private runner to make use of this feature.
[Interactive Web Terminals](../../../ci/interactive_web_terminal/index.md)
give the project [Maintainers](../../permissions.md#project-members-permissions)
user access to a terminal to interact with the Runner directly from
user access to a terminal to interact with the runner directly from
GitLab, including through the Web IDE.
### Runner configuration
@ -249,7 +249,7 @@ GitLab, including through the Web IDE.
Some things need to be configured in the runner for the interactive web terminal
to work:
- The Runner needs to have
- The runner needs to have
[`[session_server]` configured properly](https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-session_server-section).
This section requires at least a `session_timeout` value (which defaults to 1800
seconds) and a `listen_address` value. If `advertise_address` is not defined, `listen_address` is used.
@ -346,7 +346,7 @@ environment.
NOTE: **Note:**
Only file changes in the Web IDE are synced to the terminal.
Changes made in the terminal are **not** synced to the Web IDE.
This feature is only available for Kubernetes Runners.
This feature is only available for Kubernetes runners.
To enable file syncing to the web terminal, the `.gitlab/.gitlab-webide.yml`
file needs to have a `webide-file-sync` service configured. Here is an example
@ -373,7 +373,7 @@ terminal:
more information.
- `$CI_PROJECT_DIR` is a
[predefined environment variable](../../../ci/variables/predefined_variables.md)
for GitLab Runners. This is where your project's repository will be.
for GitLab Runner. This is where your project's repository will be.
Once you have configured the web terminal for file syncing, then when the web
terminal is started, a **Terminal** status will be visible in the status bar.

View File

@ -17,6 +17,10 @@ module API
# This is a separate method so that EE can redefine it.
%w(issues merge_requests milestones notes wiki_blobs commits blobs users)
end
def self.search_states
%w(all opened closed merged)
end
end
end
end

View File

@ -32,6 +32,7 @@ module API
search_params = {
scope: params[:scope],
search: params[:search],
state: params[:state],
snippets: snippets?,
page: params[:page],
per_page: params[:per_page]
@ -79,6 +80,7 @@ module API
type: String,
desc: 'The scope of the search',
values: Helpers::SearchHelpers.global_search_scopes
optional :state, type: String, desc: 'Filter results by state', values: Helpers::SearchHelpers.search_states
use :pagination
end
get do
@ -100,6 +102,7 @@ module API
type: String,
desc: 'The scope of the search',
values: Helpers::SearchHelpers.group_search_scopes
optional :state, type: String, desc: 'Filter results by state', values: Helpers::SearchHelpers.search_states
use :pagination
end
get ':id/(-/)search' do
@ -122,6 +125,7 @@ module API
desc: 'The scope of the search',
values: Helpers::SearchHelpers.project_search_scopes
optional :ref, type: String, desc: 'The name of a repository branch or tag. If not given, the default branch is used'
optional :state, type: String, desc: 'Filter results by state', values: Helpers::SearchHelpers.search_states
use :pagination
end
get ':id/(-/)search' do

View File

@ -0,0 +1,29 @@
# frozen_string_literal: true
module Banzai
module Filter
class AlertReferenceFilter < IssuableReferenceFilter
self.reference_type = :alert
def self.object_class
AlertManagement::Alert
end
def self.object_sym
:alert
end
def parent_records(parent, ids)
parent.alert_management_alerts.where(iid: ids.to_a)
end
def url_for_object(alert, project)
::Gitlab::Routing.url_helpers.details_project_alert_management_url(
project,
alert.iid,
only_path: context[:only_path]
)
end
end
end
end
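A rough sketch of the filter in action, mirroring the specs further down rather than documenting a public API (the project and an alert with IID 123 are assumed to exist):

```ruby
doc = Banzai::Filter::AlertReferenceFilter.call(
  'Investigating ^alert#123',
  project: project
)

doc.css('a').first['href']   # => details URL, e.g. ".../my-group/my-project/-/alert_management/123/details"
doc.css('a').first['class']  # => "gfm gfm-alert has-tooltip"
```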

View File

@ -59,7 +59,8 @@ module Banzai
Filter::CommitRangeReferenceFilter,
Filter::CommitReferenceFilter,
Filter::LabelReferenceFilter,
Filter::MilestoneReferenceFilter
Filter::MilestoneReferenceFilter,
Filter::AlertReferenceFilter
]
end

View File

@ -23,7 +23,8 @@ module Banzai
Filter::MergeRequestReferenceFilter,
Filter::SnippetReferenceFilter,
Filter::CommitRangeReferenceFilter,
Filter::CommitReferenceFilter
Filter::CommitReferenceFilter,
Filter::AlertReferenceFilter
]
end

View File

@ -0,0 +1,19 @@
# frozen_string_literal: true
module Banzai
module ReferenceParser
class AlertParser < BaseParser
self.reference_type = :alert
def references_relation
AlertManagement::Alert
end
private
def can_read_reference?(user, alert, node)
can?(user, :read_alert_management_alert, alert)
end
end
end
end
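For orientation, a sketch of how Banzai's redaction layer would use this parser (object setup hypothetical; `nodes_visible_to_user` is inherited from `BaseParser`):

```ruby
parser = Banzai::ReferenceParser::AlertParser.new(
  Banzai::RenderContext.new(project, current_user)
)

# Given rendered nodes carrying data-alert attributes, keep only those the
# user is allowed to read (:read_alert_management_alert).
links = doc.css('a[data-alert]')
visible_links = parser.nodes_visible_to_user(current_user, links)
```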

View File

@ -8,8 +8,6 @@ msgid ""
msgstr ""
"Project-Id-Version: gitlab 1.0.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2020-09-11 14:23+1000\n"
"PO-Revision-Date: 2020-09-11 14:23+1000\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"Language: \n"

View File

@ -9,7 +9,7 @@ module RuboCop
MSG = 'Use the `BulkInsertSafe` concern, instead of using `Gitlab::Database.bulk_insert`. See https://docs.gitlab.com/ee/development/insert_into_tables_in_batches.html'
def_node_matcher :raw_union?, <<~PATTERN
(send (const (const nil? :Gitlab) :Database) :bulk_insert ...)
(send (const (const _ :Gitlab) :Database) :bulk_insert ...)
PATTERN
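To see why `nil?` is loosened to `_` here: the old pattern matched only the unqualified constant, while the wildcard also accepts the cbase (`::`-prefixed) form. A standalone sketch using `rubocop-ast` directly (not part of the cop):

```ruby
require 'rubocop-ast'

old_pattern = RuboCop::AST::NodePattern.new(
  '(send (const (const nil? :Gitlab) :Database) :bulk_insert ...)'
)
new_pattern = RuboCop::AST::NodePattern.new(
  '(send (const (const _ :Gitlab) :Database) :bulk_insert ...)'
)

source = RuboCop::AST::ProcessedSource.new(
  '::Gitlab::Database.bulk_insert(:table, [])', RUBY_VERSION.to_f
)

old_pattern.match(source.ast) # => nil  (`::` parses to a cbase node, not nil)
new_pattern.match(source.ast) # => true (`_` matches any node, including cbase)
```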
def on_send(node)

View File

@ -247,6 +247,7 @@ RSpec.describe 'GitLab Markdown', :aggregate_failures do
expect(doc).to reference_commits
expect(doc).to reference_labels
expect(doc).to reference_milestones
expect(doc).to reference_alerts
end
aggregate_failures 'TaskListFilter' do

View File

@ -245,6 +245,15 @@ References should be parseable even inside _<%= merge_request.to_reference %>_ e
- Group milestone by name in quotes: <%= group_milestone.to_reference(format: :name) %>
- Group milestone by URL is ignored: <%= urls.milestone_url(group_milestone) %>
##### AlertReferenceFilter
- Alert: <%= alert.to_reference %>
- Alert in another project: <%= xalert.to_reference(project) %>
- Ignored in code: `<%= alert.to_reference %>`
- Ignored in links: [Link to <%= alert.to_reference %>](#alert-link)
- Alert by URL: <%= alert.details_url %>
- Link to alert by reference: [Alert](<%= alert.to_reference %>)
- Link to alert by URL: [Alert](<%= alert.details_url %>)
### Task Lists
- [ ] Incomplete task 1

View File

@ -0,0 +1,223 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Banzai::Filter::AlertReferenceFilter do
include FilterSpecHelper
let_it_be(:project) { create(:project, :public) }
let_it_be(:alert) { create(:alert_management_alert, project: project) }
let_it_be(:reference) { alert.to_reference }
it 'requires project context' do
expect { described_class.call('') }.to raise_error(ArgumentError, /:project/)
end
%w(pre code a style).each do |elem|
it "ignores valid references contained inside '#{elem}' element" do
exp = act = "<#{elem}>Alert #{reference}</#{elem}>"
expect(reference_filter(act).to_html).to eq exp
end
end
context 'internal reference' do
it 'links to a valid reference' do
doc = reference_filter("See #{reference}")
expect(doc.css('a').first.attr('href')).to eq alert.details_url
end
it 'links with adjacent text' do
doc = reference_filter("Alert (#{reference}.)")
expect(doc.to_html).to match(%r{\(<a.+>#{Regexp.escape(reference)}</a>\.\)})
end
it 'ignores invalid alert IDs' do
exp = act = "Alert #{invalidate_reference(reference)}"
expect(reference_filter(act).to_html).to eq exp
end
it 'includes a title attribute' do
doc = reference_filter("Alert #{reference}")
expect(doc.css('a').first.attr('title')).to eq alert.title
end
it 'escapes the title attribute' do
allow(alert).to receive(:title).and_return(%{"></a>whatever<a title="})
doc = reference_filter("Alert #{reference}")
expect(doc.text).to eq "Alert #{reference}"
end
it 'includes default classes' do
doc = reference_filter("Alert #{reference}")
expect(doc.css('a').first.attr('class')).to eq 'gfm gfm-alert has-tooltip'
end
it 'includes a data-project attribute' do
doc = reference_filter("Alert #{reference}")
link = doc.css('a').first
expect(link).to have_attribute('data-project')
expect(link.attr('data-project')).to eq project.id.to_s
end
it 'includes a data-alert attribute' do
doc = reference_filter("See #{reference}")
link = doc.css('a').first
expect(link).to have_attribute('data-alert')
expect(link.attr('data-alert')).to eq alert.id.to_s
end
it 'supports an :only_path context' do
doc = reference_filter("Alert #{reference}", only_path: true)
link = doc.css('a').first.attr('href')
expect(link).not_to match %r(https?://)
expect(link).to eq urls.details_project_alert_management_url(project, alert.iid, only_path: true)
end
end
context 'cross-project / cross-namespace complete reference' do
let_it_be(:namespace) { create(:namespace) }
let_it_be(:project2) { create(:project, :public, namespace: namespace) }
let_it_be(:alert) { create(:alert_management_alert, project: project2) }
let_it_be(:reference) { "#{project2.full_path}^alert##{alert.iid}" }
it 'links to a valid reference' do
doc = reference_filter("See #{reference}")
expect(doc.css('a').first.attr('href')).to eq alert.details_url
end
it 'link has valid text' do
doc = reference_filter("See (#{reference}.)")
expect(doc.css('a').first.text).to eql(reference)
end
it 'has valid text' do
doc = reference_filter("See (#{reference}.)")
expect(doc.text).to eql("See (#{reference}.)")
end
it 'ignores invalid alert IDs on the referenced project' do
exp = act = "See #{invalidate_reference(reference)}"
expect(reference_filter(act).to_html).to eq exp
end
end
context 'cross-project / same-namespace complete reference' do
let_it_be(:namespace) { create(:namespace) }
let_it_be(:project) { create(:project, :public, namespace: namespace) }
let_it_be(:project2) { create(:project, :public, namespace: namespace) }
let_it_be(:alert) { create(:alert_management_alert, project: project2) }
let_it_be(:reference) { "#{project2.full_path}^alert##{alert.iid}" }
it 'links to a valid reference' do
doc = reference_filter("See #{reference}")
expect(doc.css('a').first.attr('href')).to eq alert.details_url
end
it 'link has valid text' do
doc = reference_filter("See (#{project2.path}^alert##{alert.iid}.)")
expect(doc.css('a').first.text).to eql("#{project2.path}^alert##{alert.iid}")
end
it 'has valid text' do
doc = reference_filter("See (#{project2.path}^alert##{alert.iid}.)")
expect(doc.text).to eql("See (#{project2.path}^alert##{alert.iid}.)")
end
it 'ignores invalid alert IDs on the referenced project' do
exp = act = "See #{invalidate_reference(reference)}"
expect(reference_filter(act).to_html).to eq exp
end
end
context 'cross-project shorthand reference' do
let_it_be(:namespace) { create(:namespace) }
let_it_be(:project) { create(:project, :public, namespace: namespace) }
let_it_be(:project2) { create(:project, :public, namespace: namespace) }
let_it_be(:alert) { create(:alert_management_alert, project: project2) }
let_it_be(:reference) { "#{project2.path}^alert##{alert.iid}" }
it 'links to a valid reference' do
doc = reference_filter("See #{reference}")
expect(doc.css('a').first.attr('href')).to eq alert.details_url
end
it 'link has valid text' do
doc = reference_filter("See (#{project2.path}^alert##{alert.iid}.)")
expect(doc.css('a').first.text).to eql("#{project2.path}^alert##{alert.iid}")
end
it 'has valid text' do
doc = reference_filter("See (#{project2.path}^alert##{alert.iid}.)")
expect(doc.text).to eql("See (#{project2.path}^alert##{alert.iid}.)")
end
it 'ignores invalid alert IDs on the referenced project' do
exp = act = "See #{invalidate_reference(reference)}"
expect(reference_filter(act).to_html).to eq exp
end
end
context 'cross-project URL reference' do
let_it_be(:namespace) { create(:namespace, name: 'cross-reference') }
let_it_be(:project2) { create(:project, :public, namespace: namespace) }
let_it_be(:alert) { create(:alert_management_alert, project: project2) }
let_it_be(:reference) { alert.details_url }
it 'links to a valid reference' do
doc = reference_filter("See #{reference}")
expect(doc.css('a').first.attr('href')).to eq alert.details_url
end
it 'links with adjacent text' do
doc = reference_filter("See (#{reference}.)")
expect(doc.to_html).to match(%r{\(<a.+>#{Regexp.escape(alert.to_reference(project))}</a>\.\)})
end
it 'ignores invalid alert IDs on the referenced project' do
act = "See #{invalidate_reference(reference)}"
expect(reference_filter(act).to_html).to match(%r{<a.+>#{Regexp.escape(invalidate_reference(reference))}</a>})
end
end
context 'group context' do
let_it_be(:group) { create(:group) }
it 'links to a valid reference' do
reference = "#{project.full_path}^alert##{alert.iid}"
result = reference_filter("See #{reference}", { project: nil, group: group })
expect(result.css('a').first.attr('href')).to eq(alert.details_url)
end
it 'ignores internal references' do
exp = act = "See ^alert##{alert.iid}"
expect(reference_filter(act, project: nil, group: group).to_html).to eq exp
end
end
end

View File

@ -0,0 +1,49 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Banzai::ReferenceParser::AlertParser do
include ReferenceParserHelpers
let(:project) { create(:project, :public) }
let(:user) { create(:user) }
let(:alert) { create(:alert_management_alert, project: project) }
subject { described_class.new(Banzai::RenderContext.new(project, user)) }
let(:link) { empty_html_link }
describe '#nodes_visible_to_user' do
context 'when the link has a data-alert attribute' do
before do
link['data-alert'] = alert.id.to_s
end
it_behaves_like "referenced feature visibility", "issues", "merge_requests" do
before do
project.add_developer(user) if enable_user?
end
end
end
end
describe '#referenced_by' do
describe 'when the link has a data-alert attribute' do
context 'using an existing alert ID' do
it 'returns an Array of alerts' do
link['data-alert'] = alert.id.to_s
expect(subject.referenced_by([link])).to eq([alert])
end
end
context 'using a non-existing alert ID' do
it 'returns an empty Array' do
link['data-alert'] = ''
expect(subject.referenced_by([link])).to eq([])
end
end
end
end
end

View File

@ -332,8 +332,39 @@ RSpec.describe AlertManagement::Alert do
end
end
describe '.reference_pattern' do
subject { described_class.reference_pattern }
it { is_expected.to match('gitlab-org/gitlab^alert#123') }
end
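
Since the pattern defines a named capture, a hedged sketch of how a consumer might extract the IID (the reference string is illustrative):

match = described_class.reference_pattern.match('gitlab-org/gitlab^alert#123')
match[:alert] # => "123" (the alert IID, as a string)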
describe '.link_reference_pattern' do
subject { described_class.link_reference_pattern }
it { is_expected.to match(triggered_alert.details_url) }
it { is_expected.not_to match("#{Gitlab.config.gitlab.url}/gitlab-org/gitlab/alert_management/123") }
it { is_expected.not_to match("#{Gitlab.config.gitlab.url}/gitlab-org/gitlab/issues/123") }
it { is_expected.not_to match("gitlab-org/gitlab/-/alert_management/123") }
end
describe '.reference_valid?' do
using RSpec::Parameterized::TableSyntax
where(:ref, :result) do
'123456' | true
'1' | true
'-1' | false
nil | false
'123456891012345678901234567890' | false
end
with_them do
it { expect(described_class.reference_valid?(ref)).to eq(result) }
end
end
describe '#to_reference' do
it { expect(triggered_alert.to_reference).to eq("^alert##{triggered_alert.iid}") }
end
describe '#trigger' do

View File

@ -0,0 +1,15 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe AuthenticationEvent do
describe 'associations' do
it { is_expected.to belong_to(:user).optional }
end
describe 'validations' do
it { is_expected.to validate_presence_of(:provider) }
it { is_expected.to validate_presence_of(:user_name) }
it { is_expected.to validate_presence_of(:result) }
end
end
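
A minimal creation sketch for the new model, assuming the columns from its migration; current_user and request stand in for whatever the calling controller provides:

AuthenticationEvent.create!(
  user: current_user,            # optional; nullified if the user is later deleted
  provider: 'standard',          # illustrative provider name, limited to 64 chars
  user_name: current_user.name,  # limited to 255 chars
  ip_address: request.remote_ip, # stored as inet
  result: :success               # enum: failed (0) or success (1)
)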

View File

@ -47,6 +47,17 @@ RSpec.describe API::Search do
end
end
shared_examples 'filter by state' do |scope:, search:|
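# Relies on the including context to define `state` via let(:state).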
it 'respects state filtering' do
get api(endpoint, user), params: { scope: scope, search: search, state: state }
documents = Gitlab::Json.parse(response.body)
expect(documents.count).to eq(1)
expect(documents.first['state']).to eq(state)
end
end
describe 'GET /search' do
let(:endpoint) { '/search' }
@ -88,42 +99,84 @@ RSpec.describe API::Search do
end
context 'for issues scope' do
context 'without filtering by state' do
before do
create(:issue, project: project, title: 'awesome issue')
get api(endpoint, user), params: { scope: 'issues', search: 'awesome' }
end
it_behaves_like 'response is correct', schema: 'public_api/v4/issues'
it_behaves_like 'ping counters', scope: :issues
describe 'pagination' do
before do
create(:issue, project: project, title: 'another issue')
end
include_examples 'pagination', scope: :issues
end
end
context 'filter by state' do
before do
create(:issue, project: project, title: 'awesome opened issue')
create(:issue, :closed, project: project, title: 'awesome closed issue')
end
context 'state: opened' do
let(:state) { 'opened' }
include_examples 'filter by state', scope: :issues, search: 'awesome'
end
context 'state: closed' do
let(:state) { 'closed' }
include_examples 'filter by state', scope: :issues, search: 'awesome'
end
end
end
context 'for merge_requests scope' do
context 'without filtering by state' do
before do
create(:merge_request, source_project: repo_project, title: 'awesome mr')
get api(endpoint, user), params: { scope: 'merge_requests', search: 'awesome' }
end
it_behaves_like 'response is correct', schema: 'public_api/v4/merge_requests'
it_behaves_like 'ping counters', scope: :merge_requests
describe 'pagination' do
before do
create(:merge_request, source_project: repo_project, title: 'another mr', target_branch: 'another_branch')
end
include_examples 'pagination', scope: :merge_requests
end
end
context 'filter by state' do
before do
create(:merge_request, source_project: project, title: 'awesome opened mr')
create(:merge_request, :closed, source_project: project, title: 'awesome closed mr')
end
context 'state: opened' do
let(:state) { 'opened' }
include_examples 'filter by state', scope: :merge_requests, search: 'awesome'
end
context 'state: closed' do
let(:state) { 'closed' }
include_examples 'filter by state', scope: :merge_requests, search: 'awesome'
end
end
end

View File

@ -13,7 +13,14 @@ RSpec.describe RuboCop::Cop::Gitlab::BulkInsert, type: :rubocop do
it 'flags the use of Gitlab::Database.bulk_insert' do
expect_offense(<<~SOURCE)
Gitlab::Database.bulk_insert('merge_request_diff_files', rows)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #{RuboCop::Cop::Gitlab::BulkInsert::MSG}
SOURCE
end
it 'flags the use of ::Gitlab::Database.bulk_insert' do
expect_offense(<<~SOURCE)
::Gitlab::Database.bulk_insert('merge_request_diff_files', rows)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #{RuboCop::Cop::Gitlab::BulkInsert::MSG}
SOURCE
end
end
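
For context, a sketch of the pattern the cop steers toward, per the docs it links (MyModel and records are illustrative):

class MyModel < ApplicationRecord
  include BulkInsertSafe # adds a class-level bulk_insert! with validations and batching
end

MyModel.bulk_insert!(records) # records are MyModel instances, not raw row hashes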

View File

@ -87,6 +87,10 @@ class MarkdownFeature
@group_milestone ||= create(:milestone, name: 'group-milestone', group: group)
end
def alert
@alert ||= create(:alert_management_alert, project: project)
end
# Cross-references -----------------------------------------------------------
def xproject
@ -125,6 +129,10 @@ class MarkdownFeature
@xmilestone ||= create(:milestone, project: xproject)
end
def xalert
@xalert ||= create(:alert_management_alert, project: xproject)
end
def urls
Gitlab::Routing.url_helpers
end

View File

@ -174,6 +174,15 @@ module MarkdownMatchers
end
end
# AlertReferenceFilter
matcher :reference_alerts do
set_default_markdown_messages
match do |actual|
expect(actual).to have_selector('a.gfm.gfm-alert', count: 5)
end
end
# TaskListFilter
matcher :parse_task_lists do
set_default_markdown_messages

View File

@ -1,6 +1,7 @@
# frozen_string_literal: true
RSpec.shared_examples "referenced feature visibility" do |*related_features|
let(:enable_user?) { false }
let(:feature_fields) do
related_features.map { |feature| (feature + "_access_level").to_sym }
end
@ -35,8 +36,11 @@ RSpec.shared_examples "referenced feature visibility" do |*related_features|
end
context "when feature is enabled" do
# The project is public
# Allows implementing specs to enable finer-tuned permissions
let(:enable_user?) { true }
it "creates reference" do
set_features_fields_to(ProjectFeature::ENABLED)
expect(subject.nodes_visible_to_user(user, [link])).to eq([link])