Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2021-06-30 18:07:05 +00:00
parent 9376fdc13e
commit 533f020a0b
50 changed files with 515 additions and 243 deletions

View file

@ -1 +1 @@
21e7c85471a8d7401fad69be300eaff1c0384577
82a7a8e90f5bf3f0cae18d158a28eb8a7a1693c6

Gemfile
View file

@ -335,13 +335,9 @@ gem 'peek', '~> 1.1'
gem 'snowplow-tracker', '~> 0.6.1'
# Metrics
group :metrics do
gem 'method_source', '~> 1.0', require: false
gem 'webrick', '~> 1.6.1', require: false
# Prometheus
gem 'prometheus-client-mmap', '~> 0.12.0'
end
gem 'method_source', '~> 1.0', require: false
gem 'webrick', '~> 1.6.1', require: false
gem 'prometheus-client-mmap', '~> 0.12.0', require: 'prometheus/client'
group :development do
gem 'lefthook', '~> 0.7.0', require: false
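
The notable change here is `require: 'prometheus/client'`, which asks Bundler to auto-require that path when `Bundler.require` runs; it is why explicit `require 'prometheus/client'` lines can be removed elsewhere in this commit. A minimal, hypothetical Gemfile sketch of the two `require:` forms used above:

```ruby
# Hypothetical Gemfile fragment, not this repository's actual Gemfile.
source 'https://rubygems.org'

# A string value makes `Bundler.require` load 'prometheus/client' instead of a
# file named after the gem, so no explicit `require 'prometheus/client'` is needed.
gem 'prometheus-client-mmap', '~> 0.12.0', require: 'prometheus/client'

# `require: false` installs the gem but leaves requiring it to the caller.
gem 'method_source', '~> 1.0', require: false
```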

View file

@ -1,4 +1,33 @@
const supportedMethods = ['patch', 'post', 'put'];
const SUPPORTED_METHODS = ['patch', 'post', 'put'];
function needsCaptchaResponse(err) {
return (
SUPPORTED_METHODS.includes(err?.config?.method) && err?.response?.data?.needs_captcha_response
);
}
const showCaptchaModalAndResubmit = async (axios, data, errConfig) => {
// NOTE: We asynchronously import and unbox the module. Since this is included globally, we don't
// do a regular import because that would increase the size of the webpack bundle.
const { waitForCaptchaToBeSolved } = await import('~/captcha/wait_for_captcha_to_be_solved');
// show the CAPTCHA modal and wait for it to be solved or closed
const captchaResponse = await waitForCaptchaToBeSolved(data.captcha_site_key);
// resubmit the original request with the captcha_response and spam_log_id in the headers
const originalData = JSON.parse(errConfig.data);
const originalHeaders = errConfig.headers;
return axios({
method: errConfig.method,
url: errConfig.url,
headers: {
...originalHeaders,
'X-GitLab-Captcha-Response': captchaResponse,
'X-GitLab-Spam-Log-Id': data.spam_log_id,
},
data: originalData,
});
};
export function registerCaptchaModalInterceptor(axios) {
return axios.interceptors.response.use(
@ -6,29 +35,8 @@ export function registerCaptchaModalInterceptor(axios) {
return response;
},
(err) => {
if (
supportedMethods.includes(err?.config?.method) &&
err?.response?.data?.needs_captcha_response
) {
const { data } = err.response;
const captchaSiteKey = data.captcha_site_key;
const spamLogId = data.spam_log_id;
// eslint-disable-next-line promise/no-promise-in-callback
return import('~/captcha/wait_for_captcha_to_be_solved')
.then(({ waitForCaptchaToBeSolved }) => waitForCaptchaToBeSolved(captchaSiteKey))
.then((captchaResponse) => {
const errConfig = err.config;
const originalData = JSON.parse(errConfig.data);
return axios({
method: errConfig.method,
url: errConfig.url,
data: {
...originalData,
captcha_response: captchaResponse,
spam_log_id: spamLogId,
},
});
});
if (needsCaptchaResponse(err)) {
return showCaptchaModalAndResubmit(axios, err.response.data, err.config);
}
return Promise.reject(err);

View file

@ -118,7 +118,10 @@ export const fetchDiffFilesBatch = ({ commit, state, dispatch }) => {
commit(types.SET_BATCH_LOADING, false);
if (window.gon?.features?.diffsVirtualScrolling && !scrolledVirtualScroller) {
const index = state.diffFiles.findIndex((f) => f.file_hash === hash);
const index = state.diffFiles.findIndex(
(f) =>
f.file_hash === hash || f[INLINE_DIFF_LINES_KEY].find((l) => l.line_code === hash),
);
if (index >= 0) {
eventHub.$emit('scrollToIndex', index);

View file

@ -1,11 +1,9 @@
import { registerCaptchaModalInterceptor } from '~/captcha/captcha_modal_axios_interceptor';
import axios from '../../lib/utils/axios_utils';
export default class Service {
constructor(endpoint) {
this.endpoint = `${endpoint}.json`;
this.realtimeEndpoint = `${endpoint}/realtime_changes`;
registerCaptchaModalInterceptor(axios);
}
getData() {

View file

@ -1,4 +1,5 @@
import axios from 'axios';
import { registerCaptchaModalInterceptor } from '~/captcha/captcha_modal_axios_interceptor';
import setupAxiosStartupCalls from './axios_startup_calls';
import csrf from './csrf';
import suppressAjaxErrorsDuringNavigation from './suppress_ajax_errors_during_navigation';
@ -41,6 +42,8 @@ axios.interceptors.response.use(
(err) => suppressAjaxErrorsDuringNavigation(err, isUserNavigating),
);
registerCaptchaModalInterceptor(axios);
export default axios;
/**

View file

@ -47,17 +47,13 @@ module SpammableActions
end
end
# TODO: This method is currently only needed for issue create and update. It can be removed when:
#
# 1. Issue create is converted to a client/JS based approach instead of the legacy HAML
# `_recaptcha_form.html.haml` which is rendered via the `projects/issues/verify` template.
# In this case, which is based on the legacy reCAPTCHA implementation using the HTML/HAML form,
# the 'g-recaptcha-response' field name comes from `Recaptcha::ClientHelper#recaptcha_tags` in the
# recaptcha gem, which is called from the HAML `_recaptcha_form.html.haml` form.
# 2. Issue update is converted to use the headers-based approach, which will require adding
# support to captcha_modal_axios_interceptor.js like we have already added to
# apollo_captcha_link.js.
# In this case, the `captcha_response` field name comes from our captcha_modal_axios_interceptor.js.
# TODO: This method is currently only needed for issue create. It converts spam/CAPTCHA values from
# params into headers, which is the form the spam services now all expect. It can be removed
# when issue create is converted to a client/JS based approach instead of the legacy HAML
# `_recaptcha_form.html.haml` which is rendered via the `projects/issues/verify` template.
# In that case, which is based on the legacy reCAPTCHA implementation using the HTML/HAML form,
# the 'g-recaptcha-response' field name comes from `Recaptcha::ClientHelper#recaptcha_tags` in the
# recaptcha gem, which is called from the HAML `_recaptcha_form.html.haml` form.
def extract_legacy_spam_params_to_headers
request.headers['X-GitLab-Captcha-Response'] = params['g-recaptcha-response'] || params[:captcha_response]
request.headers['X-GitLab-Spam-Log-Id'] = params[:spam_log_id]
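
For context, a hedged sketch of how a spam-params object could consume the headers written above; the class below is illustrative only and is not the actual `Spam::SpamParams` implementation:

```ruby
# Illustrative sketch only -- not the real Spam::SpamParams class.
class SpamParamsSketch
  attr_reader :captcha_response, :spam_log_id

  # Reads the same headers that extract_legacy_spam_params_to_headers sets.
  def self.new_from_request(request:)
    new(
      captcha_response: request.headers['X-GitLab-Captcha-Response'],
      spam_log_id: request.headers['X-GitLab-Spam-Log-Id']
    )
  end

  def initialize(captcha_response:, spam_log_id:)
    @captcha_response = captcha_response
    @spam_log_id = spam_log_id
  end
end
```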

View file

@ -52,7 +52,8 @@ class Projects::CommitController < Projects::ApplicationController
# rubocop: disable CodeReuse/ActiveRecord
def pipelines
@pipelines = @commit.pipelines.order(id: :desc)
@pipelines = @pipelines.where(ref: params[:ref]).page(params[:page]).per(30) if params[:ref]
@pipelines = @pipelines.where(ref: params[:ref]) if params[:ref]
@pipelines = @pipelines.page(params[:page])
respond_to do |format|
format.html

View file

@ -336,7 +336,6 @@ class Projects::IssuesController < Projects::ApplicationController
end
def update_service
extract_legacy_spam_params_to_headers
spam_params = ::Spam::SpamParams.new_from_request(request: request)
::Issues::UpdateService.new(project: project, current_user: current_user, params: issue_params, spam_params: spam_params)
end

View file

@ -167,7 +167,7 @@ class Projects::MergeRequestsController < Projects::MergeRequests::ApplicationCo
def pipelines
set_pipeline_variables
@pipelines = @pipelines.page(params[:page]).per(30)
@pipelines = @pipelines.page(params[:page])
Gitlab::PollingInterval.set_header(response, interval: 10_000)

View file

@ -42,7 +42,6 @@ class Projects::PipelinesController < Projects::ApplicationController
.new(project, current_user, index_params)
.execute
.page(params[:page])
.per(20)
@pipelines_count = limited_pipelines_count(project)

View file

@ -29,6 +29,8 @@ module Ci
BridgeStatusError = Class.new(StandardError)
paginates_per 15
sha_attribute :source_sha
sha_attribute :target_sha
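
This model-level default is what lets the controllers above drop their explicit `.per(30)` and `.per(20)` calls. A minimal sketch of Kaminari's `paginates_per` behavior, using a hypothetical model:

```ruby
# Hypothetical model; assumes Kaminari is loaded, as in a Rails app that uses it.
class ExamplePipeline < ApplicationRecord
  paginates_per 15 # sets ExamplePipeline.default_per_page to 15
end

ExamplePipeline.default_per_page # => 15
ExamplePipeline.page(2)          # returns up to 15 records per page by default
ExamplePipeline.page(2).per(5)   # an explicit `.per` still overrides the default
```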

View file

@ -54,9 +54,6 @@ Rails.application.configure do
# Enable serving of images, stylesheets, and JavaScripts from an asset server
config.action_controller.asset_host = ENV['GITLAB_CDN_HOST'] if ENV['GITLAB_CDN_HOST'].present?
# We use an env var to keep the old default until we enable this for GitLab.com
config.active_record.legacy_connection_handling = !Gitlab::Utils.to_boolean(ENV.fetch('ENABLE_RAILS_61_CONNECTION_HANDLING', false))
# Do not dump schema after migrations.
config.active_record.dump_schema_after_migration = false

View file

@ -1,8 +0,0 @@
---
name: gitlab_experiment_middleware
introduced_by_url:
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/323643
milestone: '14.1'
type: development
group: group::adoption
default_enabled: false

View file

@ -1,7 +1,5 @@
# frozen_string_literal: true
require 'prometheus/client'
# Keep separate directories for separate processes
def prometheus_default_multiproc_dir
return unless Rails.env.development? || Rails.env.test?

View file

@ -7,17 +7,3 @@ Gitlab::Experiment.configure do |config|
pool: ->(&block) { Gitlab::Redis::SharedState.with { |redis| block.call(redis) } }
)
end
# TODO: This shim should be removed after the feature flag is rolled out, as
# it only exists to facilitate the feature flag control of the behavior.
module Gitlab::Experiment::MiddlewareWithFeatureFlags
attr_reader :app
def call(env)
return app.call(env) unless Feature.enabled?(:gitlab_experiment_middleware)
super
end
end
Gitlab::Experiment::Middleware.prepend(Gitlab::Experiment::MiddlewareWithFeatureFlags)

View file

@ -0,0 +1,15 @@
# frozen_string_literal: true
class RemovePartialIndexForHashedStorageMigration < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
disable_ddl_transaction!
def up
remove_concurrent_index :projects, :id, name: 'index_on_id_partial_with_legacy_storage'
end
def down
add_concurrent_index :projects, :id, where: 'storage_version < 2 or storage_version IS NULL', name: 'index_on_id_partial_with_legacy_storage'
end
end

View file

@ -0,0 +1 @@
966299fecd160b594f0837f19cc01b38fc365fa749982f9245c296d912e3eb2f

View file

@ -24076,8 +24076,6 @@ CREATE INDEX index_oauth_openid_requests_on_access_grant_id ON oauth_openid_requ
CREATE UNIQUE INDEX index_on_deploy_keys_id_and_type_and_public ON keys USING btree (id, type) WHERE (public = true);
CREATE INDEX index_on_id_partial_with_legacy_storage ON projects USING btree (id) WHERE ((storage_version < 2) OR (storage_version IS NULL));
CREATE INDEX index_on_identities_lower_extern_uid_and_provider ON identities USING btree (lower((extern_uid)::text), provider);
CREATE UNIQUE INDEX index_on_instance_statistics_recorded_at_and_identifier ON analytics_usage_trends_measurements USING btree (identifier, recorded_at);

View file

@ -0,0 +1,14 @@
---
# Error: gitlab.HeaderGerunds
#
# Checks for headers that start with gerunds (ing words).
# Related to: https://docs.gitlab.com/ee/development/documentation/structure.html
#
# For a list of all options, see https://errata-ai.gitbook.io/vale/getting-started/styles
extends: substitution
message: 'Can this header start with an imperative verb, instead of a gerund (ing word)?'
link: https://docs.gitlab.com/ee/development/documentation/styleguide/#heading-titles
level: suggestion
scope: heading
swap:
- '^\w*ing.*': 'Troubleshooting'

View file

@ -29,6 +29,21 @@ hosts or use IP ranges:
---
**For installations using cloud native Helm charts**
You can set the required IPs under the `gitlab.webservice.monitoring.ipWhitelist` key. For example:
```yaml
gitlab:
webservice:
monitoring:
# Monitoring IP whitelist
ipWhitelist:
- 0.0.0.0/0 # Default
```
---
**For installations from source**
1. Edit `config/gitlab.yml`:

View file

@ -541,7 +541,7 @@ You must use a Kubernetes network plugin that implements support for
`NetworkPolicy`. The default network plugin for Kubernetes (`kubenet`)
[does not implement](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#kubenet)
support for it. The [Cilium](https://cilium.io/) network plugin can be
installed as a [cluster application](../../user/clusters/applications.md#install-cilium-using-gitlab-cicd)
installed as a [cluster application](../../user/project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium)
to enable support for network policies.
You can enable deployment of a network policy by setting the following
@ -577,7 +577,7 @@ networkPolicy:
```
For more information on installing Network Policies, see
[Install Cilium using GitLab CI/CD](../../user/clusters/applications.md#install-cilium-using-gitlab-cicd).
[Use the Cluster Management Template to Install Cilium](../../user/project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium).
### Cilium Network Policy
@ -596,7 +596,7 @@ As the default network plugin for Kubernetes (`kubenet`)
support for it, you must have [Cilium](https://docs.cilium.io/en/v1.8/intro/) as your Kubernetes network plugin.
The [Cilium](https://cilium.io/) network plugin can be
installed as a [cluster application](../../user/clusters/applications.md#install-cilium-using-gitlab-cicd)
installed with a [cluster management project template](../../user/project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium)
to enable support for network policies.
#### Configuration
@ -643,11 +643,10 @@ ciliumNetworkPolicy:
enabled: true
alerts:
enabled: true
```
For more information on installing Network Policies, see
[Install Cilium using GitLab CI/CD](../../user/clusters/applications.md#install-cilium-using-gitlab-cicd).
[Use the Cluster Management Template to Install Cilium](../../user/project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium).
### Running commands in the container

View file

@ -50,9 +50,9 @@ The following table shows the supported metrics, at which level they are support
| Metric | Level | API version | Chart (UI) version | Comments |
|---------------------------|---------------------|--------------------------------------|---------------------------------------|-----------|
| `deployment_frequency` | Project-level | [13.7+](../../api/dora/metrics.md) | [13.8+](#deployment-frequency-charts) | The [old API endpoint](../../api/dora4_project_analytics.md) was [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/323713) in 13.10. |
| `deployment_frequency` | Group-level | [13.10+](../../api/dora/metrics.md) | To be supported | |
| `deployment_frequency` | Group-level | [13.10+](../../api/dora/metrics.md) | [13.12+](#deployment-frequency-charts) | |
| `lead_time_for_changes` | Project-level | [13.10+](../../api/dora/metrics.md) | [13.11+](#lead-time-charts) | Unit in seconds. Aggregation method is median. |
| `lead_time_for_changes` | Group-level | [13.10+](../../api/dora/metrics.md) | To be supported | Unit in seconds. Aggregation method is median. |
| `lead_time_for_changes` | Group-level | [13.10+](../../api/dora/metrics.md) | [14.0+](#lead-time-charts) | Unit in seconds. Aggregation method is median. |
| `change_failure_rate` | Project/Group-level | To be supported | To be supported | |
| `time_to_restore_service` | Project/Group-level | To be supported | To be supported | |

View file

@ -27,20 +27,19 @@ your application's Kubernetes namespace. This section has the following
prerequisites:
- Your project contains at least one [environment](../../../ci/environments/index.md)
- You've [installed Cilium](../../clusters/applications.md#install-cilium-using-gitlab-cicd)
- You've [installed Cilium](../../project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium)
- You've configured the [Prometheus service](../../project/integrations/prometheus.md#enabling-prometheus-integration)
If you're using custom Helm values for Cilium, you must enable Hubble
with flow metrics for each namespace by adding the following lines to
your [Cilium values](../../clusters/applications.md#install-cilium-using-gitlab-cicd):
your [Cilium values](../../project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium):
```yaml
global:
hubble:
enabled: true
metrics:
enabled:
- 'flow:sourceContext=namespace;destinationContext=namespace'
hubble:
enabled: true
metrics:
enabled:
- 'flow:sourceContext=namespace;destinationContext=namespace'
```
The **Container Network Policy** section displays the following information
@ -54,7 +53,11 @@ about your packet flow:
If a significant percentage of packets is dropped, you should
investigate it for potential threats by
[examining the Cilium logs](../../clusters/applications.md#install-cilium-using-gitlab-cicd).
examining the Cilium logs:
```shell
kubectl -n gitlab-managed-apps logs -l k8s-app=cilium -c cilium-monitor
```
## Container Network Policy management
@ -67,7 +70,7 @@ status, and create and edit deployed policies. This section has the
following prerequisites:
- Your project contains at least one [environment](../../../ci/environments/index.md)
- You've [installed Cilium](../../clusters/applications.md#install-cilium-using-gitlab-cicd)
- You've [installed Cilium](../../project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium)
Network policies are fetched directly from the selected environment's
deployment platform. Changes performed outside of this tab are

View file

@ -448,42 +448,21 @@ There are several components that work in concert for the Agent to generate the
- A working Kubernetes cluster.
- Cilium integration through either of these options:
- Installation through [GitLab Managed Apps](../applications.md#install-cilium-using-gitlab-cicd).
- Installation through [cluster management template](../../project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium).
- Enablement of [hubble-relay](https://docs.cilium.io/en/v1.8/concepts/overview/#hubble) on an
existing installation.
- One or more network policies through any of these options:
- Use the [Container Network Policy editor](../../application_security/threat_monitoring/index.md#container-network-policy-editor) to create and manage policies.
- Use an [AutoDevOps](../../application_security/threat_monitoring/index.md#container-network-policy-management) configuration.
- Add the required labels and annotations to existing network policies.
- Use a configuration repository to inform the Agent through a `config.yaml` file, which
repositories can synchronize with. This repository might be the same, or a separate GitLab
project.
- A configuration repository with [Cilium configured in `config.yaml`](repository.md#surface-network-security-alerts-from-cluster-to-gitlab)
The setup process follows the same steps as [GitOps](#get-started-with-gitops-and-the-gitlab-agent),
with the following differences:
- When you define a configuration repository, you must do so with [Cilium settings](#define-a-configuration-repository-with-cilium-settings).
- When you define a configuration repository, you must do so with [Cilium settings](repository.md#surface-network-security-alerts-from-cluster-to-gitlab).
- You do not need to specify the `gitops` configuration section.
### Define a configuration repository with Cilium settings
You need a GitLab repository to contain your Agent configuration. The minimal repository layout
looks like this:
```plaintext
.gitlab/agents/<agent-name>/config.yaml
```
Your `config.yaml` file must specify the `host` and `port` of your Hubble Relay service. If your
Cilium integration was performed through [GitLab Managed Apps](../applications.md#install-cilium-using-gitlab-cicd),
you can use `hubble-relay.gitlab-managed-apps.svc.cluster.local:80`:
```yaml
cilium:
hubble_relay_address: "<hubble-relay-host>:<hubble-relay-port>"
...
```
## Management interfaces
Users with at least the [Developer role](../../permissions.md) can access the user interface

View file

@ -157,7 +157,9 @@ cilium:
hubble_relay_address: "<hubble-relay-host>:<hubble-relay-port>"
```
If your Cilium integration was performed through GitLab Managed Apps, you can use `hubble-relay.gitlab-managed-apps.svc.cluster.local:80` as the address:
If your Cilium integration was performed through [GitLab Managed Apps](../applications.md#install-cilium-using-gitlab-cicd) or the
[cluster management template](../../project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium),
you can use `hubble-relay.gitlab-managed-apps.svc.cluster.local:80` as the address:
```yaml
cilium:

View file

@ -460,7 +460,7 @@ You can check Cilium's installation status on the cluster management page:
WARNING:
Installation and removal of Cilium require a **manual**
[restart](https://docs.cilium.io/en/stable/gettingstarted/k8s-install-gke/#restart-unmanaged-pods)
[restart](https://docs.cilium.io/en/stable/gettingstarted/k8s-install-helm/#restart-unmanaged-pods)
of all affected pods in all namespaces to ensure that they are
[managed](https://docs.cilium.io/en/v1.8/operations/troubleshooting/#ensure-managed-pod)
by the correct networking plugin. Whenever Hubble is enabled, its related pod might require a

Binary image files changed (not shown): one removed (158 KiB), one added (1.5 KiB).

View file

@ -65,7 +65,7 @@ You can create a thread without replying to a standard comment.
Prerequisites:
- You must have at least the [Guest role](../permissions.md#project-members-permissions).
- You must be in an issue, commit, snippet, or merge request.
- You must be in an issue, merge request, commit, or snippet.
To create a thread:
@ -95,25 +95,30 @@ You can edit your own comment at any time.
Anyone with the [Maintainer role](../permissions.md) or
higher can also edit a comment made by someone else.
## Resolvable comments and threads
## Resolve a thread
> - [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/5022) in GitLab 8.11.
> - Resolvable threads can be added only to merge request diffs.
> - Resolving comments individually was [removed](https://gitlab.com/gitlab-org/gitlab/-/issues/28750) in GitLab 13.6.
Thread resolution helps keep track of progress during planning or code review.
You can resolve a thread when you want to finish a conversation.
Every thread in merge requests, commits, commit diffs, and
snippets is initially displayed as unresolved. They can then be individually resolved by anyone
with at least the Developer role to the project or by the author of the change being reviewed.
If the thread has been resolved and a non-member un-resolves their own response,
this also unresolves the discussion thread.
If the non-member then resolves this same response, this resolves the discussion thread.
Prerequisites:
The need to resolve threads prevents you from forgetting to address feedback and lets you
hide threads that are no longer relevant.
- You must have at least the [Developer role](../permissions.md#project-members-permissions)
or be the author of the change being reviewed.
- You must be in an issue, merge request, commit, or snippet.
!["A thread between two people on a piece of code"](img/thread_view.png)
To resolve a thread:
1. Go to the thread.
1. Below the last reply, in the **Reply** field, either:
- Select **Resolve thread**.
- Enter text, select the **Resolve thread** checkbox, and select **Add comment now**.
At the top of the page, the number of unresolved threads is updated.
![Count of unresolved threads](img/unresolved_threads_v14_1.png)
### Commit threads in the context of a merge request

View file

@ -136,7 +136,7 @@ merge requests, code snippets, and commits.
When performing inline reviews to implementations
to your codebase through merge requests you can
gather feedback through [resolvable threads](discussions/index.md#resolvable-comments-and-threads).
gather feedback through [resolvable threads](discussions/index.md#resolve-a-thread).
### GitLab Flavored Markdown (GFM)

View file

@ -532,12 +532,24 @@ If you get this error, ensure that:
### `npm publish` returns `npm ERR! 400 Bad Request`
If you get this error, your package name may not meet the
If you get this error, one of the following problems could be causing it.
#### Package name does not meet the naming convention
Your package name may not meet the
[`@scope/package-name` package naming convention](#package-naming-convention).
Ensure the name meets the convention exactly, including the case.
Then try to publish again.
#### Package already exists
Your package has already been published to another project in the same
root namespace and therefore cannot be published again using the same name.
This is true even if the previously published package has the same name but a
different version.
### `npm publish` returns `npm ERR! 500 Internal Server Error - PUT`
This is a [known issue](https://gitlab.com/gitlab-org/gitlab/-/issues/238950) in GitLab

View file

@ -24,16 +24,110 @@ The following steps are recommended to install and use Container Network Securit
into the **Base domain** field on the **Details** tab. Save the changes to the Kubernetes
cluster.
1. [Install and configure Cilium](../../../../clusters/applications.md#install-cilium-using-gitlab-cicd).
1. [Install and configure Cilium](#use-the-cluster-management-template-to-install-cilium).
1. Be sure to restart all pods that were running before Cilium was installed by running this command
in your cluster:
`kubectl get pods --all-namespaces -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true | grep '<none>' | awk '{print "-n "$1" "$2}' | xargs -L 1 -r kubectl delete pod`
You can skip this step if `nodeinit.restartPods` is set to `true` on your Helm chart.
It's possible to install and manage Cilium in other ways. For example, you could use the GitLab Helm
chart to install Cilium manually in a Kubernetes cluster, and then connect it back to GitLab.
However, such methods aren't documented or officially supported by GitLab.
### Use the Cluster Management template to install Cilium
[Cilium](https://cilium.io/) is a networking plug-in for Kubernetes that you can use to implement
support for [`NetworkPolicy`](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
resources. For more information, see [Network Policies](../../../../../topics/autodevops/stages.md#network-policy).
You can use the [Cluster Management Project Template](../../../../clusters/management_project_template.md)
to install Cilium in your Kubernetes cluster.
1. In your cluster management project, go to `helmfile.yaml` and uncomment `- path: applications/cilium/helmfile.yaml`.
1. In `applications/cilium/helmfile.yaml`, set `clusterType` to either `gke` or `eks` based on which Kubernetes provider you are using.
```yaml
environments:
default:
values:
# Set to "gke" or "eks" based on your cluster type
- clusterType: ""
```
1. Merge or push these changes to the default branch of your cluster management project,
and [GitLab CI/CD](../../../../../ci/README.md) will automatically install Cilium.
WARNING:
Installation and removal of Cilium require a **manual**
[restart](https://docs.cilium.io/en/stable/gettingstarted/k8s-install-helm/#restart-unmanaged-pods)
of all affected pods in all namespaces to ensure that they are
[managed](https://docs.cilium.io/en/stable/operations/troubleshooting/#ensure-managed-pod)
by the correct networking plug-in. When Hubble is enabled, its related pod might require a
restart depending on whether it started prior to Cilium. For more information, see
[Failed Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#failed-deployment)
in the Kubernetes docs.
NOTE:
Major upgrades might require additional setup steps. For more information, see
the official [upgrade guide](https://docs.cilium.io/en/stable/operations/upgrade/).
Support for installing the Cilium application is provided by the
GitLab Container Security group. If you run into unknown issues,
[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
least 2 people from the
[Container Security group](https://about.gitlab.com/handbook/product/categories/#container-security-group).
### Configure the Cilium Helm chart
You can customize Cilium's Helm variables by editing the `applications/cilium/values.yaml`
file in your cluster management project. Refer to the [Cilium Helm reference](https://docs.cilium.io/en/stable/helm-reference/)
for the available configuration options.
By default, Cilium's
[audit mode](https://docs.cilium.io/en/stable/gettingstarted/policy-creation/#enable-policy-audit-mode)
is enabled. In audit mode, Cilium doesn't drop disallowed packets. You
can use the `policy-verdict` log to observe policy-related decisions. You
can disable audit mode by setting `policyAuditMode: false` in
`applications/cilium/values.yaml`.
Cilium logs traffic through the `cilium-monitor` sidecar container.
You can check these logs with the following command:
```shell
kubectl -n gitlab-managed-apps logs -l k8s-app=cilium -c cilium-monitor
```
You can disable the monitor log in `application/cilium/values.yaml`:
```yaml
monitor:
enabled: false
```
The [Hubble](https://github.com/cilium/hubble) monitoring daemon is enabled by default
and it's set to collect per-namespace flow metrics. These metrics are accessible on the
[Threat Monitoring](../../../../application_security/threat_monitoring/index.md)
dashboard. You can disable Hubble by adding the following to
`applications/cilium/values.yaml`:
```yaml
hubble:
enabled: false
```
You can also adjust Helm values for Hubble by using
`applications/cilium/values.yaml`:
```yaml
hubble:
enabled: true
metrics:
enabled:
- 'flow:sourceContext=namespace;destinationContext=namespace'
```
## Managing Network Policies
Managing NetworkPolicies through GitLab is advantageous over managing the policies in Kubernetes
@ -62,16 +156,14 @@ editor.
To view statistics for Container Network Security, you must follow the installation steps above and
configure GitLab integration with Prometheus. Also, if you use custom Helm values for Cilium, you
must enable Hubble with flow metrics for each namespace by adding the following lines to
your [Cilium values](../../../../clusters/applications.md#install-cilium-using-gitlab-cicd):
your [Cilium values](#use-the-cluster-management-template-to-install-cilium):
```yaml
global:
hubble:
enabled: true
metrics:
enabled:
- 'flow:sourceContext=namespace;destinationContext=namespace'
hubble:
enabled: true
metrics:
enabled:
- 'flow:sourceContext=namespace;destinationContext=namespace'
```
Additional information about the statistics page is available in the
@ -97,15 +189,14 @@ kubectl -n gitlab-managed-apps logs -l k8s-app=cilium -c cilium-monitor
By default, Cilium is installed in Audit mode only, meaning that NetworkPolicies log policy
violations but don't block any traffic. To set Cilium to Blocking mode, you must add the following
lines to the `.gitlab/managed-apps/cilium/values.yaml` file in your cluster management project:
lines to the `applications/cilium/values.yaml` file in your cluster management project:
```yaml
config:
policyAuditMode: false
agent:
monitor:
eventTypes: ["drop"]
monitor:
eventTypes: ["drop"]
```
### Traffic is not being allowed as expected

View file

@ -27,7 +27,7 @@ important parts of the merge request:
![Merge request tab positions](img/merge_request_tab_position_v13_11.png)
- **Overview**: Contains the description, notifications from pipelines, and a
discussion area for [comment threads](../../discussions/index.md#resolvable-comments-and-threads)
discussion area for [comment threads](../../discussions/index.md#resolve-a-thread)
and [code suggestions](reviews/suggestions.md). The right sidebar provides fields
to add assignees, reviewers, labels, and a milestone to your work, and the
[merge request widgets area](widgets.md) reports results from pipelines and tests.

View file

@ -100,7 +100,7 @@ When you submit your review, GitLab:
### Resolving/Unresolving threads
Review comments can also resolve or unresolve [resolvable threads](../../../discussions/index.md#resolvable-comments-and-threads).
Review comments can also resolve or unresolve [resolvable threads](../../../discussions/index.md#resolve-a-thread).
When replying to a comment, a checkbox is displayed to resolve or unresolve
the thread after publication.

View file

@ -1,5 +1,8 @@
# frozen_string_literal: true
require 'webrick'
require 'prometheus/client/rack/exporter'
module Gitlab
module Metrics
module Exporter

View file

@ -1,8 +1,5 @@
# frozen_string_literal: true
require 'webrick'
require 'prometheus/client/rack/exporter'
module Gitlab
module Metrics
module Exporter

View file

@ -1,8 +1,5 @@
# frozen_string_literal: true
require 'webrick'
require 'prometheus/client/rack/exporter'
module Gitlab
module Metrics
module Exporter

View file

@ -1,7 +1,5 @@
# frozen_string_literal: true
require 'prometheus/client'
module Gitlab
module Metrics
module Prometheus

View file

@ -19,11 +19,10 @@ module Gitlab
private
def paginate_with_limit_optimization(relation)
# do not paginate relation if it is already paginated
pagination_data = if relation.respond_to?(:current_page) && relation.current_page == params[:page] && relation.limit_value == params[:per_page]
relation
else
pagination_data = if needs_pagination?(relation)
relation.page(params[:page]).per(params[:per_page])
else
relation
end
return pagination_data unless pagination_data.is_a?(ActiveRecord::Relation)
@ -39,6 +38,14 @@ module Gitlab
end
end
def needs_pagination?(relation)
return true unless relation.respond_to?(:current_page)
return true if params[:page].present? && relation.current_page != params[:page].to_i
return true if params[:per_page].present? && relation.limit_value != params[:per_page].to_i
false
end
def add_default_order(relation)
if relation.is_a?(ActiveRecord::Relation) && relation.order_values.empty?
relation = relation.order(:id) # rubocop: disable CodeReuse/ActiveRecord
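
The effect of `needs_pagination?` is that a relation already paginated with the requested page and per-page values passes through untouched, while everything else is paginated from the request params. A hedged usage sketch; `request_context` below is an assumed stand-in for whatever the paginator is normally constructed with:

```ruby
# Hedged sketch of the new behavior, not an exact reproduction of the paginator's API.
paginator = Gitlab::Pagination::OffsetPagination.new(request_context)

already_paged = Project.all.page(1).per(1)
paginator.paginate(already_paged) # page/per_page already match the params: not re-paginated
paginator.paginate(Project.all)   # plain relation: paginated with params[:page] and params[:per_page]
```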

View file

@ -12979,9 +12979,6 @@ msgstr ""
msgid "Events"
msgstr ""
msgid "Events in %{project_path}"
msgstr ""
msgid "Every %{action} attempt has failed: %{job_error_message}. Please try again."
msgstr ""
@ -15253,9 +15250,6 @@ msgstr ""
msgid "Group %{group_name} was successfully created."
msgstr ""
msgid "Group Audit Events"
msgstr ""
msgid "Group Git LFS status:"
msgstr ""
@ -15286,6 +15280,9 @@ msgstr ""
msgid "Group applications"
msgstr ""
msgid "Group audit events"
msgstr ""
msgid "Group avatar"
msgstr ""
@ -15394,9 +15391,6 @@ msgstr ""
msgid "Group was successfully updated."
msgstr ""
msgid "Group-level events in %{group_name} (no project-level events)"
msgstr ""
msgid "Group: %{group_name}"
msgstr ""
@ -17377,6 +17371,9 @@ msgstr ""
msgid "Instance administrators group already exists"
msgstr ""
msgid "Instance audit events"
msgstr ""
msgid "Instance overview"
msgstr ""
@ -25208,9 +25205,6 @@ msgstr ""
msgid "Project Access Tokens"
msgstr ""
msgid "Project Audit Events"
msgstr ""
msgid "Project Badges"
msgstr ""
@ -25238,6 +25232,9 @@ msgstr ""
msgid "Project and wiki repositories"
msgstr ""
msgid "Project audit events"
msgstr ""
msgid "Project avatar"
msgstr ""
@ -34189,6 +34186,15 @@ msgstr ""
msgid "Track groups of issues that share a theme, across projects and milestones"
msgstr ""
msgid "Track important events in your GitLab instance."
msgstr ""
msgid "Track important events in your group."
msgstr ""
msgid "Track important events in your project."
msgstr ""
msgid "Track time with quick actions"
msgstr ""
@ -36550,6 +36556,15 @@ msgstr ""
msgid "Welcome, %{name}!"
msgstr ""
msgid "What are group audit events?"
msgstr ""
msgid "What are instance audit events?"
msgstr ""
msgid "What are project audit events?"
msgstr ""
msgid "What are you searching for?"
msgstr ""

View file

@ -483,7 +483,7 @@ RSpec.describe Projects::CommitController do
end
context 'when rendering a JSON format' do
it 'responds with serialized pipelines' do
it 'responds with serialized pipelines', :aggregate_failures do
get_pipelines(id: commit.id, format: :json)
expect(response).to be_ok
@ -491,6 +491,26 @@ RSpec.describe Projects::CommitController do
expect(json_response['count']['all']).to eq 1
expect(response).to include_pagination_headers
end
context 'with pagination' do
let!(:extra_pipeline) { create(:ci_pipeline, project: project, ref: project.default_branch, sha: commit.sha, status: :running) }
it 'paginates the result when ref is blank' do
allow(Ci::Pipeline).to receive(:default_per_page).and_return(1)
get_pipelines(id: commit.id, format: :json)
expect(json_response['pipelines'].count).to eq(1)
end
it 'paginates the result when ref is present' do
allow(Ci::Pipeline).to receive(:default_per_page).and_return(1)
get_pipelines(id: commit.id, ref: project.default_branch, format: :json)
expect(json_response['pipelines'].count).to eq(1)
end
end
end
end
end

View file

@ -1016,10 +1016,13 @@ RSpec.describe Projects::IssuesController do
let(:spammy_title) { 'Whatever' }
let!(:spam_logs) { create_list(:spam_log, 2, user: user, title: spammy_title) }
before do
request.headers['X-GitLab-Captcha-Response'] = 'a-valid-captcha-response'
request.headers['X-GitLab-Spam-Log-Id'] = spam_logs.last.id
end
def update_verified_issue
update_issue(
issue_params: { title: spammy_title },
additional_params: { spam_log_id: spam_logs.last.id, 'g-recaptcha-response': true })
update_issue(issue_params: { title: spammy_title })
end
it 'returns 200 status' do
@ -1036,8 +1039,9 @@ RSpec.describe Projects::IssuesController do
it 'does not mark spam log as recaptcha_verified when it does not belong to current_user' do
spam_log = create(:spam_log)
request.headers['X-GitLab-Spam-Log-Id'] = spam_log.id
expect { update_issue(issue_params: { spam_log_id: spam_log.id, 'g-recaptcha-response': true }) }
expect { update_issue }
.not_to change { SpamLog.last.recaptcha_verified }
end
end

View file

@ -860,6 +860,20 @@ RSpec.describe Projects::MergeRequestsController do
end
end
end
context 'with pagination' do
before do
create(:ci_pipeline, project: merge_request.source_project, ref: merge_request.source_branch, sha: merge_request.diff_head_sha)
end
it 'paginates the result' do
allow(Ci::Pipeline).to receive(:default_per_page).and_return(1)
get :pipelines, params: { namespace_id: project.namespace.to_param, project_id: project, id: merge_request.iid }, format: :json
expect(json_response['pipelines'].count).to eq(1)
end
end
end
describe 'GET context commits' do

View file

@ -66,6 +66,14 @@ RSpec.describe Projects::PipelinesController do
expect(json_response['pipelines'][0]).not_to include('coverage')
end
it 'paginates the result' do
allow(Ci::Pipeline).to receive(:default_per_page).and_return(2)
get_pipelines_index_json
check_pipeline_response(returned: 2, all: 6)
end
context 'when performing gitaly calls', :request_store do
it 'limits the Gitaly requests' do
# Isolate from test preparation (Repository#exists? is also cached in RequestStore)

View file

@ -59,6 +59,8 @@ RSpec.describe 'Import/Export - Connect to another instance', :js do
expect(page).to have_content 'Showing 1-1 of %{total} groups from %{url}' % { url: source_url, total: total }
expect(page).to have_content stub_path
visit '/'
wait_for_all_requests
end
end

View file

@ -1,6 +1,7 @@
import MockAdapter from 'axios-mock-adapter';
import { registerCaptchaModalInterceptor } from '~/captcha/captcha_modal_axios_interceptor';
import UnsolvedCaptchaError from '~/captcha/unsolved_captcha_error';
import { waitForCaptchaToBeSolved } from '~/captcha/wait_for_captcha_to_be_solved';
import axios from '~/lib/utils/axios_utils';
import httpStatusCodes from '~/lib/utils/http_status';
@ -25,22 +26,24 @@ describe('registerCaptchaModalInterceptor', () => {
let mock;
beforeEach(() => {
waitForCaptchaToBeSolved.mockRejectedValue(new UnsolvedCaptchaError());
mock = new MockAdapter(axios);
mock.onAny('/no-captcha').reply(200, AXIOS_RESPONSE);
mock.onAny('/error').reply(404, AXIOS_RESPONSE);
mock.onAny('/captcha').reply((config) => {
mock.onAny('/endpoint-without-captcha').reply(200, AXIOS_RESPONSE);
mock.onAny('/endpoint-with-unrelated-error').reply(404, AXIOS_RESPONSE);
mock.onAny('/endpoint-with-captcha').reply((config) => {
if (!supportedMethods.includes(config.method)) {
return [httpStatusCodes.METHOD_NOT_ALLOWED, { method: config.method }];
}
try {
const { captcha_response, spam_log_id, ...rest } = JSON.parse(config.data);
// eslint-disable-next-line babel/camelcase
if (captcha_response === CAPTCHA_RESPONSE && spam_log_id === SPAM_LOG_ID) {
return [httpStatusCodes.OK, { ...rest, method: config.method, CAPTCHA_SUCCESS }];
}
} catch (e) {
return [httpStatusCodes.BAD_REQUEST, { method: config.method }];
const data = JSON.parse(config.data);
const {
'X-GitLab-Captcha-Response': captchaResponse,
'X-GitLab-Spam-Log-Id': spamLogId,
} = config.headers;
if (captchaResponse === CAPTCHA_RESPONSE && spamLogId === SPAM_LOG_ID) {
return [httpStatusCodes.OK, { ...data, method: config.method, CAPTCHA_SUCCESS }];
}
return [httpStatusCodes.CONFLICT, NEEDS_CAPTCHA_RESPONSE];
@ -56,7 +59,7 @@ describe('registerCaptchaModalInterceptor', () => {
describe.each([...supportedMethods, ...unsupportedMethods])('For HTTP method %s', (method) => {
it('successful requests are passed through', async () => {
const { data, status } = await axios[method]('/no-captcha');
const { data, status } = await axios[method]('/endpoint-without-captcha');
expect(status).toEqual(httpStatusCodes.OK);
expect(data).toEqual(AXIOS_RESPONSE);
@ -64,7 +67,7 @@ describe('registerCaptchaModalInterceptor', () => {
});
it('error requests without needs_captcha_response_errors are passed through', async () => {
await expect(() => axios[method]('/error')).rejects.toThrow(
await expect(() => axios[method]('/endpoint-with-unrelated-error')).rejects.toThrow(
expect.objectContaining({
response: expect.objectContaining({
status: httpStatusCodes.NOT_FOUND,
@ -79,21 +82,35 @@ describe('registerCaptchaModalInterceptor', () => {
describe.each(supportedMethods)('For HTTP method %s', (method) => {
describe('error requests with needs_captcha_response_errors', () => {
const submittedData = { ID: 12345 };
const submittedHeaders = { 'Submitted-Header': 67890 };
it('re-submits request if captcha was solved correctly', async () => {
waitForCaptchaToBeSolved.mockResolvedValue(CAPTCHA_RESPONSE);
const { data: returnedData } = await axios[method]('/captcha', submittedData);
waitForCaptchaToBeSolved.mockResolvedValueOnce(CAPTCHA_RESPONSE);
const axiosResponse = await axios[method]('/endpoint-with-captcha', submittedData, {
headers: submittedHeaders,
});
const {
data: returnedData,
config: { headers: returnedHeaders },
} = axiosResponse;
expect(waitForCaptchaToBeSolved).toHaveBeenCalledWith(CAPTCHA_SITE_KEY);
expect(returnedData).toEqual({ ...submittedData, CAPTCHA_SUCCESS, method });
expect(returnedHeaders).toEqual(
expect.objectContaining({
...submittedHeaders,
'X-GitLab-Captcha-Response': CAPTCHA_RESPONSE,
'X-GitLab-Spam-Log-Id': SPAM_LOG_ID,
}),
);
expect(mock.history[method]).toHaveLength(2);
});
it('does not re-submit request if captcha was not solved', async () => {
const error = new Error('Captcha not solved');
waitForCaptchaToBeSolved.mockRejectedValue(error);
await expect(() => axios[method]('/captcha', submittedData)).rejects.toThrow(error);
await expect(() => axios[method]('/endpoint-with-captcha', submittedData)).rejects.toThrow(
new UnsolvedCaptchaError(),
);
expect(waitForCaptchaToBeSolved).toHaveBeenCalledWith(CAPTCHA_SITE_KEY);
expect(mock.history[method]).toHaveLength(1);
@ -103,7 +120,7 @@ describe('registerCaptchaModalInterceptor', () => {
describe.each(unsupportedMethods)('For HTTP method %s', (method) => {
it('ignores captcha response', async () => {
await expect(() => axios[method]('/captcha')).rejects.toThrow(
await expect(() => axios[method]('/endpoint-with-captcha')).rejects.toThrow(
expect.objectContaining({
response: expect.objectContaining({
status: httpStatusCodes.METHOD_NOT_ALLOWED,

View file

@ -130,6 +130,80 @@ RSpec.describe Gitlab::Pagination::OffsetPagination do
end
end
context 'when resource already paginated' do
let(:resource) { Project.all.page(1).per(1) }
context 'when per_page param is specified' do
let(:query) { base_query.merge(page: 1, per_page: 2) }
it 'returns appropriate amount of resources based on per_page param' do
expect(subject.paginate(resource).count).to eq 2
end
end
context 'when page and per page params are strings' do
let(:query) { base_query.merge(page: '1', per_page: '1') }
it 'returns appropriate amount of resources' do
expect(subject.paginate(resource).count).to eq 1
end
end
context 'when per_page param is blank' do
let(:query) { base_query.merge(page: 1) }
it 'returns appropriate amount of resources' do
expect(subject.paginate(resource).count).to eq 1
end
end
context 'when page param is blank' do
let(:query) { base_query }
it 'returns appropriate amount of resources based on resource per(N)' do
expect(subject.paginate(resource).count).to eq 1
end
end
end
context 'when resource does not respond to limit_value' do
let(:custom_collection) do
Class.new do
include Enumerable
def initialize(items)
@collection = items
end
def each
@collection.each { |item| yield item }
end
def page(number)
Kaminari.paginate_array(@collection).page(number)
end
end
end
let(:resource) { custom_collection.new(Project.all).page(query[:page]) }
context 'when page param is blank' do
let(:query) { base_query }
it 'returns appropriate amount of resources' do
expect(subject.paginate(resource).count).to eq 3
end
end
context 'when per_page param is blank' do
let(:query) { base_query.merge(page: 1) }
it 'returns appropriate amount of resources with default per page value' do
expect(subject.paginate(resource).count).to eq 3
end
end
end
context 'when resource is a paginatable array' do
let(:resource) { Kaminari.paginate_array(Project.all.to_a) }

View file

@ -11,6 +11,10 @@ RSpec.describe Ci::Pipeline, :mailer, factory_default: :keep do
let_it_be(:namespace) { create_default(:namespace).freeze }
let_it_be(:project) { create_default(:project, :repository).freeze }
it 'paginates 15 pipelines per page' do
expect(described_class.default_per_page).to eq(15)
end
it_behaves_like 'having unique enum values'
it { is_expected.to belong_to(:project) }

View file

@ -34,7 +34,7 @@ RSpec.describe Integrations::BaseChatNotification do
end
describe '#execute' do
subject(:chat_service) { described_class.new }
subject(:chat_integration) { described_class.new }
let_it_be(:project) { create(:project, :repository) }
@ -43,7 +43,7 @@ RSpec.describe Integrations::BaseChatNotification do
let(:data) { Gitlab::DataBuilder::Push.build_sample(subject.project, user) }
before do
allow(chat_service).to receive_messages(
allow(chat_integration).to receive_messages(
project: project,
project_id: project.id,
service_hook: true,
@ -57,8 +57,8 @@ RSpec.describe Integrations::BaseChatNotification do
context 'with a repository' do
it 'returns true' do
expect(chat_service).to receive(:notify).and_return(true)
expect(chat_service.execute(data)).to be true
expect(chat_integration).to receive(:notify).and_return(true)
expect(chat_integration.execute(data)).to be true
end
end
@ -66,8 +66,8 @@ RSpec.describe Integrations::BaseChatNotification do
it 'returns true' do
subject.project = create(:project, :empty_repo)
expect(chat_service).to receive(:notify).and_return(true)
expect(chat_service.execute(data)).to be true
expect(chat_integration).to receive(:notify).and_return(true)
expect(chat_integration.execute(data)).to be true
end
end
@ -75,8 +75,8 @@ RSpec.describe Integrations::BaseChatNotification do
it 'does not remove spaces' do
allow(project).to receive(:full_name).and_return('Project Name')
expect(chat_service).to receive(:get_message).with(any_args, hash_including(project_name: 'Project Name'))
chat_service.execute(data)
expect(chat_integration).to receive(:get_message).with(any_args, hash_including(project_name: 'Project Name'))
chat_integration.execute(data)
end
end
@ -89,76 +89,76 @@ RSpec.describe Integrations::BaseChatNotification do
let(:data) { Gitlab::DataBuilder::Note.build(note, user) }
shared_examples 'notifies the chat service' do
shared_examples 'notifies the chat integration' do
specify do
expect(chat_service).to receive(:notify).with(any_args)
expect(chat_integration).to receive(:notify).with(any_args)
chat_service.execute(data)
chat_integration.execute(data)
end
end
shared_examples 'does not notify the chat service' do
shared_examples 'does not notify the chat integration' do
specify do
expect(chat_service).not_to receive(:notify).with(any_args)
expect(chat_integration).not_to receive(:notify).with(any_args)
chat_service.execute(data)
chat_integration.execute(data)
end
end
it_behaves_like 'notifies the chat service'
it_behaves_like 'notifies the chat integration'
context 'with label filter' do
subject(:chat_service) { described_class.new(labels_to_be_notified: '~Bug') }
subject(:chat_integration) { described_class.new(labels_to_be_notified: '~Bug') }
it_behaves_like 'notifies the chat service'
it_behaves_like 'notifies the chat integration'
context 'MergeRequest events' do
let(:data) { create(:merge_request, labels: [label]).to_hook_data(user) }
it_behaves_like 'notifies the chat service'
it_behaves_like 'notifies the chat integration'
end
context 'Issue events' do
let(:data) { issue.to_hook_data(user) }
it_behaves_like 'notifies the chat service'
it_behaves_like 'notifies the chat integration'
end
end
context 'when labels_to_be_notified_behavior is not defined' do
subject(:chat_service) { described_class.new(labels_to_be_notified: label_filter) }
subject(:chat_integration) { described_class.new(labels_to_be_notified: label_filter) }
context 'no matching labels' do
let(:label_filter) { '~some random label' }
it_behaves_like 'does not notify the chat service'
it_behaves_like 'does not notify the chat integration'
end
context 'only one label matches' do
let(:label_filter) { '~some random label, ~Bug' }
it_behaves_like 'notifies the chat service'
it_behaves_like 'notifies the chat integration'
end
end
context 'when labels_to_be_notified_behavior is blank' do
subject(:chat_service) { described_class.new(labels_to_be_notified: label_filter, labels_to_be_notified_behavior: '') }
subject(:chat_integration) { described_class.new(labels_to_be_notified: label_filter, labels_to_be_notified_behavior: '') }
context 'no matching labels' do
let(:label_filter) { '~some random label' }
it_behaves_like 'does not notify the chat service'
it_behaves_like 'does not notify the chat integration'
end
context 'only one label matches' do
let(:label_filter) { '~some random label, ~Bug' }
it_behaves_like 'notifies the chat service'
it_behaves_like 'notifies the chat integration'
end
end
context 'when labels_to_be_notified_behavior is match_any' do
subject(:chat_service) do
subject(:chat_integration) do
described_class.new(
labels_to_be_notified: label_filter,
labels_to_be_notified_behavior: 'match_any'
@ -168,24 +168,24 @@ RSpec.describe Integrations::BaseChatNotification do
context 'no label filter' do
let(:label_filter) { nil }
it_behaves_like 'notifies the chat service'
it_behaves_like 'notifies the chat integration'
end
context 'no matching labels' do
let(:label_filter) { '~some random label' }
it_behaves_like 'does not notify the chat service'
it_behaves_like 'does not notify the chat integration'
end
context 'only one label matches' do
let(:label_filter) { '~some random label, ~Bug' }
it_behaves_like 'notifies the chat service'
it_behaves_like 'notifies the chat integration'
end
end
context 'when labels_to_be_notified_behavior is match_all' do
subject(:chat_service) do
subject(:chat_integration) do
described_class.new(
labels_to_be_notified: label_filter,
labels_to_be_notified_behavior: 'match_all'
@ -195,31 +195,31 @@ RSpec.describe Integrations::BaseChatNotification do
context 'no label filter' do
let(:label_filter) { nil }
it_behaves_like 'notifies the chat service'
it_behaves_like 'notifies the chat integration'
end
context 'no matching labels' do
let(:label_filter) { '~some random label' }
it_behaves_like 'does not notify the chat service'
it_behaves_like 'does not notify the chat integration'
end
context 'only one label matches' do
let(:label_filter) { '~some random label, ~Bug' }
it_behaves_like 'does not notify the chat service'
it_behaves_like 'does not notify the chat integration'
end
context 'labels matches exactly' do
let(:label_filter) { '~Bug, ~Backend, ~Community contribution' }
it_behaves_like 'notifies the chat service'
it_behaves_like 'notifies the chat integration'
end
context 'labels matches but object has more' do
let(:label_filter) { '~Bug, ~Backend' }
it_behaves_like 'notifies the chat service'
it_behaves_like 'notifies the chat integration'
end
context 'labels are distributed on multiple objects' do
@ -241,22 +241,22 @@ RSpec.describe Integrations::BaseChatNotification do
})
end
it_behaves_like 'does not notify the chat service'
it_behaves_like 'does not notify the chat integration'
end
end
end
context 'with "channel" property' do
before do
allow(chat_service).to receive(:channel).and_return(channel)
allow(chat_integration).to receive(:channel).and_return(channel)
end
context 'empty string' do
let(:channel) { '' }
it 'does not include the channel' do
expect(chat_service).to receive(:notify).with(any_args, hash_excluding(:channel)).and_return(true)
expect(chat_service.execute(data)).to be(true)
expect(chat_integration).to receive(:notify).with(any_args, hash_excluding(:channel)).and_return(true)
expect(chat_integration.execute(data)).to be(true)
end
end
@ -264,20 +264,20 @@ RSpec.describe Integrations::BaseChatNotification do
let(:channel) { ' ' }
it 'does not include the channel' do
expect(chat_service).to receive(:notify).with(any_args, hash_excluding(:channel)).and_return(true)
expect(chat_service.execute(data)).to be(true)
expect(chat_integration).to receive(:notify).with(any_args, hash_excluding(:channel)).and_return(true)
expect(chat_integration.execute(data)).to be(true)
end
end
end
shared_examples 'with channel specified' do |channel, expected_channels|
before do
allow(chat_service).to receive(:push_channel).and_return(channel)
allow(chat_integration).to receive(:push_channel).and_return(channel)
end
it 'notifies all channels' do
expect(chat_service).to receive(:notify).with(any_args, hash_including(channel: expected_channels)).and_return(true)
expect(chat_service.execute(data)).to be(true)
expect(chat_integration).to receive(:notify).with(any_args, hash_including(channel: expected_channels)).and_return(true)
expect(chat_integration.execute(data)).to be(true)
end
end