Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2022-02-10 21:15:20 +00:00
parent e0277d5393
commit 9abffa14d6
45 changed files with 1161 additions and 703 deletions

View File

@ -1 +1 @@
3074a5673df93a6a765c2bdde84bd4f6f670afcb
6130522b84e1ea354467c2a51989c66fb0566d9d

View File

@ -544,7 +544,6 @@ export default {
.put(joinPaths(issueToMove.webPath, 'reorder'), {
move_before_id: isMovingToBeginning ? null : getIdFromGraphQLId(moveBeforeId),
move_after_id: isMovingToEnd ? null : getIdFromGraphQLId(moveAfterId),
group_full_path: this.isProject ? undefined : this.fullPath,
})
.then(() => {
const serializedVariables = JSON.stringify(this.queryVariables);

View File

@ -7,12 +7,11 @@ import createFlash from '~/flash';
import axios from '~/lib/utils/axios_utils';
import { s__ } from '~/locale';
const updateIssue = (url, issueList, { move_before_id, move_after_id }) =>
const updateIssue = (url, { move_before_id, move_after_id }) =>
axios
.put(`${url}/reorder`, {
move_before_id,
move_after_id,
group_full_path: issueList.dataset.groupFullPath,
})
.catch(() => {
createFlash({
@ -52,7 +51,7 @@ const initManualOrdering = () => {
const beforeId = prev && parseInt(prev.dataset.id, 10);
const afterId = next && parseInt(next.dataset.id, 10);
updateIssue(url, issueList, { move_after_id: afterId, move_before_id: beforeId });
updateIssue(url, { move_after_id: afterId, move_before_id: beforeId });
},
}),
);

View File

@ -1,5 +1,6 @@
<script>
import { GlFormGroup, GlButton, GlFormInput, GlForm, GlAlert } from '@gitlab/ui';
import { GlFormGroup, GlButton, GlFormInput, GlForm, GlAlert, GlSprintf, GlLink } from '@gitlab/ui';
import { helpPagePath } from '~/helpers/help_page_helper';
import {
CREATE_BRANCH_ERROR_GENERIC,
CREATE_BRANCH_ERROR_WITH_CONTEXT,
@ -7,6 +8,7 @@ import {
I18N_NEW_BRANCH_LABEL_BRANCH,
I18N_NEW_BRANCH_LABEL_SOURCE,
I18N_NEW_BRANCH_SUBMIT_BUTTON_TEXT,
I18N_NEW_BRANCH_PERMISSION_ALERT,
} from '../constants';
import createBranchMutation from '../graphql/mutations/create_branch.mutation.graphql';
import ProjectDropdown from './project_dropdown.vue';
@ -17,6 +19,8 @@ const DEFAULT_ALERT_PARAMS = {
title: '',
message: '',
variant: DEFAULT_ALERT_VARIANT,
link: undefined,
dismissible: true,
};
export default {
@ -27,10 +31,16 @@ export default {
GlFormInput,
GlForm,
GlAlert,
GlSprintf,
GlLink,
ProjectDropdown,
SourceBranchDropdown,
},
inject: ['initialBranchName'],
inject: {
initialBranchName: {
default: '',
},
},
data() {
return {
selectedProject: null,
@ -40,6 +50,7 @@ export default {
alertParams: {
...DEFAULT_ALERT_PARAMS,
},
hasPermission: false,
};
},
computed: {
@ -49,19 +60,38 @@ export default {
showAlert() {
return Boolean(this.alertParams?.message);
},
isBranchNameValid() {
return (this.branchName ?? '').trim().length > 0;
},
disableSubmitButton() {
return !(this.selectedProject && this.selectedSourceBranchName && this.branchName);
return !(this.selectedProject && this.selectedSourceBranchName && this.isBranchNameValid);
},
},
methods: {
displayAlert({ title, message, variant = DEFAULT_ALERT_VARIANT } = {}) {
displayAlert({
title,
message,
variant = DEFAULT_ALERT_VARIANT,
link,
dismissible = true,
} = {}) {
this.alertParams = {
title,
message,
variant,
link,
dismissible,
};
},
onAlertDismiss() {
setPermissionAlert() {
this.displayAlert({
message: I18N_NEW_BRANCH_PERMISSION_ALERT,
variant: 'warning',
link: helpPagePath('user/permissions', { anchor: 'project-members-permissions' }),
dismissible: false,
});
},
dismissAlert() {
this.alertParams = {
...DEFAULT_ALERT_PARAMS,
};
@ -69,6 +99,14 @@ export default {
onProjectSelect(project) {
this.selectedProject = project;
this.selectedSourceBranchName = null; // reset branch selection
this.hasPermission = this.selectedProject.userPermissions.pushCode;
if (!this.hasPermission) {
this.setPermissionAlert();
} else {
// Clear the alert if the user has permission for the newly selected project.
this.dismissAlert();
}
},
onSourceBranchSelect(branchName) {
this.selectedSourceBranchName = branchName;
@ -127,10 +165,18 @@ export default {
class="gl-mb-5"
:variant="alertParams.variant"
:title="alertParams.title"
@dismiss="onAlertDismiss"
:dismissible="alertParams.dismissible"
@dismiss="dismissAlert"
>
{{ alertParams.message }}
<gl-sprintf :message="alertParams.message">
<template #link="{ content }">
<gl-link :href="alertParams.link" target="_blank">
{{ content }}
</gl-link>
</template>
</gl-sprintf>
</gl-alert>
<gl-form-group :label="$options.i18n.I18N_NEW_BRANCH_LABEL_DROPDOWN" label-for="project-select">
<project-dropdown
id="project-select"
@ -140,26 +186,28 @@ export default {
/>
</gl-form-group>
<gl-form-group
:label="$options.i18n.I18N_NEW_BRANCH_LABEL_SOURCE"
label-for="source-branch-select"
>
<source-branch-dropdown
id="source-branch-select"
:selected-project="selectedProject"
:selected-branch-name="selectedSourceBranchName"
@change="onSourceBranchSelect"
@error="onError"
/>
</gl-form-group>
<template v-if="selectedProject && hasPermission">
<gl-form-group
:label="$options.i18n.I18N_NEW_BRANCH_LABEL_SOURCE"
label-for="source-branch-select"
>
<source-branch-dropdown
id="source-branch-select"
:selected-project="selectedProject"
:selected-branch-name="selectedSourceBranchName"
@change="onSourceBranchSelect"
@error="onError"
/>
</gl-form-group>
<gl-form-group
:label="$options.i18n.I18N_NEW_BRANCH_LABEL_BRANCH"
label-for="branch-name-input"
class="gl-max-w-62"
>
<gl-form-input id="branch-name-input" v-model="branchName" type="text" required />
</gl-form-group>
<gl-form-group
:label="$options.i18n.I18N_NEW_BRANCH_LABEL_BRANCH"
label-for="branch-name-input"
class="gl-max-w-62"
>
<gl-form-input id="branch-name-input" v-model="branchName" type="text" required />
</gl-form-group>
</template>
<div class="form-actions">
<gl-button

View File

@ -23,3 +23,6 @@ export const I18N_NEW_BRANCH_SUCCESS_TITLE = s__(
export const I18N_NEW_BRANCH_SUCCESS_MESSAGE = s__(
'JiraConnect|You can now close this window and return to Jira.',
);
export const I18N_NEW_BRANCH_PERMISSION_ALERT = s__(
"JiraConnect|You don't have permission to create branches for this project. Select a different project or contact the project owner for access. %{linkStart}Learn more.%{linkEnd}",
);

View File

@ -26,6 +26,9 @@ query jiraGetProjects(
repository {
empty
}
userPermissions {
pushCode
}
}
pageInfo {
...PageInfo

View File

@ -124,6 +124,9 @@ export default {
const fileName = this.requests[0].truncatedUrl;
return `${fileName}_perf_bar_${Date.now()}.json`;
},
memoryReportPath() {
return mergeUrlParams({ performance_bar: 'memory' }, window.location.href);
},
},
mounted() {
this.currentRequest = this.requestId;
@ -182,6 +185,15 @@ export default {
s__('PerformanceBar|Download')
}}</a>
</div>
<div
v-if="currentRequest.details && env === 'development'"
id="peek-memory-report"
class="view"
>
<a class="gl-text-blue-200" :href="memoryReportPath">{{
s__('PerformanceBar|Memory report')
}}</a>
</div>
<div v-if="currentRequest.details" id="peek-flamegraph" class="view">
<span class="gl-text-white-200">{{ s__('PerformanceBar|Flamegraph with mode:') }}</span>
<a class="gl-text-blue-200" :href="flamegraphPath('wall')">{{

View File

@ -2,6 +2,7 @@
import { GlDropdown, GlDropdownItem, GlIcon, GlTooltipDirective } from '@gitlab/ui';
import createFlash from '~/flash';
import axios from '~/lib/utils/axios_utils';
import { confirmAction } from '~/lib/utils/confirm_via_gl_modal/confirm_via_gl_modal';
import { s__, __, sprintf } from '~/locale';
import GlCountdown from '~/vue_shared/components/gl_countdown.vue';
import eventHub from '../../event_hub';
@ -28,7 +29,7 @@ export default {
};
},
methods: {
onClickAction(action) {
async onClickAction(action) {
if (action.scheduled_at) {
const confirmationMessage = sprintf(
s__(
@ -36,9 +37,10 @@ export default {
),
{ jobName: action.name },
);
// https://gitlab.com/gitlab-org/gitlab-foss/issues/52156
// eslint-disable-next-line no-alert
if (!window.confirm(confirmationMessage)) {
const confirmed = await confirmAction(confirmationMessage);
if (!confirmed) {
return;
}
}

View File

@ -13,6 +13,7 @@
# only_shared: boolean
# limit: integer
# include_subgroups: boolean
# include_ancestor_groups: boolean
# params:
# sort: string
# visibility_level: int
@ -113,12 +114,19 @@ class GroupProjectsFinder < ProjectsFinder
options.fetch(:include_subgroups, false)
end
# Ancestor groups are supported only for owned projects, not for shared projects.
def include_ancestor_groups?
options.fetch(:include_ancestor_groups, false)
end
def owned_projects
if include_subgroups?
Project.for_group_and_its_subgroups(group)
else
group.projects
end
return group.projects unless include_subgroups? || include_ancestor_groups?
union_relations = []
union_relations << Project.for_group_and_its_subgroups(group) if include_subgroups?
union_relations << Project.for_group_and_its_ancestor_groups(group) if include_ancestor_groups?
Project.from_union(union_relations)
end
def shared_projects

View File

@ -729,6 +729,7 @@ class Project < ApplicationRecord
scope :joins_import_state, -> { joins("INNER JOIN project_mirror_data import_state ON import_state.project_id = projects.id") }
scope :for_group, -> (group) { where(group: group) }
scope :for_group_and_its_subgroups, ->(group) { where(namespace_id: group.self_and_descendants.select(:id)) }
scope :for_group_and_its_ancestor_groups, ->(group) { where(namespace_id: group.self_and_ancestors.select(:id)) }
class << self
# Searches for a list of projects based on the query given in `query`.

View File

@ -1,11 +1,12 @@
- can_edit_max_page_size = can?(current_user, :update_max_pages_size)
- can_enforce_https_only = Gitlab.config.pages.external_http || Gitlab.config.pages.external_https
- return unless can_edit_max_page_size || can_enforce_https_only
= form_for @project, url: project_pages_path(@project), html: { class: 'inline', title: pages_https_only_title } do |f|
- if can?(current_user, :update_max_pages_size)
- if can_edit_max_page_size
= render_if_exists 'shared/pages/max_pages_size_input', form: f
.gl-mt-3
= f.submit s_('GitLabPages|Save changes'), class: 'btn btn-confirm gl-button'
- if Gitlab.config.pages.external_http || Gitlab.config.pages.external_https
- if can_enforce_https_only
.form-group
.form-check
= f.check_box :pages_https_only, class: 'form-check-input', disabled: pages_https_only_disabled?
@ -17,5 +18,5 @@
%p
= s_("GitLabPages|When enabled, all attempts to visit your website through HTTP are automatically redirected to HTTPS using a response with status code 301. Requires a valid certificate for all domains. %{docs_link_start}Learn more.%{link_end}").html_safe % { docs_link_start: docs_link_start, link_end: link_end }
.gl-mt-3
= f.submit s_('GitLabPages|Save changes'), class: 'btn btn-confirm gl-button'
.gl-mt-3
= f.submit s_('GitLabPages|Save changes'), class: 'btn btn-confirm gl-button'

View File

@ -1,7 +1,7 @@
= render 'shared/alerts/positioning_disabled' if @sort == 'relative_position'
- if @issues.to_a.any?
%ul.content-list.issues-list.issuable-list{ class: issue_manual_ordering_class, data: { group_full_path: @group&.full_path } }
%ul.content-list.issues-list.issuable-list{ class: issue_manual_ordering_class }
= render partial: 'projects/issues/issue', collection: @issues
= paginate @issues, theme: "gitlab"
- else

View File

@ -3,4 +3,5 @@
Rails.application.configure do |config|
config.middleware.use(Gitlab::RequestProfiler::Middleware)
config.middleware.use(Gitlab::Middleware::Speedscope)
config.middleware.use(Gitlab::Middleware::MemoryReport)
end

View File

@ -91,11 +91,6 @@ printer = RubyProf::CallStackPrinter.new(result)
printer.print(File.open('/tmp/profile.html', 'w'))
```
[GitLab-Profiler](https://gitlab.com/gitlab-com/gitlab-profiler) is a project
that builds on this to add some additional niceties, such as allowing
configuration with a single YAML file for multiple URLs, and uploading of the
profile and log output to S3.
## Speedscope flamegraphs
You can generate a flamegraph for a particular URL by selecting a flamegraph sampling mode button in the performance bar or by adding the `performance_bar=flamegraph` parameter to the request.

View File

@ -137,6 +137,7 @@ than the [webhook integration](#configure-a-webhook).
- Merge request
- Tag push
1. Enter the **Jenkins server URL**.
1. Optional. Clear the **Enable SSL verification** checkbox to disable [SSL verification](../user/project/integrations/overview.md#ssl-verification).
1. Enter the **Project name**.
The project name should be URL-friendly, where spaces are replaced with underscores. To ensure

View File

@ -1196,7 +1196,7 @@ To edit an existing site profile:
1. Edit the fields then select **Save profile**.
If a site profile is linked to a security policy, a user cannot edit the profile from this page. See
[Scan Execution Policies](../policies/index.md#scan-execution-policy-editor)
[Scan execution policies](../policies/scan-execution-policies.md)
for more information.
#### Delete a site profile
@ -1210,7 +1210,7 @@ To delete an existing site profile:
1. Select **Delete** to confirm the deletion.
If a site profile is linked to a security policy, a user cannot delete the profile from this page.
See [Scan Execution Policies](../policies/index.md#scan-execution-policy-editor)
See [Scan execution policies](../policies/scan-execution-policies.md)
for more information.
#### Validate a site profile
@ -1353,7 +1353,7 @@ To edit a scanner profile:
1. Select **Save profile**.
If a scanner profile is linked to a security policy, a user cannot edit the profile from this page.
See [Scan Execution Policies](../policies/index.md#scan-execution-policy-editor)
See [Scan execution policies](../policies/scan-execution-policies.md)
for more information.
#### Delete a scanner profile
@ -1367,7 +1367,7 @@ To delete a scanner profile:
1. Select **Delete**.
If a scanner profile is linked to a security policy, a user cannot delete the profile from this
page. See [Scan Execution Policies](../policies/index.md#scan-execution-policy-editor)
page. See [Scan execution policies](../policies/scan-execution-policies.md)
for more information.
## Auditing

View File

@ -17,8 +17,9 @@ can access these by navigating to your project's **Security & Compliance > Polic
GitLab supports the following security policies:
- [Container Network Policy](#container-network-policy) (Deprecated in GitLab 14.8)
- [Scan Execution Policy](#scan-execution-policy-schema)
- [Scan Execution Policy](scan-execution-policies.md)
- [Scan Result Policy](scan-result-policies.md)
- [Container Network Policy](#container-network-policy) (DEPRECATED)
## Policy management
@ -77,6 +78,53 @@ by the Rule mode, Rule mode is automatically
disabled. If the YAML is incorrect, you must use YAML
mode to fix your policy before Rule mode is available again.
## Security Policies project
NOTE:
We recommend using the [Security Policies project](#security-policies-project)
exclusively for managing policies for the project. Do not add your application's source code to such
projects.
A Security Policies project is a repository used to store policies. All security policies are stored in
the `.gitlab/security-policies/policy.yml` YAML file. The format of this file depends on the type of policy being stored. Examples and schema information are available for the following policy types:
- [Scan execution policy](scan-execution-policies.md#example-security-policies-project)
- [Scan result policy](scan-result-policies.md#example-security-scan-result-policies-project)
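For orientation, the combined top-level layout of `policy.yml` is sketched below. This is a minimal, illustrative skeleton only; the branch, scanner, severity, and approver values are placeholders. See the pages linked above for complete examples and schema details.

```yaml
---
scan_execution_policy:
- name: Run secret detection in every default branch pipeline    # placeholder policy
  description: Example scan execution policy
  enabled: true
  rules:
  - type: pipeline
    branches:
    - main
  actions:
  - scan: secret_detection
scan_result_policy:
- name: Require approval for new critical container scanning findings    # placeholder policy
  description: Example scan result policy
  enabled: true
  rules:
  - type: scan_finding
    branches:
    - main
    scanners:
    - container_scanning
    vulnerabilities_allowed: 0
    severity_levels:
    - critical
    vulnerability_states:
    - newly_detected
  actions:
  - type: require_approval
    approvals_required: 1
    user_approvers:
    - example.user    # placeholder username
```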
Policies created in this project are applied through a background job that runs once every 10
minutes. Allow up to 10 minutes for any policy changes committed to this project to take effect.
## Security Policy project selection
NOTE:
Only project Owners have the [permissions](../../permissions.md#project-members-permissions)
to select a Security Policy Project.
After the Security Policy project is created and policies are added to its repository, you
must associate it with the project you want the policies to apply to:
1. On the top bar, select **Menu > Projects** and find your project.
1. On the left sidebar, select **Security & Compliance > Policies**.
1. Select **Edit Policy Project**, and search for and select the
project you would like to link from the dropdown menu.
1. Select **Save**.
![Security Policy Project](img/security_policy_project_v14_6.png)
### Unlink Security Policy projects
Project owners can unlink Security Policy projects from development projects. To do this, follow
the steps described in [Security Policy project selection](#security-policy-project-selection),
but select the trash can icon in the modal.
## Scan execution policies
See [Scan execution policies](scan-execution-policies.md).
## Scan result policy editor
See [Scan result policies](scan-result-policies.md).
## Container Network Policy
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/32365) in GitLab 12.9.
@ -182,476 +230,7 @@ There are two ways to create policy alerts:
Once added, the UI updates and displays a warning about the dangers of too many alerts.
## Security Policies project
NOTE:
We recommend using the [Security Policies project](#security-policies-project)
exclusively for managing policies for the project. Do not add your application's source code to such
projects.
The Security Policies feature is a repository to store policies. All security policies are stored as
the `.gitlab/security-policies/policy.yml` YAML file with this format:
```yaml
---
scan_execution_policy:
- name: Enforce DAST in every pipeline
description: This policy enforces pipeline configuration to have a job with DAST scan
enabled: true
rules:
- type: pipeline
branches:
- master
actions:
- scan: dast
scanner_profile: Scanner Profile A
site_profile: Site Profile B
- name: Enforce DAST in every pipeline in main branch
description: This policy enforces pipeline configuration to have a job with DAST scan for main branch
enabled: true
rules:
- type: pipeline
branches:
- main
actions:
- scan: dast
scanner_profile: Scanner Profile C
site_profile: Site Profile D
scan_result_policy:
- name: critical vulnerability CS approvals
description: critical severity level only for container scanning
enabled: true
rules:
- type: scan_finding
branches:
- main
scanners:
- container_scanning
vulnerabilities_allowed: 1
severity_levels:
- critical
vulnerability_states:
- newly_detected
actions:
- type: require_approval
approvals_required: 1
user_approvers:
- adalberto.dare
- name: secondary CS approvals
description: secondary only for container scanning
enabled: true
rules:
- type: scan_finding
branches:
- main
scanners:
- container_scanning
vulnerabilities_allowed: 1
severity_levels:
- low
- unknown
vulnerability_states:
- newly_detected
actions:
- type: require_approval
approvals_required: 1
user_approvers:
- sam.white
```
## Security Policy project selection
NOTE:
Only project Owners have the [permissions](../../permissions.md#project-members-permissions)
to select Security Policy Project.
When the Security Policy project is created and policies are created within that repository, you
must create an association between that project and the project you want to apply policies to:
1. On the top bar, select **Menu > Projects** and find your project.
1. On the left sidebar, select **Security & Compliance > Policies**.
1. Select **Edit Policy Project**, and search for and select the
project you would like to link from the dropdown menu.
1. Select **Save**.
![Security Policy Project](img/security_policy_project_v14_6.png)
### Unlink Security Policy projects
Project owners can unlink Security Policy projects from development projects. To do this, follow
the steps described in [Security Policy project selection](#security-policy-project-selection),
but select the trash can icon in the modal.
## Scan execution policies
Project owners can use scan execution policies to require that security scans run on a specified
schedule or with the project pipeline. Required scans are injected into the CI pipeline as new jobs
with a long, random job name. In the unlikely event of a job name collision, the security policy job
overwrites any pre-existing job in the pipeline.
This feature has some overlap with [compliance framework pipelines](../../project/settings/#compliance-pipeline-configuration),
as we have not [unified the user experience for these two features](https://gitlab.com/groups/gitlab-org/-/epics/7312).
For details on the similarities and differences between these features, see
[Enforce scan execution](../#enforce-scan-execution).
### Scan Execution Policy editor
NOTE:
Only project Owners have the [permissions](../../permissions.md#project-members-permissions)
to select Security Policy Project.
Once your policy is complete, save it by selecting **Create via merge request**
at the bottom of the editor. You are redirected to the merge request on the project's
configured security policy project. If one does not link to your project, a security
policy project is automatically created. Existing policies can also be
removed from the editor interface by selecting **Delete policy**
at the bottom of the editor.
![Scan Execution Policy Editor YAML Mode](img/scan_execution_policy_yaml_mode_v14_7.png)
The policy editor currently only supports the YAML mode. The Rule mode is tracked in the [Allow Users to Edit Rule-mode Scan Execution Policies in the Policy UI](https://gitlab.com/groups/gitlab-org/-/epics/5363) epic.
### Scan Execution Policies Schema
The YAML file with Scan Execution Policies consists of an array of objects matching Scan Execution Policy Schema nested under the `scan_execution_policy` key. You can configure a maximum of 5 policies under the `scan_execution_policy` key.
When you save a new policy, GitLab validates its contents against [this JSON schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/validators/json_schemas/security_orchestration_policy.json).
If you're not familiar with how to read [JSON schemas](https://json-schema.org/),
the following sections and tables provide an alternative.
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `scan_execution_policy` | `array` of Scan Execution Policy | | List of scan execution policies (maximum 5) |
### Scan Execution Policy Schema
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `name` | `string` | | Name of the policy. |
| `description` (optional) | `string` | | Description of the policy. |
| `enabled` | `boolean` | `true`, `false` | Flag to enable (`true`) or disable (`false`) the policy. |
| `rules` | `array` of rules | | List of rules that the policy applies. |
| `actions` | `array` of actions | | List of actions that the policy enforces. |
### `pipeline` rule type
This rule enforces the defined actions whenever the pipeline runs for a selected branch.
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `type` | `string` | `pipeline` | The rule's type. |
| `branches` | `array` of `string` | `*` or the branch's name | The branch the given policy applies to (supports wildcard). |
### `schedule` rule type
This rule enforces the defined actions and schedules a scan on the provided date/time.
| Field | Type | Possible values | Description |
|------------|------|-----------------|-------------|
| `type` | `string` | `schedule` | The rule's type. |
| `branches` | `array` of `string` | `*` or the branch's name | The branch the given policy applies to (supports wildcard). |
| `cadence` | `string` | CRON expression (for example, `0 0 * * *`) | A whitespace-separated string containing five fields that represents the scheduled time. |
| `clusters` | `object` | | The cluster where the given policy enforces running selected scans (only for `container_scanning`/`cluster_image_scanning` scans). The key of the object is the name of the Kubernetes cluster configured for your project in GitLab. In the optionally provided value of the object, you can precisely select Kubernetes resources that are scanned. |
#### `cluster` schema
Use this schema to define `clusters` objects in the [`schedule` rule type](#schedule-rule-type).
| Field | Type | Possible values | Description |
|--------------|---------------------|--------------------------|-------------|
| `containers` | `array` of `string` | | The container name that is scanned (only the first value is currently supported). |
| `resources` | `array` of `string` | | The resource name that is scanned (only the first value is currently supported). |
| `namespaces` | `array` of `string` | | The namespace that is scanned (only the first value is currently supported). |
| `kinds` | `array` of `string` | `deployment`/`daemonset` | The resource kind that should be scanned (only the first value is currently supported). |
### `scan` action type
This action executes the selected `scan` with additional parameters when conditions for at least one
rule in the defined policy are met.
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `scan` | `string` | `dast`, `secret_detection`, `sast`, `container_scanning`, `cluster_image_scanning` | The action's type. |
| `site_profile` | `string` | Name of the selected [DAST site profile](../dast/index.md#site-profile). | The DAST site profile to execute the DAST scan. This field should only be set if `scan` type is `dast`. |
| `scanner_profile` | `string` or `null` | Name of the selected [DAST scanner profile](../dast/index.md#scanner-profile). | The DAST scanner profile to execute the DAST scan. This field should only be set if `scan` type is `dast`.|
| `variables` | `object` | | Set of variables applied and enforced for the selected scan. The object's key is the variable name with a value provided as a string. |
Note the following:
- You must create the [site profile](../dast/index.md#site-profile) and [scanner profile](../dast/index.md#scanner-profile)
with selected names for each project that is assigned to the selected Security Policy Project.
Otherwise, the policy is not applied and a job with an error message is created instead.
- Once you associate the site profile and scanner profile by name in the policy, it is not possible
to modify or delete them. If you want to modify them, you must first disable the policy by setting
the `active` flag to `false`.
- When configuring policies with a scheduled DAST scan, the author of the commit in the security
policy project's repository must have access to the scanner and site profiles. Otherwise, the scan
is not scheduled successfully.
- For a secret detection scan, only rules with the default ruleset are supported. [Custom rulesets](../secret_detection/index.md#custom-rulesets)
are not supported.
- A secret detection scan runs in `normal` mode when executed as part of a pipeline, and in
[`historic`](../secret_detection/index.md#full-history-secret-detection)
mode when executed as part of a scheduled scan.
- A container scanning and cluster image scanning scans configured for the `pipeline` rule type ignores the cluster defined in the `clusters` object.
They use predefined CI/CD variables defined for your project. Cluster selection with the `clusters` object is supported for the `schedule` rule type.
Cluster with name provided in `clusters` object must be created and configured for the project. To be able to successfully perform the `container_scanning`/`cluster_image_scanning` scans for the cluster you must follow instructions for the [Cluster Image Scanning feature](../cluster_image_scanning/index.md#prerequisites).
- The SAST scan uses the default template and runs in a [child pipeline](../../../ci/pipelines/parent_child_pipelines.md).
### Example security policies project
You can use this example in a `.gitlab/security-policies/policy.yml`, as described in
[Security policies project](#security-policies-project).
```yaml
---
scan_execution_policy:
- name: Enforce DAST in every release pipeline
description: This policy enforces pipeline configuration to have a job with DAST scan for release branches
enabled: true
rules:
- type: pipeline
branches:
- release/*
actions:
- scan: dast
scanner_profile: Scanner Profile A
site_profile: Site Profile B
- name: Enforce DAST and secret detection scans every 10 minutes
description: This policy enforces DAST and secret detection scans to run every 10 minutes
enabled: true
rules:
- type: schedule
branches:
- main
cadence: "*/10 * * * *"
actions:
- scan: dast
scanner_profile: Scanner Profile C
site_profile: Site Profile D
- scan: secret_detection
- name: Enforce Secret Detection and Container Scanning in every default branch pipeline
description: This policy enforces pipeline configuration to have a job with Secret Detection and Container Scanning scans for the default branch
enabled: true
rules:
- type: pipeline
branches:
- main
actions:
- scan: secret_detection
- scan: sast
variables:
SAST_EXCLUDED_ANALYZERS: brakeman
- scan: container_scanning
- name: Enforce Cluster Image Scanning on production-cluster every 24h
description: This policy enforces Cluster Image Scanning scan to run every 24 hours
enabled: true
rules:
- type: schedule
cadence: "15 3 * * *"
clusters:
production-cluster:
containers:
- database
resources:
- production-application
namespaces:
- production-namespace
kinds:
- deployment
actions:
- scan: cluster_image_scanning
```
In this example:
- For every pipeline executed on branches that match the `release/*` wildcard (for example, branch
`release/v1.2.1`), DAST scans run with `Scanner Profile A` and `Site Profile B`.
- DAST and secret detection scans run every 10 minutes. The DAST scan runs with `Scanner Profile C`
and `Site Profile D`.
- Secret detection, container scanning, and SAST scans run for every pipeline executed on the `main`
branch. The SAST scan runs with the `SAST_EXCLUDED_ANALYZER` variable set to `"brakeman"`.
- Cluster Image Scanning scan runs every 24h. The scan runs on the `production-cluster` cluster and fetches vulnerabilities
from the container with the name `database` configured for deployment with the name `production-application` in the `production-namespace` namespace.
### Example for scan execution policy editor
You can use this example in the YAML mode of the [Scan Execution Policy editor](#scan-execution-policy-editor).
It corresponds to a single object from the previous example.
```yaml
name: Enforce Secret Detection and Container Scanning in every default branch pipeline
description: This policy enforces pipeline configuration to have a job with Secret Detection and Container Scanning scans for the default branch
enabled: true
rules:
- type: pipeline
branches:
- main
actions:
- scan: secret_detection
- scan: container_scanning
```
### Scan Result Policy editor
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/77814) in GitLab 14.8 with a flag named `scan_result_policy`. Disabled by default.
NOTE:
Only project Owners have the [permissions](../../permissions.md#project-members-permissions)
to select Security Policy Project.
Once your policy is complete, save it by selecting **Create merge request** at the bottom of the
editor. This redirects you to the merge request on the project's configured security policy project.
If a security policy project doesn't link to your project, GitLab creates such a project for you.
Existing policies can also be removed from the editor interface by selecting **Delete policy** at
the bottom of the editor.
The policy editor only supports YAML mode. To follow work on Rule mode, see the epic
[Allow Users to Edit Rule-mode Scan Result Policies in the Policy UI](https://gitlab.com/groups/gitlab-org/-/epics/5363).
![Scan Result Policy Editor YAML Mode](img/scan_result_policy_yaml_mode_v14_6.png)
### Scan Result Policies schema
The YAML file with Scan Result Policies consists of an array of objects matching the Scan Result
Policy schema nested under the `scan_result_policy` key. You can configure a maximum of five
policies under the `scan_result_policy` key.
When you save a new policy, GitLab validates its contents against [this JSON schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/validators/json_schemas/security_orchestration_policy.json).
If you're not familiar with how to read [JSON schemas](https://json-schema.org/),
the following sections and tables provide an alternative.
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `scan_result_policy` | `array` of Scan Result Policy | | List of Scan Result Policies (maximum 5). |
### Scan Result Policy schema
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `name` | `string` | | Name of the policy. |
| `description` (optional) | `string` | | Description of the policy. |
| `enabled` | `boolean` | `true`, `false` | Flag to enable (`true`) or disable (`false`) the policy. |
| `rules` | `array` of rules | | List of rules that the policy applies. |
| `actions` | `array` of actions | | List of actions that the policy enforces. |
### `scan_finding` rule type
This rule enforces the defined actions based on the information provided.
| Field | Type | Possible values | Description |
|------------|------|-----------------|-------------|
| `type` | `string` | `scan_finding` | The rule's type. |
| `branches` | `array` of `string` | `*` or the branch's name | The branch the given policy applies to (supports wildcard). |
| `scanners` | `array` of `string` | `sast`, `secret_detection`, `dependency_scanning`, `container_scanning`, `dast`, `coverage_fuzzing`, `api_fuzzing` | The security scanners for this rule to consider. |
| `vulnerabilities_allowed` | `integer` | Greater than or equal to zero | Number of vulnerabilities allowed before this rule is considered. |
| `severity_levels` | `array` of `string` | `info`, `unknown`, `low`, `medium`, `high`, `critical`| The severity levels for this rule to consider. |
| `vulnerability_states` | `array` of `string` | `newly_detected`, `detected`, `confirmed`, `resolved`, `dismissed` | The vulnerability states for this rule to consider when the target branch is set to the default branch. |
### `require_approval` action type
This action sets an approval rule to be required when conditions are met for at least one rule in
the defined policy.
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `type` | `string` | `require_approval` | The action's type. |
| `approvals_required` | `integer` | Greater than or equal to zero | The number of MR approvals required. |
| `user_approvers` | `array` of `string` | Username of one or more users | The users to consider as approvers. |
| `user_approvers_ids` | `array` of `integer` | ID of one or more users | The IDs of users to consider as approvers. |
| `group_approvers` | `array` of `string` | Path of one or more groups | The groups to consider as approvers. |
| `group_approvers_ids` | `array` of `integer` | ID of one or more groups | The IDs of groups to consider as approvers. |
Requirements and limitations:
- You must add the respective [security scanning tools](../index.md#security-scanning-tools).
Otherwise, Scan Result Policies won't have any effect.
- The maximum number of policies is five.
- Each policy can have a maximum of five rules.
### Example security scan result policies project
You can use this example in a `.gitlab/security-policies/policy.yml`, as described in
[Security policies project](#security-policies-project):
```yaml
---
scan_result_policy:
- name: critical vulnerability CS approvals
description: critical severity level only for container scanning
enabled: true
rules:
- type: scan_finding
branches:
- main
scanners:
- container_scanning
vulnerabilities_allowed: 0
severity_levels:
- critical
vulnerability_states:
- newly_detected
actions:
- type: require_approval
approvals_required: 1
user_approvers:
- adalberto.dare
- name: secondary CS approvals
description: secondary only for container scanning
enabled: true
rules:
- type: scan_finding
branches:
- main
scanners:
- container_scanning
vulnerabilities_allowed: 1
severity_levels:
- low
- unknown
vulnerability_states:
- newly_detected
actions:
- type: require_approval
approvals_required: 1
user_approvers:
- sam.white
```
In this example:
- Every MR that contains new `critical` vulnerabilities identified by container scanning requires
one approval from `adalberto.dare`.
- Every MR that contains more than one new `low` or `unknown` vulnerability identified by container
scanning requires one approval from `sam.white`.
### Example for Scan Result Policy editor
You can use this example in the YAML mode of the [Scan Result Policy editor](#scan-result-policy-editor).
It corresponds to a single object from the previous example:
```yaml
- name: critical vulnerability CS approvals
description: critical severity level only for container scanning
enabled: true
rules:
- type: scan_finding
branches:
- main
scanners:
- container_scanning
vulnerabilities_allowed: 1
severity_levels:
- critical
vulnerability_states:
- newly_detected
actions:
- type: require_approval
approvals_required: 1
user_approvers:
- adalberto.dare
```
## Roadmap
See the [Category Direction page](https://about.gitlab.com/direction/protect/container_network_security/)
for more information on the product direction of Container Network Security.
See the [Category Direction page](https://about.gitlab.com/direction/protect/security_orchestration/)
for more information on the product direction of security policies within GitLab.

View File

@ -0,0 +1,219 @@
---
stage: Protect
group: Container Security
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Scan execution policies **(ULTIMATE)**
Project owners can use scan execution policies to require that security scans run on a specified
schedule or with the project pipeline. Required scans are injected into the CI pipeline as new jobs
with a long, random job name. In the unlikely event of a job name collision, the security policy job
overwrites any pre-existing job in the pipeline.
This feature has some overlap with [compliance framework pipelines](../../project/settings/#compliance-pipeline-configuration),
as we have not [unified the user experience for these two features](https://gitlab.com/groups/gitlab-org/-/epics/7312).
For details on the similarities and differences between these features, see
[Enforce scan execution](../#enforce-scan-execution).
## Scan execution policy editor
NOTE:
Only project Owners have the [permissions](../../permissions.md#project-members-permissions)
to select a Security Policy Project.
When your policy is complete, save it by selecting **Create via merge request**
at the bottom of the editor. You are redirected to the merge request on the project's
configured security policy project. If no security policy project is linked to your
project, one is created automatically. You can also remove existing policies
from the editor interface by selecting **Delete policy**
at the bottom of the editor.
All scan execution policy changes are applied through a background job that runs once every 10
minutes. Allow up to 10 minutes for any policy changes committed to this project to take effect.
![Scan Execution Policy Editor YAML Mode](img/scan_execution_policy_yaml_mode_v14_7.png)
The policy editor currently supports only the YAML mode. Rule mode support is tracked in the [Allow Users to Edit Rule-mode Scan Execution Policies in the Policy UI](https://gitlab.com/groups/gitlab-org/-/epics/5363) epic.
## Scan execution policies schema
The YAML file with scan execution policies consists of an array of objects matching the scan execution
policy schema, nested under the `scan_execution_policy` key. You can configure a maximum of five
policies under the `scan_execution_policy` key.
When you save a new policy, GitLab validates its contents against [this JSON schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/validators/json_schemas/security_orchestration_policy.json).
If you're not familiar with how to read [JSON schemas](https://json-schema.org/),
the following sections and tables provide an alternative.
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `scan_execution_policy` | `array` of scan execution policy | | List of scan execution policies (maximum 5) |
## Scan execution policy schema
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `name` | `string` | | Name of the policy. |
| `description` (optional) | `string` | | Description of the policy. |
| `enabled` | `boolean` | `true`, `false` | Flag to enable (`true`) or disable (`false`) the policy. |
| `rules` | `array` of rules | | List of rules that the policy applies. |
| `actions` | `array` of actions | | List of actions that the policy enforces. |
## `pipeline` rule type
This rule enforces the defined actions whenever the pipeline runs for a selected branch.
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `type` | `string` | `pipeline` | The rule's type. |
| `branches` | `array` of `string` | `*` or the branch's name | The branch the given policy applies to (supports wildcard). |
## `schedule` rule type
This rule enforces the defined actions and schedules a scan on the provided date/time.
| Field | Type | Possible values | Description |
|------------|------|-----------------|-------------|
| `type` | `string` | `schedule` | The rule's type. |
| `branches` | `array` of `string` | `*` or the branch's name | The branch the given policy applies to (supports wildcard). |
| `cadence` | `string` | CRON expression (for example, `0 0 * * *`) | A whitespace-separated string containing five fields that represents the scheduled time. |
| `clusters` | `object` | | The cluster where the given policy enforces running selected scans (only for `container_scanning`/`cluster_image_scanning` scans). The key of the object is the name of the Kubernetes cluster configured for your project in GitLab. In the optionally provided value of the object, you can precisely select Kubernetes resources that are scanned. |
### `cluster` schema
Use this schema to define `clusters` objects in the [`schedule` rule type](#schedule-rule-type).
| Field | Type | Possible values | Description |
|--------------|---------------------|--------------------------|-------------|
| `containers` | `array` of `string` | | The container name that is scanned (only the first value is currently supported). |
| `resources` | `array` of `string` | | The resource name that is scanned (only the first value is currently supported). |
| `namespaces` | `array` of `string` | | The namespace that is scanned (only the first value is currently supported). |
| `kinds` | `array` of `string` | `deployment`/`daemonset` | The resource kind that should be scanned (only the first value is currently supported). |
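For illustration, a `schedule` rule that uses the `clusters` object might look like the following sketch. The cluster, container, resource, and namespace names are placeholders; the full policy example later on this page shows the same structure in context.

```yaml
rules:
- type: schedule
  cadence: "0 2 * * *"        # run daily at 02:00
  clusters:
    production-cluster:       # placeholder: name of a Kubernetes cluster configured for your project
      containers:
      - database
      resources:
      - production-application
      namespaces:
      - production-namespace
      kinds:
      - deployment
```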
## `scan` action type
This action executes the selected `scan` with additional parameters when conditions for at least one
rule in the defined policy are met.
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `scan` | `string` | `dast`, `secret_detection`, `sast`, `container_scanning`, `cluster_image_scanning` | The action's type. |
| `site_profile` | `string` | Name of the selected [DAST site profile](../dast/index.md#site-profile). | The DAST site profile to execute the DAST scan. This field should only be set if `scan` type is `dast`. |
| `scanner_profile` | `string` or `null` | Name of the selected [DAST scanner profile](../dast/index.md#scanner-profile). | The DAST scanner profile to execute the DAST scan. This field should only be set if `scan` type is `dast`.|
| `variables` | `object` | | Set of variables applied and enforced for the selected scan. The object's key is the variable name with a value provided as a string. |
Note the following:
- You must create the [site profile](../dast/index.md#site-profile) and [scanner profile](../dast/index.md#scanner-profile)
with selected names for each project that is assigned to the selected Security Policy Project.
Otherwise, the policy is not applied and a job with an error message is created instead.
- After you associate the site profile and scanner profile by name in the policy, it is not possible
to modify or delete them. If you want to modify them, you must first disable the policy by setting
the `enabled` flag to `false`.
- When configuring policies with a scheduled DAST scan, the author of the commit in the security
policy project's repository must have access to the scanner and site profiles. Otherwise, the scan
is not scheduled successfully.
- For a secret detection scan, only rules with the default ruleset are supported. [Custom rulesets](../secret_detection/index.md#custom-rulesets)
are not supported.
- A secret detection scan runs in `normal` mode when executed as part of a pipeline, and in
[`historic`](../secret_detection/index.md#full-history-secret-detection)
mode when executed as part of a scheduled scan.
- Container scanning and cluster image scanning scans configured for the `pipeline` rule type ignore the cluster defined in the `clusters` object.
They use the predefined CI/CD variables defined for your project. Cluster selection with the `clusters` object is supported for the `schedule` rule type.
The cluster whose name is provided in the `clusters` object must be created and configured for the project. To successfully perform `container_scanning`/`cluster_image_scanning` scans for the cluster, you must follow the instructions for the [Cluster Image Scanning feature](../cluster_image_scanning/index.md#prerequisites).
- The SAST scan uses the default template and runs in a [child pipeline](../../../ci/pipelines/parent_child_pipelines.md).
## Example security policies project
You can use this example in a `.gitlab/security-policies/policy.yml`, as described in
[Security policies project](index.md#security-policies-project).
```yaml
---
scan_execution_policy:
- name: Enforce DAST in every release pipeline
description: This policy enforces pipeline configuration to have a job with DAST scan for release branches
enabled: true
rules:
- type: pipeline
branches:
- release/*
actions:
- scan: dast
scanner_profile: Scanner Profile A
site_profile: Site Profile B
- name: Enforce DAST and secret detection scans every 10 minutes
description: This policy enforces DAST and secret detection scans to run every 10 minutes
enabled: true
rules:
- type: schedule
branches:
- main
cadence: "*/10 * * * *"
actions:
- scan: dast
scanner_profile: Scanner Profile C
site_profile: Site Profile D
- scan: secret_detection
- name: Enforce Secret Detection and Container Scanning in every default branch pipeline
description: This policy enforces pipeline configuration to have a job with Secret Detection and Container Scanning scans for the default branch
enabled: true
rules:
- type: pipeline
branches:
- main
actions:
- scan: secret_detection
- scan: sast
variables:
SAST_EXCLUDED_ANALYZERS: brakeman
- scan: container_scanning
- name: Enforce Cluster Image Scanning on production-cluster every 24h
description: This policy enforces Cluster Image Scanning scan to run every 24 hours
enabled: true
rules:
- type: schedule
cadence: "15 3 * * *"
clusters:
production-cluster:
containers:
- database
resources:
- production-application
namespaces:
- production-namespace
kinds:
- deployment
actions:
- scan: cluster_image_scanning
```
In this example:
- For every pipeline executed on branches that match the `release/*` wildcard (for example, branch
`release/v1.2.1`), DAST scans run with `Scanner Profile A` and `Site Profile B`.
- DAST and secret detection scans run every 10 minutes. The DAST scan runs with `Scanner Profile C`
and `Site Profile D`.
- Secret detection, container scanning, and SAST scans run for every pipeline executed on the `main`
branch. The SAST scan runs with the `SAST_EXCLUDED_ANALYZERS` variable set to `"brakeman"`.
- The Cluster Image Scanning scan runs every 24 hours. The scan runs on the `production-cluster` cluster and fetches vulnerabilities
from the container named `database` configured for the deployment named `production-application` in the `production-namespace` namespace.
## Example for scan execution policy editor
You can use this example in the YAML mode of the [scan execution policy editor](#scan-execution-policy-editor).
It corresponds to a single object from the previous example.
```yaml
name: Enforce Secret Detection and Container Scanning in every default branch pipeline
description: This policy enforces pipeline configuration to have a job with Secret Detection and Container Scanning scans for the default branch
enabled: true
rules:
- type: pipeline
branches:
- main
actions:
- scan: secret_detection
- scan: container_scanning
```

View File

@ -0,0 +1,175 @@
---
stage: Protect
group: Container Security
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Scan result policies **(ULTIMATE)**
You can use scan result policies to take action based on scan results. For example, one type of scan
result policy is a security approval policy that allows approval to be required based on the
findings of one or more security scan jobs. Scan result policies are evaluated after a CI scanning
job is fully executed.
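As a minimal sketch of such a security approval policy (the scanner, severity, and approver values are placeholders; the schema and a complete example follow in the sections below):

```yaml
scan_result_policy:
- name: Require approval for new critical findings    # placeholder policy
  description: Example security approval policy
  enabled: true
  rules:
  - type: scan_finding
    branches:
    - main
    scanners:
    - dependency_scanning
    vulnerabilities_allowed: 0
    severity_levels:
    - critical
    vulnerability_states:
    - newly_detected
  actions:
  - type: require_approval
    approvals_required: 1
    user_approvers:
    - security.reviewer    # placeholder username
```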
## Scan result policy editor
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/77814) in GitLab 14.8 with a flag named `scan_result_policy`. Disabled by default.
NOTE:
Only project Owners have the [permissions](../../permissions.md#project-members-permissions)
to select a Security Policy Project.
When your policy is complete, save it by selecting **Create merge request** at the bottom of the
editor. This redirects you to the merge request on the project's configured security policy project.
If no security policy project is linked to your project, GitLab creates one for you.
You can also remove existing policies from the editor interface by selecting **Delete policy** at
the bottom of the editor.
All scan result policy changes are applied through a background job that runs once every 10 minutes.
Allow up to 10 minutes for any policy changes committed to this project to take effect.
The policy editor only supports YAML mode. To follow work on Rule mode, see the epic
[Allow Users to Edit Rule-mode scan result policies in the Policy UI](https://gitlab.com/groups/gitlab-org/-/epics/5363).
![Scan Result Policy Editor YAML Mode](img/scan_result_policy_yaml_mode_v14_6.png)
## Scan result policies schema
The YAML file with scan result policies consists of an array of objects matching the scan result
policy schema nested under the `scan_result_policy` key. You can configure a maximum of five
policies under the `scan_result_policy` key.
When you save a new policy, GitLab validates its contents against [this JSON schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/validators/json_schemas/security_orchestration_policy.json).
If you're not familiar with how to read [JSON schemas](https://json-schema.org/),
the following sections and tables provide an alternative.
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `scan_result_policy` | `array` of Scan Result Policy | | List of scan result policies (maximum 5). |
## Scan result policy schema
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `name` | `string` | | Name of the policy. |
| `description` (optional) | `string` | | Description of the policy. |
| `enabled` | `boolean` | `true`, `false` | Flag to enable (`true`) or disable (`false`) the policy. |
| `rules` | `array` of rules | | List of rules that the policy applies. |
| `actions` | `array` of actions | | List of actions that the policy enforces. |
## `scan_finding` rule type
This rule enforces the defined actions based on the information provided.
| Field | Type | Possible values | Description |
|------------|------|-----------------|-------------|
| `type` | `string` | `scan_finding` | The rule's type. |
| `branches` | `array` of `string` | `*` or the branch's name | The branch the given policy applies to (supports wildcard). |
| `scanners` | `array` of `string` | `sast`, `secret_detection`, `dependency_scanning`, `container_scanning`, `dast`, `coverage_fuzzing`, `api_fuzzing` | The security scanners for this rule to consider. |
| `vulnerabilities_allowed` | `integer` | Greater than or equal to zero | Number of vulnerabilities allowed before this rule is considered. |
| `severity_levels` | `array` of `string` | `info`, `unknown`, `low`, `medium`, `high`, `critical`| The severity levels for this rule to consider. |
| `vulnerability_states` | `array` of `string` | `newly_detected`, `detected`, `confirmed`, `resolved`, `dismissed` | The vulnerability states for this rule to consider when the target branch is set to the default branch. |
## `require_approval` action type
This action sets an approval rule to be required when conditions are met for at least one rule in
the defined policy.
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `type` | `string` | `require_approval` | The action's type. |
| `approvals_required` | `integer` | Greater than or equal to zero | The number of MR approvals required. |
| `user_approvers` | `array` of `string` | Username of one or more users | The users to consider as approvers. |
| `user_approvers_ids` | `array` of `integer` | ID of one or more users | The IDs of users to consider as approvers. |
| `group_approvers` | `array` of `string` | Path of one or more groups | The groups to consider as approvers. |
| `group_approvers_ids` | `array` of `integer` | ID of one or more groups | The IDs of groups to consider as approvers. |
Requirements and limitations:
- You must add the respective [security scanning tools](../index.md#security-scanning-tools).
Otherwise, scan result policies won't have any effect.
- The maximum number of policies is five.
- Each policy can have a maximum of five rules.
## Example security scan result policies project
You can use this example in a `.gitlab/security-policies/policy.yml`, as described in
[Security policies project](index.md#security-policies-project):
```yaml
---
scan_result_policy:
- name: critical vulnerability CS approvals
description: critical severity level only for container scanning
enabled: true
rules:
- type: scan_finding
branches:
- main
scanners:
- container_scanning
vulnerabilities_allowed: 0
severity_levels:
- critical
vulnerability_states:
- newly_detected
actions:
- type: require_approval
approvals_required: 1
user_approvers:
- adalberto.dare
- name: secondary CS approvals
description: secondary only for container scanning
enabled: true
rules:
- type: scan_finding
branches:
- main
scanners:
- container_scanning
vulnerabilities_allowed: 1
severity_levels:
- low
- unknown
vulnerability_states:
- newly_detected
actions:
- type: require_approval
approvals_required: 1
user_approvers:
- sam.white
```
In this example:
- Every MR that contains new `critical` vulnerabilities identified by container scanning requires
one approval from `adalberto.dare`.
- Every MR that contains more than one new `low` or `unknown` vulnerability identified by container
scanning requires one approval from `sam.white`.
## Example for Scan Result Policy editor
You can use this example in the YAML mode of the [Scan Result Policy editor](#scan-result-policy-editor).
It corresponds to a single object from the previous example:
```yaml
- name: critical vulnerability CS approvals
description: critical severity level only for container scanning
enabled: true
rules:
- type: scan_finding
branches:
- main
scanners:
- container_scanning
vulnerabilities_allowed: 1
severity_levels:
- critical
vulnerability_states:
- newly_detected
actions:
- type: require_approval
approvals_required: 1
user_approvers:
- adalberto.dare
```

View File

@ -46,6 +46,7 @@ integration in GitLab.
1. Select **Atlassian Bamboo**.
1. Ensure the **Active** checkbox is selected.
1. Enter the base URL of your Bamboo server. For example, `https://bamboo.example.com`.
1. Optional. Clear the **Enable SSL verification** checkbox to disable [SSL verification](overview.md#ssl-verification).
1. Enter the [build key](#identify-the-bamboo-build-plan-build-key) from your Bamboo
build plan.
1. If necessary, enter a username and password for a Bamboo user that has

View File

@ -82,6 +82,15 @@ instance configuration or provide custom settings.
Read more about [Project integration management](../../admin_area/settings/project_integration_management.md).
## SSL verification
By default, the SSL certificate for outgoing HTTP requests is verified based on
an internal list of Certificate Authorities. This means the certificate cannot
be self-signed.
You can turn off SSL verification in the configuration settings for [webhooks](webhooks.md#configure-a-webhook)
and some integrations.
## Troubleshooting integrations
Some integrations use service hooks to integrate with external applications. To confirm which ones use service hooks, see the [integrations listing](#integrations-listing) above. Learn more about [troubleshooting service hooks](webhooks.md#troubleshoot-webhooks).

View File

@ -57,7 +57,7 @@ You can configure a webhook for a group or a project.
The URL must be percent-encoded if it contains one or more special characters.
1. In **Secret token**, enter the [secret token](#validate-payloads-by-using-a-secret-token) to validate payloads.
1. In the **Trigger** section, select the [events](webhook_events.md) to trigger the webhook.
1. Optional. Clear the **Enable SSL verification** checkbox to disable [SSL verification](#verify-an-ssl-certificate).
1. Optional. Clear the **Enable SSL verification** checkbox to disable [SSL verification](overview.md#ssl-verification).
1. Select **Add webhook**.
## Test a webhook
@ -123,15 +123,6 @@ The token is sent with the hook request in the
`X-Gitlab-Token` HTTP header. Your webhook endpoint can check the token to verify
that the request is legitimate.
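For illustration only, a receiving endpoint outside GitLab might validate that header as in this minimal Rack sketch (the secret's environment variable name is an assumption, not something GitLab defines):

```ruby
# config.ru of a hypothetical webhook receiver (not GitLab code).
# GitLab sends the configured secret in the X-Gitlab-Token request header.
require 'rack/utils'

EXPECTED_TOKEN = ENV.fetch('WEBHOOK_SECRET_TOKEN') # assumed variable name

run lambda { |env|
  supplied = env['HTTP_X_GITLAB_TOKEN'].to_s
  if Rack::Utils.secure_compare(EXPECTED_TOKEN, supplied)
    [200, { 'Content-Type' => 'text/plain' }, ['ok']]
  else
    [401, { 'Content-Type' => 'text/plain' }, ['invalid token']]
  end
}
```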
## Verify an SSL certificate
By default, the SSL certificate of the webhook endpoint is verified based on
an internal list of Certificate Authorities. This means the certificate cannot
be self-signed.
You can turn off SSL verification in the [webhook settings](#configure-a-webhook)
in your GitLab projects.
## Filter push events by branch
Push events can be filtered by branch using a branch name or wildcard pattern

View File

@ -224,9 +224,9 @@ branches by using the GitLab web interface:
1. Next to the branch you want to delete, select the **Delete** button (**{remove}**).
1. On the confirmation dialog, type the branch name and select **Delete protected branch**.
You can delete a protected branch from the UI only.
This prevents you from accidentally deleting a branch
from the command line or from a Git client application.
Protected branches can be deleted only by using GitLab, either from the UI or the API.
This prevents accidentally deleting a branch through local Git commands or
third-party Git clients.
<!-- ## Troubleshooting

View File

@ -293,6 +293,7 @@ module API
optional :with_merge_requests_enabled, type: Boolean, default: false, desc: 'Limit by enabled merge requests feature'
optional :with_shared, type: Boolean, default: true, desc: 'Include projects shared to this group'
optional :include_subgroups, type: Boolean, default: false, desc: 'Includes projects in subgroups of this group'
optional :include_ancestor_groups, type: Boolean, default: false, desc: 'Includes projects in ancestors of this group'
optional :min_access_level, type: Integer, values: Gitlab::Access.all_values, desc: 'Limit by minimum access level of authenticated user on projects'
use :pagination
@ -302,7 +303,8 @@ module API
get ":id/projects" do
finder_options = {
only_owned: !params[:with_shared],
include_subgroups: params[:include_subgroups]
include_subgroups: params[:include_subgroups],
include_ancestor_groups: params[:include_ancestor_groups]
}
projects = find_group_projects(params, finder_options)
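For context, a rough sketch of how the new flag is consumed, assuming the setup used in the finder specs further down (variable names are illustrative):

```ruby
# REST usage: GET /groups/:id/projects?include_ancestor_groups=true
#
# Internally, the endpoint forwards the flag as a finder option, roughly:
options = { only_owned: true, include_subgroups: false, include_ancestor_groups: true }

GroupProjectsFinder
  .new(group: group, current_user: current_user, params: {}, options: options)
  .execute # => projects of `group` plus projects of its ancestor groups visible to the user
```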

View File

@ -20,6 +20,8 @@ module Gitlab
!thread.nil?
end
# Possible options:
# - synchronous [Boolean] if true, turns `start` into a blocking call
def initialize(**options)
@synchronous = options[:synchronous]
@mutex = Mutex.new
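As a rough illustration of the new option (`SomeExporter` is a stand-in, not a real class; the real callers are the exporter changes elsewhere in this commit):

```ruby
# SomeExporter stands in for a Gitlab::Daemon subclass, such as the metrics exporters.

# Default behaviour: start returns once the worker thread has been spawned.
SomeExporter.instance.start

# With synchronous: true, start blocks the calling thread until the daemon stops.
SomeExporter.instance(synchronous: true).start
```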

View File

@ -9,6 +9,10 @@ module Gitlab
class BaseExporter < Daemon
attr_reader :server
# @param settings [Hash] SettingsLogic hash containing the `*_exporter` config
# @param log_enabled [Boolean] whether to log HTTP requests
# @param log_file [String] path to where the server log should be located
# @param gc_requests [Boolean] whether to run a major GC after each scraper request
def initialize(settings, log_enabled:, log_file:, gc_requests: false, **options)
super(**options)

View File

@ -0,0 +1,45 @@
# frozen_string_literal: true
module Gitlab
module Middleware
class MemoryReport
def initialize(app)
@app = app
end
def call(env)
request = ActionDispatch::Request.new(env)
return @app.call(env) unless rendering_memory_profiler?(request)
begin
require 'memory_profiler'
report = MemoryProfiler.report do
@app.call(env)
end
report = report_to_string(report)
headers = { 'Content-Type' => 'text/plain' }
[200, headers, [report]]
rescue StandardError => e
::Gitlab::ErrorTracking.track_exception(e)
[500, { 'Content-Type' => 'text/plain' }, ["Could not generate memory report: #{e}"]]
end
end
private
def rendering_memory_profiler?(request)
Rails.env.development? && request.params['performance_bar'] == 'memory'
end
def report_to_string(report)
io = StringIO.new
report.pretty_print(io, detailed_report: true, scale_bytes: true, normalize_paths: true)
io.string
end
end
end
end
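As a usage note, the middleware only produces a report in development and only when the request carries `performance_bar=memory`. A minimal sketch of how it could be mounted, assuming a development-only registration (the actual commit may wire it up elsewhere, for example alongside the existing performance bar setup):

```ruby
# Hypothetical registration; placement is an assumption, not part of this commit's diff.
# config/environments/development.rb
Rails.application.configure do
  config.middleware.use(::Gitlab::Middleware::MemoryReport)
end

# Then request any page with ?performance_bar=memory to receive a plain-text
# MemoryProfiler report instead of the usual response body.
```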

View File

@ -20515,6 +20515,9 @@ msgstr ""
msgid "JiraConnect|You can now close this window and return to Jira."
msgstr ""
msgid "JiraConnect|You don't have permission to create branches for this project. Select a different project or contact the project owner for access. %{linkStart}Learn more.%{linkEnd}"
msgstr ""
msgid "JiraRequest|A connection error occurred while connecting to Jira. Try your request again."
msgstr ""
@ -26199,6 +26202,9 @@ msgstr ""
msgid "PerformanceBar|Memory"
msgstr ""
msgid "PerformanceBar|Memory report"
msgstr ""
msgid "PerformanceBar|Redis calls"
msgstr ""
@ -40167,6 +40173,9 @@ msgstr ""
msgid "VulnerabilityManagement|An unverified non-confirmed finding"
msgstr ""
msgid "VulnerabilityManagement|At least one identifier is required"
msgstr ""
msgid "VulnerabilityManagement|Change status"
msgstr ""
@ -40182,6 +40191,9 @@ msgstr ""
msgid "VulnerabilityManagement|Manually add a vulnerability entry into the vulnerability report."
msgstr ""
msgid "VulnerabilityManagement|Name is a required field"
msgstr ""
msgid "VulnerabilityManagement|Needs triage"
msgstr ""
@ -40197,6 +40209,12 @@ msgstr ""
msgid "VulnerabilityManagement|Select a method"
msgstr ""
msgid "VulnerabilityManagement|Severity is a required field"
msgstr ""
msgid "VulnerabilityManagement|Something went wrong while creating vulnerability"
msgstr ""
msgid "VulnerabilityManagement|Something went wrong while trying to delete the comment. Please try again later."
msgstr ""
@ -40221,6 +40239,12 @@ msgstr ""
msgid "VulnerabilityManagement|Something went wrong, could not update vulnerability state."
msgstr ""
msgid "VulnerabilityManagement|Status is a required field"
msgstr ""
msgid "VulnerabilityManagement|Submit vulnerability"
msgstr ""
msgid "VulnerabilityManagement|Summary, detailed description, steps to reproduce, etc."
msgstr ""

View File

@ -23,6 +23,7 @@ require_relative '../lib/gitlab/metrics/system'
require_relative '../lib/gitlab/metrics/samplers/base_sampler'
require_relative '../lib/gitlab/metrics/samplers/ruby_sampler'
require_relative '../lib/gitlab/metrics/exporter/base_exporter'
require_relative '../lib/gitlab/metrics/exporter/web_exporter'
require_relative '../lib/gitlab/metrics/exporter/sidekiq_exporter'
require_relative '../lib/gitlab/metrics/exporter/metrics_middleware'
require_relative '../lib/gitlab/metrics/exporter/health_checks_middleware'

View File

@ -7,7 +7,7 @@ require_relative 'dependencies'
class MetricsServer # rubocop:disable Gitlab/NamespacedClass
class << self
def spawn(target, metrics_dir:, wipe_metrics_dir: false, trapped_signals: [])
raise "The only valid target is 'sidekiq' currently" unless target == 'sidekiq'
raise "Target must be one of [puma,sidekiq]" unless %w(puma sidekiq).include?(target)
pid = Process.fork
@ -52,11 +52,18 @@ class MetricsServer # rubocop:disable Gitlab/NamespacedClass
# Warming up ensures that these files exist prior to the exporter starting up.
Gitlab::Metrics::Samplers::RubySampler.initialize_instance(prefix: name, warmup: true).start
exporter_class = "Gitlab::Metrics::Exporter::#{@target.camelize}Exporter".constantize
settings = Settings.new(Settings.monitoring[name])
server = exporter_class.instance(settings, gc_requests: true, synchronous: true)
default_opts = { gc_requests: true, synchronous: true }
exporter =
case @target
when 'puma'
Gitlab::Metrics::Exporter::WebExporter.instance(**default_opts)
else
exporter_class = "Gitlab::Metrics::Exporter::#{@target.camelize}Exporter".constantize
settings = Settings.new(Settings.monitoring[name])
exporter_class.instance(settings, **default_opts)
end
server.start
exporter.start
end
def name
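A short usage sketch of the extended entry point (the metrics directory path is illustrative; environment setup and signal handling are omitted):

```ruby
# Forks and detaches a metrics server process for each supported target.
MetricsServer.spawn('puma', metrics_dir: '/tmp/prometheus_multiproc_dir')
MetricsServer.spawn('sidekiq', metrics_dir: '/tmp/prometheus_multiproc_dir')

# Any other target is rejected up front:
MetricsServer.spawn('unsupported', metrics_dir: '/tmp/prometheus_multiproc_dir')
# => RuntimeError: Target must be one of [puma,sidekiq]
```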

View File

@ -12,6 +12,11 @@ RSpec.describe 'bin/metrics-server', :aggregate_failures do
{
'test' => {
'monitoring' => {
'web_exporter' => {
'address' => 'localhost',
'enabled' => true,
'port' => 3807
},
'sidekiq_exporter' => {
'address' => 'localhost',
'enabled' => true,
@ -22,56 +27,58 @@ RSpec.describe 'bin/metrics-server', :aggregate_failures do
}
end
context 'with a running server' do
let(:metrics_dir) { Dir.mktmpdir }
%w(puma sidekiq).each do |target|
context "with a running server targeting #{target}" do
let(:metrics_dir) { Dir.mktmpdir }
before do
# We need to send a request to localhost
WebMock.allow_net_connect!
before do
# We need to send a request to localhost
WebMock.allow_net_connect!
config_file.write(YAML.dump(config))
config_file.close
config_file.write(YAML.dump(config))
config_file.close
env = {
'GITLAB_CONFIG' => config_file.path,
'METRICS_SERVER_TARGET' => 'sidekiq',
'WIPE_METRICS_DIR' => '1',
'prometheus_multiproc_dir' => metrics_dir
}
@pid = Process.spawn(env, 'bin/metrics-server', pgroup: true)
end
after do
webmock_enable!
if @pid
pgrp = Process.getpgid(@pid)
Timeout.timeout(10) do
Process.kill('TERM', -pgrp)
Process.waitpid(@pid)
end
expect(Gitlab::ProcessManagement.process_alive?(@pid)).to be(false)
env = {
'GITLAB_CONFIG' => config_file.path,
'METRICS_SERVER_TARGET' => target,
'WIPE_METRICS_DIR' => '1',
'prometheus_multiproc_dir' => metrics_dir
}
@pid = Process.spawn(env, 'bin/metrics-server', pgroup: true)
end
rescue Errno::ESRCH => _
# 'No such process' means the process died before
ensure
config_file.unlink
FileUtils.rm_rf(metrics_dir, secure: true)
end
it 'serves /metrics endpoint' do
expect do
Timeout.timeout(10) do
http_ok = false
until http_ok
sleep 1
response = Gitlab::HTTP.try_get("http://localhost:3807/metrics", allow_local_requests: true)
http_ok = response&.success?
after do
webmock_enable!
if @pid
pgrp = Process.getpgid(@pid)
Timeout.timeout(10) do
Process.kill('TERM', -pgrp)
Process.waitpid(@pid)
end
expect(Gitlab::ProcessManagement.process_alive?(@pid)).to be(false)
end
end.not_to raise_error
rescue Errno::ESRCH => _
# 'No such process' means the process died before
ensure
config_file.unlink
FileUtils.rm_rf(metrics_dir, secure: true)
end
it 'serves /metrics endpoint' do
expect do
Timeout.timeout(10) do
http_ok = false
until http_ok
sleep 1
response = Gitlab::HTTP.try_get("http://localhost:3807/metrics", allow_local_requests: true)
http_ok = response&.success?
end
end
end.not_to raise_error
end
end
end
end

View File

@ -25,8 +25,9 @@ RSpec.describe 'Create GitLab branches from Jira', :js do
it 'select project and branch and submit the form' do
visit new_jira_connect_branch_path(issue_key: 'ACME-123', issue_summary: 'My issue !@#$% title')
expect(page).to have_field('Branch name', with: 'ACME-123-my-issue-title')
expect(page).to have_button('Create branch', disabled: true)
# initially, branch field should be hidden.
expect(page).not_to have_field('Branch name')
# Select project1
@ -44,6 +45,7 @@ RSpec.describe 'Create GitLab branches from Jira', :js do
click_on 'Alice / foo'
end
expect(page).to have_field('Branch name', with: 'ACME-123-my-issue-title')
expect(page).to have_button('Create branch', disabled: false)
click_on 'master'

View File

@ -4,6 +4,7 @@ require 'spec_helper'
RSpec.describe 'Pipelines', :js do
include ProjectForksHelper
include Spec::Support::Helpers::ModalHelpers
let(:project) { create(:project) }
@ -436,7 +437,9 @@ RSpec.describe 'Pipelines', :js do
context 'when user played a delayed job immediately' do
before do
find('[data-testid="pipelines-manual-actions-dropdown"]').click
page.accept_confirm { click_button('delayed job 1') }
accept_gl_confirm do
click_button 'delayed job 1'
end
wait_for_requests
end

View File

@ -9,13 +9,29 @@ RSpec.describe GroupProjectsFinder do
describe 'with a group member current user' do
before do
group.add_maintainer(current_user)
root_group.add_maintainer(current_user)
end
context "only shared" do
let(:options) { { only_shared: true } }
it { is_expected.to match_array([shared_project_3, shared_project_2, shared_project_1]) }
context 'with ancestor groups projects' do
before do
options[:include_ancestor_groups] = true
end
it { is_expected.to match_array([shared_project_3, shared_project_2, shared_project_1]) }
end
context 'with subgroups projects' do
before do
options[:include_subgroups] = true
end
it { is_expected.to match_array([shared_project_3, shared_project_2, shared_project_1]) }
end
end
context "only owned" do
@ -29,9 +45,46 @@ RSpec.describe GroupProjectsFinder do
it { is_expected.to match_array([private_project, public_project, subgroup_project, subgroup_private_project]) }
end
context 'without subgroups projects' do
context 'with ancestor group projects' do
before do
options[:include_ancestor_groups] = true
end
it { is_expected.to match_array([private_project, public_project, root_group_public_project, root_group_private_project, root_group_private_project_2]) }
end
context 'with ancestor groups and subgroups projects' do
before do
options[:include_ancestor_groups] = true
options[:include_subgroups] = true
end
it { is_expected.to match_array([private_project, public_project, root_group_public_project, root_group_private_project, root_group_private_project_2, subgroup_private_project, subgroup_project]) }
end
context 'without subgroups and ancestor group projects' do
it { is_expected.to match_array([private_project, public_project]) }
end
context 'when user is member only of a subgroup' do
let(:subgroup_member) { create(:user) }
context 'with ancestor groups and subgroups projects' do
before do
group.add_maintainer(subgroup_member)
options[:include_ancestor_groups] = true
options[:include_subgroups] = true
end
it 'does not return parent group projects' do
finder = described_class.new(group: group, current_user: subgroup_member, params: params, options: options)
projects = finder.execute
expect(projects).to match_array([private_project, public_project, subgroup_project, subgroup_private_project, root_group_public_project])
end
end
end
end
context "all" do
@ -90,6 +143,7 @@ RSpec.describe GroupProjectsFinder do
before do
private_project.add_maintainer(current_user)
subgroup_private_project.add_maintainer(current_user)
root_group_private_project.add_maintainer(current_user)
end
context 'with subgroups projects' do
@ -100,6 +154,23 @@ RSpec.describe GroupProjectsFinder do
it { is_expected.to match_array([private_project, public_project, subgroup_project, subgroup_private_project]) }
end
context 'with ancestor groups projects' do
before do
options[:include_ancestor_groups] = true
end
it { is_expected.to match_array([private_project, public_project, root_group_public_project, root_group_private_project]) }
end
context 'with ancestor groups and subgroups projects' do
before do
options[:include_ancestor_groups] = true
options[:include_subgroups] = true
end
it { is_expected.to match_array([private_project, public_project, root_group_private_project, root_group_public_project, subgroup_private_project, subgroup_project]) }
end
context 'without subgroups projects' do
it { is_expected.to match_array([private_project, public_project]) }
end
@ -118,6 +189,23 @@ RSpec.describe GroupProjectsFinder do
it { is_expected.to match_array([public_project, subgroup_project]) }
end
context 'with ancestor groups projects' do
before do
options[:include_ancestor_groups] = true
end
it { is_expected.to match_array([public_project, root_group_public_project]) }
end
context 'with ancestor groups and subgroups projects' do
before do
options[:include_subgroups] = true
options[:include_ancestor_groups] = true
end
it { is_expected.to match_array([public_project, root_group_public_project, subgroup_project]) }
end
context 'without subgroups projects' do
it { is_expected.to eq([public_project]) }
end

View File

@ -735,7 +735,6 @@ describe('CE IssuesListApp component', () => {
data: JSON.stringify({
move_before_id: getIdFromGraphQLId(moveBeforeId),
move_after_id: getIdFromGraphQLId(moveAfterId),
group_full_path: isProject ? undefined : defaultProvide.fullPath,
}),
});
});

View File

@ -1,4 +1,4 @@
import { GlAlert, GlForm, GlFormInput, GlButton } from '@gitlab/ui';
import { GlAlert, GlForm, GlFormInput, GlButton, GlSprintf } from '@gitlab/ui';
import { shallowMount } from '@vue/test-utils';
import Vue from 'vue';
import VueApollo from 'vue-apollo';
@ -10,17 +10,12 @@ import SourceBranchDropdown from '~/jira_connect/branches/components/source_bran
import {
CREATE_BRANCH_ERROR_GENERIC,
CREATE_BRANCH_ERROR_WITH_CONTEXT,
I18N_NEW_BRANCH_PERMISSION_ALERT,
} from '~/jira_connect/branches/constants';
import createBranchMutation from '~/jira_connect/branches/graphql/mutations/create_branch.mutation.graphql';
import { mockProjects } from '../mock_data';
const mockProject = {
id: 'test',
fullPath: 'test-path',
repository: {
branchNames: ['main', 'f-test', 'release'],
rootRef: 'main',
},
};
const mockProject = mockProjects[0];
const mockCreateBranchMutationResponse = {
data: {
createBranch: {
@ -52,14 +47,15 @@ describe('NewBranchForm', () => {
const findSourceBranchDropdown = () => wrapper.findComponent(SourceBranchDropdown);
const findProjectDropdown = () => wrapper.findComponent(ProjectDropdown);
const findAlert = () => wrapper.findComponent(GlAlert);
const findAlertSprintf = () => findAlert().findComponent(GlSprintf);
const findForm = () => wrapper.findComponent(GlForm);
const findInput = () => wrapper.findComponent(GlFormInput);
const findButton = () => wrapper.findComponent(GlButton);
const completeForm = async () => {
await findInput().vm.$emit('input', 'cool-branch-name');
await findProjectDropdown().vm.$emit('change', mockProject);
await findSourceBranchDropdown().vm.$emit('change', 'source-branch');
await findInput().vm.$emit('input', 'cool-branch-name');
};
function createMockApolloProvider({
@ -87,27 +83,107 @@ describe('NewBranchForm', () => {
});
describe('when selecting items from dropdowns', () => {
describe('when a project is selected', () => {
it('sets the `selectedProject` prop for ProjectDropdown and SourceBranchDropdown', async () => {
describe('when no project selected', () => {
beforeEach(() => {
createComponent();
});
const projectDropdown = findProjectDropdown();
await projectDropdown.vm.$emit('change', mockProject);
it('hides source branch selection and branch name input', () => {
expect(findSourceBranchDropdown().exists()).toBe(false);
expect(findInput().exists()).toBe(false);
});
expect(projectDropdown.props('selectedProject')).toEqual(mockProject);
expect(findSourceBranchDropdown().props('selectedProject')).toEqual(mockProject);
it('disables the submit button', () => {
expect(findButton().props('disabled')).toBe(true);
});
});
describe('when a source branch is selected', () => {
it('sets the `selectedBranchName` prop for SourceBranchDropdown', async () => {
describe('when a valid project is selected', () => {
describe("when a source branch isn't selected", () => {
beforeEach(async () => {
createComponent();
await findProjectDropdown().vm.$emit('change', mockProject);
});
it('sets the `selectedProject` prop for ProjectDropdown and SourceBranchDropdown', () => {
expect(findProjectDropdown().props('selectedProject')).toEqual(mockProject);
expect(findSourceBranchDropdown().exists()).toBe(true);
expect(findSourceBranchDropdown().props('selectedProject')).toEqual(mockProject);
});
it('disables the submit button', () => {
expect(findButton().props('disabled')).toBe(true);
});
it('renders branch input field', () => {
expect(findInput().exists()).toBe(true);
});
});
describe('when `initialBranchName` is provided', () => {
it('sets value of branch name input to `initialBranchName` by default', async () => {
const mockInitialBranchName = 'ap1-test-branch-name';
createComponent({ provide: { initialBranchName: mockInitialBranchName } });
await findProjectDropdown().vm.$emit('change', mockProject);
expect(findInput().attributes('value')).toBe(mockInitialBranchName);
});
});
describe('when a source branch is selected', () => {
it('sets the `selectedBranchName` prop for SourceBranchDropdown', async () => {
createComponent();
await completeForm();
const mockBranchName = 'main';
const sourceBranchDropdown = findSourceBranchDropdown();
await sourceBranchDropdown.vm.$emit('change', mockBranchName);
expect(sourceBranchDropdown.props('selectedBranchName')).toBe(mockBranchName);
});
describe.each`
branchName | submitButtonDisabled
${undefined} | ${true}
${''} | ${true}
${' '} | ${true}
${'test-branch'} | ${false}
`('when branch name is $branchName', ({ branchName, submitButtonDisabled }) => {
it(`sets submit button 'disabled' prop to ${submitButtonDisabled}`, async () => {
createComponent();
await completeForm();
await findInput().vm.$emit('input', branchName);
expect(findButton().props('disabled')).toBe(submitButtonDisabled);
});
});
});
});
describe("when user doesn't have push permissions for the selected project", () => {
beforeEach(async () => {
createComponent();
const mockBranchName = 'main';
const sourceBranchDropdown = findSourceBranchDropdown();
await sourceBranchDropdown.vm.$emit('change', mockBranchName);
const projectDropdown = findProjectDropdown();
await projectDropdown.vm.$emit('change', {
...mockProject,
userPermissions: { pushCode: false },
});
});
expect(sourceBranchDropdown.props('selectedBranchName')).toBe(mockBranchName);
it('displays an alert', () => {
const alert = findAlert();
expect(alert.exists()).toBe(true);
expect(findAlertSprintf().attributes('message')).toBe(I18N_NEW_BRANCH_PERMISSION_ALERT);
expect(alert.props('variant')).toBe('warning');
expect(alert.props('dismissible')).toBe(false);
});
it('hides source branch selection and branch name input', () => {
expect(findSourceBranchDropdown().exists()).toBe(false);
expect(findInput().exists()).toBe(false);
});
});
});
@ -179,7 +255,7 @@ describe('NewBranchForm', () => {
it('displays an alert', () => {
const alert = findAlert();
expect(alert.exists()).toBe(true);
expect(alert.text()).toBe(alertText);
expect(findAlertSprintf().attributes('message')).toBe(alertText);
expect(alert.props()).toMatchObject({ title: alertTitle, variant: 'danger' });
});
@ -190,15 +266,6 @@ describe('NewBranchForm', () => {
});
});
describe('when `initialBranchName` is specified', () => {
it('sets value of branch name input to `initialBranchName` by default', () => {
const mockInitialBranchName = 'ap1-test-branch-name';
createComponent({ provide: { initialBranchName: mockInitialBranchName } });
expect(findInput().attributes('value')).toBe(mockInitialBranchName);
});
});
describe('error handling', () => {
describe.each`
component | componentName
@ -209,13 +276,15 @@ describe('NewBranchForm', () => {
beforeEach(async () => {
createComponent();
await completeForm();
await wrapper.findComponent(component).vm.$emit('error', { message: mockErrorMessage });
});
it('displays an alert', () => {
const alert = findAlert();
expect(alert.exists()).toBe(true);
expect(alert.text()).toBe(mockErrorMessage);
expect(findAlertSprintf().attributes('message')).toBe(mockErrorMessage);
expect(alert.props('variant')).toBe('danger');
});

View File

@ -14,30 +14,7 @@ import ProjectDropdown from '~/jira_connect/branches/components/project_dropdown
import { PROJECTS_PER_PAGE } from '~/jira_connect/branches/constants';
import getProjectsQuery from '~/jira_connect/branches/graphql/queries/get_projects.query.graphql';
const mockProjects = [
{
id: 'test',
name: 'test',
nameWithNamespace: 'test',
avatarUrl: 'https://gitlab.com',
path: 'test-path',
fullPath: 'test-path',
repository: {
empty: false,
},
},
{
id: 'gitlab',
name: 'GitLab',
nameWithNamespace: 'gitlab-org/gitlab',
avatarUrl: 'https://gitlab.com',
path: 'gitlab',
fullPath: 'gitlab-org/gitlab',
repository: {
empty: false,
},
},
];
import { mockProjects } from '../mock_data';
const mockProjectsQueryResponse = {
data: {
@ -134,7 +111,7 @@ describe('ProjectDropdown', () => {
});
describe('when selecting a dropdown item', () => {
it('emits `change` event with the selected project name', async () => {
it('emits `change` event with the selected project', async () => {
const mockProject = mockProjects[0];
const itemToSelect = findDropdownItemByProjectId(mockProject.id);
await itemToSelect.vm.$emit('click');
@ -146,7 +123,7 @@ describe('ProjectDropdown', () => {
describe('when `selectedProject` prop is specified', () => {
const mockProject = mockProjects[0];
beforeEach(async () => {
beforeEach(() => {
wrapper.setProps({
selectedProject: mockProject,
});

View File

@ -7,15 +7,16 @@ import waitForPromises from 'helpers/wait_for_promises';
import SourceBranchDropdown from '~/jira_connect/branches/components/source_branch_dropdown.vue';
import { BRANCHES_PER_PAGE } from '~/jira_connect/branches/constants';
import getProjectQuery from '~/jira_connect/branches/graphql/queries/get_project.query.graphql';
import { mockProjects } from '../mock_data';
const mockProject = {
id: 'test',
fullPath: 'test-path',
repository: {
branchNames: ['main', 'f-test', 'release'],
rootRef: 'main',
},
};
const mockSelectedProject = mockProjects[0];
const mockProjectQueryResponse = {
data: {
@ -76,7 +77,7 @@ describe('SourceBranchDropdown', () => {
describe('when `selectedProject` becomes specified', () => {
beforeEach(async () => {
wrapper.setProps({
selectedProject: mockProject,
selectedProject: mockSelectedProject,
});
await waitForPromises();
@ -101,7 +102,7 @@ describe('SourceBranchDropdown', () => {
it('renders loading icon in dropdown', () => {
createComponent({
mockApollo: createMockApolloProvider({ getProjectQueryLoading: true }),
props: { selectedProject: mockProject },
props: { selectedProject: mockSelectedProject },
});
expect(findLoadingIcon().isVisible()).toBe(true);
@ -111,7 +112,7 @@ describe('SourceBranchDropdown', () => {
describe('when branches have loaded', () => {
describe('when searching branches', () => {
it('triggers a refetch', async () => {
createComponent({ mountFn: mount, props: { selectedProject: mockProject } });
createComponent({ mountFn: mount, props: { selectedProject: mockSelectedProject } });
await waitForPromises();
jest.clearAllMocks();
@ -129,7 +130,7 @@ describe('SourceBranchDropdown', () => {
describe('template', () => {
beforeEach(async () => {
createComponent({ props: { selectedProject: mockProject } });
createComponent({ props: { selectedProject: mockSelectedProject } });
await waitForPromises();
});

View File

@ -0,0 +1,30 @@
export const mockProjects = [
{
id: 'test',
name: 'test',
nameWithNamespace: 'test',
avatarUrl: 'https://gitlab.com',
path: 'test-path',
fullPath: 'test-path',
repository: {
empty: false,
},
userPermissions: {
pushCode: true,
},
},
{
id: 'gitlab',
name: 'GitLab',
nameWithNamespace: 'gitlab-org/gitlab',
avatarUrl: 'https://gitlab.com',
path: 'gitlab',
fullPath: 'gitlab-org/gitlab',
repository: {
empty: false,
},
userPermissions: {
pushCode: true,
},
},
];

View File

@ -6,10 +6,16 @@ import waitForPromises from 'helpers/wait_for_promises';
import { TEST_HOST } from 'spec/test_constants';
import createFlash from '~/flash';
import axios from '~/lib/utils/axios_utils';
import { confirmAction } from '~/lib/utils/confirm_via_gl_modal/confirm_via_gl_modal';
import PipelinesManualActions from '~/pipelines/components/pipelines_list/pipelines_manual_actions.vue';
import GlCountdown from '~/vue_shared/components/gl_countdown.vue';
jest.mock('~/flash');
jest.mock('~/lib/utils/confirm_via_gl_modal/confirm_via_gl_modal', () => {
return {
confirmAction: jest.fn(),
};
});
describe('Pipelines Actions dropdown', () => {
let wrapper;
@ -36,6 +42,7 @@ describe('Pipelines Actions dropdown', () => {
wrapper = null;
mock.restore();
confirmAction.mockReset();
});
describe('manual actions', () => {
@ -112,11 +119,11 @@ describe('Pipelines Actions dropdown', () => {
it('makes post request after confirming', async () => {
mock.onPost(scheduledJobAction.path).reply(200);
jest.spyOn(window, 'confirm').mockReturnValue(true);
confirmAction.mockResolvedValueOnce(true);
findAllDropdownItems().at(0).vm.$emit('click');
expect(window.confirm).toHaveBeenCalled();
expect(confirmAction).toHaveBeenCalled();
await waitForPromises();
@ -125,11 +132,11 @@ describe('Pipelines Actions dropdown', () => {
it('does not make post request if confirmation is cancelled', async () => {
mock.onPost(scheduledJobAction.path).reply(200);
jest.spyOn(window, 'confirm').mockReturnValue(false);
confirmAction.mockResolvedValueOnce(false);
findAllDropdownItems().at(0).vm.$emit('click');
expect(window.confirm).toHaveBeenCalled();
expect(confirmAction).toHaveBeenCalled();
await waitForPromises();

View File

@ -0,0 +1,91 @@
# frozen_string_literal: true
require 'spec_helper'
require 'memory_profiler'
RSpec.describe Gitlab::Middleware::MemoryReport do
let(:app) { proc { |env| [200, { 'Content-Type' => 'text/plain' }, ['Hello world!']] } }
let(:middleware) { described_class.new(app) }
describe '#call' do
shared_examples 'returns original response' do
it 'returns original response' do
expect(MemoryProfiler).not_to receive(:report)
status, headers, body = middleware.call(env)
expect(status).to eq(200)
expect(headers).to eq({ 'Content-Type' => 'text/plain' })
expect(body.first).to eq('Hello world!')
end
it 'does not call the MemoryProfiler' do
expect(MemoryProfiler).not_to receive(:report)
middleware.call(env)
end
end
context 'when the Rails environment is not development' do
let(:env) { Rack::MockRequest.env_for('/') }
it_behaves_like 'returns original response'
end
context 'when the Rails environment is development' do
before do
allow(Rails.env).to receive(:development?).and_return(true)
end
context 'when memory report is not requested' do
let(:env) { Rack::MockRequest.env_for('/') }
it_behaves_like 'returns original response'
end
context 'when memory report is requested' do
let(:env) { Rack::MockRequest.env_for('/', params: { 'performance_bar' => 'memory' }) }
before do
allow(env).to receive(:[]).and_call_original
allow(app).to receive(:call).and_return(empty_memory_report)
end
let(:empty_memory_report) do
report = MemoryProfiler::Results.new
report.register_results(MemoryProfiler::StatHash.new, MemoryProfiler::StatHash.new, 1)
end
it 'returns a memory report' do
expect(MemoryProfiler).to receive(:report).and_yield
status, headers, body = middleware.call(env)
expect(status).to eq(200)
expect(headers).to eq({ 'Content-Type' => 'text/plain' })
expect(body.first).to include('Total allocated: 0 B')
end
context 'when something goes wrong with creating the report' do
before do
expect(MemoryProfiler).to receive(:report).and_raise(StandardError, 'something went terribly wrong!')
end
it 'logs the error' do
expect(::Gitlab::ErrorTracking).to receive(:track_exception)
middleware.call(env)
end
it 'returns the error' do
status, headers, body = middleware.call(env)
expect(status).to eq(500)
expect(headers).to eq({ 'Content-Type' => 'text/plain' })
expect(body.first).to include('Could not generate memory report: something went terribly wrong!')
end
end
end
end
end
end

View File

@ -15,9 +15,16 @@ RSpec.describe MetricsServer do # rubocop:disable RSpec/FilePath
# we need to reset it after testing.
let!(:old_multiprocess_files_dir) { prometheus_config.multiprocess_files_dir }
let(:ruby_sampler_double) { double(Gitlab::Metrics::Samplers::RubySampler) }
before do
# We do not want this to have knock-on effects on the test process.
allow(Gitlab::ProcessManagement).to receive(:modify_signals)
# This being a singleton, we stub it out because only one instance is allowed
# to exist per process.
allow(Gitlab::Metrics::Samplers::RubySampler).to receive(:initialize_instance).and_return(ruby_sampler_double)
allow(ruby_sampler_double).to receive(:start)
end
after do
@ -27,35 +34,49 @@ RSpec.describe MetricsServer do # rubocop:disable RSpec/FilePath
FileUtils.rm_rf(metrics_dir, secure: true)
end
describe '.spawn' do
context 'when in parent process' do
it 'forks into a new process and detaches it' do
expect(Process).to receive(:fork).and_return(99)
expect(Process).to receive(:detach).with(99)
%w(puma sidekiq).each do |target|
context "when targeting #{target}" do
describe '.spawn' do
context 'when in parent process' do
it 'forks into a new process and detaches it' do
expect(Process).to receive(:fork).and_return(99)
expect(Process).to receive(:detach).with(99)
described_class.spawn('sidekiq', metrics_dir: metrics_dir)
end
end
context 'when in child process' do
before do
# This signals the process that it's "inside" the fork
expect(Process).to receive(:fork).and_return(nil)
expect(Process).not_to receive(:detach)
end
it 'starts the metrics server with the given arguments' do
expect_next_instance_of(MetricsServer) do |server|
expect(server).to receive(:start)
described_class.spawn(target, metrics_dir: metrics_dir)
end
end
described_class.spawn('sidekiq', metrics_dir: metrics_dir)
context 'when in child process' do
before do
# This signals the process that it's "inside" the fork
expect(Process).to receive(:fork).and_return(nil)
expect(Process).not_to receive(:detach)
end
it 'starts the metrics server with the given arguments' do
expect_next_instance_of(MetricsServer) do |server|
expect(server).to receive(:start)
end
described_class.spawn(target, metrics_dir: metrics_dir)
end
it 'resets signal handlers from parent process' do
expect(Gitlab::ProcessManagement).to receive(:modify_signals).with(%i[A B], 'DEFAULT')
described_class.spawn(target, metrics_dir: metrics_dir, trapped_signals: %i[A B])
end
end
end
end
end
it 'resets signal handlers from parent process' do
expect(Gitlab::ProcessManagement).to receive(:modify_signals).with(%i[A B], 'DEFAULT')
described_class.spawn('sidekiq', metrics_dir: metrics_dir, trapped_signals: %i[A B])
context 'when targeting invalid target' do
describe '.spawn' do
it 'raises an error' do
expect { described_class.spawn('unsupported', metrics_dir: metrics_dir) }.to(
raise_error('Target must be one of [puma,sidekiq]')
)
end
end
end
@ -64,7 +85,6 @@ RSpec.describe MetricsServer do # rubocop:disable RSpec/FilePath
let(:exporter_class) { Class.new(Gitlab::Metrics::Exporter::BaseExporter) }
let(:exporter_double) { double('fake_exporter', start: true) }
let(:settings) { { "fake_exporter" => { "enabled" => true } } }
let(:ruby_sampler_double) { double(Gitlab::Metrics::Samplers::RubySampler) }
subject(:metrics_server) { described_class.new('fake', metrics_dir, true)}
@ -74,9 +94,6 @@ RSpec.describe MetricsServer do # rubocop:disable RSpec/FilePath
settings['fake_exporter'], gc_requests: true, synchronous: true
).and_return(exporter_double)
expect(Settings).to receive(:monitoring).and_return(settings)
allow(Gitlab::Metrics::Samplers::RubySampler).to receive(:initialize_instance).and_return(ruby_sampler_double)
allow(ruby_sampler_double).to receive(:start)
end
it 'configures ::Prometheus::Client' do

View File

@ -6240,6 +6240,21 @@ RSpec.describe Project, factory_default: :keep do
end
end
describe '.for_group_and_its_ancestor_groups' do
it 'returns projects for group and its ancestors' do
group_1 = create(:group)
project_1 = create(:project, namespace: group_1)
group_2 = create(:group, parent: group_1)
project_2 = create(:project, namespace: group_2)
group_3 = create(:group, parent: group_2)
project_3 = create(:project, namespace: group_2)
group_4 = create(:group, parent: group_3)
create(:project, namespace: group_4)
expect(described_class.for_group_and_its_ancestor_groups(group_3)).to match_array([project_1, project_2, project_3])
end
end
describe '.deployments' do
subject { project.deployments }

View File

@ -1163,17 +1163,33 @@ RSpec.describe API::Groups do
expect(json_response.length).to eq(3)
end
it "returns projects including those in subgroups" do
subgroup = create(:group, parent: group1)
create(:project, group: subgroup)
create(:project, group: subgroup)
context 'when include_subgroups is true' do
it "returns projects including those in subgroups" do
subgroup = create(:group, parent: group1)
create(:project, group: subgroup)
create(:project, group: subgroup)
get api("/groups/#{group1.id}/projects", user1), params: { include_subgroups: true }
get api("/groups/#{group1.id}/projects", user1), params: { include_subgroups: true }
expect(response).to have_gitlab_http_status(:ok)
expect(response).to include_pagination_headers
expect(json_response).to be_an(Array)
expect(json_response.length).to eq(5)
expect(response).to have_gitlab_http_status(:ok)
expect(response).to include_pagination_headers
expect(json_response).to be_an(Array)
expect(json_response.length).to eq(5)
end
end
context 'when include_ancestor_groups is true' do
it 'returns ancestors groups projects' do
subgroup = create(:group, parent: group1)
subgroup_project = create(:project, group: subgroup)
get api("/groups/#{subgroup.id}/projects", user1), params: { include_ancestor_groups: true }
records = Gitlab::Json.parse(response.body)
expect(response).to have_gitlab_http_status(:ok)
expect(response).to include_pagination_headers
expect(records.map { |r| r['id'] }).to match_array([project1.id, project3.id, subgroup_project.id, archived_project.id])
end
end
it "does not return a non existing group" do

View File

@ -1,7 +1,8 @@
# frozen_string_literal: true
RSpec.shared_context 'GroupProjectsFinder context' do
let_it_be(:group) { create(:group) }
let_it_be(:root_group) { create(:group) }
let_it_be(:group) { create(:group, parent: root_group) }
let_it_be(:subgroup) { create(:group, parent: group) }
let_it_be(:current_user) { create(:user) }
let(:params) { {} }
@ -16,6 +17,9 @@ RSpec.shared_context 'GroupProjectsFinder context' do
let_it_be(:shared_project_3) { create(:project, :internal, path: '5', name: 'c') }
let_it_be(:subgroup_project) { create(:project, :public, path: '6', group: subgroup, name: 'b') }
let_it_be(:subgroup_private_project) { create(:project, :private, path: '7', group: subgroup, name: 'a') }
let_it_be(:root_group_public_project) { create(:project, :public, path: '8', group: root_group, name: 'root-public-project') }
let_it_be(:root_group_private_project) { create(:project, :private, path: '9', group: root_group, name: 'root-private-project') }
let_it_be(:root_group_private_project_2) { create(:project, :private, path: '10', group: root_group, name: 'root-private-project-2') }
before do
shared_project_1.project_group_links.create!(group_access: Gitlab::Access::MAINTAINER, group: group)