Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2022-02-24 09:14:06 +00:00
parent 89bfc148f9
commit 563c8efdee
87 changed files with 676 additions and 271 deletions

View file

@ -31,36 +31,40 @@ export default class ProtectedBranchEdit {
const wrap = this.$wrap.get(0);
const forcePushToggle = initToggle(wrap.querySelector('.js-force-push-toggle'));
forcePushToggle.$on('change', (value) => {
forcePushToggle.isLoading = true;
forcePushToggle.disabled = true;
this.updateProtectedBranch(
{
allow_force_push: value,
},
() => {
forcePushToggle.isLoading = false;
forcePushToggle.disabled = false;
},
);
});
if (this.hasLicense) {
const codeOwnerToggle = initToggle(wrap.querySelector('.js-code-owner-toggle'));
codeOwnerToggle.$on('change', (value) => {
codeOwnerToggle.isLoading = true;
codeOwnerToggle.disabled = true;
if (forcePushToggle) {
forcePushToggle.$on('change', (value) => {
forcePushToggle.isLoading = true;
forcePushToggle.disabled = true;
this.updateProtectedBranch(
{
code_owner_approval_required: value,
allow_force_push: value,
},
() => {
codeOwnerToggle.isLoading = false;
codeOwnerToggle.disabled = false;
forcePushToggle.isLoading = false;
forcePushToggle.disabled = false;
},
);
});
}
if (this.hasLicense) {
const codeOwnerToggle = initToggle(wrap.querySelector('.js-code-owner-toggle'));
if (codeOwnerToggle) {
codeOwnerToggle.$on('change', (value) => {
codeOwnerToggle.isLoading = true;
codeOwnerToggle.disabled = true;
this.updateProtectedBranch(
{
code_owner_approval_required: value,
},
() => {
codeOwnerToggle.isLoading = false;
codeOwnerToggle.disabled = false;
},
);
});
}
}
}
updateProtectedBranch(formData, callback) {

View file

@ -8,7 +8,7 @@ class Admin::RunnerProjectsController < Admin::ApplicationController
def create
@runner = Ci::Runner.find(params[:runner_project][:runner_id])
if ::Ci::AssignRunnerService.new(@runner, @project, current_user).execute
if ::Ci::Runners::AssignRunnerService.new(@runner, @project, current_user).execute
redirect_to edit_admin_runner_url(@runner), notice: s_('Runners|Runner assigned to project.')
else
redirect_to edit_admin_runner_url(@runner), alert: 'Failed adding runner to project'

View file

@ -23,7 +23,7 @@ class Admin::RunnersController < Admin::ApplicationController
end
def update
if Ci::UpdateRunnerService.new(@runner).update(runner_params)
if Ci::Runners::UpdateRunnerService.new(@runner).update(runner_params)
respond_to do |format|
format.html { redirect_to edit_admin_runner_path(@runner) }
end
@ -34,13 +34,13 @@ class Admin::RunnersController < Admin::ApplicationController
end
def destroy
Ci::UnregisterRunnerService.new(@runner, current_user).execute
Ci::Runners::UnregisterRunnerService.new(@runner, current_user).execute
redirect_to admin_runners_path, status: :found
end
def resume
if Ci::UpdateRunnerService.new(@runner).update(active: true)
if Ci::Runners::UpdateRunnerService.new(@runner).update(active: true)
redirect_to admin_runners_path, notice: _('Runner was successfully updated.')
else
redirect_to admin_runners_path, alert: _('Runner was not updated.')
@ -48,7 +48,7 @@ class Admin::RunnersController < Admin::ApplicationController
end
def pause
if Ci::UpdateRunnerService.new(@runner).update(active: false)
if Ci::Runners::UpdateRunnerService.new(@runner).update(active: false)
redirect_to admin_runners_path, notice: _('Runner was successfully updated.')
else
redirect_to admin_runners_path, alert: _('Runner was not updated.')

View file

@ -24,7 +24,7 @@ class Groups::RunnersController < Groups::ApplicationController
end
def update
if Ci::UpdateRunnerService.new(@runner).update(runner_params)
if Ci::Runners::UpdateRunnerService.new(@runner).update(runner_params)
redirect_to group_runner_path(@group, @runner), notice: _('Runner was successfully updated.')
else
render 'edit'
@ -35,14 +35,14 @@ class Groups::RunnersController < Groups::ApplicationController
if @runner.belongs_to_more_than_one_project?
redirect_to group_settings_ci_cd_path(@group, anchor: 'runners-settings'), status: :found, alert: _('Runner was not deleted because it is assigned to multiple projects.')
else
Ci::UnregisterRunnerService.new(@runner, current_user).execute
Ci::Runners::UnregisterRunnerService.new(@runner, current_user).execute
redirect_to group_settings_ci_cd_path(@group, anchor: 'runners-settings'), status: :found
end
end
def resume
if Ci::UpdateRunnerService.new(@runner).update(active: true)
if Ci::Runners::UpdateRunnerService.new(@runner).update(active: true)
redirect_to group_settings_ci_cd_path(@group, anchor: 'runners-settings'), notice: _('Runner was successfully updated.')
else
redirect_to group_settings_ci_cd_path(@group, anchor: 'runners-settings'), alert: _('Runner was not updated.')
@ -50,7 +50,7 @@ class Groups::RunnersController < Groups::ApplicationController
end
def pause
if Ci::UpdateRunnerService.new(@runner).update(active: false)
if Ci::Runners::UpdateRunnerService.new(@runner).update(active: false)
redirect_to group_settings_ci_cd_path(@group, anchor: 'runners-settings'), notice: _('Runner was successfully updated.')
else
redirect_to group_settings_ci_cd_path(@group, anchor: 'runners-settings'), alert: _('Runner was not updated.')

View file

@ -14,7 +14,7 @@ class Projects::RunnerProjectsController < Projects::ApplicationController
path = project_runners_path(project)
if ::Ci::AssignRunnerService.new(@runner, @project, current_user).execute
if ::Ci::Runners::AssignRunnerService.new(@runner, @project, current_user).execute
redirect_to path, notice: s_('Runners|Runner assigned to project.')
else
assign_to_messages = @runner.errors.messages[:assign_to]

View file

@ -14,7 +14,7 @@ class Projects::RunnersController < Projects::ApplicationController
end
def update
if Ci::UpdateRunnerService.new(@runner).update(runner_params)
if Ci::Runners::UpdateRunnerService.new(@runner).update(runner_params)
redirect_to project_runner_path(@project, @runner), notice: _('Runner was successfully updated.')
else
render 'edit'
@ -23,14 +23,14 @@ class Projects::RunnersController < Projects::ApplicationController
def destroy
if @runner.only_for?(project)
Ci::UnregisterRunnerService.new(@runner, current_user).execute
Ci::Runners::UnregisterRunnerService.new(@runner, current_user).execute
end
redirect_to project_runners_path(@project), status: :found
end
def resume
if Ci::UpdateRunnerService.new(@runner).update(active: true)
if Ci::Runners::UpdateRunnerService.new(@runner).update(active: true)
redirect_to project_runners_path(@project), notice: _('Runner was successfully updated.')
else
redirect_to project_runners_path(@project), alert: _('Runner was not updated.')
@ -38,7 +38,7 @@ class Projects::RunnersController < Projects::ApplicationController
end
def pause
if Ci::UpdateRunnerService.new(@runner).update(active: false)
if Ci::Runners::UpdateRunnerService.new(@runner).update(active: false)
redirect_to project_runners_path(@project), notice: _('Runner was successfully updated.')
else
redirect_to project_runners_path(@project), alert: _('Runner was not updated.')

View file

@ -20,7 +20,7 @@ module Mutations
error = authenticate_delete_runner!(runner)
return { errors: [error] } if error
::Ci::UnregisterRunnerService.new(runner, current_user).execute
::Ci::Runners::UnregisterRunnerService.new(runner, current_user).execute
{ errors: runner.errors.full_messages }
end

View file

@ -53,7 +53,7 @@ module Mutations
def resolve(id:, **runner_attrs)
runner = authorized_find!(id)
unless ::Ci::UpdateRunnerService.new(runner).update(runner_attrs)
unless ::Ci::Runners::UpdateRunnerService.new(runner).update(runner_attrs)
return { runner: nil, errors: runner.errors.full_messages }
end

View file

@ -1,20 +0,0 @@
# frozen_string_literal: true
module Ci
class AssignRunnerService
# @param [Ci::Runner] runner the runner to assign to a project
# @param [Project] project the new project to assign the runner to
# @param [User] user the user performing the operation
def initialize(runner, project, user)
@runner = runner
@project = project
@user = user
end
def execute
return false unless @user.present? && @user.can?(:assign_runner, @runner)
@runner.assign_to(@project, @user)
end
end
end

View file

@ -1,58 +0,0 @@
# frozen_string_literal: true
module Ci
class RegisterRunnerService
def execute(registration_token, attributes)
runner_type_attrs = extract_runner_type_attrs(registration_token)
return unless runner_type_attrs
::Ci::Runner.create(attributes.merge(runner_type_attrs))
end
private
def extract_runner_type_attrs(registration_token)
@attrs_from_token ||= check_token(registration_token)
return unless @attrs_from_token
attrs = @attrs_from_token.clone
case attrs[:runner_type]
when :project_type
attrs[:projects] = [attrs.delete(:scope)]
when :group_type
attrs[:groups] = [attrs.delete(:scope)]
end
attrs
end
def check_token(registration_token)
if runner_registration_token_valid?(registration_token)
# Create shared runner. Requires admin access
{ runner_type: :instance_type }
elsif runner_registrar_valid?('project') && project = ::Project.find_by_runners_token(registration_token)
# Create a specific runner for the project
{ runner_type: :project_type, scope: project }
elsif runner_registrar_valid?('group') && group = ::Group.find_by_runners_token(registration_token)
# Create a specific runner for the group
{ runner_type: :group_type, scope: group }
end
end
def runner_registration_token_valid?(registration_token)
ActiveSupport::SecurityUtils.secure_compare(registration_token, Gitlab::CurrentSettings.runners_registration_token)
end
def runner_registrar_valid?(type)
Feature.disabled?(:runner_registration_control) || Gitlab::CurrentSettings.valid_runner_registrars.include?(type)
end
def token_scope
@attrs_from_token[:scope]
end
end
end
Ci::RegisterRunnerService.prepend_mod

View file

@ -0,0 +1,22 @@
# frozen_string_literal: true
module Ci
module Runners
class AssignRunnerService
# @param [Ci::Runner] runner the runner to assign to a project
# @param [Project] project the new project to assign the runner to
# @param [User] user the user performing the operation
def initialize(runner, project, user)
@runner = runner
@project = project
@user = user
end
def execute
return false unless @user.present? && @user.can?(:assign_runner, @runner)
@runner.assign_to(@project, @user)
end
end
end
end

View file

@ -0,0 +1,60 @@
# frozen_string_literal: true
module Ci
module Runners
class RegisterRunnerService
def execute(registration_token, attributes)
runner_type_attrs = extract_runner_type_attrs(registration_token)
return unless runner_type_attrs
::Ci::Runner.create(attributes.merge(runner_type_attrs))
end
private
def extract_runner_type_attrs(registration_token)
@attrs_from_token ||= check_token(registration_token)
return unless @attrs_from_token
attrs = @attrs_from_token.clone
case attrs[:runner_type]
when :project_type
attrs[:projects] = [attrs.delete(:scope)]
when :group_type
attrs[:groups] = [attrs.delete(:scope)]
end
attrs
end
def check_token(registration_token)
if runner_registration_token_valid?(registration_token)
# Create shared runner. Requires admin access
{ runner_type: :instance_type }
elsif runner_registrar_valid?('project') && project = ::Project.find_by_runners_token(registration_token)
# Create a specific runner for the project
{ runner_type: :project_type, scope: project }
elsif runner_registrar_valid?('group') && group = ::Group.find_by_runners_token(registration_token)
# Create a specific runner for the group
{ runner_type: :group_type, scope: group }
end
end
def runner_registration_token_valid?(registration_token)
ActiveSupport::SecurityUtils.secure_compare(registration_token, Gitlab::CurrentSettings.runners_registration_token)
end
def runner_registrar_valid?(type)
Feature.disabled?(:runner_registration_control) || Gitlab::CurrentSettings.valid_runner_registrars.include?(type)
end
def token_scope
@attrs_from_token[:scope]
end
end
end
end
Ci::Runners::RegisterRunnerService.prepend_mod
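A minimal sketch of how the relocated service is typically invoked; the token source and attribute names (`description`, `tag_list`) below are illustrative and not taken from this commit:

```ruby
# Hypothetical call site for the renamed service. An invalid registration
# token makes `execute` return nil; otherwise it returns the (possibly
# unpersisted) Ci::Runner built from the merged attributes.
registration_token = ENV.fetch('RUNNER_REGISTRATION_TOKEN') # placeholder source

runner = ::Ci::Runners::RegisterRunnerService.new.execute(
  registration_token,
  description: 'docker builder', # illustrative attributes
  tag_list: %w[docker ruby]
)

if runner&.persisted?
  # registration succeeded; hand the runner's token back to the caller
else
  # bad token (nil) or a runner with validation errors
end
```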

View file

@ -0,0 +1,22 @@
# frozen_string_literal: true
module Ci
module Runners
class UnregisterRunnerService
attr_reader :runner, :author
# @param [Ci::Runner] runner the runner to unregister/destroy
# @param [User, authentication token String] author the user or the authentication token that authorizes the removal
def initialize(runner, author)
@runner = runner
@author = author
end
def execute
@runner&.destroy
end
end
end
end
Ci::Runners::UnregisterRunnerService.prepend_mod

View file

@ -0,0 +1,21 @@
# frozen_string_literal: true
module Ci
module Runners
class UpdateRunnerService
attr_reader :runner
def initialize(runner)
@runner = runner
end
def update(params)
params[:active] = !params.delete(:paused) if params.include?(:paused)
runner.update(params).tap do |updated|
runner.tick_runner_queue if updated
end
end
end
end
end
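As a usage note on the relocated service (a sketch, not part of this commit): the `paused` key is inverted into `active` before the record is updated, so callers can pass either form.

```ruby
runner = Ci::Runner.first # any runner record, for illustration

# These two calls are equivalent: `paused: true` is rewritten by
# UpdateRunnerService#update into `active: false` before calling
# runner.update, and the runner queue is ticked on success.
Ci::Runners::UpdateRunnerService.new(runner).update(paused: true)
Ci::Runners::UpdateRunnerService.new(runner).update(active: false)
```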

View file

@ -1,20 +0,0 @@
# frozen_string_literal: true
module Ci
class UnregisterRunnerService
attr_reader :runner, :author
# @param [Ci::Runner] runner the runner to unregister/destroy
# @param [User, authentication token String] author the user or the authentication token that authorizes the removal
def initialize(runner, author)
@runner = runner
@author = author
end
def execute
@runner&.destroy
end
end
end
Ci::UnregisterRunnerService.prepend_mod

View file

@ -1,19 +0,0 @@
# frozen_string_literal: true
module Ci
class UpdateRunnerService
attr_reader :runner
def initialize(runner)
@runner = runner
end
def update(params)
params[:active] = !params.delete(:paused) if params.include?(:paused)
runner.update(params).tap do |updated|
runner.tick_runner_queue if updated
end
end
end
end

View file

@ -2,13 +2,12 @@
require_relative '../config/bundler_setup'
require 'stackprof'
$:.unshift 'spec'
require 'spec_helper'
require 'rspec'
filename = ARGV[0].split('/').last
interval = ENV.fetch('INTERVAL', 1000).to_i
limit = ENV.fetch('LIMIT', 20)
raw = ENV.fetch('RAW', false) == 'true'
raw = ENV.fetch('RAW', false) == 'true' # Set this to true if you want to generate a flamegraph
output_file = "tmp/#{filename}.dump"
StackProf.run(mode: :wall, out: output_file, interval: interval, raw: raw) do

View file

@ -18,7 +18,7 @@ To host the GitLab product documentation, you can use:
- Your own web server
After you create a website by using one of these methods, you redirect the UI links
in the product to point to your website.
in the product to point to your website.
NOTE:
The website you create must be hosted under a subdirectory that matches

View file

@ -616,7 +616,7 @@ Note the following:
- IP address, you must add it as a Subject Alternative Name to the certificate.
- When running Praefect sub-commands such as `dial-nodes` and `list-untracked-repositories` from the command line with
[Gitaly TLS enabled](configure_gitaly.md#enable-tls-support), you must set the `SSL_CERT_DIR` or `SSL_CERT_FILE`
environment variable so that the Gitaly certificate is trusted. For example:
environment variable so that the Gitaly certificate is trusted. For example:
```shell
sudo SSL_CERT_DIR=/etc/gitlab/trusted_certs /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml dial-nodes

View file

@ -554,7 +554,7 @@ Is [some cases](index.md#known-issues) the Praefect database can get out of sync
a given repository is fully synced on all nodes, run the [`gitlab:praefect:replicas` Rake task](../raketasks/praefect.md#replica-checksums)
that checksums the repository on all Gitaly nodes.
The [Praefect dataloss](recovery.md#check-for-data-loss) command only checks the state of the repo in the Praefect database, and cannot
The [Praefect dataloss](recovery.md#check-for-data-loss) command only checks the state of the repository in the Praefect database, and cannot
be relied on to detect sync problems in this scenario.
### Relation does not exist errors

View file

@ -800,7 +800,7 @@ Reports that go over the 20 MB limit aren't loaded. Affected reports:
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/8638) in GitLab 13.3.
You can set a limit on the content of repository files that are indexed in
Elasticsearch. Any files larger than this limit only index the file name.
Elasticsearch. Any files larger than this limit only index the filename.
The file content is neither indexed nor searchable.
Setting a limit helps reduce the memory usage of the indexing processes and

View file

@ -30,7 +30,7 @@ From left to right, the performance bar displays:
is enabled. It shows which server role was used for the query.
"Primary" means that the query was sent to the read/write primary server.
"Replica" means it was sent to a read-only replica.
- **Config name**: shows up only when the
- **Configuration name**: shows up only when the
`GITLAB_MULTIPLE_DATABASE_METRICS` environment variable is set. This is
used to distinguish between different databases configured for different
GitLab features. The name shown is the same name used to configure database

View file

@ -5967,6 +5967,29 @@ The edge type for [`ComplianceFramework`](#complianceframework).
| <a id="complianceframeworkedgecursor"></a>`cursor` | [`String!`](#string) | A cursor for use in pagination. |
| <a id="complianceframeworkedgenode"></a>`node` | [`ComplianceFramework`](#complianceframework) | The item at the end of the edge. |
#### `ComplianceViolationConnection`
The connection type for [`ComplianceViolation`](#complianceviolation).
##### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="complianceviolationconnectionedges"></a>`edges` | [`[ComplianceViolationEdge]`](#complianceviolationedge) | A list of edges. |
| <a id="complianceviolationconnectionnodes"></a>`nodes` | [`[ComplianceViolation]`](#complianceviolation) | A list of nodes. |
| <a id="complianceviolationconnectionpageinfo"></a>`pageInfo` | [`PageInfo!`](#pageinfo) | Information to aid in pagination. |
#### `ComplianceViolationEdge`
The edge type for [`ComplianceViolation`](#complianceviolation).
##### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="complianceviolationedgecursor"></a>`cursor` | [`String!`](#string) | A cursor for use in pagination. |
| <a id="complianceviolationedgenode"></a>`node` | [`ComplianceViolation`](#complianceviolation) | The item at the end of the edge. |
#### `ConnectedAgentConnection`
The connection type for [`ConnectedAgent`](#connectedagent).
@ -9499,6 +9522,20 @@ Represents a ComplianceFramework associated with a Project.
| <a id="complianceframeworkname"></a>`name` | [`String!`](#string) | Name of the compliance framework. |
| <a id="complianceframeworkpipelineconfigurationfullpath"></a>`pipelineConfigurationFullPath` | [`String`](#string) | Full path of the compliance pipeline configuration stored in a project repository, such as `.gitlab/.compliance-gitlab-ci.yml@compliance/hipaa` **(ULTIMATE)**. |
### `ComplianceViolation`
Compliance violation associated with a merged merge request. Available only when feature flag `compliance_violations_graphql_type` is enabled. This flag is disabled by default, because the feature is experimental and is subject to change without notice.
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="complianceviolationid"></a>`id` | [`ID!`](#id) | Compliance violation ID. |
| <a id="complianceviolationmergerequest"></a>`mergeRequest` | [`MergeRequest!`](#mergerequest) | Merge request the compliance violation occurred in. |
| <a id="complianceviolationreason"></a>`reason` | [`ComplianceViolationReason!`](#complianceviolationreason) | Reason the compliance violation occurred. |
| <a id="complianceviolationseveritylevel"></a>`severityLevel` | [`ComplianceViolationSeverity!`](#complianceviolationseverity) | Severity of the compliance violation. |
| <a id="complianceviolationviolatinguser"></a>`violatingUser` | [`UserCore!`](#usercore) | User suspected of causing the compliance violation. |
### `ComposerMetadata`
Composer metadata.
@ -11054,6 +11091,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="groupistemporarystorageincreaseenabled"></a>`isTemporaryStorageIncreaseEnabled` | [`Boolean!`](#boolean) | Status of the temporary storage increase. |
| <a id="grouplfsenabled"></a>`lfsEnabled` | [`Boolean`](#boolean) | Indicates if Large File Storage (LFS) is enabled for namespace. |
| <a id="groupmentionsdisabled"></a>`mentionsDisabled` | [`Boolean`](#boolean) | Indicates if a group is disabled from getting mentioned. |
| <a id="groupmergerequestviolations"></a>`mergeRequestViolations` | [`ComplianceViolationConnection`](#complianceviolationconnection) | Compliance violations reported on merge requests merged within the group. Available only when feature flag `compliance_violations_graphql_type` is enabled. This flag is disabled by default, because the feature is experimental and is subject to change without notice. (see [Connections](#connections)) |
| <a id="groupname"></a>`name` | [`String!`](#string) | Name of the namespace. |
| <a id="grouporganizations"></a>`organizations` | [`CustomerRelationsOrganizationConnection`](#customerrelationsorganizationconnection) | Find organizations of this group. (see [Connections](#connections)) |
| <a id="grouppackagesettings"></a>`packageSettings` | [`PackageSettings`](#packagesettings) | Package settings for the namespace. |
@ -17077,6 +17115,28 @@ Mode of a commit action.
| <a id="commitencodingbase64"></a>`BASE64` | Base64 encoding. |
| <a id="commitencodingtext"></a>`TEXT` | Text encoding. |
### `ComplianceViolationReason`
Reason for the compliance violation.
| Value | Description |
| ----- | ----------- |
| <a id="complianceviolationreasonapproved_by_committer"></a>`APPROVED_BY_COMMITTER` | Approved by committer. |
| <a id="complianceviolationreasonapproved_by_insufficient_users"></a>`APPROVED_BY_INSUFFICIENT_USERS` | Approved by insufficient users. |
| <a id="complianceviolationreasonapproved_by_merge_request_author"></a>`APPROVED_BY_MERGE_REQUEST_AUTHOR` | Approved by merge request author. |
### `ComplianceViolationSeverity`
Severity of the compliance violation.
| Value | Description |
| ----- | ----------- |
| <a id="complianceviolationseveritycritical"></a>`CRITICAL` | Critical severity. |
| <a id="complianceviolationseverityhigh"></a>`HIGH` | High severity. |
| <a id="complianceviolationseverityinfo"></a>`INFO` | Info severity. |
| <a id="complianceviolationseveritylow"></a>`LOW` | Low severity. |
| <a id="complianceviolationseveritymedium"></a>`MEDIUM` | Medium severity. |
### `ConanMetadatumFileTypeEnum`
Conan file types.

View file

@ -79,7 +79,7 @@ The Authorization code with PKCE flow, PKCE for short, makes it possible to secu
the OAuth exchange of client credentials for access tokens on public clients without
requiring access to the _Client Secret_ at all. This makes the PKCE flow advantageous
for single page JavaScript applications or other client side apps where keeping secrets
from the user is a technical impossibility.
from the user is a technical impossibility.
Before starting the flow, generate the `STATE`, the `CODE_VERIFIER` and the `CODE_CHALLENGE`.
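As a rough illustration of that step (a sketch, not taken from this page; variable names are arbitrary), the three values can be generated in Ruby as follows:

```ruby
require 'securerandom'
require 'digest'
require 'base64'

# STATE: opaque value that ties the authorization request to its callback.
state = SecureRandom.hex(24)

# CODE_VERIFIER: high-entropy random string, 43-128 characters long.
code_verifier = SecureRandom.urlsafe_base64(64)

# CODE_CHALLENGE: base64url(SHA256(code_verifier)) without padding,
# i.e. the S256 challenge method from RFC 7636.
code_challenge = Base64.urlsafe_encode64(
  Digest::SHA256.digest(code_verifier),
  padding: false
)

puts state, code_verifier, code_challenge
```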

View file

@ -265,6 +265,13 @@ All the time!
- As the lookup is similar to a cache lookup (in the GitLab implementation), we can use
the same key for both. This is how `Gitlab::Cache.fetch_once` works.
#### Possible downsides
- Adding new attributes to a cached object using `Gitlab::JsonCache`
and `Gitlab::SafeRequestStore`, for example, can lead to stale data issues
where the cache data doesn't have the appropriate value for the new attribute
(see this past [incident](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/6372)).
### When to use SQL caching
Rails uses this automatically for identical queries in a request, so no action is

View file

@ -128,7 +128,7 @@ and you should make sure your version matches the version used by GitLab.
## Update linter configuration
[Vale configuration](#vale) and [markdownlint configuration](#markdownlint) is under source control in each
[Vale configuration](#vale) and [markdownlint configuration](#markdownlint) is under source control in each
project, so updates must be committed to each project individually.
We consider the configuration in the `gitlab` project as the source of truth and that's where all updates should

View file

@ -18,7 +18,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
## Act as SaaS
When developing locally, there are times when you need your instance to act like the SaaS version of the product.
When developing locally, there are times when you need your instance to act like the SaaS version of the product.
In those instances, you can simulate SaaS by exporting an environment variable as seen below:
`export GITLAB_SIMULATE_SAAS=1`

View file

@ -262,7 +262,7 @@ module Gitlab
end
```
A worker that is only defined in the EE codebase can subscribe to an event in the same way by
A worker that is only defined in the EE codebase can subscribe to an event in the same way by
declaring the subscription in `ee/lib/ee/gitlab/event_store.rb`.
Subscriptions are stored in memory when the Rails app is loaded and they are immediately frozen.
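A hypothetical sketch of such a declaration; the worker and event names below are placeholders, and the surrounding module layout of `ee/lib/ee/gitlab/event_store.rb` is assumed rather than shown in this commit:

```ruby
# ee/lib/ee/gitlab/event_store.rb (sketch)
def configure!(store)
  super

  # Placeholder names: subscribe an EE-only worker to an existing event.
  store.subscribe ::SomeEeOnlyWorker, to: ::SomeEvent
end
```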

View file

@ -448,7 +448,7 @@ The first way is simply by running the experiment. Assuming the experiment has b
The second way doesn't run the experiment and is intended to be used if the experiment only needs to surface in the client layer. To accomplish this we can simply `.publish` the experiment. This won't run any logic, but does surface the experiment details in the client layer so they can be utilized there.
An example might be to publish an experiment in a `before_action` in a controller. Assuming we've defined the `PillColorExperiment` class, like we have above, we can surface it to the client by publishing it instead of running it:
An example might be to publish an experiment in a `before_action` in a controller. Assuming we've defined the `PillColorExperiment` class, like we have above, we can surface it to the client by publishing it instead of running it:
```ruby
before_action -> { experiment(:pill_color).publish }, only: [:show]

View file

@ -136,7 +136,7 @@ the class name with `js-`.
## ES Module Syntax
For most JavaScript files, use ES module syntax to import or export from modules.
For most JavaScript files, use ES module syntax to import or export from modules.
Prefer named exports, as they improve name consistency.
```javascript

View file

@ -127,11 +127,11 @@ A project with that name already exists.
##### `Exception: Error importing repository into (namespace) - No space left on device`
The disk has insufficient space to complete the import.
The disk has insufficient space to complete the import.
During import, the tarball is cached in your configured `shared_path` directory. Verify the
disk has enough free space to accommodate both the cached tarball and the unpacked
project files on disk.
project files on disk.
### Importing via the Rails console

View file

@ -8,7 +8,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
GitLab, like most large applications, enforces limits within certain features.
The absence of limits can affect security, performance, data, or could even
exhaust the allocated resources for the application.
exhaust the allocated resources for the application.
Every new feature should have safe usage limits included in its implementation.
Limits are applicable for:
@ -23,6 +23,6 @@ Limits are applicable for:
## Additional reading
- Existing [GitLab application limits](../administration/instance_limits.md)
- Existing [GitLab application limits](../administration/instance_limits.md)
- Product processes: [introducing application limits](https://about.gitlab.com/handbook/product/product-processes/#introducing-application-limits)
- Development docs: [guide for adding application limits](application_limits.md)

View file

@ -69,7 +69,7 @@ In addition, there are a few circumstances where we would always run the full RS
- when the `pipeline:run-all-rspec` label is set on the merge request
- when the merge request is created by an automation (e.g. Gitaly update or MR targeting a stable branch)
- when the merge request is created in a security mirror
- when any CI config file is changed (i.e. `.gitlab-ci.yml` or `.gitlab/ci/**/*`)
- when any CI configuration file is changed (i.e. `.gitlab-ci.yml` or `.gitlab/ci/**/*`)
### Jest minimal jobs
@ -85,7 +85,7 @@ In addition, there are a few circumstances where we would always run the full Je
- when the `pipeline:run-all-jest` label is set on the merge request
- when the merge request is created by an automation (e.g. Gitaly update or MR targeting a stable branch)
- when the merge request is created in a security mirror
- when any CI config file is changed (i.e. `.gitlab-ci.yml` or `.gitlab/ci/**/*`)
- when any CI configuration file is changed (i.e. `.gitlab-ci.yml` or `.gitlab/ci/**/*`)
- when any frontend "core" file is changed (i.e. `package.json`, `yarn.lock`, `babel.config.js`, `jest.config.*.js`, `config/helpers/**/*.js`)
- when any vendored JavaScript file is changed (i.e. `vendor/assets/javascripts/**/*`)
- when any backend file is changed ([see the patterns list for details](https://gitlab.com/gitlab-org/gitlab/-/blob/3616946936c1adbd9e754c1bd06f86ba670796d8/.gitlab/ci/rules.gitlab-ci.yml#L205-216))
@ -218,7 +218,7 @@ of `gitlab-org/gitlab-foss`. These jobs are only created in the following cases:
- when the `pipeline:run-as-if-foss` label is set on the merge request
- when the merge request is created in the `gitlab-org/security/gitlab` project
- when any CI config file is changed (i.e. `.gitlab-ci.yml` or `.gitlab/ci/**/*`)
- when any CI configuration file is changed (i.e. `.gitlab-ci.yml` or `.gitlab/ci/**/*`)
The `* as-if-foss` jobs are run in addition to the regular EE-context jobs. They have the `FOSS_ONLY='1'` variable
set and get the `ee/` folder removed before the tests start running.

View file

@ -112,7 +112,7 @@ while and there are no issues, we can proceed.
### Proposed solution: Migrate data by using MultiStore with the fallback strategy
We need a way to migrate users to a new Redis store without causing any inconveniences from UX perspective.
We need a way to migrate users to a new Redis store without causing any inconveniences from UX perspective.
We also want the ability to fall back to the "old" Redis instance if something goes wrong with the new instance.
Migration Requirements:
@ -129,13 +129,13 @@ We need to write data into both Redis instances (old + new).
We read from the new instance, but we need to fall back to the old instance when pre-fetching from the new dedicated Redis instance that failed.
We need to log any issues or exceptions with a new instance, but still fall back to the old instance.
The proposed migration strategy is to implement and use the [MultiStore](https://gitlab.com/gitlab-org/gitlab/-/blob/fcc42e80ed261a862ee6ca46b182eee293ae60b6/lib/gitlab/redis/multi_store.rb).
We used this approach with [adding new dedicated Redis instance for session keys](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/579).
The proposed migration strategy is to implement and use the [MultiStore](https://gitlab.com/gitlab-org/gitlab/-/blob/fcc42e80ed261a862ee6ca46b182eee293ae60b6/lib/gitlab/redis/multi_store.rb).
We used this approach with [adding new dedicated Redis instance for session keys](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/579).
Also MultiStore comes with corresponding [specs](https://gitlab.com/gitlab-org/gitlab/-/blob/master/spec/lib/gitlab/redis/multi_store_spec.rb).
The MultiStore looks like a `redis-rb ::Redis` instance.
In the new Redis instance class you added in [Step 1](#step-1-support-configuring-the-new-instance),
In the new Redis instance class you added in [Step 1](#step-1-support-configuring-the-new-instance),
override the [Redis](https://gitlab.com/gitlab-org/gitlab/-/blob/fcc42e80ed261a862ee6ca46b182eee293ae60b6/lib/gitlab/redis/sessions.rb#L20-28) method from the `::Gitlab::Redis::Wrapper`
```ruby
@ -177,7 +177,7 @@ bin/feature-flag use_primary_store_as_default_for_foo
```
By enabling `use_primary_and_secondary_stores_for_foo` feature flag, our `Gitlab::Redis::Foo` will use `MultiStore` to write to both new Redis instance
and the [old (fallback-instance)](#fallback-instance).
and the [old (fallback-instance)](#fallback-instance).
If we fail to fetch data from the new instance, we will fallback and read from the old Redis instance.
We can monitor logs for `Gitlab::Redis::MultiStore::ReadFromPrimaryError`, and also the Prometheus counter `gitlab_redis_multi_store_read_fallback_total`.
@ -218,7 +218,7 @@ When a command outside of the supported list is used, `method_missing` will pass
This ensures that anything unexpected behaves like it would before.
NOTE:
By tracking `gitlab_redis_multi_store_method_missing_total` counter and `Gitlab::Redis::MultiStore::MethodMissingError`,
By tracking `gitlab_redis_multi_store_method_missing_total` counter and `Gitlab::Redis::MultiStore::MethodMissingError`,
a developer will need to add an implementation for missing Redis commands before proceeding with the migration.
##### Errors

View file

@ -209,17 +209,17 @@ sequenceDiagram
- `uuid` - GitLab instance unique identifier
- `hostname` - GitLab instance hostname
- `version` - GitLab instance current versions
- `version` - GitLab instance current versions
- `elapsed` - Amount of time which passed since Service Ping report process started and moment of error occurrence
- `message` - Error message
<pre>
<code>
{
"uuid"=>"02333324-1cd7-4c3b-a45b-a4993f05fb1d",
"hostname"=>"127.0.0.1",
"version"=>"14.7.0-pre",
"elapsed"=>0.006946,
"uuid"=>"02333324-1cd7-4c3b-a45b-a4993f05fb1d",
"hostname"=>"127.0.0.1",
"version"=>"14.7.0-pre",
"elapsed"=>0.006946,
"message"=>'PG::UndefinedColumn: ERROR: column \"non_existent_attribute\" does not exist\nLINE 1: SELECT COUNT(non_existent_attribute) FROM \"issues\" /*applica...'
}
</code>
@ -576,7 +576,7 @@ skip_db_write:
ServicePing::SubmitService.new(skip_db_write: true).execute
```
## Manually upload Service Ping payload
## Manually upload Service Ping payload
> [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/7388) in GitLab 14.8 with a flag named `admin_application_settings_service_usage_data_center`. Disabled by default.
@ -596,7 +596,7 @@ To upload payload manually:
## Monitoring
Service Ping reporting process state is monitored with [internal SiSense dashboard](https://app.periscopedata.com/app/gitlab/968489/Product-Intelligence---Service-Ping-Health).
Service Ping reporting process state is monitored with [internal SiSense dashboard](https://app.periscopedata.com/app/gitlab/968489/Product-Intelligence---Service-Ping-Health).
## Troubleshooting

View file

@ -259,7 +259,7 @@ these scenarios, since `:always` should be considered the exception, not the rul
To allow for reads to be served from replicas, we added two additional consistency modes: `:sticky` and `:delayed`.
When you declare either `:sticky` or `:delayed` consistency, workers become eligible for database
load-balancing.
load-balancing.
In both cases, if the replica is not up-to-date and the time from scheduling the job was less than the minimum delay interval,
the jobs sleep up to the minimum delay interval (0.8 seconds). This gives the replication process time to finish.

View file

@ -130,7 +130,7 @@ You can set this variable inside the `fabricate_via_api` call. For a consistent
- Add the word `activated` to the end of a variable's name.
- Inside the `initialize` method, set the variable's default value.
For example:
For example:
```ruby
def initialize

View file

@ -20,7 +20,7 @@ This is a partial list of the [RSpec metadata](https://relishapp.com/rspec/rspec
| `:github` | The test requires a GitHub personal access token. |
| `:group_saml` | The test requires a GitLab instance that has SAML SSO enabled at the group level. Interacts with an external SAML identity provider. Paired with the `:orchestrated` tag. |
| `:instance_saml` | The test requires a GitLab instance that has SAML SSO enabled at the instance level. Interacts with an external SAML identity provider. Paired with the `:orchestrated` tag. |
| `:integrations` | This aims to test the available [integrations](../../../user/project/integrations/overview.md#integrations-listing). The test requires Docker to be installed in the run context. It will provision the containers and can be run against a local instance or using the `gitlab-qa` scenario `Test::Integration::Integrations` |
| `:integrations` | This aims to test the available [integrations](../../../user/project/integrations/overview.md#integrations-listing). The test requires Docker to be installed in the run context. It will provision the containers and can be run against a local instance or using the `gitlab-qa` scenario `Test::Integration::Integrations` |
| `:service_ping_disabled` | The test interacts with the GitLab configuration service ping at the instance level to turn admin setting service ping checkbox on or off. This tag will have the test run only in the `service_ping_disabled` job and must be paired with the `:orchestrated` and `:requires_admin` tags. |
| `:jira` | The test requires a Jira Server. [GitLab-QA](https://gitlab.com/gitlab-org/gitlab-qa) provisions the Jira Server in a Docker container when the `Test::Integration::Jira` test scenario is run.
| `:kubernetes` | The test includes a GitLab instance that is configured to be run behind an SSH tunnel, allowing a TLS-accessible GitLab. This test also includes provisioning of at least one Kubernetes cluster to test against. _This tag is often be paired with `:orchestrated`._ |

View file

@ -122,7 +122,7 @@ signed in.
FLAG:
On self-managed GitLab, by default this feature is not available. To make it available, ask an administrator to [enable the feature flag](../administration/feature_flags.md) named `omniauth_login_minimal_scopes`. On GitLab.com, this feature is not available.
If you use a GitLab instance for authentication, you can reduce access rights when an OAuth application is used for sign in.
If you use a GitLab instance for authentication, you can reduce access rights when an OAuth application is used for sign in.
Any OAuth application can advertise the purpose of the application with the
authorization parameter: `gl_auth_type=login`. If the application is

View file

@ -101,7 +101,7 @@ from outside the Marketplace, which allows you to install the application:
![Button labeled "upload app"](img/jira-upload-app_v13_11.png)
1. For **App descriptor URL**, provide the full URL to your manifest file, based
on your instance configuration. For example: `https://your.domain/your-path/-/jira_connect/app_descriptor.json`.
on your instance configuration. By default, your manifest file is located at `/-/jira_connect/app_descriptor.json`. For example, if your GitLab self-managed instance domain is `app.pet-store.cloud`, your manifest file is located at `https://app.pet-store.cloud/-/jira_connect/app_descriptor.json`.
1. Select **Upload**. Jira fetches the content of your `app_descriptor` file and installs
it.
1. If the upload is successful, Jira displays a modal panel: **Installed and ready to go!**

View file

@ -434,7 +434,7 @@ gitlab_rails['backup_upload_storage_options'] = {
###### SSE-KMS
To enable SSE-KMS, you'll need the [KMS key via its Amazon Resource Name (ARN)
in the `arn:aws:kms:region:acct-id:key/key-id` format](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html). Under the `backup_upload_storage_options` config setting, set:
in the `arn:aws:kms:region:acct-id:key/key-id` format](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html). Under the `backup_upload_storage_options` configuration setting, set:
- `server_side_encryption` to `aws:kms`.
- `server_side_encryption_kms_key_id` to the ARN of the key.
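Combined, a `gitlab.rb` stanza following those two settings would look roughly like this (the region, account ID, and key ID in the ARN are placeholders):

```ruby
# /etc/gitlab/gitlab.rb - sketch; the key ARN is a placeholder
gitlab_rails['backup_upload_storage_options'] = {
  'server_side_encryption' => 'aws:kms',
  'server_side_encryption_kms_key_id' => 'arn:aws:kms:us-east-1:123456789012:key/your-key-id'
}
```

On Omnibus installations, run `gitlab-ctl reconfigure` afterwards for the setting to take effect.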
@ -1461,7 +1461,7 @@ To prepare the new server:
1. On the top bar, select **Menu > Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
1. Under the Sidekiq dashboard, verify that the numbers
match with what was shown on the old server.
match with what was shown on the old server.
1. While still under the Sidekiq dashboard, select **Cron** and then **Enable All**
to re-enable periodic background jobs.
1. Test that read-only operations on the GitLab instance work as expected. For example, browse through project repository files, merge requests, and issues.

View file

@ -24,7 +24,7 @@ The results use the default ordering of the GitLab Rails application.
## Limit search results
To list only projects with recent activity, pass a date with the `SINCE` environment variable. The
time you specify is parsed by the Rails [TimeZone#parse function](https://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html#method-i-parse).
time you specify is parsed by the Rails [`TimeZone#parse` function](https://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html#method-i-parse).
```shell
# Omnibus

View file

@ -93,7 +93,7 @@ The **rate limit** is 5 requests per minute per user.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/339151) in GitLab 14.7.
There is a rate limit per IP address on the `/users/sign_up` endpoint. This is to mitigate attempts to misuse the endpoint. For example, to mass
discover usernames or email addresses in use.
discover usernames or email addresses in use.
The **rate limit** is 20 calls per minute per IP address.
@ -113,7 +113,7 @@ The **rate limit** is 10 calls per minute per signed-in user.
There is a rate limit for the internal endpoint `/users/:username/exists`, used upon sign up to check if a chosen username has already been taken.
This is to mitigate the risk of misuses, such as mass discovery of usernames in use.
The **rate limit** is 20 calls per minute per IP address.
The **rate limit** is 20 calls per minute per IP address.
## Troubleshooting

View file

@ -154,7 +154,7 @@ To change the namespace linked to a subscription:
for that group.
1. Select **Proceed to checkout**.
Subscription charges are calculated based on the total number of users in a group, including its subgroups and nested projects. If the [total number of users](gitlab_com/index.md#view-seat-usage) exceeds the number of seats in your subscription, your account is charged for the additional users and you need to pay for the overage before you can change the linked namespace.
Subscription charges are calculated based on the total number of users in a group, including its subgroups and nested projects. If the [total number of users](gitlab_com/index.md#view-seat-usage) exceeds the number of seats in your subscription, your account is charged for the additional users and you need to pay for the overage before you can change the linked namespace.
Only one namespace can be linked to a subscription.

View file

@ -190,7 +190,7 @@ You can override the default values in the `values.yaml` file in the
`HELM_UPGRADE_VALUES_FILE` [CI/CD variable](#cicd-variables) with
the path and name.
Some values can not be overridden with the options above. Settings like `replicaCount` should instead be overridden with the `REPLICAS`
Some values can not be overridden with the options above. Settings like `replicaCount` should instead be overridden with the `REPLICAS`
[build and deployment](#build-and-deployment) CI/CD variable. Follow [this issue](https://gitlab.com/gitlab-org/cluster-integration/auto-deploy-image/-/issues/31) for more information.
NOTE:

View file

@ -159,3 +159,11 @@ If all goes well, this is what you should see:
Running hooks in /etc/ca-certificates/update.d...
done.
```
### Disable Version Check and Service Ping
The Version Check and Service Ping services improve the GitLab user experience and ensure that
users are on the most up-to-date instances of GitLab. These two services can be turned off for air-gapped
environments so that they do not attempt and fail to reach out to GitLab services.
Learn more about [disabling usage statistics](../../user/admin_area/settings/usage_statistics.md#enable-or-disable-usage-statistics).
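For instances managed from the Rails console, a hedged sketch of turning both services off; the attribute names are assumptions based on the usage statistics settings and should be verified on your instance:

```ruby
# gitlab-rails console (sketch; attribute names are assumptions)
ApplicationSetting.current.update!(
  version_check_enabled: false, # Version Check
  usage_ping_enabled: false     # Service Ping
)
```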

View file

@ -232,7 +232,7 @@ cannot guarantee that upgrading between major versions is seamless.
A *major* upgrade requires the following steps:
1. Start by identifying a [supported upgrade path](#upgrade-paths). This is essential for a successful *major* version upgrade.
1. Start by identifying a [supported upgrade path](#upgrade-paths). This is essential for a successful *major* version upgrade.
1. Upgrade to the latest minor version of the preceding major version.
1. Upgrade to the "dot zero" release of the next major version (`X.0.Z`).
1. Optional. Follow the [upgrade path](#upgrade-paths), and proceed with upgrading to newer releases of that major version.

View file

@ -39,7 +39,7 @@ release if the patch release is not the latest. For example, upgrading from
14.1.1 to 14.2.0 should be safe even if 14.1.2 has been released. We do recommend
you check the release posts of any releases between your current and target
version just in case they include any migrations that may require you to upgrade
one release at a time.
one release at a time.
We also recommend you verify the [version specific upgrading instructions](index.md#version-specific-upgrading-instructions) relevant to your [upgrade path](index.md#upgrade-paths).

View file

@ -172,6 +172,7 @@ By default, impersonation is enabled. GitLab can be configured to [disable imper
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/1772) in GitLab 13.8.
> - [Feature flag removed](https://gitlab.com/gitlab-org/gitlab/-/issues/292436) in GitLab 13.9.
> - [Moved to delivery by email](https://gitlab.com/gitlab-org/gitlab/-/issues/343859) in GitLab 14.8.
An administrator can export user permissions for all users in the GitLab instance from the Admin Area's Users page.
The export lists direct membership the users have in groups and projects.
@ -189,6 +190,11 @@ Only the first 100,000 user accounts are exported.
![user permission export button](img/export_permissions_v13_11.png)
GitLab creates a CSV file and:
- In GitLab 14.7 and earlier, the file is downloaded in your browser.
- In GitLab 14.8 and later, the file is sent to your primary email address.
#### Users statistics
The **Users statistics** page provides an overview of user accounts by role. These statistics are

View file

@ -40,7 +40,7 @@ To view the number of merge requests merged per month:
1. On the top bar, select **Menu > Projects** and find your project.
1. On the left sidebar, select **Analytics > Merge request**.
1. Optional. Filter results:
1. Optional. Filter results:
1. Select the filter bar.
1. Select a parameter.
1. Select a value or enter text to refine the results.

View file

@ -68,7 +68,7 @@ To view the median time spent in each stage:
- In the **From** field, select a start date.
- In the **To** field, select an end date.
1. To view the median time for each stage, above the **Filter results** text box, point to a stage.
## View the lead time and cycle time for issues
Value stream analytics shows the lead time and cycle time for issues in your project:
@ -116,7 +116,7 @@ To view deployment metrics, you must have a
[production environment configured](../../ci/environments/index.md#deployment-tier-of-environments).
Value stream analytics shows the following deployment metrics for your project:
- Deploys: The number of successful deployments in the date range.
- Deployment Frequency: The average number of successful deployments per day in the date range.
@ -174,14 +174,14 @@ This example shows a workflow through all seven stages in one day. In this
example, milestones have been created and CI for testing and setting environments is configured.
- 09:00: Create issue. **Issue** stage starts.
- 11:00: Add issue to a milestone, start work on the issue, and create a branch locally.
**Issue** stage stops and **Plan** stage starts.
- 11:00: Add issue to a milestone, start work on the issue, and create a branch locally.
**Issue** stage stops and **Plan** stage starts.
- 12:00: Make the first commit.
- 12:30: Make the second commit to the branch that mentions the issue number. **Plan** stage stops and **Code** stage starts.
- 14:00: Push branch and create a merge request that contains the [issue closing pattern](../project/issues/managing_issues.md#closing-issues-automatically). **Code** stage stops and **Test** and **Review** stages start.
- The CI takes 5 minutes to run scripts defined in [`.gitlab-ci.yml`](../../ci/yaml/index.md).
- The CI takes 5 minutes to run scripts defined in [`.gitlab-ci.yml`](../../ci/yaml/index.md).
**Test** stage stops.
- Review merge request.
- Review merge request.
- 19:00: Merge the merge request. **Review** stage stops and **Staging** stage starts.
- 19:30: Deployment to the `production` environment starts and finishes. **Staging** stops.
@ -191,7 +191,7 @@ Value stream analytics records the following times for each stage:
- **Plan**: 11:00 to 12:00: 1 hr
- **Code**: 12:00 to 14:00: 2 hrs
- **Test**: 5 minutes
- **Review**: 14:00 to 19:00: 5 hrs
- **Review**: 14:00 to 19:00: 5 hrs
- **Staging**: 19:00 to 19:30: 30 minutes
There are some additional considerations for this example:
@ -202,5 +202,5 @@ still collects analytics data for the issue.
as every merge request should be tested.
- This example illustrates only one cycle of multiple stages. The value
stream analytics dashboard shows the calculated median elapsed time for these issues.
- Value stream analytics identifies production environments based on the
- Value stream analytics identifies production environments based on the
[deployment tier of environments](../../ci/environments/index.md#deployment-tier-of-environments).

View file

@ -804,7 +804,7 @@ variables:
If the value must be generated or regenerated on expiration, you can provide a program or script for
the API fuzzer to execute on a specified interval. The provided script runs in an Alpine Linux
container that has Python 3 and Bash installed.
container that has Python 3 and Bash installed.
You have to set the environment variable `FUZZAPI_OVERRIDES_CMD` to the program or script you would like
to execute. The provided command creates the overrides JSON file as defined previously.
@ -813,7 +813,7 @@ You might want to install other scripting runtimes like NodeJS or Ruby, or maybe
your overrides command. In this case, we recommend setting the `FUZZAPI_PRE_SCRIPT` to the file path of a script which
provides those prerequisites. The script provided by `FUZZAPI_PRE_SCRIPT` is executed once, before the analyzer starts.
See the [Alpine Linux package management](https://wiki.alpinelinux.org/wiki/Alpine_Linux_package_management)
See the [Alpine Linux package management](https://wiki.alpinelinux.org/wiki/Alpine_Linux_package_management)
page for information about installing Alpine Linux packages.
You must provide three CI/CD variables, each set for correct operation:

View file

@ -8,13 +8,13 @@ info: To determine the technical writer assigned to the Stage/Group associated w
## Description
A private RFC 1918 address was identified in the target application. Public facing websites should not be issuing
requests to private IP addresses. Attackers attempting to execute subsequent attacks, such as Server-Side
A private RFC 1918 address was identified in the target application. Public facing websites should not be issuing
requests to private IP addresses. Attackers attempting to execute subsequent attacks, such as Server-Side
Request Forgery (SSRF), may be able to use this information to identify additional internal targets.
## Remediation
Identify the resource that is incorrectly specifying an internal IP address and replace it with its public
Identify the resource that is incorrectly specifying an internal IP address and replace it with its public
facing version, or remove the reference from the target application.
## Details

View file

@ -8,8 +8,8 @@ info: To determine the technical writer assigned to the Stage/Group associated w
## Description
The target web server is configured to list the contents of directories that do not contain an index file
such as `index.html`. This could lead to accidental exposure of sensitive information, or give an attacker
The target web server is configured to list the contents of directories that do not contain an index file
such as `index.html`. This could lead to accidental exposure of sensitive information, or give an attacker
details on how filenames and directories are structured and stored.
## Remediation
@ -17,11 +17,11 @@ details on how filenames and directories are structured and stored.
Directory indexing should be disabled.
Apache:
For Apache based web sites, ensure all `<Directory>` definitions have `Options -Indexes` configured in the
For Apache based web sites, ensure all `<Directory>` definitions have `Options -Indexes` configured in the
`apache2.conf` or `httpd.conf` configuration file.
NGINX:
For NGINX based websites, ensure all `location` definitions have the `autoindex off` directive set in the
For NGINX based websites, ensure all `location` definitions have the `autoindex off` directive set in the
`nginx.conf` file.
IIS:

View file

@ -479,8 +479,8 @@ Follow these steps to provide the bearer token with `DAST_API_OVERRIDES_ENV`:
`{"headers":{"Authorization":"Bearer dXNlcm5hbWU6cGFzc3dvcmQ="}}` (substitute your token). You
can create CI/CD variables from the GitLab projects page at **Settings > CI/CD**, in the
**Variables** section.
Due to the format of `TEST_API_BEARERAUTH` it's not possible to mask the variable.
To mask the token's value, you can create a second variable with the token's value, and define
Due to the format of `TEST_API_BEARERAUTH` it's not possible to mask the variable.
To mask the token's value, you can create a second variable with the token's value, and define
`TEST_API_BEARERAUTH` with the value `{"headers":{"Authorization":"Bearer $MASKED_VARIABLE"}}`.
1. In your `.gitlab-ci.yml` file, set `DAST_API_OVERRIDES_ENV` to the variable you just created:
@ -876,7 +876,7 @@ variables:
If the value must be generated or regenerated on expiration, you can provide a program or script for
the DAST API scanner to execute on a specified interval. The provided command runs in an Alpine Linux
container that has Python 3 and Bash installed.
container that has Python 3 and Bash installed.
You have to set the environment variable `DAST_API_OVERRIDES_CMD` to the program or script you would like
to execute. The provided command creates the overrides JSON file as defined previously.
@ -885,7 +885,7 @@ You might want to install other scripting runtimes like NodeJS or Ruby, or maybe
your overrides command. In this case, we recommend setting the `DAST_API_PRE_SCRIPT` to the file path of a script which
provides those prerequisites. The script provided by `DAST_API_PRE_SCRIPT` is executed once, before the analyzer starts.
See the [Alpine Linux package management](https://wiki.alpinelinux.org/wiki/Alpine_Linux_package_management)
See the [Alpine Linux package management](https://wiki.alpinelinux.org/wiki/Alpine_Linux_package_management)
page for information about installing Alpine Linux packages.
You must provide three CI/CD variables, each set for correct operation:

View file

@ -878,12 +878,12 @@ variables:
## Reports JSON format
SAST outputs a report file in JSON format. The report file contains details of all found vulnerabilities.
To download the report file, you can either:
SAST outputs a report file in JSON format. The report file contains details of all found vulnerabilities.
To download the report file, you can either:
- Download the file from the CI/CD pipelines page.
- In the pipelines tab on merge requests, set [`artifacts: paths`](../../../ci/yaml/index.md#artifactspaths) to `gl-sast-report.json`.
- In the pipelines tab on merge requests, set [`artifacts: paths`](../../../ci/yaml/index.md#artifactspaths) to `gl-sast-report.json`.
For information, see [Download job artifacts](../../../ci/pipelines/job_artifacts.md#download-job-artifacts).
For details of the report file's schema, see

View file

@ -442,9 +442,9 @@ secret_detection:
### `secret-detection` job fails with `ERR fatal: ambiguous argument` message
Your `secret-detection` job can fail with `ERR fatal: ambiguous argument` error if your
repository's default branch is unrelated to the branch the job was triggered for.
Your `secret-detection` job can fail with `ERR fatal: ambiguous argument` error if your
repository's default branch is unrelated to the branch the job was triggered for.
See issue [!352014](https://gitlab.com/gitlab-org/gitlab/-/issues/352014) for more details.
To resolve the issue, make sure to correctly [set your default branch](../../project/repository/branches/default.md#change-the-default-branch-name-for-a-project) on your repository. You should set it to a branch
that has related history with the branch you run the `secret-detection` job on.

View file

@ -56,7 +56,7 @@ A vendor revocation receiver service integrates with a GitLab instance to receiv
a web notification and respond to leaked token requests.
To implement a receiver service to revoke leaked tokens:
1. Create a publicly accessible HTTP service matching the corresponding API contract
below. Your service should be idempotent and rate-limited.
1. When a pipeline corresponding to its revocable token type (in the example, `my_api_token`)

View file

@ -7,7 +7,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Vulnerability Report **(ULTIMATE)**
The Vulnerability Report provides information about vulnerabilities from scans of the default branch. It contains cumulative results of all successful jobs, regardless of whether the pipeline was successful.
The scan results from a pipeline are only ingested after all the jobs in the pipeline complete. Partial results for a pipeline with jobs in progress can be seen in the pipeline security tab.

View file

@ -21,7 +21,7 @@ Then you can run Kubernetes API commands as part of your GitLab CI/CD pipeline.
To ensure access to your cluster is safe:
- Each agent has a separate context (`kubecontext`).
- Only the project where the agent is, and any additional projects you authorize, can access the agent in your cluster.
You do not need to have a runner in the cluster with the agent.
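A minimal sketch of a pipeline job that uses the agent's context; the context name follows the `<path/to/agent/project>:<agent-name>` pattern, and the `kubectl` image shown here is an assumption:

```yaml
deploy:
  image:
    name: bitnami/kubectl:latest
    entrypoint: ['']
  script:
    # Assumption: the agent is registered in path/to/agent/project under the name "my-agent".
    - kubectl config use-context path/to/agent/project:my-agent
    - kubectl get pods
```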
@ -208,7 +208,7 @@ SPDY protocol.
[An issue exists](https://gitlab.com/gitlab-org/gitlab/-/issues/346248) to add support for these commands.
### Grant write permissions to `~/.kube/cache`
Tools like `kubectl`, Helm, `kpt`, and `kustomize` cache information about
the cluster in `~/.kube/cache`. If this directory is not writable, the tool fetches information on each invocation,
making interactions slower and creating unnecessary load on the cluster. For the best experience, in the

View file

@ -226,7 +226,7 @@ To change the SAML app used for sign in:
### Migrate to a different SAML provider
You can migrate to a different SAML provider. During the migration process, users cannot access any of the SAML groups.
To mitigate this, you can disable [SSO enforcement](#sso-enforcement).
To migrate SAML providers:

View file

@ -51,7 +51,7 @@ Once [Group Single Sign-On](index.md) has been configured, we can:
The SAML application that was created during [Single sign-on](index.md) setup for [Azure](https://docs.microsoft.com/en-us/azure/active-directory/manage-apps/view-applications-portal) now needs to be set up for SCIM. You can refer to [Azure SCIM setup documentation](https://docs.microsoft.com/en-us/azure/active-directory/app-provisioning/use-scim-to-provision-users-and-groups#getting-started).
1. In your app, go to the Provisioning tab, and set the **Provisioning Mode** to **Automatic**.
Then fill in the **Admin Credentials**, and save. The **Tenant URL** and **secret token** are the items
retrieved in the [previous step](#gitlab-configuration).
@ -60,7 +60,7 @@ The SAML application that was created during [Single sign-on](index.md) setup fo
- **Settings**: We recommend setting a notification email and selecting the **Send an email notification when a failure occurs** checkbox.
You also control what is actually synced by selecting the **Scope**. For example, **Sync only assigned users and groups** only syncs the users and groups assigned to the application. Otherwise, it syncs the whole Active Directory.
- **Mappings**: We recommend keeping **Provision Azure Active Directory Users** enabled, and disabling **Provision Azure Active Directory Groups**.
Leaving **Provision Azure Active Directory Groups** enabled does not break the SCIM user provisioning, but it causes errors in Azure AD that may be confusing and misleading.
1. You can then test the connection by selecting **Test Connection**. If the connection is successful, save your configuration before moving on. See below for [troubleshooting](#troubleshooting).

View file

@ -16,7 +16,7 @@ We recommend deleting unnecessary packages and files. This page offers examples
## Check Package Registry Storage Use
The Usage Quotas page (**Settings > Usage Quotas > Storage**) displays storage usage for Packages.
## Delete a package

View file

@ -24,7 +24,7 @@ Project access tokens are similar to [group access tokens](../../group/settings/
and [personal access tokens](../../profile/personal_access_tokens.md), except they are
associated with a project rather than a group or user.
In self-managed instances, project access tokens are subject to the same [maximum lifetime limits](../../admin_area/settings/account_and_limit_settings.md#limit-the-lifetime-of-personal-access-tokens) as personal access tokens if the limit is set.
You can use project access tokens:

View file

@ -38,7 +38,7 @@ module API
attributes[:maintenance_note] ||= deprecated_note if deprecated_note
attributes[:active] = !attributes.delete(:paused) if attributes.include?(:paused)
@runner = ::Ci::RegisterRunnerService.new.execute(params[:token], attributes)
@runner = ::Ci::Runners::RegisterRunnerService.new.execute(params[:token], attributes)
forbidden! unless @runner
if @runner.persisted?
@ -57,7 +57,7 @@ module API
delete '/', feature_category: :runner do
authenticate_runner!
destroy_conditionally!(current_runner) { ::Ci::UnregisterRunnerService.new(current_runner, params[:token]).execute }
destroy_conditionally!(current_runner) { ::Ci::Runners::UnregisterRunnerService.new(current_runner, params[:token]).execute }
end
desc 'Validates authentication credentials' do

View file

@ -90,7 +90,7 @@ module API
runner = get_runner(params.delete(:id))
authenticate_update_runner!(runner)
params[:active] = !params.delete(:paused) if params.include?(:paused)
update_service = ::Ci::UpdateRunnerService.new(runner)
update_service = ::Ci::Runners::UpdateRunnerService.new(runner)
if update_service.update(declared_params(include_missing: false))
present runner, with: Entities::Ci::RunnerDetails, current_user: current_user
@ -110,7 +110,7 @@ module API
authenticate_delete_runner!(runner)
destroy_conditionally!(runner) { ::Ci::UnregisterRunnerService.new(runner, current_user).execute }
destroy_conditionally!(runner) { ::Ci::Runners::UnregisterRunnerService.new(runner, current_user).execute }
end
desc 'List jobs running on a runner' do
@ -187,7 +187,7 @@ module API
runner = get_runner(params[:runner_id])
authenticate_enable_runner!(runner)
if ::Ci::AssignRunnerService.new(runner, user_project, current_user).execute
if ::Ci::Runners::AssignRunnerService.new(runner, user_project, current_user).execute
present runner, with: Entities::Ci::Runner
else
render_validation_error!(runner)

View file

@ -99,6 +99,7 @@ module Gitlab
flags = create_flags(data['flags'])
links = create_links(data['links'])
location = create_location(data['location'] || {})
evidence = create_evidence(data['evidence'])
signatures = create_signatures(tracking_data(data))
if @vulnerability_finding_signatures_enabled && !signatures.empty?
@ -117,6 +118,7 @@ module Gitlab
name: finding_name(data, identifiers, location),
compare_key: data['cve'] || '',
location: location,
evidence: evidence,
severity: parse_severity_level(data['severity']),
confidence: parse_confidence_level(data['confidence']),
scanner: create_scanner(data['scanner']),
@ -253,6 +255,12 @@ module Gitlab
raise NotImplementedError
end
def create_evidence(evidence_data)
return unless evidence_data.is_a?(Hash)
::Gitlab::Ci::Reports::Security::Evidence.new(data: evidence_data)
end
def finding_name(data, identifiers, location)
return data['message'] if data['message'].present?
return data['name'] if data['name'].present?

View file

@ -0,0 +1,17 @@
# frozen_string_literal: true
module Gitlab
module Ci
module Reports
module Security
class Evidence
attr_reader :data
def initialize(data:)
@data = data
end
end
end
end
end
end

View file

@ -13,6 +13,7 @@ module Gitlab
attr_reader :flags
attr_reader :links
attr_reader :location
attr_reader :evidence
attr_reader :metadata_version
attr_reader :name
attr_reader :old_location
@ -33,13 +34,14 @@ module Gitlab
alias_method :cve, :compare_key
def initialize(compare_key:, identifiers:, flags: [], links: [], remediations: [], location:, metadata_version:, name:, original_data:, report_type:, scanner:, scan:, uuid:, confidence: nil, severity: nil, details: {}, signatures: [], project_id: nil, vulnerability_finding_signatures_enabled: false) # rubocop:disable Metrics/ParameterLists
def initialize(compare_key:, identifiers:, flags: [], links: [], remediations: [], location:, evidence:, metadata_version:, name:, original_data:, report_type:, scanner:, scan:, uuid:, confidence: nil, severity: nil, details: {}, signatures: [], project_id: nil, vulnerability_finding_signatures_enabled: false) # rubocop:disable Metrics/ParameterLists
@compare_key = compare_key
@confidence = confidence
@identifiers = identifiers
@flags = flags
@links = links
@location = location
@evidence = evidence
@metadata_version = metadata_version
@name = name
@original_data = original_data
@ -65,6 +67,7 @@ module Gitlab
flags
links
location
evidence
metadata_version
name
project_fingerprint

View file

@ -15032,9 +15032,6 @@ msgstr ""
msgid "Failed to generate export, please try again later."
msgstr ""
msgid "Failed to generate report, please try again after sometime"
msgstr ""
msgid "Failed to get ref."
msgstr ""
@ -16510,6 +16507,9 @@ msgstr ""
msgid "GitLab KAS"
msgstr ""
msgid "GitLab Memberships CSV Export"
msgstr ""
msgid "GitLab Pages"
msgstr ""
@ -30678,6 +30678,9 @@ msgstr ""
msgid "Report abuse to admin"
msgstr ""
msgid "Report is generating and will be sent to your email address."
msgstr ""
msgid "Reported %{timeAgo} by %{reportedBy}"
msgstr ""
@ -35555,6 +35558,9 @@ msgstr ""
msgid "SuperSonics|Maximum users"
msgstr ""
msgid "SuperSonics|Offline cloud"
msgstr ""
msgid "SuperSonics|Paste your activation code"
msgstr ""
@ -36346,6 +36352,9 @@ msgstr ""
msgid "The CSV export will be created in the background. Once finished, it will be sent to %{email} in an attachment."
msgstr ""
msgid "The CSV export you requested of all user memberships is attached to this email."
msgstr ""
msgid "The GitLab subscription service (customers.gitlab.com) is currently experiencing an outage. You can monitor the status and get updates at %{linkStart}status.gitlab.com%{linkEnd}."
msgstr ""

View file

@ -105,7 +105,7 @@ RSpec.describe Admin::RunnersController do
describe '#destroy' do
it 'destroys the runner' do
expect_next_instance_of(Ci::UnregisterRunnerService, runner, user) do |service|
expect_next_instance_of(Ci::Runners::UnregisterRunnerService, runner, user) do |service|
expect(service).to receive(:execute).once.and_call_original
end

View file

@ -190,7 +190,7 @@ RSpec.describe Groups::RunnersController do
end
it 'destroys the runner and redirects' do
expect_next_instance_of(Ci::UnregisterRunnerService, runner, user) do |service|
expect_next_instance_of(Ci::Runners::UnregisterRunnerService, runner, user) do |service|
expect(service).to receive(:execute).once.and_call_original
end

View file

@ -37,7 +37,7 @@ RSpec.describe Projects::RunnersController do
describe '#destroy' do
it 'destroys the runner' do
expect_next_instance_of(Ci::UnregisterRunnerService, runner, user) do |service|
expect_next_instance_of(Ci::Runners::UnregisterRunnerService, runner, user) do |service|
expect(service).to receive(:execute).once.and_call_original
end

View file

@ -0,0 +1,60 @@
# frozen_string_literal: true
FactoryBot.define do
factory :ci_reports_security_evidence, class: '::Gitlab::Ci::Reports::Security::Evidence' do
data do
{
summary: 'Credit card detected',
request: {
headers: [{ name: 'Accept', value: '*/*' }],
method: 'GET',
url: 'http://goat:8080/WebGoat/logout',
body: nil
},
response: {
headers: [{ name: 'Content-Length', value: '0' }],
reason_phrase: 'OK',
status_code: 200,
body: nil
},
source: {
id: 'assert:Response Body Analysis',
name: 'Response Body Analysis',
url: 'htpp://hostname/documentation'
},
supporting_messages: [
{
name: 'Origional',
request: {
headers: [{ name: 'Accept', value: '*/*' }],
method: 'GET',
url: 'http://goat:8080/WebGoat/logout',
body: ''
}
},
{
name: 'Recorded',
request: {
headers: [{ name: 'Accept', value: '*/*' }],
method: 'GET',
url: 'http://goat:8080/WebGoat/logout',
body: ''
},
response: {
headers: [{ name: 'Content-Length', value: '0' }],
reason_phrase: 'OK',
status_code: 200,
body: ''
}
}
]
}
end
skip_create
initialize_with do
::Gitlab::Ci::Reports::Security::Evidence.new(**attributes)
end
end
end

View file

@ -6,6 +6,7 @@ FactoryBot.define do
confidence { :medium }
identifiers { Array.new(1) { association(:ci_reports_security_identifier) } }
location factory: :ci_reports_security_locations_sast
evidence factory: :ci_reports_security_evidence
metadata_version { 'sast:1.0' }
name { 'Cipher with no integrity' }
report_type { :sast }
@ -25,7 +26,53 @@ FactoryBot.define do
name: "Cipher does not check for integrity first?",
url: "https://crypto.stackexchange.com/questions/31428/pbewithmd5anddes-cipher-does-not-check-for-integrity-first"
}
]
],
evidence: {
summary: 'Credit card detected',
request: {
headers: [{ name: 'Accept', value: '*/*' }],
method: 'GET',
url: 'http://goat:8080/WebGoat/logout',
body: nil
},
response: {
headers: [{ name: 'Content-Length', value: '0' }],
reason_phrase: 'OK',
status_code: 200,
body: nil
},
source: {
id: 'assert:Response Body Analysis',
name: 'Response Body Analysis',
url: 'htpp://hostname/documentation'
},
supporting_messages: [
{
name: 'Origional',
request: {
headers: [{ name: 'Accept', value: '*/*' }],
method: 'GET',
url: 'http://goat:8080/WebGoat/logout',
body: ''
}
},
{
name: 'Recorded',
request: {
headers: [{ name: 'Accept', value: '*/*' }],
method: 'GET',
url: 'http://goat:8080/WebGoat/logout',
body: ''
},
response: {
headers: [{ name: 'Content-Length', value: '0' }],
reason_phrase: 'OK',
status_code: 200,
body: ''
}
}
]
}
}.deep_stringify_keys
end
scanner factory: :ci_reports_security_scanner

View file

@ -12,6 +12,76 @@
"id": "gemnasium",
"name": "Gemnasium"
},
"evidence": {
"source": {
"id": "assert:CORS - Bad 'Origin' value",
"name": "CORS - Bad 'Origin' value"
},
"summary": "The Origin header was changed to an invalid value of http://peachapisecurity.com and the response contained an Access-Control-Allow-Origin header which included this invalid Origin, indicating that the CORS configuration on the server is overly permissive.\n\n\n",
"request": {
"headers": [
{
"name": "Host",
"value": "127.0.0.1:7777"
}
],
"method": "GET",
"url": "http://127.0.0.1:7777/api/users",
"body": ""
},
"response": {
"headers": [
{
"name": "Server",
"value": "TwistedWeb/20.3.0"
}
],
"reason_phrase": "OK",
"status_code": 200,
"body": "[{\"user_id\":1,\"user\":\"admin\",\"first\":\"Joe\",\"last\":\"Smith\",\"password\":\"Password!\"}]"
},
"supporting_messages": [
{
"name": "Origional",
"request": {
"headers": [
{
"name": "Host",
"value": "127.0.0.1:7777"
}
],
"method": "GET",
"url": "http://127.0.0.1:7777/api/users",
"body": ""
}
},
{
"name": "Recorded",
"request": {
"headers": [
{
"name": "Host",
"value": "127.0.0.1:7777"
}
],
"method": "GET",
"url": "http://127.0.0.1:7777/api/users",
"body": ""
},
"response": {
"headers": [
{
"name": "Server",
"value": "TwistedWeb/20.3.0"
}
],
"reason_phrase": "OK",
"status_code": 200,
"body": "[{\"user_id\":1,\"user\":\"admin\",\"first\":\"Joe\",\"last\":\"Smith\",\"password\":\"Password!\"}]"
}
}
]
},
"location": {},
"identifiers": [
{
@ -57,6 +127,76 @@
"id": "gemnasium",
"name": "Gemnasium"
},
"evidence": {
"source": {
"id": "assert:CORS - Bad 'Origin' value",
"name": "CORS - Bad 'Origin' value"
},
"summary": "The Origin header was changed to an invalid value of http://peachapisecurity.com and the response contained an Access-Control-Allow-Origin header which included this invalid Origin, indicating that the CORS configuration on the server is overly permissive.\n\n\n",
"request": {
"headers": [
{
"name": "Host",
"value": "127.0.0.1:7777"
}
],
"method": "GET",
"url": "http://127.0.0.1:7777/api/users",
"body": ""
},
"response": {
"headers": [
{
"name": "Server",
"value": "TwistedWeb/20.3.0"
}
],
"reason_phrase": "OK",
"status_code": 200,
"body": "[{\"user_id\":1,\"user\":\"admin\",\"first\":\"Joe\",\"last\":\"Smith\",\"password\":\"Password!\"}]"
},
"supporting_messages": [
{
"name": "Origional",
"request": {
"headers": [
{
"name": "Host",
"value": "127.0.0.1:7777"
}
],
"method": "GET",
"url": "http://127.0.0.1:7777/api/users",
"body": ""
}
},
{
"name": "Recorded",
"request": {
"headers": [
{
"name": "Host",
"value": "127.0.0.1:7777"
}
],
"method": "GET",
"url": "http://127.0.0.1:7777/api/users",
"body": ""
},
"response": {
"headers": [
{
"name": "Server",
"value": "TwistedWeb/20.3.0"
}
],
"reason_phrase": "OK",
"status_code": 200,
"body": "[{\"user_id\":1,\"user\":\"admin\",\"first\":\"Joe\",\"last\":\"Smith\",\"password\":\"Password!\"}]"
}
}
]
},
"location": {},
"identifiers": [
{

View file

@ -73,6 +73,21 @@ describe('ProtectedBranchEdit', () => {
});
});
describe('when toggles are not available in the DOM on page load', () => {
beforeEach(() => {
create({ hasLicense: true });
setFixtures('');
});
it('does not instantiate the force push toggle', () => {
expect(findForcePushToggle()).toBe(null);
});
it('does not instantiate the code owner toggle', () => {
expect(findCodeOwnerToggle()).toBe(null);
});
});
describe.each`
description | checkedOption | patchParam | finder
${'force push'} | ${'forcePushToggleChecked'} | ${'allow_force_push'} | ${findForcePushToggle}

View file

@ -55,7 +55,7 @@ RSpec.describe Mutations::Ci::Runner::Delete do
it 'deletes runner' do
mutation_params[:id] = project_runner.to_global_id
expect_next_instance_of(::Ci::UnregisterRunnerService, project_runner, current_ctx[:current_user]) do |service|
expect_next_instance_of(::Ci::Runners::UnregisterRunnerService, project_runner, current_ctx[:current_user]) do |service|
expect(service).to receive(:execute).once.and_call_original
end
@ -75,7 +75,7 @@ RSpec.describe Mutations::Ci::Runner::Delete do
it 'does not delete project runner' do
mutation_params[:id] = two_projects_runner.to_global_id
allow_next_instance_of(::Ci::UnregisterRunnerService) do |service|
allow_next_instance_of(::Ci::Runners::UnregisterRunnerService) do |service|
expect(service).not_to receive(:execute)
end
expect { subject }.not_to change { Ci::Runner.count }
@ -89,7 +89,7 @@ RSpec.describe Mutations::Ci::Runner::Delete do
let(:current_ctx) { { current_user: admin_user } }
it 'deletes runner' do
expect_next_instance_of(::Ci::UnregisterRunnerService, runner, current_ctx[:current_user]) do |service|
expect_next_instance_of(::Ci::Runners::UnregisterRunnerService, runner, current_ctx[:current_user]) do |service|
expect(service).to receive(:execute).once.and_call_original
end

View file

@ -342,6 +342,18 @@ RSpec.describe Gitlab::Ci::Parsers::Security::Common do
end
end
describe 'parsing evidence' do
it 'returns evidence object for each finding', :aggregate_failures do
evidences = report.findings.map(&:evidence)
expect(evidences.first.data).not_to be_empty
expect(evidences.first.data["summary"]).to match(/The Origin header was changed/)
expect(evidences.size).to eq(3)
expect(evidences.compact.size).to eq(2)
expect(evidences.first).to be_a(::Gitlab::Ci::Reports::Security::Evidence)
end
end
describe 'setting the uuid' do
let(:finding_uuids) { report.findings.map(&:uuid) }
let(:uuid_1) do

View file

@ -951,7 +951,7 @@ RSpec.describe Ci::Runner do
let!(:last_update) { runner.ensure_runner_queue_value }
before do
Ci::UpdateRunnerService.new(runner).update(description: 'new runner') # rubocop: disable Rails/SaveBang
Ci::Runners::UpdateRunnerService.new(runner).update(description: 'new runner') # rubocop: disable Rails/SaveBang
end
it 'sets a new last_update value' do

View file

@ -15,7 +15,7 @@ RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
context 'when invalid token is provided' do
it 'returns 403 error' do
allow_next_instance_of(::Ci::RegisterRunnerService) do |service|
allow_next_instance_of(::Ci::Runners::RegisterRunnerService) do |service|
allow(service).to receive(:execute).and_return(nil)
end
@ -43,7 +43,7 @@ RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
let_it_be(:new_runner) { create(:ci_runner) }
before do
allow_next_instance_of(::Ci::RegisterRunnerService) do |service|
allow_next_instance_of(::Ci::Runners::RegisterRunnerService) do |service|
expected_params = {
description: 'server.hostname',
maintenance_note: 'Some maintainer notes',
@ -108,7 +108,7 @@ RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
let(:new_runner) { create(:ci_runner) }
it 'converts to maintenance_note param' do
allow_next_instance_of(::Ci::RegisterRunnerService) do |service|
allow_next_instance_of(::Ci::Runners::RegisterRunnerService) do |service|
expect(service).to receive(:execute)
.once
.with('valid token', a_hash_including('maintenance_note' => 'Some maintainer notes')
@ -133,7 +133,7 @@ RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
let_it_be(:new_runner) { create(:ci_runner) }
it 'uses active value in registration' do
expect_next_instance_of(::Ci::RegisterRunnerService) do |service|
expect_next_instance_of(::Ci::Runners::RegisterRunnerService) do |service|
expected_params = { active: false }.stringify_keys
expect(service).to receive(:execute)

View file

@ -530,7 +530,7 @@ RSpec.describe API::Ci::Runners do
context 'admin user' do
context 'when runner is shared' do
it 'deletes runner' do
expect_next_instance_of(Ci::UnregisterRunnerService, shared_runner, admin) do |service|
expect_next_instance_of(Ci::Runners::UnregisterRunnerService, shared_runner, admin) do |service|
expect(service).to receive(:execute).once.and_call_original
end
@ -548,7 +548,7 @@ RSpec.describe API::Ci::Runners do
context 'when runner is not shared' do
it 'deletes used project runner' do
expect_next_instance_of(Ci::UnregisterRunnerService, project_runner, admin) do |service|
expect_next_instance_of(Ci::Runners::UnregisterRunnerService, project_runner, admin) do |service|
expect(service).to receive(:execute).once.and_call_original
end
@ -561,7 +561,7 @@ RSpec.describe API::Ci::Runners do
end
it 'returns 404 if runner does not exist' do
allow_next_instance_of(Ci::UnregisterRunnerService) do |service|
allow_next_instance_of(Ci::Runners::UnregisterRunnerService) do |service|
expect(service).not_to receive(:execute)
end
@ -646,7 +646,7 @@ RSpec.describe API::Ci::Runners do
context 'unauthorized user' do
it 'does not delete project runner' do
allow_next_instance_of(Ci::UnregisterRunnerService) do |service|
allow_next_instance_of(Ci::Runners::UnregisterRunnerService) do |service|
expect(service).not_to receive(:execute)
end

View file

@ -20,6 +20,8 @@ RSpec.describe 'getting group information' do
fields = all_graphql_fields_for('Group')
# TODO: Set required timelogs args elsewhere https://gitlab.com/gitlab-org/gitlab/-/issues/325499
fields.selection['timelogs(startDate: "2021-03-01" endDate: "2021-03-30")'] = fields.selection.delete('timelogs')
# TODO: Remove when `compliance_violations_graphql_type` feature flag is removed in https://gitlab.com/gitlab-org/gitlab/-/issues/350249
fields.selection.delete('mergeRequestViolations')
graphql_query_for(
'group',

View file

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe ::Ci::AssignRunnerService, '#execute' do
RSpec.describe ::Ci::Runners::AssignRunnerService, '#execute' do
subject { described_class.new(runner, project, user).execute }
let_it_be(:runner) { build(:ci_runner) }

View file

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe ::Ci::RegisterRunnerService, '#execute' do
RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
let(:registration_token) { 'abcdefg123456' }
let(:token) { }
let(:args) { {} }

View file

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe ::Ci::UnregisterRunnerService, '#execute' do
RSpec.describe ::Ci::Runners::UnregisterRunnerService, '#execute' do
subject { described_class.new(runner, 'some_token').execute }
let(:runner) { create(:ci_runner) }

View file

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Ci::UpdateRunnerService do
RSpec.describe Ci::Runners::UpdateRunnerService do
let(:runner) { create(:ci_runner) }
describe '#update' do