Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2022-09-17 00:10:28 +00:00
parent 743886e398
commit ad304c533e
29 changed files with 535 additions and 146 deletions

View File

@ -1,8 +1,5 @@
# E2E tests pipeline loaded dynamically by script: scripts/generate-e2e-pipeline
default:
interruptible: true
include:
- local: .gitlab/ci/global.gitlab-ci.yml
- local: .gitlab/ci/package-and-test/rules.gitlab-ci.yml

View File

@ -1,6 +1,3 @@
default:
interruptible: true
stages:
- prepare
- deploy

View File

@ -24,7 +24,7 @@ module ContainerExpirationPolicies
begin
service_result = Projects::ContainerRepository::CleanupTagsService
.new(repository, nil, policy_params.merge('container_expiration_policy' => true))
.new(container_repository: repository, params: policy_params.merge('container_expiration_policy' => true))
.execute
rescue StandardError
repository.cleanup_unfinished!

View File

@ -0,0 +1,17 @@
# frozen_string_literal: true

module Projects
  module ContainerRepository
    class BaseContainerRepositoryService < ::BaseContainerService
      include ::Gitlab::Utils::StrongMemoize

      alias_method :container_repository, :container

      def initialize(container_repository:, current_user: nil, params: {})
        super(container: container_repository, current_user: current_user, params: params)
      end

      delegate :project, to: :container_repository
    end
  end
end
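
For orientation, a minimal sketch of how a subclass could build on this new base class; the `CountTagsService` name and its tag-counting body are hypothetical, only the inherited behaviour comes from the code above:

```ruby
# Hypothetical subclass, for illustration only. It inherits the keyword-argument
# initializer, the `container_repository` alias, and the `project` delegation.
module Projects
  module ContainerRepository
    class CountTagsService < BaseContainerRepositoryService
      def execute
        # `container_repository`, `current_user`, and `params` come from the base classes.
        ServiceResponse.success(payload: { project_id: project.id,
                                           tags_count: container_repository.tags.size })
      end
    end
  end
end

# Hypothetical usage:
#   Projects::ContainerRepository::CountTagsService
#     .new(container_repository: repository, current_user: user)
#     .execute
```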

View File

@ -2,106 +2,107 @@
module Projects
module ContainerRepository
class CleanupTagsBaseService
include BaseServiceUtility
include ::Gitlab::Utils::StrongMemoize
class CleanupTagsBaseService < BaseContainerRepositoryService
private
def filter_out_latest
@tags.reject!(&:latest?)
def filter_out_latest!(tags)
tags.reject!(&:latest?)
end
def filter_by_name
def filter_by_name!(tags)
regex_delete = ::Gitlab::UntrustedRegexp.new("\\A#{name_regex_delete || name_regex}\\z")
regex_retain = ::Gitlab::UntrustedRegexp.new("\\A#{name_regex_keep}\\z")
@tags.select! do |tag|
tags.select! do |tag|
# regex_retain will override any overlapping matches by regex_delete
regex_delete.match?(tag.name) && !regex_retain.match?(tag.name)
end
end
# Should return [tags_to_delete, tags_to_keep]
def partition_by_keep_n
return [@tags, []] unless keep_n
def partition_by_keep_n(tags)
return [tags, []] unless keep_n
order_by_date_desc
tags = order_by_date_desc(tags)
@tags.partition.with_index { |_, index| index >= keep_n_as_integer }
tags.partition.with_index { |_, index| index >= keep_n_as_integer }
end
# Should return [tags_to_delete, tags_to_keep]
def partition_by_older_than
return [@tags, []] unless older_than
def partition_by_older_than(tags)
return [tags, []] unless older_than
older_than_timestamp = older_than_in_seconds.ago
@tags.partition do |tag|
tags.partition do |tag|
timestamp = pushed_at(tag)
timestamp && timestamp < older_than_timestamp
end
end
def order_by_date_desc
def order_by_date_desc(tags)
now = DateTime.current
@tags.sort_by! { |tag| pushed_at(tag) || now }
.reverse!
tags.sort_by! { |tag| pushed_at(tag) || now }
.reverse!
end
def delete_tags
return success(deleted: []) unless @tags.any?
def delete_tags(tags)
return success(deleted: []) unless tags.any?
service = Projects::ContainerRepository::DeleteTagsService.new(
@project,
@current_user,
tags: @tags.map(&:name),
project,
current_user,
tags: tags.map(&:name),
container_expiration_policy: container_expiration_policy
)
service.execute(@container_repository)
service.execute(container_repository)
end
def can_destroy?
return true if container_expiration_policy
can?(@current_user, :destroy_container_image, @project)
can?(current_user, :destroy_container_image, project)
end
def valid_regex?
%w[name_regex_delete name_regex name_regex_keep].each do |param_name|
regex = @params[param_name]
regex = params[param_name]
::Gitlab::UntrustedRegexp.new(regex) unless regex.blank?
end
true
rescue RegexpError => e
::Gitlab::ErrorTracking.log_exception(e, project_id: @project.id)
::Gitlab::ErrorTracking.log_exception(e, project_id: project.id)
false
end
def older_than
@params['older_than']
params['older_than']
end
def name_regex_delete
@params['name_regex_delete']
params['name_regex_delete']
end
def name_regex
@params['name_regex']
params['name_regex']
end
def name_regex_keep
@params['name_regex_keep']
params['name_regex_keep']
end
def container_expiration_policy
@params['container_expiration_policy']
params['container_expiration_policy']
end
def keep_n
@params['keep_n']
params['keep_n']
end
def project
container_repository.project
end
def keep_n_as_integer

View File

@ -3,35 +3,32 @@
module Projects
module ContainerRepository
class CleanupTagsService < CleanupTagsBaseService
def initialize(container_repository, user = nil, params = {})
@container_repository = container_repository
@current_user = user
@params = params.dup
def initialize(container_repository:, current_user: nil, params: {})
super
@project = container_repository.project
@tags = container_repository.tags
tags_size = @tags.size
@counts = {
original_size: tags_size,
cached_tags_count: 0
}
@params = params.dup
@counts = { cached_tags_count: 0 }
end
def execute
return error('access denied') unless can_destroy?
return error('invalid regex') unless valid_regex?
filter_out_latest
filter_by_name
tags = container_repository.tags
@counts[:original_size] = tags.size
truncate
populate_from_cache
filter_out_latest!(tags)
filter_by_name!(tags)
filter_keep_n
filter_by_older_than
tags = truncate(tags)
populate_from_cache(tags)
delete_tags.merge(@counts).tap do |result|
result[:before_delete_size] = @tags.size
tags = filter_keep_n(tags)
tags = filter_by_older_than(tags)
@counts[:before_delete_size] = tags.size
delete_tags(tags).merge(@counts).tap do |result|
result[:deleted_size] = result[:deleted]&.size
result[:status] = :error if @counts[:before_truncate_size] != @counts[:after_truncate_size]
@ -40,40 +37,45 @@ module Projects
private
def filter_keep_n
@tags, tags_to_keep = partition_by_keep_n
def filter_keep_n(tags)
tags, tags_to_keep = partition_by_keep_n(tags)
cache_tags(tags_to_keep)
tags
end
def filter_by_older_than
@tags, tags_to_keep = partition_by_older_than
def filter_by_older_than(tags)
tags, tags_to_keep = partition_by_older_than(tags)
cache_tags(tags_to_keep)
tags
end
def pushed_at(tag)
tag.created_at
end
def truncate
@counts[:before_truncate_size] = @tags.size
@counts[:after_truncate_size] = @tags.size
def truncate(tags)
@counts[:before_truncate_size] = tags.size
@counts[:after_truncate_size] = tags.size
return if max_list_size == 0
return tags if max_list_size == 0
# truncate the list to make sure that after the #filter_keep_n
# execution, the resulting list will be max_list_size
truncated_size = max_list_size + keep_n_as_integer
return if @tags.size <= truncated_size
return tags if tags.size <= truncated_size
@tags = @tags.sample(truncated_size)
@counts[:after_truncate_size] = @tags.size
tags = tags.sample(truncated_size)
@counts[:after_truncate_size] = tags.size
tags
end
def populate_from_cache
@counts[:cached_tags_count] = cache.populate(@tags) if caching_enabled?
def populate_from_cache(tags)
@counts[:cached_tags_count] = cache.populate(tags) if caching_enabled?
end
def cache_tags(tags)
@ -82,7 +84,7 @@ module Projects
def cache
strong_memoize(:cache) do
::Gitlab::ContainerRepository::Tags::Cache.new(@container_repository)
::Gitlab::ContainerRepository::Tags::Cache.new(container_repository)
end
end
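
For orientation, a sketch of how the refactored service is now called and roughly what its result hash contains. The `repository` and `user` variables and all numbers are illustrative; the keys come from the code above:

```ruby
# Illustrative call of the refactored service (keyword arguments only).
result = Projects::ContainerRepository::CleanupTagsService
  .new(container_repository: repository, current_user: user, params: { 'keep_n' => 10 })
  .execute

# The delete result is merged with the internal counters, for example:
# {
#   status: :success,
#   deleted: ['v1.0.0', 'v1.0.1'],   # tag names removed by DeleteTagsService
#   deleted_size: 2,
#   original_size: 8,                # tags before any filtering
#   before_truncate_size: 8,
#   after_truncate_size: 8,          # differs only when the list was truncated
#   cached_tags_count: 0,
#   before_delete_size: 2            # tags selected for deletion
# }
```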

View File

@ -8,12 +8,9 @@ module Projects
TAGS_PAGE_SIZE = 1000
def initialize(container_repository, user = nil, params = {})
@container_repository = container_repository
@current_user = user
def initialize(container_repository:, current_user: nil, params: {})
super
@params = params.dup
@project = container_repository.project
end
def execute
@ -21,7 +18,7 @@ module Projects
return error('invalid regex') unless valid_regex?
with_timeout do |start_time, result|
@container_repository.each_tags_page(page_size: TAGS_PAGE_SIZE) do |tags|
container_repository.each_tags_page(page_size: TAGS_PAGE_SIZE) do |tags|
execute_for_tags(tags, result)
raise TimeoutError if timeout?(start_time)
@ -32,19 +29,18 @@ module Projects
private
def execute_for_tags(tags, overall_result)
@tags = tags
original_size = @tags.size
original_size = tags.size
filter_out_latest
filter_by_name
filter_out_latest!(tags)
filter_by_name!(tags)
filter_by_keep_n
filter_by_older_than
tags = filter_by_keep_n(tags)
tags = filter_by_older_than(tags)
overall_result[:before_delete_size] += @tags.size
overall_result[:before_delete_size] += tags.size
overall_result[:original_size] += original_size
result = delete_tags
result = delete_tags(tags)
overall_result[:deleted_size] += result[:deleted]&.size
overall_result[:deleted] += result[:deleted]
@ -68,12 +64,12 @@ module Projects
result
end
def filter_by_keep_n
@tags, _ = partition_by_keep_n
def filter_by_keep_n(tags)
partition_by_keep_n(tags).first
end
def filter_by_older_than
@tags, _ = partition_by_older_than
def filter_by_older_than(tags)
partition_by_older_than(tags).first
end
def pushed_at(tag)

View File

@ -2,20 +2,28 @@
class ServiceResponse
def self.success(message: nil, payload: {}, http_status: :ok)
new(status: :success, message: message, payload: payload, http_status: http_status)
new(status: :success,
message: message,
payload: payload,
http_status: http_status)
end
def self.error(message:, payload: {}, http_status: nil)
new(status: :error, message: message, payload: payload, http_status: http_status)
def self.error(message:, payload: {}, http_status: nil, reason: nil)
new(status: :error,
message: message,
payload: payload,
http_status: http_status,
reason: reason)
end
attr_reader :status, :message, :http_status, :payload
attr_reader :status, :message, :http_status, :payload, :reason
def initialize(status:, message: nil, payload: {}, http_status: nil)
def initialize(status:, message: nil, payload: {}, http_status: nil, reason: nil)
self.status = status
self.message = message
self.payload = payload
self.http_status = http_status
self.reason = reason
end
def track_exception(as: StandardError, **extra_data)
@ -41,7 +49,11 @@ class ServiceResponse
end
def to_h
(payload || {}).merge(status: status, message: message, http_status: http_status)
(payload || {}).merge(
status: status,
message: message,
http_status: http_status,
reason: reason)
end
def success?
@ -60,5 +72,5 @@ class ServiceResponse
private
attr_writer :status, :message, :http_status, :payload
attr_writer :status, :message, :http_status, :payload, :reason
end

View File

@ -24,7 +24,7 @@ class CleanupContainerRepositoryWorker
return unless valid?
Projects::ContainerRepository::CleanupTagsService
.new(container_repository, current_user, params)
.new(container_repository: container_repository, current_user: current_user, params: params)
.execute
end

View File

@ -0,0 +1,40 @@
# frozen_string_literal: true
class RescheduleIssueWorkItemTypeIdBackfill < Gitlab::Database::Migration[2.0]
MIGRATION = 'BackfillWorkItemTypeIdForIssues'
BATCH_SIZE = 10_000
MAX_BATCH_SIZE = 30_000
SUB_BATCH_SIZE = 100
INTERVAL = 1.minute
restrict_gitlab_migration gitlab_schema: :gitlab_main
class MigrationWorkItemType < MigrationRecord
self.table_name = 'work_item_types'
def self.id_by_type
where(namespace_id: nil).order(:base_type).pluck(:base_type, :id).to_h
end
end
def up
# We expect no more than 5 types. Only 3 of them are expected to have associated issues at the moment
MigrationWorkItemType.id_by_type.each do |base_type, type_id|
queue_batched_background_migration(
MIGRATION,
:issues,
:id,
base_type,
type_id,
job_interval: INTERVAL,
batch_size: BATCH_SIZE,
max_batch_size: MAX_BATCH_SIZE,
sub_batch_size: SUB_BATCH_SIZE
)
end
end
def down
Gitlab::Database::BackgroundMigration::BatchedMigration.where(job_class_name: MIGRATION).delete_all
end
end

View File

@ -0,0 +1 @@
77d17e190cc1b879960763ef32458480897e3da9483503d99c18b5aacd080ce3

View File

@ -906,7 +906,7 @@ project.container_repositories.find_each do |repo|
puts repo.attributes
# Start the tag cleanup
puts Projects::ContainerRepository::CleanupTagsService.new(repo, user, policy.attributes.except("created_at", "updated_at")).execute()
puts Projects::ContainerRepository::CleanupTagsService.new(container_repository: repo, current_user: user, params: policy.attributes.except("created_at", "updated_at")).execute
end
```

View File

@ -31,7 +31,10 @@ The following API resources are available in the project context:
| [Commits](commits.md) | `/projects/:id/repository/commits`, `/projects/:id/statuses` |
| [Container Registry](container_registry.md) | `/projects/:id/registry/repositories` |
| [Custom attributes](custom_attributes.md) | `/projects/:id/custom_attributes` (also available for groups and users) |
| [Composer distributions](packages/composer.md) | `/projects/:id/packages/composer` (also available for groups) |
| [Conan distributions](packages/conan.md) | `/projects/:id/packages/conan` (also available standalone) |
| [Debian distributions](packages/debian_project_distributions.md) | `/projects/:id/debian_distributions` (also available for groups) |
| [Debian packages](packages/debian.md) | `/projects/:id/packages/debian` (also available for groups) |
| [Dependencies](dependencies.md) **(ULTIMATE)** | `/projects/:id/dependencies` |
| [Deploy keys](deploy_keys.md) | `/projects/:id/deploy_keys` (also available standalone) |
| [Deploy tokens](deploy_tokens.md) | `/projects/:id/deploy_tokens` (also available for groups and standalone) |
@ -43,6 +46,8 @@ The following API resources are available in the project context:
| [Feature Flag User Lists](feature_flag_user_lists.md) | `/projects/:id/feature_flags_user_lists` |
| [Feature Flags](feature_flags.md) | `/projects/:id/feature_flags` |
| [Freeze Periods](freeze_periods.md) | `/projects/:id/freeze_periods` |
| [Go Proxy](packages/go_proxy.md) | `/projects/:id/packages/go` |
| [Helm repository](packages/helm.md) | `/projects/:id/packages/helm_repository` |
| [Integrations](integrations.md) (Formerly "services") | `/projects/:id/integrations` |
| [Invitations](invitations.md) | `/projects/:id/invitations` (also available for groups) |
| [Issue boards](boards.md) | `/projects/:id/boards` |
@ -51,8 +56,10 @@ The following API resources are available in the project context:
| [Issues](issues.md) | `/projects/:id/issues` (also available for groups and standalone) |
| [Iterations](iterations.md) **(PREMIUM)** | `/projects/:id/iterations` (also available for groups) |
| [Jobs](jobs.md) | `/projects/:id/jobs`, `/projects/:id/pipelines/.../jobs` |
| [Jobs Artifacts](job_artifacts.md) | `/projects/:id/jobs/:job_id/artifacts` |
| [Labels](labels.md) | `/projects/:id/labels` |
| [Managed licenses](managed_licenses.md) **(ULTIMATE)** | `/projects/:id/managed_licenses` |
| [Maven repository](packages/maven.md) | `/projects/:id/packages/maven` (also available for groups and standalone) |
| [Members](members.md) | `/projects/:id/members` (also available for groups) |
| [Merge request approvals](merge_request_approvals.md) **(PREMIUM)** | `/projects/:id/approvals`, `/projects/:id/merge_requests/.../approvals` |
| [Merge requests](merge_requests.md) | `/projects/:id/merge_requests` (also available for groups and standalone) |
@ -60,6 +67,8 @@ The following API resources are available in the project context:
| [Metadata](metadata.md) | `/metadata` |
| [Notes](notes.md) (comments) | `/projects/:id/issues/.../notes`, `/projects/:id/snippets/.../notes`, `/projects/:id/merge_requests/.../notes` (also available for groups) |
| [Notification settings](notification_settings.md) | `/projects/:id/notification_settings` (also available for groups and standalone) |
| [NPM repository](packages/npm.md) | `/projects/:id/packages/npm` |
| [NuGet packages](packages/nuget.md) | `/projects/:id/packages/nuget` (also available for groups) |
| [Packages](packages.md) | `/projects/:id/packages` |
| [Pages domains](pages_domains.md) | `/projects/:id/pages` (also available standalone) |
| [Pipeline schedules](pipeline_schedules.md) | `/projects/:id/pipeline_schedules` |
@ -78,6 +87,7 @@ The following API resources are available in the project context:
| [Protected branches](protected_branches.md) | `/projects/:id/protected_branches` |
| [Protected environments](protected_environments.md) | `/projects/:id/protected_environments` |
| [Protected tags](protected_tags.md) | `/projects/:id/protected_tags` |
| [PyPI packages](packages/pypi.md) | `/projects/:id/packages/pypi` (also available for groups) |
| [Release links](releases/links.md) | `/projects/:id/releases/.../assets/links` |
| [Releases](releases/index.md) | `/projects/:id/releases` |
| [Remote mirrors](remote_mirrors.md) | `/projects/:id/remote_mirrors` |
@ -85,9 +95,11 @@ The following API resources are available in the project context:
| [Repository files](repository_files.md) | `/projects/:id/repository/files` |
| [Repository submodules](repository_submodules.md) | `/projects/:id/repository/submodules` |
| [Resource label events](resource_label_events.md) | `/projects/:id/issues/.../resource_label_events`, `/projects/:id/merge_requests/.../resource_label_events` (also available for groups) |
| [Ruby gems](packages/rubygems.md) | `/projects/:id/packages/rubygems` |
| [Runners](runners.md) | `/projects/:id/runners` (also available standalone) |
| [Search](search.md) | `/projects/:id/search` (also available for groups and standalone) |
| [Tags](tags.md) | `/projects/:id/repository/tags` |
| [Terraform modules](packages/terraform-modules.md) | `/projects/:id/packages/terraform/modules` (also available standalone) |
| [User-starred metrics dashboards](metrics_user_starred_dashboards.md ) | `/projects/:id/metrics/user_starred_dashboards` |
| [Visual Review discussions](visual_review_discussions.md) **(PREMIUM)** | `/projects/:id/merge_requests/:merge_request_id/visual_review_discussions` |
| [Vulnerabilities](vulnerabilities.md) **(ULTIMATE)** | `/vulnerabilities/:id` |

View File

@ -48,6 +48,7 @@ GET /projects/:id/members
| `query` | string | no | A query string to search for members |
| `user_ids` | array of integers | no | Filter the results on the given user IDs |
| `skip_users` | array of integers | no | Filter skipped users out of the results |
| `show_seat_info` | boolean | no | Show seat information for users |
```shell
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/groups/:id/members"
@ -132,6 +133,7 @@ GET /projects/:id/members/all
| `id` | integer/string | yes | The ID or [URL-encoded path of the project or group](index.md#namespaced-path-encoding) owned by the authenticated user |
| `query` | string | no | A query string to search for members |
| `user_ids` | array of integers | no | Filter the results on the given user IDs |
| `show_seat_info` | boolean | no | Show seat information for users |
| `state` | string | no | Filter results by member state, one of `awaiting` or `active` **(PREMIUM)** |
```shell

View File

@ -239,12 +239,14 @@ GitLab administrators can add a namespace to the reduced cost factor
### Additional costs on GitLab SaaS
GitLab SaaS shared runners have different cost factors, depending on the runner type (Linux, Windows, macOS) and the virtual machine configuration.
GitLab SaaS runners have different cost factors, depending on the runner type (Linux, Windows, macOS) and the virtual machine configuration.
| GitLab SaaS runner type | Virtual machine configuration | CI/CD minutes cost factor |
| GitLab SaaS runner type | Machine Type | CI/CD minutes cost factor |
| :--------- | :------------------- | :--------- |
| Linux OS + Docker executor| 1 vCPU, 3.75 GB RAM |1|
| macOS + shell executor | 4 vCPU, 10 GB RAM| 6 |
| Linux OS + Docker executor| Small |1|
| Linux OS + Docker executor| Medium |2|
| Linux OS + Docker executor| Large |3|
| macOS + shell executor | Large| 6 |
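For example, under these cost factors a job that runs for 10 minutes consumes 10 CI/CD minutes on a small Linux runner (factor 1), 20 on a medium runner (factor 2), 30 on a large runner (factor 3), and 60 on a macOS runner (factor 6).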
### Monthly reset of CI/CD minutes

View File

@ -7,8 +7,8 @@ type: reference
# Runner SaaS **(FREE SAAS)**
If you use GitLab SaaS (GitLab.com), your CI jobs automatically run on runners provided by GitLab.
No configuration is required. Your jobs can run on:
If you use GitLab SaaS (GitLab.com), your [untagged](../../ci/runners/configure_runners.md#use-tags-to-control-which-jobs-a-runner-can-run) CI jobs automatically run in containers on the Linux Runners.
As long as shared runners are enabled for your project, no configuration is required. Your jobs can run on:
- [Linux runners](saas/linux_saas_runner.md).
- [Windows runners](saas/windows_saas_runner.md) ([Beta](../../policy/alpha-beta-support.md#beta-features)).

View File

@ -4,43 +4,97 @@ group: Runner
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# SaaS runners on Linux **(FREE SAAS)**
# SaaS runners on Linux
SaaS runners on Linux are autoscaled ephemeral Google Cloud Platform virtual machines.
When you run jobs on SaaS runners on Linux, the runners are on auto-scaled ephemeral virtual machine (VM) instances.
Each VM uses the Google Container-Optimized OS (COS) and the latest version of Docker Engine.
The default region for the VMs is `us-east1`.
Autoscaling means reduced queue times to spin up CI/CD jobs and an isolated VM for each job, which maximizes security. These shared runners are available on GitLab.com.
## Machine types available for private projects (x86-64)
GitLab offers Ultimate tier capabilities and included CI/CD minutes per group per month for our [Open Source](https://about.gitlab.com/solutions/open-source/join/), [Education](https://about.gitlab.com/solutions/education/), and [Startups](https://about.gitlab.com/solutions/startups/) programs. For private projects, GitLab offers various [plans](https://about.gitlab.com/pricing/), starting with a Free tier.
For the SaaS runners on Linux, GitLab offers a range of machine types for use in private projects.
For Free, Premium, and Ultimate plan customers, jobs on these instances consume the CI/CD minutes allocated to your namespace.
All your CI/CD jobs run on [n1-standard-1 instances](https://cloud.google.com/compute/docs/machine-types) with 3.75GB of RAM, Google COS and the latest Docker Engine
installed. Instances provide 1 vCPU and 25GB of HDD disk space. The default
region of the VMs is US East1.
Each instance is used only for one job. This ensures that any sensitive data left on the system can't be accessed by other people's CI/CD jobs.
| | Small | Medium | Large |
|-------------------|---------------------------|---------------------------|--------------------------|
| Specs | 1 vCPU, 3.75GB RAM | 2 vCPUs, 8GB RAM | 4 vCPUs, 16GB RAM |
| GitLab CI/CD tags | `saas-linux-small-amd64` | `saas-linux-medium-amd64` | `saas-linux-large-amd64` |
| Subscription | Free, Premium, Ultimate | Free, Premium, Ultimate | Premium, Ultimate |
The `small` machine type is the default. Your job runs on this machine type if you don't specify
a [tags:](../../yaml/index.md#tags) keyword in your `.gitlab-ci.yml` file.
CI/CD jobs that run on `medium` and `large` machine types **will** consume CI/CD minutes at a higher rate than CI/CD jobs on the `small` machine type.
Refer to the CI/CD minutes [cost factor](../../../ci/pipelines/cicd_minutes.md#cost-factor) for the cost factor applied to the machine type based on size.
## Example of how to tag a job
To use a machine type other than `small`, add a `tags:` keyword to your job.
For example:
```yaml
stages:
- Prebuild
- Build
- Unit Test
job_001:
stage: Prebuild
script:
- echo "this job runs on the default (small) instance"
job_002:
tags: [ saas-linux-medium-amd64 ]
stage: Build
script:
- echo "this job runs on the medium instance"
job_003:
tags: [ saas-linux-large-amd64 ]
stage: Unit Test
script:
- echo "this job runs on the large instance"
```
## SaaS runners for GitLab projects
The `gitlab-shared-runners-manager-X.gitlab.com` fleet of runners is dedicated to
GitLab projects and related community forks. These runners are backed by a Google Compute
`n1-standard-2` machine type and do not run untagged jobs. Unlike the machine types used
for private projects, each virtual machine is reused up to 40 times.
## SaaS runners on Linux settings
Below are the settings for SaaS runners on Linux.
| Setting | GitLab.com | Default |
|-------------------------------------------------------------------------|------------------|---------|
| Executor | `docker+machine` | - |
| Default Docker image | `ruby:2.5` | - |
| `privileged` (run [Docker in Docker](https://hub.docker.com/_/docker/)) | `true` | `false` |
- **Cache**: These runners share a
[distributed cache](https://docs.gitlab.com/runner/configuration/autoscale.html#distributed-runners-caching)
that's stored in a Google Cloud Storage (GCS) bucket. Cache contents not updated within
the last 14 days are automatically removed, based on the
[object lifecycle management policy](https://cloud.google.com/storage/docs/lifecycle).
- **Timeout settings**: Jobs handled by the SaaS Runners on Linux
**time out after 3 hours**, regardless of the timeout configured in a
project. For details, see issues [#4010](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/4010)
and [#4070](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/4070).
NOTE:
The final disk space your jobs can use will be less than 25GB. Some disk space allocated to the instance will be occupied by the operating system, the Docker image, and a copy of your cloned repository.
The `gitlab-shared-runners-manager-X.gitlab.com` fleet of runners are dedicated for GitLab projects as well as community forks of them. They use a slightly larger machine type (n1-standard-2) and have a bigger SSD disk size. They don't run untagged jobs and unlike the general fleet of shared runners, the instances are re-used up to 40 times.
Jobs handled by shared runners on GitLab.com (`shared-runners-manager-X.gitlab.com`)
**time out after 3 hours**, regardless of the timeout configured in a
project. Check issue [#4010](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/4010) and [#4070](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/4070) for the reference.
Jobs handled by shared runners on Windows and macOS on GitLab.com **time out after 1 hour** while this service is in the [Beta](../../../policy/alpha-beta-support.md#beta-features) stage.
Below are the runners' settings.
| Setting | GitLab.com | Default |
| ----------- | ----------------- | ---------- |
| Executor | `docker+machine` | - |
| Default Docker image | `ruby:2.5` | - |
| `privileged` (run [Docker in Docker](https://hub.docker.com/_/docker/)) | `true` | `false` |
These runners share a [distributed cache](https://docs.gitlab.com/runner/configuration/autoscale.html#distributed-runners-caching) through use of a Google Cloud Storage (GCS) bucket. Cache contents not updated within the last 14 days are automatically removed through use of an [object lifecycle management policy](https://cloud.google.com/storage/docs/lifecycle).
The final disk space your jobs can use will be less than 25GB. Some disk space
allocated to the instance will be occupied by the operating system, the Docker image,
and a copy of your cloned repository.
## Pre-clone script
With SaaS runners on Linux, you can run commands in a CI
With SaaS runners on Linux, you can run commands in a CI/CD
job before the runner attempts to run `git init` and `git fetch` to
download a GitLab repository. The
[`pre_clone_script`](https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section)
@ -55,12 +109,13 @@ To use this feature, define a [CI/CD variable](../../../ci/variables/index.md#cu
`CI_PRE_CLONE_SCRIPT` that contains a bash script.
NOTE:
The `CI_PRE_CLONE_SCRIPT` variable does not work on GitLab SaaS Windows or macOS Runners.
The `CI_PRE_CLONE_SCRIPT` variable does not work on GitLab SaaS Windows or macOS runners.
### Pre-clone script example
This example was used in the `gitlab-org/gitlab` project until November 2021.
The project no longer uses this optimization because the [pack-objects cache](../../../administration/gitaly/configure_gitaly.md#pack-objects-cache)
The project no longer uses this optimization because the
[pack-objects cache](../../../administration/gitaly/configure_gitaly.md#pack-objects-cache)
lets Gitaly serve the full CI/CD fetch traffic. See [Git fetch caching](../../../development/pipelines.md#git-fetch-caching).
The `CI_PRE_CLONE_SCRIPT` was defined as a project CI/CD variable:

View File

@ -206,6 +206,31 @@ response = ServiceResponse.success(payload: { issue: issue })
response.payload[:issue] # => issue
```
Error responses can also specify a failure `reason`, which the caller can use
to understand the nature of the failure.
The caller, if an HTTP endpoint, could translate the reason symbol into an HTTP status code:
```ruby
response = ServiceResponse.error(
  message: 'Job is in a state that cannot be retried',
  reason: :job_not_retriable)

if response.success?
  head :ok
elsif response.reason == :job_not_retriable
  head :unprocessable_entity
else
  head :bad_request
end
```
For common failures such as resource `:not_found` or operation `:forbidden`, we could
leverage the Rails [HTTP status symbols](http://www.railsstatuscodes.com/) as long as
they are sufficiently specific for the domain logic involved.
For other failures, use domain-specific reasons whenever possible.
For example: `:job_not_retriable`, `:duplicate_package`, `:merge_request_not_mergeable`.
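
To make this concrete, a controller could translate a handful of known reasons into HTTP responses. This is only a sketch: the constant, the helper name, and the specific mappings are illustrative, while the reason symbols come from the examples above.

```ruby
# Sketch only: translate known failure reasons into HTTP status symbols.
REASON_TO_HTTP_STATUS = {
  job_not_retriable: :unprocessable_entity,
  duplicate_package: :conflict,
  not_found: :not_found,
  forbidden: :forbidden
}.freeze

def render_service_response(response)
  if response.success?
    head :ok
  else
    # Fall back to a generic client error for reasons we don't recognise.
    head REASON_TO_HTTP_STATUS.fetch(response.reason, :bad_request)
  end
end
```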
### Finders
Everything in `app/finders`, typically used for retrieving data from a database.

View File

@ -142,7 +142,7 @@ To preview the upcoming changes to the CI/CD configuration:
```yaml
include:
remote: 'https://gitlab.com/gitlab-org/gitlab/-/blob/2851f4d5/lib/gitlab/ci/templates/Jobs/SAST.latest.gitlab-ci.yml'
remote: 'https://gitlab.com/gitlab-org/gitlab/-/raw/2851f4d5/lib/gitlab/ci/templates/Jobs/SAST.latest.gitlab-ci.yml'
```
1. Verify that scanning jobs succeed in the MR. You'll notice findings from the removed analyzers in _Fixed_ and findings from Semgrep in _New_. (Some findings may show different names, descriptions, and severities, since GitLab manages and edits the Semgrep rulesets.)

View File

@ -746,6 +746,31 @@ variables:
SECURE_LOG_LEVEL: "debug"
```
### Pipeline errors related to changes in the GitLab-managed CI/CD template
[The GitLab-managed SAST CI/CD template](#configure-sast-manually) controls which [analyzer](analyzers.md) jobs run and how they're configured. While using the template, you might experience a job failure or other pipeline error. For example, you might:
- See an error message like `'<your job>' needs 'spotbugs-sast' job, but 'spotbugs-sast' is not in any previous stage` when you view an affected pipeline.
- Experience another type of unexpected issue with your CI/CD pipeline configuration.
If you're experiencing a job failure or seeing a SAST-related `yaml invalid` pipeline status, you can temporarily revert to an older version of the template so your pipelines keep working while you investigate the issue. To use an older version of the template, change the existing `include` statement in your CI/CD YAML file to refer to a specific template version, such as `v15.3.3-ee`:
```yaml
include:
remote: 'https://gitlab.com/gitlab-org/gitlab/-/raw/v15.3.3-ee/lib/gitlab/ci/templates/Jobs/SAST.gitlab-ci.yml'
```
If your GitLab instance has limited network connectivity, you can also download the file and host it elsewhere.
We recommend that you only use this solution temporarily and that you return to [the standard template](#configure-sast-manually) as soon as possible.
### Errors in a specific analyzer job
GitLab SAST [analyzers](analyzers.md) are released as container images.
If you're seeing a new error that doesn't appear to be related to [the GitLab-managed SAST CI/CD template](#configure-sast-manually) or changes in your own project, you can try [pinning the affected analyzer to a specific older version](#pinning-to-minor-image-version).
Each [analyzer project](analyzers.md#sast-analyzers) has a `CHANGELOG.md` file listing the changes made in each available version.
### `Error response from daemon: error processing tar file: docker-tar: relocation error`
This error occurs when the Docker version that runs the SAST job is `19.03.0`.

View File

@ -0,0 +1,65 @@
# frozen_string_literal: true
module Gitlab
module BackgroundMigration
# Backfills the `issues.work_item_type_id` column, replacing any
# instances of `NULL` with the appropriate `work_item_types.id` based on `issues.issue_type`
class BackfillWorkItemTypeIdForIssues < BatchedMigrationJob
# Basic AR model for issues table
class MigrationIssue < ApplicationRecord
self.table_name = 'issues'
scope :base_query, ->(base_type) { where(work_item_type_id: nil, issue_type: base_type) }
end
MAX_UPDATE_RETRIES = 3
scope_to ->(relation) {
relation.where(issue_type: base_type)
}
job_arguments :base_type, :base_type_id
def perform
each_sub_batch(
operation_name: :update_all,
batching_scope: -> (relation) { relation.where(work_item_type_id: nil) }
) do |sub_batch|
first, last = sub_batch.pick(Arel.sql('min(id), max(id)'))
# The query needs to be reconstructed because .each_batch modifies the default scope
# See: https://gitlab.com/gitlab-org/gitlab/-/issues/330510
reconstructed_sub_batch = MigrationIssue.unscoped.base_query(base_type).where(id: first..last)
update_with_retry(reconstructed_sub_batch, base_type_id)
end
end
private
# Retry mechanism required as update statements on the issues table will randomly take longer than
# expected due to gin indexes https://gitlab.com/gitlab-org/gitlab/-/merge_requests/71869#note_775796352
def update_with_retry(sub_batch, base_type_id)
update_attempt = 1
begin
update_batch(sub_batch, base_type_id)
rescue ActiveRecord::StatementTimeout, ActiveRecord::QueryCanceled => e
update_attempt += 1
if update_attempt <= MAX_UPDATE_RETRIES
# sleeping 30 seconds as it might take a long time to clean the gin index pending list
sleep(30)
retry
end
raise e
end
end
def update_batch(sub_batch, base_type_id)
sub_batch.update_all(work_item_type_id: base_type_id)
end
end
end
end

View File

@ -0,0 +1,77 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::BackgroundMigration::BackfillWorkItemTypeIdForIssues, :migration, schema: 20220825142324 do
let(:batch_column) { 'id' }
let(:sub_batch_size) { 2 }
let(:pause_ms) { 0 }
# let_it_be can't be used in migration specs because all tables but `work_item_types` are deleted after each spec
let(:issue_type_enum) { { issue: 0, incident: 1, test_case: 2, requirement: 3, task: 4 } }
let(:namespace) { table(:namespaces).create!(name: 'namespace', path: 'namespace') }
let(:project) { table(:projects).create!(namespace_id: namespace.id, project_namespace_id: namespace.id) }
let(:issues_table) { table(:issues) }
let(:issue_type) { table(:work_item_types).find_by!(namespace_id: nil, base_type: issue_type_enum[:issue]) }
let(:issue1) { issues_table.create!(project_id: project.id, issue_type: issue_type_enum[:issue]) }
let(:issue2) { issues_table.create!(project_id: project.id, issue_type: issue_type_enum[:issue]) }
let(:issue3) { issues_table.create!(project_id: project.id, issue_type: issue_type_enum[:issue]) }
let(:incident1) { issues_table.create!(project_id: project.id, issue_type: issue_type_enum[:incident]) }
# test_case and requirement are EE only, but enum values exist on the FOSS model
let(:test_case1) { issues_table.create!(project_id: project.id, issue_type: issue_type_enum[:test_case]) }
let(:requirement1) { issues_table.create!(project_id: project.id, issue_type: issue_type_enum[:requirement]) }
let(:start_id) { issue1.id }
let(:end_id) { requirement1.id }
let(:all_issues) { [issue1, issue2, issue3, incident1, test_case1, requirement1] }
let(:migration) do
described_class.new(
start_id: start_id,
end_id: end_id,
batch_table: :issues,
batch_column: :id,
sub_batch_size: sub_batch_size,
pause_ms: pause_ms,
job_arguments: [issue_type_enum[:issue], issue_type.id],
connection: ApplicationRecord.connection
)
end
subject(:migrate) { migration.perform }
it 'sets work_item_type_id only for the given type' do
expect(all_issues).to all(have_attributes(work_item_type_id: nil))
expect { migrate }.to make_queries_matching(/UPDATE \"issues\" SET "work_item_type_id"/, 2)
all_issues.each(&:reload)
expect([issue1, issue2, issue3]).to all(have_attributes(work_item_type_id: issue_type.id))
expect(all_issues - [issue1, issue2, issue3]).to all(have_attributes(work_item_type_id: nil))
end
it 'tracks timings of queries' do
expect(migration.batch_metrics.timings).to be_empty
expect { migrate }.to change { migration.batch_metrics.timings }
end
context 'when database timeouts' do
using RSpec::Parameterized::TableSyntax
where(error_class: [ActiveRecord::StatementTimeout, ActiveRecord::QueryCanceled])
with_them do
it 'retries on timeout error' do
expect(migration).to receive(:update_batch).exactly(3).times.and_raise(error_class)
expect(migration).to receive(:sleep).with(30).twice
expect do
migrate
end.to raise_error(error_class)
end
end
end
end

View File

@ -8,7 +8,7 @@ RSpec.describe OmniAuth::Strategies::Bitbucket do
describe '#callback_url' do
let(:base_url) { 'https://example.com' }
context 'when script name is present' do
context 'when script name is not present' do
it 'has the correct default callback path' do
allow(subject).to receive(:full_host) { base_url }
allow(subject).to receive(:script_name).and_return('')
@ -17,7 +17,7 @@ RSpec.describe OmniAuth::Strategies::Bitbucket do
end
end
context 'when script name is not present' do
context 'when script name is present' do
it 'sets the callback path with script_name' do
allow(subject).to receive(:full_host) { base_url }
allow(subject).to receive(:script_name).and_return('/v1')

View File

@ -0,0 +1,54 @@
# frozen_string_literal: true
require 'spec_helper'
require_migration!
RSpec.describe RescheduleIssueWorkItemTypeIdBackfill, :migration do
let_it_be(:migration) { described_class::MIGRATION }
let_it_be(:interval) { 2.minutes }
let_it_be(:issue_type_enum) { { issue: 0, incident: 1, test_case: 2, requirement: 3, task: 4 } }
let_it_be(:base_work_item_type_ids) do
table(:work_item_types).where(namespace_id: nil).order(:base_type).each_with_object({}) do |type, hash|
hash[type.base_type] = type.id
end
end
describe '#up' do
it 'correctly schedules background migrations' do
Sidekiq::Testing.fake! do
freeze_time do
migrate!
scheduled_migrations = Gitlab::Database::BackgroundMigration::BatchedMigration.where(
job_class_name: migration
)
work_item_types = table(:work_item_types).where(namespace_id: nil)
expect(scheduled_migrations.count).to eq(work_item_types.count)
[:issue, :incident, :test_case, :requirement, :task].each do |issue_type|
expect(migration).to have_scheduled_batched_migration(
table_name: :issues,
column_name: :id,
job_arguments: [issue_type_enum[issue_type], base_work_item_type_ids[issue_type_enum[issue_type]]],
interval: interval,
batch_size: described_class::BATCH_SIZE,
max_batch_size: described_class::MAX_BATCH_SIZE,
sub_batch_size: described_class::SUB_BATCH_SIZE,
batch_class_name: described_class::BATCH_CLASS_NAME
)
end
end
end
end
end
describe '#down' do
it 'deletes all batched migration records' do
migrate!
schema_migrate_down!
expect(migration).not_to have_scheduled_batched_migration
end
end
end

View File

@ -24,7 +24,7 @@ RSpec.describe ContainerExpirationPolicies::CleanupService do
it 'completely clean up the repository' do
expect(Projects::ContainerRepository::CleanupTagsService)
.to receive(:new).with(repository, nil, cleanup_tags_service_params).and_return(cleanup_tags_service)
.to receive(:new).with(container_repository: repository, params: cleanup_tags_service_params).and_return(cleanup_tags_service)
expect(cleanup_tags_service).to receive(:execute).and_return(status: :success, deleted_size: 1)
response = subject

View File

@ -11,7 +11,7 @@ RSpec.describe Projects::ContainerRepository::CleanupTagsService, :clean_gitlab_
let_it_be(:project, reload: true) { create(:project, :private) }
let(:repository) { create(:container_repository, :root, project: project) }
let(:service) { described_class.new(repository, user, params) }
let(:service) { described_class.new(container_repository: repository, current_user: user, params: params) }
let(:tags) { %w[latest A Ba Bb C D E] }
before do

View File

@ -12,7 +12,7 @@ RSpec.describe Projects::ContainerRepository::Gitlab::CleanupTagsService do
let_it_be(:project, reload: true) { create(:project, :private) }
let(:repository) { create(:container_repository, :root, :import_done, project: project) }
let(:service) { described_class.new(repository, user, params) }
let(:service) { described_class.new(container_repository: repository, current_user: user, params: params) }
let(:tags) { %w[latest A Ba Bb C D E] }
before do

View File

@ -43,14 +43,14 @@ RSpec.describe ServiceResponse do
end
describe '.error' do
it 'creates a failed response without HTTP status' do
it 'creates an error response without HTTP status' do
response = described_class.error(message: 'Bad apple')
expect(response).to be_error
expect(response.message).to eq('Bad apple')
end
it 'creates a failed response with HTTP status' do
it 'creates an error response with HTTP status' do
response = described_class.error(message: 'Bad apple', http_status: 400)
expect(response).to be_error
@ -58,7 +58,7 @@ RSpec.describe ServiceResponse do
expect(response.http_status).to eq(400)
end
it 'creates a failed response with payload' do
it 'creates an error response with payload' do
response = described_class.error(message: 'Bad apple',
payload: { bad: 'apple' })
@ -66,6 +66,15 @@ RSpec.describe ServiceResponse do
expect(response.message).to eq('Bad apple')
expect(response.payload).to eq(bad: 'apple')
end
it 'creates an error response with a reason' do
response = described_class.error(message: 'Bad apple',
reason: :permission_denied)
expect(response).to be_error
expect(response.message).to eq('Bad apple')
expect(response.reason).to eq(:permission_denied)
end
end
describe '#success?' do

View File

@ -17,7 +17,7 @@ RSpec.describe CleanupContainerRepositoryWorker, :clean_gitlab_redis_shared_stat
it 'executes the destroy service' do
expect(Projects::ContainerRepository::CleanupTagsService).to receive(:new)
.with(repository, user, params)
.with(container_repository: repository, current_user: user, params: params)
.and_return(service)
expect(service).to receive(:execute)