Add latest changes from gitlab-org/gitlab@master
This commit is contained in:
parent
52eb17ad85
commit
29516285eb
|
@ -67,9 +67,11 @@ export default {
|
|||
v-for="packageName in feature.packages"
|
||||
:key="packageName"
|
||||
size="md"
|
||||
class="whats-new-item-badge gl-mr-2"
|
||||
variant="tier"
|
||||
icon="license"
|
||||
class="gl-mr-2"
|
||||
>
|
||||
<gl-icon name="license" />{{ packageName }}
|
||||
{{ packageName }}
|
||||
</gl-badge>
|
||||
</div>
|
||||
<div
|
||||
|
|
|
@ -47,12 +47,6 @@
|
|||
margin-top: calc(#{$performance-bar-height} + #{$system-header-height} + #{$header-height});
|
||||
}
|
||||
|
||||
.gl-badge.whats-new-item-badge {
|
||||
background-color: $purple-light;
|
||||
color: $purple;
|
||||
@include gl-font-weight-bold;
|
||||
}
|
||||
|
||||
.whats-new-item-title-link {
|
||||
&:hover,
|
||||
&:focus,
|
||||
|
|
|
@ -4,6 +4,7 @@ class Admin::RunnerProjectsController < Admin::ApplicationController
|
|||
before_action :project, only: [:create]
|
||||
|
||||
feature_category :runner
|
||||
urgency :low
|
||||
|
||||
def create
|
||||
@runner = Ci::Runner.find(params[:runner_project][:runner_id])
|
||||
|
|
|
@ -9,6 +9,7 @@ class Admin::RunnersController < Admin::ApplicationController
|
|||
end
|
||||
|
||||
feature_category :runner
|
||||
urgency :low
|
||||
|
||||
def index
|
||||
end
|
||||
|
|
|
@ -6,6 +6,7 @@ class Groups::RunnersController < Groups::ApplicationController
|
|||
before_action :runner, only: [:edit, :update, :destroy, :pause, :resume, :show]
|
||||
|
||||
feature_category :runner
|
||||
urgency :low
|
||||
|
||||
def index
|
||||
finder = Ci::RunnersFinder.new(current_user: current_user, params: { group: @group })
|
||||
|
|
|
@ -38,6 +38,23 @@ class Projects::PipelinesController < Projects::ApplicationController
|
|||
|
||||
POLLING_INTERVAL = 10_000
|
||||
|
||||
content_security_policy do |policy|
|
||||
next if policy.directives.blank?
|
||||
|
||||
default_script_src = policy.directives['script-src'] || policy.directives['default-src']
|
||||
script_src_values = Array.wrap(default_script_src) | ["'self'", "'unsafe-eval'", 'https://*.zuora.com']
|
||||
|
||||
default_frame_src = policy.directives['frame-src'] || policy.directives['default-src']
|
||||
frame_src_values = Array.wrap(default_frame_src) | ["'self'", 'https://*.zuora.com']
|
||||
|
||||
default_child_src = policy.directives['child-src'] || policy.directives['default-src']
|
||||
child_src_values = Array.wrap(default_child_src) | ["'self'", 'https://*.zuora.com']
|
||||
|
||||
policy.script_src(*script_src_values)
|
||||
policy.frame_src(*frame_src_values)
|
||||
policy.child_src(*child_src_values)
|
||||
end
|
||||
|
||||
feature_category :continuous_integration, [
|
||||
:charts, :show, :config_variables, :stage, :cancel, :retry,
|
||||
:builds, :dag, :failures, :status,
|
||||
|
|
|
@ -6,6 +6,7 @@ class Projects::RunnerProjectsController < Projects::ApplicationController
|
|||
layout 'project_settings'
|
||||
|
||||
feature_category :runner
|
||||
urgency :low
|
||||
|
||||
def create
|
||||
@runner = Ci::Runner.find(params[:runner_project][:runner_id])
|
||||
|
|
|
@ -5,6 +5,7 @@ class Projects::RunnersController < Projects::ApplicationController
|
|||
before_action :runner, only: [:edit, :update, :destroy, :pause, :resume, :show]
|
||||
|
||||
feature_category :runner
|
||||
urgency :low
|
||||
|
||||
def index
|
||||
redirect_to project_settings_ci_cd_path(@project, anchor: 'js-runners-settings')
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
|
||||
class RunnerSetupController < ApplicationController
|
||||
feature_category :runner
|
||||
urgency :low
|
||||
|
||||
def platforms
|
||||
render json: Gitlab::Ci::RunnerInstructions::OS.merge(Gitlab::Ci::RunnerInstructions::OTHER_ENVIRONMENTS)
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
# sort: string
|
||||
# my_reaction_emoji: string
|
||||
# public_only: boolean
|
||||
# include_hidden: boolean
|
||||
# due_date: date or '0', '', 'overdue', 'week', or 'month'
|
||||
# created_after: datetime
|
||||
# created_before: datetime
|
||||
|
@ -48,6 +47,8 @@ class IssuesFinder < IssuableFinder
|
|||
|
||||
# rubocop: disable CodeReuse/ActiveRecord
|
||||
def with_confidentiality_access_check
|
||||
return Issue.all if params.user_can_see_all_issues?
|
||||
|
||||
# Only admins can see hidden issues, so for non-admins, we filter out any hidden issues
|
||||
issues = Issue.without_hidden
|
||||
|
||||
|
@ -75,9 +76,7 @@ class IssuesFinder < IssuableFinder
|
|||
private
|
||||
|
||||
def init_collection
|
||||
if params.include_hidden?
|
||||
Issue.all
|
||||
elsif params.public_only?
|
||||
if params.public_only?
|
||||
Issue.public_only
|
||||
else
|
||||
with_confidentiality_access_check
|
||||
|
|
|
@ -6,10 +6,6 @@ class IssuesFinder
|
|||
params.fetch(:public_only, false)
|
||||
end
|
||||
|
||||
def include_hidden?
|
||||
user_can_see_all_issues?
|
||||
end
|
||||
|
||||
def filter_by_any_due_date?
|
||||
due_date? && params[:due_date] == Issue::AnyDueDate.name
|
||||
end
|
||||
|
|
|
@ -212,7 +212,7 @@ module ProjectsHelper
|
|||
end
|
||||
|
||||
def no_password_message
|
||||
push_pull_link_start = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: help_page_path('gitlab-basics/start-using-git', anchor: 'pull-and-push') }
|
||||
push_pull_link_start = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: help_page_path('topics/git/terminology', anchor: 'pull-and-push') }
|
||||
clone_with_https_link_start = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: help_page_path('gitlab-basics/start-using-git', anchor: 'clone-with-https') }
|
||||
set_password_link_start = '<a href="%{url}">'.html_safe % { url: edit_profile_password_path }
|
||||
set_up_pat_link_start = '<a href="%{url}">'.html_safe % { url: profile_personal_access_tokens_path }
|
||||
|
|
|
@ -142,9 +142,7 @@ class Issue < ApplicationRecord
|
|||
scope :with_issue_type, ->(types) { where(issue_type: types) }
|
||||
scope :without_issue_type, ->(types) { where.not(issue_type: types) }
|
||||
|
||||
scope :public_only, -> {
|
||||
without_hidden.where(confidential: false)
|
||||
}
|
||||
scope :public_only, -> { where(confidential: false) }
|
||||
|
||||
scope :confidential_only, -> { where(confidential: true) }
|
||||
|
||||
|
|
|
@ -70,7 +70,7 @@ module Clusters
|
|||
{
|
||||
'clusters-path': clusterable.index_path,
|
||||
'dashboard-endpoint': clusterable.metrics_dashboard_path(cluster),
|
||||
'documentation-path': help_page_path('user/project/clusters/index', anchor: 'monitoring-your-kubernetes-cluster'),
|
||||
'documentation-path': help_page_path('user/infrastructure/clusters/manage/clusters_health'),
|
||||
'add-dashboard-documentation-path': help_page_path('operations/metrics/dashboards/index.md', anchor: 'add-a-new-dashboard-to-your-project'),
|
||||
'empty-getting-started-svg-path': image_path('illustrations/monitoring/getting_started.svg'),
|
||||
'empty-loading-svg-path': image_path('illustrations/monitoring/loading.svg'),
|
||||
|
|
|
@ -3,15 +3,11 @@
|
|||
module Groups
|
||||
# Service class for counting and caching the number of open issues of a group.
|
||||
class OpenIssuesCountService < Groups::CountService
|
||||
# TOTAL_COUNT_KEY includes confidential and hidden issues (admin)
|
||||
# TOTAL_COUNT_WITHOUT_HIDDEN_KEY includes confidential issues but not hidden issues (reporter and above)
|
||||
# PUBLIC_COUNT_WITHOUT_HIDDEN_KEY does not include confidential or hidden issues (guest)
|
||||
TOTAL_COUNT_KEY = 'group_open_issues_including_hidden_count'
|
||||
TOTAL_COUNT_WITHOUT_HIDDEN_KEY = 'group_open_issues_without_hidden_count'
|
||||
PUBLIC_COUNT_WITHOUT_HIDDEN_KEY = 'group_open_public_issues_without_hidden_count'
|
||||
PUBLIC_COUNT_KEY = 'group_public_open_issues_count'
|
||||
TOTAL_COUNT_KEY = 'group_total_open_issues_count'
|
||||
|
||||
def clear_all_cache_keys
|
||||
[cache_key(TOTAL_COUNT_KEY), cache_key(TOTAL_COUNT_WITHOUT_HIDDEN_KEY), cache_key(PUBLIC_COUNT_WITHOUT_HIDDEN_KEY)].each do |key|
|
||||
[cache_key(PUBLIC_COUNT_KEY), cache_key(TOTAL_COUNT_KEY)].each do |key|
|
||||
Rails.cache.delete(key)
|
||||
end
|
||||
end
|
||||
|
@ -19,19 +15,7 @@ module Groups
|
|||
private
|
||||
|
||||
def cache_key_name
|
||||
if include_hidden?
|
||||
TOTAL_COUNT_KEY
|
||||
elsif public_only?
|
||||
PUBLIC_COUNT_WITHOUT_HIDDEN_KEY
|
||||
else
|
||||
TOTAL_COUNT_WITHOUT_HIDDEN_KEY
|
||||
end
|
||||
end
|
||||
|
||||
def include_hidden?
|
||||
strong_memoize(:user_is_admin) do
|
||||
user&.can_admin_all_resources?
|
||||
end
|
||||
public_only? ? PUBLIC_COUNT_KEY : TOTAL_COUNT_KEY
|
||||
end
|
||||
|
||||
def public_only?
|
||||
|
@ -51,8 +35,7 @@ module Groups
|
|||
state: 'opened',
|
||||
non_archived: true,
|
||||
include_subgroups: true,
|
||||
public_only: public_only?,
|
||||
include_hidden: include_hidden?
|
||||
public_only: public_only?
|
||||
).execute
|
||||
end
|
||||
|
||||
|
|
|
@ -7,12 +7,8 @@ module Projects
|
|||
include Gitlab::Utils::StrongMemoize
|
||||
|
||||
# Cache keys used to store issues count
|
||||
# TOTAL_COUNT_KEY includes confidential and hidden issues (admin)
|
||||
# TOTAL_COUNT_WITHOUT_HIDDEN_KEY includes confidential issues but not hidden issues (reporter and above)
|
||||
# PUBLIC_COUNT_WITHOUT_HIDDEN_KEY does not include confidential or hidden issues (guest)
|
||||
TOTAL_COUNT_KEY = 'project_open_issues_including_hidden_count'
|
||||
TOTAL_COUNT_WITHOUT_HIDDEN_KEY = 'project_open_issues_without_hidden_count'
|
||||
PUBLIC_COUNT_WITHOUT_HIDDEN_KEY = 'project_open_public_issues_without_hidden_count'
|
||||
PUBLIC_COUNT_KEY = 'public_open_issues_count'
|
||||
TOTAL_COUNT_KEY = 'total_open_issues_count'
|
||||
|
||||
def initialize(project, user = nil)
|
||||
@user = user
|
||||
|
@ -20,98 +16,59 @@ module Projects
|
|||
super(project)
|
||||
end
|
||||
|
||||
# rubocop: disable CodeReuse/ActiveRecord
|
||||
def refresh_cache(&block)
|
||||
if block_given?
|
||||
super(&block)
|
||||
else
|
||||
update_cache_for_key(total_count_cache_key) do
|
||||
issues_with_hidden
|
||||
end
|
||||
|
||||
update_cache_for_key(public_count_without_hidden_cache_key) do
|
||||
issues_without_hidden_without_confidential
|
||||
end
|
||||
|
||||
update_cache_for_key(total_count_without_hidden_cache_key) do
|
||||
issues_without_hidden_with_confidential
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def relation_for_count
|
||||
self.class.query(@project, public_only: public_only?, include_hidden: include_hidden?)
|
||||
end
|
||||
|
||||
def cache_key_name
|
||||
if include_hidden?
|
||||
TOTAL_COUNT_KEY
|
||||
elsif public_only?
|
||||
PUBLIC_COUNT_WITHOUT_HIDDEN_KEY
|
||||
else
|
||||
TOTAL_COUNT_WITHOUT_HIDDEN_KEY
|
||||
end
|
||||
end
|
||||
|
||||
def include_hidden?
|
||||
user_is_admin?
|
||||
public_only? ? PUBLIC_COUNT_KEY : TOTAL_COUNT_KEY
|
||||
end
|
||||
|
||||
def public_only?
|
||||
!user_is_at_least_reporter?
|
||||
end
|
||||
|
||||
def user_is_admin?
|
||||
strong_memoize(:user_is_admin) do
|
||||
@user&.can_admin_all_resources?
|
||||
end
|
||||
end
|
||||
|
||||
def user_is_at_least_reporter?
|
||||
strong_memoize(:user_is_at_least_reporter) do
|
||||
@user && @project.team.member?(@user, Gitlab::Access::REPORTER)
|
||||
end
|
||||
end
|
||||
|
||||
def total_count_without_hidden_cache_key
|
||||
cache_key(TOTAL_COUNT_WITHOUT_HIDDEN_KEY)
|
||||
def relation_for_count
|
||||
self.class.query(@project, public_only: public_only?)
|
||||
end
|
||||
|
||||
def public_count_without_hidden_cache_key
|
||||
cache_key(PUBLIC_COUNT_WITHOUT_HIDDEN_KEY)
|
||||
def public_count_cache_key
|
||||
cache_key(PUBLIC_COUNT_KEY)
|
||||
end
|
||||
|
||||
def total_count_cache_key
|
||||
cache_key(TOTAL_COUNT_KEY)
|
||||
end
|
||||
|
||||
def issues_with_hidden
|
||||
self.class.query(@project, public_only: false, include_hidden: true).count
|
||||
# rubocop: disable CodeReuse/ActiveRecord
|
||||
def refresh_cache(&block)
|
||||
count_grouped_by_confidential = self.class.query(@project, public_only: false).group(:confidential).count
|
||||
public_count = count_grouped_by_confidential[false] || 0
|
||||
total_count = public_count + (count_grouped_by_confidential[true] || 0)
|
||||
|
||||
update_cache_for_key(public_count_cache_key) do
|
||||
public_count
|
||||
end
|
||||
|
||||
update_cache_for_key(total_count_cache_key) do
|
||||
total_count
|
||||
end
|
||||
end
|
||||
|
||||
def issues_without_hidden_without_confidential
|
||||
self.class.query(@project, public_only: true, include_hidden: false).count
|
||||
end
|
||||
|
||||
def issues_without_hidden_with_confidential
|
||||
self.class.query(@project, public_only: false, include_hidden: false).count
|
||||
end
|
||||
|
||||
# We only show total issues count for admins, who are allowed to view hidden issues.
|
||||
# We also only show issues count including confidential for reporters, who are allowed to view confidential issues.
|
||||
# We only show issues count including confidential for reporters, who are allowed to view confidential issues.
|
||||
# This will still show a discrepancy on issues number but should be less than before.
|
||||
# Check https://gitlab.com/gitlab-org/gitlab-foss/issues/38418 description.
|
||||
# rubocop: disable CodeReuse/ActiveRecord
|
||||
|
||||
def self.query(projects, public_only: true, include_hidden: false)
|
||||
if include_hidden
|
||||
Issue.opened.with_issue_type(Issue::TYPES_FOR_LIST).where(project: projects)
|
||||
elsif public_only
|
||||
Issue.public_only.opened.with_issue_type(Issue::TYPES_FOR_LIST).where(project: projects)
|
||||
# rubocop: disable CodeReuse/ActiveRecord
|
||||
def self.query(projects, public_only: true)
|
||||
issues_filtered_by_type = Issue.opened.with_issue_type(Issue::TYPES_FOR_LIST)
|
||||
|
||||
if public_only
|
||||
issues_filtered_by_type.public_only.where(project: projects)
|
||||
else
|
||||
Issue.without_hidden.opened.with_issue_type(Issue::TYPES_FOR_LIST).where(project: projects)
|
||||
issues_filtered_by_type.where(project: projects)
|
||||
end
|
||||
end
|
||||
# rubocop: enable CodeReuse/ActiveRecord
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
= external_link(domain.url, domain.url)
|
||||
- unless @project.public_pages?
|
||||
.card-footer.gl-alert-warning
|
||||
- help_page = help_page_path('/user/project/pages/pages_access_control')
|
||||
- help_page = help_page_path('user/project/pages/pages_access_control')
|
||||
- link_start = '<a href="%{url}" target="_blank" class="gl-alert-link" rel="noopener noreferrer">'.html_safe % { url: help_page }
|
||||
- link_end = '</a>'.html_safe
|
||||
= html_escape_once(s_('GitLabPages|Access Control is enabled for this Pages website; only authorized users will be able to access it. To make your website publicly available, navigate to your project\'s %{strong_start}Settings > General > Visibility%{strong_end} and select %{strong_start}Everyone%{strong_end} in pages section. Read the %{link_start}documentation%{link_end} for more information.')).html_safe % { link_start: link_start, link_end: link_end, strong_start: '<strong>'.html_safe, strong_end: '</strong>'.html_safe }
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
- name: "Container Network and Host Security"
|
||||
announcement_milestone: "14.8"
|
||||
announcement_date: "2022-02-22"
|
||||
removal_milestone: "15.0"
|
||||
removal_date: "2022-05-22"
|
||||
breaking_change: true
|
||||
reporter: sam.white
|
||||
body: | # Do not modify this line, instead modify the lines below.
|
||||
All functionality related to the Container Network Security and Container Host Security categories was deprecated in GitLab 14.8 and is scheduled for removal in GitLab 15.0. Users who need a replacement for this functionality are encouraged to evaluate the following open source projects as potential solutions that can be installed and managed outside of GitLab: [AppArmor](https://gitlab.com/apparmor/apparmor), [Cilium](https://github.com/cilium/cilium), [Falco](https://github.com/falcosecurity/falco), [FluentD](https://github.com/fluent/fluentd), [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/). To integrate these technologies with GitLab, add the desired Helm charts in your copy of the [Cluster Management Project Template](https://docs.gitlab.com/ee/user/clusters/management_project_template.html). Deploy these Helm charts in production by calling commands through the GitLab [Secure CI/CD Tunnel](https://docs.gitlab.com/ee/user/clusters/agent/repository.html#run-kubectl-commands-using-the-cicd-tunnel).
|
||||
|
||||
As part of this change, the following capabilities within GitLab are scheduled for removal in GitLab 15.0:
|
||||
|
||||
- The **Security & Compliance > Threat Monitoring** page.
|
||||
- The Network Policy security policy type, as found on the **Security & Compliance > Policies** page.
|
||||
- The ability to manage integrations with the following technologies through GitLab: AppArmor, Cilium, Falco, FluentD, and Pod Security Policies.
|
||||
- All APIs related to the above functionality.
|
||||
|
||||
For additional context, or to provide feedback regarding this change, please reference our [deprecation issue](https://gitlab.com/groups/gitlab-org/-/epics/7476).
|
||||
# The following items are not published on the docs page, but may be used in the future.
|
||||
stage: "Protect"
|
||||
issue_url: https://gitlab.com/groups/gitlab-org/-/epics/7477
|
|
@ -0,0 +1,18 @@
|
|||
- name: "Vulnerability Check"
|
||||
announcement_milestone: "14.8"
|
||||
announcement_date: "2022-02-22"
|
||||
removal_milestone: "15.0"
|
||||
removal_date: "2022-05-22"
|
||||
breaking_change: true
|
||||
reporter: sam.white
|
||||
body: | # Do not modify this line, instead modify the lines below.
|
||||
The vulnerability check feature was deprecated in GitLab 14.8 and is scheduled for removal in GitLab 15.0. We encourage you to migrate to the new security approvals feature instead. You can do so by navigating to **Security & Compliance > Policies** and creating a new Scan Result Policy.
|
||||
|
||||
The new security approvals feature is similar to vulnerability check. For example, both can require approvals for MRs that contain security vulnerabilities. However, security approvals improve the previous experience in several ways:
|
||||
|
||||
- Users can choose who is allowed to edit security approval rules. An independent security or compliance team can therefore manage rules in a way that prevents development project maintainers from modifying the rules.
|
||||
- Multiple rules can be created and chained together to allow for filtering on different severity thresholds for each scanner type.
|
||||
- A two-step approval process can be enforced for any desired changes to security approval rules.
|
||||
- A single set of security policies can be applied to multiple development projects to allow for ease in maintaining a single, centralized ruleset.
|
||||
stage: "Protect"
|
||||
issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/357300
|
|
@ -533,7 +533,7 @@ To delete the account immediately, you can manually
|
|||
|
||||
## Update user email addresses
|
||||
|
||||
Email addresses on the LDAP server are considered the source of truth for users when LDAP is used to sign in.
|
||||
Email addresses on the LDAP server are considered the source of truth for users when LDAP is used to sign in.
|
||||
|
||||
Updating user email addresses must be done on the LDAP server that manages the user. The email address for GitLab is updated either:
|
||||
|
||||
|
|
|
@ -800,7 +800,7 @@ gitaly['concurrency'] = [
|
|||
|
||||
- `rpc` is the name of the RPC to set a concurrency limit for per repository.
|
||||
- `max_per_repo` is the maximum number of in-flight RPC calls for the given RPC per repository.
|
||||
- `max_queue_time` is the maximum amount of time a request can wait in the concurrency queue to
|
||||
- `max_queue_time` is the maximum amount of time a request can wait in the concurrency queue to
|
||||
be picked up by Gitaly.
|
||||
- `max_queue_size` is the maximum size the concurrency queue can grow to before requests are rejected by
|
||||
Gitaly.
|
||||
|
@ -808,7 +808,7 @@ gitaly['concurrency'] = [
|
|||
This limits the number of in-flight RPC calls for the given RPCs. The limit is applied per
|
||||
repository. In the example above:
|
||||
|
||||
- Each repository served by the Gitaly server can have at most 20 simultaneous `PostUploadPackWithSidechannel` and
|
||||
- Each repository served by the Gitaly server can have at most 20 simultaneous `PostUploadPackWithSidechannel` and
|
||||
`SSHUploadPackWithSidechannel` RPC calls in flight.
|
||||
- If another request comes in for a repository that has used up its 20 slots, that request gets
|
||||
queued.
|
||||
|
|
|
@ -413,8 +413,8 @@ Gitaly can be configured to limit requests based on:
|
|||
Monitor Gitaly request limiting with the `gitaly_requests_dropped_total` Prometheus metric. This metric provides a total count
|
||||
of requests dropped due to request limiting. The `reason` label indicates why a request was dropped:
|
||||
|
||||
- `rate`, due to rate limiting.
|
||||
- `max_size`, because the concurrency queue size was reached.
|
||||
- `rate`, due to rate limiting.
|
||||
- `max_size`, because the concurrency queue size was reached.
|
||||
- `max_time`, because the request exceeded the maximum queue wait time as configured in Gitaly.
|
||||
|
||||
### Monitor Gitaly concurrency limiting
|
||||
|
@ -427,7 +427,7 @@ the Gitaly logs and Prometheus:
|
|||
- In Prometheus, look for the following metrics:
|
||||
- `gitaly_concurrency_limiting_in_progress` indicates how many concurrent requests are
|
||||
being processed.
|
||||
- `gitaly_concurrency_limiting_queued` indicates how many requests for an RPC for a given
|
||||
- `gitaly_concurrency_limiting_queued` indicates how many requests for an RPC for a given
|
||||
repository are waiting due to the concurrency limit being reached.
|
||||
- `gitaly_concurrency_limiting_acquiring_seconds` indiciates how long a request has to
|
||||
wait due to concurrency limits before being processed.
|
||||
|
|
|
@ -62,7 +62,7 @@ Using the consolidated object storage configuration has a number of advantages:
|
|||
- It enables the use of [encrypted S3 buckets](#encrypted-s3-buckets).
|
||||
- It [uploads files to S3 with proper `Content-MD5` headers](https://gitlab.com/gitlab-org/gitlab-workhorse/-/issues/222).
|
||||
|
||||
Because [direct upload mode](../development/uploads/implementation.md#direct-upload)
|
||||
Because [direct upload mode](../development/uploads/index.md#direct-upload)
|
||||
must be enabled, only the following providers can be used:
|
||||
|
||||
- [Amazon S3-compatible providers](#s3-compatible-connection-settings)
|
||||
|
|
|
@ -280,7 +280,7 @@ ApplicationSetting.update(container_registry_token_expire_delay: 30)
|
|||
The default expiration and the expiration on GitLab.com is 15 minutes.
|
||||
|
||||
## Using the dependency proxy behind a proxy
|
||||
|
||||
|
||||
1. Edit `/etc/gitlab/gitlab.rb` and add the following lines:
|
||||
|
||||
```ruby
|
||||
|
|
|
@ -86,7 +86,7 @@ card "Database" as database {
|
|||
card "redis" as redis {
|
||||
collections "**Redis Persistent** x3" as redis_persistent #FF6347
|
||||
collections "**Redis Cache** x3" as redis_cache #FF6347
|
||||
|
||||
|
||||
redis_cache -[hidden]-> redis_persistent
|
||||
}
|
||||
|
||||
|
@ -2304,7 +2304,7 @@ future with further specific cloud provider details.
|
|||
| Webservice | 16 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `m5.8xlarge` | 510 vCPU, 472 GB memory |
|
||||
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
|
||||
| Supporting services such as NGINX, Prometheus | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.75 vCPU, 25 GB memory |
|
||||
|
||||
|
||||
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
|
||||
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
|
||||
- Nodes configuration is shown as it is forced to ensure pod vcpu / memory ratios and avoid scaling during **performance testing**.
|
||||
|
@ -2377,7 +2377,7 @@ card "Database" as database {
|
|||
card "redis" as redis {
|
||||
collections "**Redis Persistent** x3" as redis_persistent #FF6347
|
||||
collections "**Redis Cache** x3" as redis_cache #FF6347
|
||||
|
||||
|
||||
redis_cache -[hidden]-> redis_persistent
|
||||
}
|
||||
|
||||
|
|
|
@ -83,7 +83,7 @@ Notify.test_email(u.email, "Test email for #{u.name}", 'Test email').deliver_now
|
|||
Adding a semicolon(`;`) and a follow-up statement at the end of a statement prevents the default implicit return output. This is useful if you are already explicitly printing details and potentially have a lot of return output:
|
||||
|
||||
```ruby
|
||||
puts ActiveRecord::Base.descendants; :ok
|
||||
puts ActiveRecord::Base.descendants; :ok
|
||||
Project.select(&:pages_deployed?).each {|p| puts p.pages_url }; true
|
||||
```
|
||||
|
||||
|
@ -800,9 +800,9 @@ Available permission levels are listed in
|
|||
|
||||
### Get all error messages associated with groups, subgroups, members, and requesters
|
||||
|
||||
Collect error messages associated with groups, subgroups, members, and requesters. This
|
||||
captures error messages that may not appear in the Web interface. This can be especially helpful
|
||||
for troubleshooting issues with [LDAP group sync](../auth/ldap/ldap_synchronization.md#group-sync)
|
||||
Collect error messages associated with groups, subgroups, members, and requesters. This
|
||||
captures error messages that may not appear in the Web interface. This can be especially helpful
|
||||
for troubleshooting issues with [LDAP group sync](../auth/ldap/ldap_synchronization.md#group-sync)
|
||||
and unexpected behavior with users and their membership in groups and subgroups.
|
||||
|
||||
```ruby
|
||||
|
|
|
@ -67,7 +67,7 @@ For source installations the following settings are nested under `uploads:` and
|
|||
|---------|-------------|---------|
|
||||
| `enabled` | Enable/disable object storage | `false` |
|
||||
| `remote_directory` | The bucket name where Uploads will be stored| |
|
||||
| `direct_upload` | Set to `true` to remove Puma from the Upload path. Workhorse handles the actual Artifact Upload to Object Storage while Puma does minimal processing to keep track of the upload. There is no need for local shared storage. The option may be removed if support for a single storage type for all files is introduced. Read more on [direct upload](../development/uploads/implementation.md#direct-upload). | `false` |
|
||||
| `direct_upload` | Set to `true` to remove Puma from the Upload path. Workhorse handles the actual Artifact Upload to Object Storage while Puma does minimal processing to keep track of the upload. There is no need for local shared storage. The option may be removed if support for a single storage type for all files is introduced. Read more on [direct upload](../development/uploads/index.md#direct-upload). | `false` |
|
||||
| `background_upload` | Set to `false` to disable automatic upload. Option may be removed once upload is direct to S3 (if `direct_upload` is set to `true` it will override `background_upload`) | `true` |
|
||||
| `proxy_download` | Set to `true` to enable proxying all files served. Option allows to reduce egress traffic as this allows clients to download directly from remote storage instead of proxying all data | `false` |
|
||||
| `connection` | Various connection options described below | |
|
||||
|
|
|
@ -14,6 +14,8 @@ This is the API documentation of [GitLab Packages](../administration/packages/in
|
|||
|
||||
Get a list of project packages. All package types are included in results. When
|
||||
accessed without authentication, only packages of public projects are returned.
|
||||
By default, packages with `default` and `error` status are returned. Use the `status` parameter to view other
|
||||
packages.
|
||||
|
||||
```plaintext
|
||||
GET /projects/:id/packages
|
||||
|
@ -27,7 +29,7 @@ GET /projects/:id/packages
|
|||
| `package_type` | string | no | Filter the returned packages by type. One of `conan`, `maven`, `npm`, `pypi`, `composer`, `nuget`, `helm`, `terraform_module`, or `golang`. (_Introduced in GitLab 12.9_)
|
||||
| `package_name` | string | no | Filter the project packages with a fuzzy search by name. (_Introduced in GitLab 12.9_)
|
||||
| `include_versionless` | boolean | no | When set to true, versionless packages are included in the response. (_Introduced in GitLab 13.8_)
|
||||
| `status` | string | no | Filter the returned packages by status. One of `default` (default), `hidden`, or `processing`. (_Introduced in GitLab 13.9_)
|
||||
| `status` | string | no | Filter the returned packages by status. One of `default` (default), `hidden`, `processing`, `error`, or `pending_destruction`. (_Introduced in GitLab 13.9_)
|
||||
|
||||
```shell
|
||||
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/:id/packages"
|
||||
|
@ -78,6 +80,8 @@ can result in malformed data or broken packages.
|
|||
|
||||
Get a list of project packages at the group level.
|
||||
When accessed without authentication, only packages of public projects are returned.
|
||||
By default, packages with `default` and `error` status are returned. Use the `status` parameter to view other
|
||||
packages.
|
||||
|
||||
```plaintext
|
||||
GET /groups/:id/packages
|
||||
|
@ -92,7 +96,7 @@ GET /groups/:id/packages
|
|||
| `package_type` | string | no | Filter the returned packages by type. One of `conan`, `maven`, `npm`, `pypi`, `composer`, `nuget`, `helm`, or `golang`. (_Introduced in GitLab 12.9_) |
|
||||
| `package_name` | string | no | Filter the project packages with a fuzzy search by name. (_[Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/30980) in GitLab 13.0_)
|
||||
| `include_versionless` | boolean | no | When set to true, versionless packages are included in the response. (_Introduced in GitLab 13.8_)
|
||||
| `status` | string | no | Filter the returned packages by status. One of `default` (default), `hidden`, or `processing`. (_Introduced in GitLab 13.9_)
|
||||
| `status` | string | no | Filter the returned packages by status. One of `default` (default), `hidden`, `processing`, `error`, or `pending_destruction`. (_Introduced in GitLab 13.9_)
|
||||
|
||||
```shell
|
||||
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/groups/:id/packages?exclude_subgroups=false"
|
||||
|
|
|
@ -11,7 +11,7 @@ type: reference, api
|
|||
|
||||
FLAG:
|
||||
On self-managed GitLab, by default this feature is not available. To make it available,
|
||||
ask an administrator to [enable the feature flag](../administration/feature_flags.md) named `ci_secure_files`. Limited to 100 secure files per project. Files must be smaller than 5 MB. The feature is not ready for production use.
|
||||
ask an administrator to [enable the feature flag](../administration/feature_flags.md) named `ci_secure_files`. Limited to 100 secure files per project. Files must be smaller than 5 MB. The feature is not ready for production use.
|
||||
|
||||
## List project secure files
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ Each image is running a specific version of macOS and Xcode.
|
|||
| macos-10.14-xcode-10 | <https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/macstadium/orka/-/blob/main/toolchain/mojave.yml> |
|
||||
| macos-10.15-xcode-11 | <https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/macstadium/orka/-/blob/main/toolchain/catalina.yml> |
|
||||
| macos-11-xcode-12 | <https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/macstadium/orka/-/blob/main/toolchain/big-sur.yml> |
|
||||
| macos-11-xcode-13 | <https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/macstadium/orka/-/blob/main/toolchain/monterey.yml>
|
||||
| macos-11-xcode-13 | <https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/macstadium/orka/-/blob/main/toolchain/monterey.yml>
|
||||
|
||||
### Image update policy
|
||||
|
||||
|
|
|
@ -33,7 +33,7 @@ Depending on the used constructs, we can classify migrations to be either:
|
|||
Migrations cannot mix **DDL** and **DML** changes as the application requires the structure
|
||||
(as described by `db/structure.sql`) to be exactly the same across all decomposed databases.
|
||||
|
||||
### Data Definition Language (DDL)
|
||||
### Data Definition Language (DDL)
|
||||
|
||||
The DDL migrations are all migrations that:
|
||||
|
||||
|
|
|
@ -47,7 +47,7 @@ The Content Editor requires two properties:
|
|||
|
||||
- `renderMarkdown` is an asynchronous function that returns the response (String) of invoking the
|
||||
[Markdown API](../../api/markdown.md).
|
||||
- `uploadsPath` is a URL that points to a [GitLab upload service](../uploads/implementation.md#upload-encodings)
|
||||
- `uploadsPath` is a URL that points to a [GitLab upload service](../uploads/index.md)
|
||||
with `multipart/form-data` support.
|
||||
|
||||
See the [`WikiForm.vue`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/assets/javascripts/pages/shared/wikis/components/wiki_form.vue#L207)
|
||||
|
|
|
@ -400,7 +400,7 @@ We are still learning the best practices for both **type policies** and **reacti
|
|||
Take a moment to improve this guide or [leave a comment](https://gitlab.com/gitlab-org/frontend/rfcs/-/issues/100)
|
||||
if you use it!
|
||||
|
||||
In the example below we define a `@client` query and its `typedefs`:
|
||||
In the example below we define a `@client` query and its `typedefs`:
|
||||
|
||||
```javascript
|
||||
// ./graphql/typedefs.graphql
|
||||
|
|
|
@ -144,7 +144,7 @@ In these cases, use the following workflow:
|
|||
If the page is not assigned to a specific group, follow the
|
||||
[Technical Writing review process for development guidelines](https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments-to-development-guidelines).
|
||||
The Technical Writer may ask for additional approvals as previously suggested before merging the MR.
|
||||
|
||||
|
||||
### Reviewer values
|
||||
|
||||
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/57293) in GitLab 14.1.
|
||||
|
|
|
@ -526,7 +526,7 @@ end
|
|||
|
||||
The usage of shared temporary storage is required if your intent
|
||||
is to persist the file to disk-based storage, and not Object Storage.
|
||||
[Workhorse direct_upload](uploads/implementation.md#direct-upload) when accepting file
|
||||
[Workhorse direct_upload](uploads/index.md#direct-upload) when accepting file
|
||||
can write it to shared storage, and later GitLab Rails can perform a move operation.
|
||||
The move operation on the same destination is instantaneous.
|
||||
The system instead of performing `copy` operation just re-attaches file into a new place.
|
||||
|
@ -550,7 +550,7 @@ that implements a seamless support for Shared and Object Storage-based persisten
|
|||
#### Data access
|
||||
|
||||
Each feature that accepts data uploads or allows to download them needs to use
|
||||
[Workhorse direct_upload](uploads/implementation.md#direct-upload). This means that uploads need to be
|
||||
[Workhorse direct_upload](uploads/index.md#direct-upload). This means that uploads need to be
|
||||
saved directly to Object Storage by Workhorse, and all downloads need to be served
|
||||
by Workhorse.
|
||||
|
||||
|
@ -562,5 +562,5 @@ can time out, which is especially problematic for slow clients. If clients take
|
|||
to upload/download the processing slot might be killed due to request processing
|
||||
timeout (usually between 30s-60s).
|
||||
|
||||
For the above reasons it is required that [Workhorse direct_upload](uploads/implementation.md#direct-upload) is implemented
|
||||
For the above reasons it is required that [Workhorse direct_upload](uploads/index.md#direct-upload) is implemented
|
||||
for all file uploads and downloads.
|
||||
|
|
|
@ -151,7 +151,7 @@ During this phase, the idea is to collect as much information as possible about
|
|||
1. Empty file structure (API file, base service for this package)
|
||||
1. Authentication system for "logging in" to the package manager
|
||||
1. Identify metadata and create applicable tables
|
||||
1. Workhorse route for [object storage direct upload](uploads/implementation.md#direct-upload)
|
||||
1. Workhorse route for [object storage direct upload](uploads/index.md#direct-upload)
|
||||
1. Endpoints required for upload/publish
|
||||
1. Endpoints required for install/download
|
||||
1. Endpoints required for required actions
|
||||
|
@ -210,7 +210,7 @@ File uploads should be handled by GitLab Workhorse using object accelerated uplo
|
|||
the workhorse proxy that checks all incoming requests to GitLab intercept the upload request,
|
||||
upload the file, and forward a request to the main GitLab codebase only containing the metadata
|
||||
and file location rather than the file itself. An overview of this process can be found in the
|
||||
[development documentation](uploads/implementation.md#direct-upload).
|
||||
[development documentation](uploads/index.md#direct-upload).
|
||||
|
||||
In terms of code, this means a route must be added to the
|
||||
[GitLab Workhorse project](https://gitlab.com/gitlab-org/gitlab-workhorse) for each upload endpoint being added
|
||||
|
|
|
@ -26,7 +26,7 @@ A metric definition has the [`instrumentation_class`](metrics_dictionary.md) fie
|
|||
|
||||
The defined instrumentation class should inherit one of the existing metric classes: `DatabaseMetric`, `RedisMetric`, `RedisHLLMetric`, or `GenericMetric`.
|
||||
|
||||
The current convention is that a single instrumentation class corresponds to a single metric. On rare occasions, there are exceptions to that convention like [Redis metrics](#redis-metrics). To use a single instrumentation class for more than one metric, please reach out to one of the `@gitlab-org/growth/product-intelligence/engineers` members to consult about your case.
|
||||
The current convention is that a single instrumentation class corresponds to a single metric. On rare occasions, there are exceptions to that convention like [Redis metrics](#redis-metrics). To use a single instrumentation class for more than one metric, please reach out to one of the `@gitlab-org/growth/product-intelligence/engineers` members to consult about your case.
|
||||
|
||||
Using the instrumentation classes ensures that metrics can fail safe individually, without breaking the entire
|
||||
process of Service Ping generation.
|
||||
|
|
|
@ -23,7 +23,7 @@ Please be sure to include the `feature_flag` tag so that the test can be skipped
|
|||
`name`
|
||||
|
||||
- Format: `feature_flag: { name: 'feature_flag_name' }`
|
||||
- Used only for informational purposes at this time. It should be included to help quickly determine what
|
||||
- Used only for informational purposes at this time. It should be included to help quickly determine what
|
||||
feature flag is under test.
|
||||
|
||||
`scope`
|
||||
|
@ -36,23 +36,23 @@ This is due to the fact that admin access is not available there.
|
|||
**WARNING:** You are strongly advised to first try and [enable feature flags only for a group, project, user](../../feature_flags/index.md#feature-actors),
|
||||
or [feature group](../../feature_flags/index.md#feature-groups).
|
||||
|
||||
- If a global feature flag must be used, it is strongly recommended to apply `scope: :global` to the `feature_flag` metadata. This is, however, left up to the SET's discretion to determine the level of risk.
|
||||
- For example, a test uses a global feature flag that only affects a small area of the application and is also needed to check for critical issues on live environments.
|
||||
In such a scenario, it would be riskier to skip running the test. For cases like this, `scope` can be left out of the metadata so that it can still run in live environments
|
||||
- If a global feature flag must be used, it is strongly recommended to apply `scope: :global` to the `feature_flag` metadata. This is, however, left up to the SET's discretion to determine the level of risk.
|
||||
- For example, a test uses a global feature flag that only affects a small area of the application and is also needed to check for critical issues on live environments.
|
||||
In such a scenario, it would be riskier to skip running the test. For cases like this, `scope` can be left out of the metadata so that it can still run in live environments
|
||||
with admin access, such as staging.
|
||||
|
||||
**Note on `requires_admin`:** This tag should still be applied if there are other actions within the test that require admin access that are unrelated to updating a
|
||||
**Note on `requires_admin`:** This tag should still be applied if there are other actions within the test that require admin access that are unrelated to updating a
|
||||
feature flag (ex: creating a user via the API).
|
||||
|
||||
The code below would enable a feature flag named `:feature_flag_name` for the project
|
||||
created by the test:
|
||||
|
||||
```ruby
|
||||
RSpec.describe "with feature flag enabled", feature_flag: {
|
||||
name: 'feature_flag_name',
|
||||
scope: :project
|
||||
RSpec.describe "with feature flag enabled", feature_flag: {
|
||||
name: 'feature_flag_name',
|
||||
scope: :project
|
||||
} do
|
||||
|
||||
|
||||
let(:project) { Resource::Project.fabricate_via_api! }
|
||||
|
||||
before do
|
||||
|
|
|
@ -1,154 +1,11 @@
|
|||
---
|
||||
stage: none
|
||||
group: unassigned
|
||||
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
|
||||
redirect_to: 'index.md'
|
||||
remove_date: '2022-07-25'
|
||||
---
|
||||
|
||||
# Uploads guide: Why GitLab uses custom upload logic
|
||||
This document was moved to [another location](index.md).
|
||||
|
||||
This page is for developers trying to better understand the history behind GitLab uploads and the
|
||||
technical challenges associated with uploads.
|
||||
|
||||
## Problem description
|
||||
|
||||
GitLab and [GitLab Workhorse](https://gitlab.com/gitlab-org/gitlab-workhorse) use special rules for handling file uploads,
|
||||
because in an ordinary Rails application file uploads can become expensive as files grow in size.
|
||||
Rails often sacrifices performance to provide a better developer experience, including how it handles
|
||||
`multipart/form-post` uploads. In any Rack server, Rails applications included, when such a request arrives at the application server,
|
||||
several things happen:
|
||||
|
||||
1. A [Rack middleware](https://github.com/rack/rack/blob/main/lib/rack/multipart.rb) intercepts the request and parses the request body.
|
||||
1. The middleware writes each file in the multipart request to a temporary directory on disk.
|
||||
1. A `params` hash is constructed with entries pointing to the respective files on disk.
|
||||
1. A Rails controller acts on the file contents.
|
||||
|
||||
While this is convenient for developers, it is costly for the Ruby server process to buffer large files on disk.
|
||||
Because of Ruby's [global interpreter lock](https://en.wikipedia.org/wiki/Global_interpreter_lock),
|
||||
only a single thread of execution of a given Ruby process can be on CPU. This means the amount of CPU
|
||||
time spent doing this is not available to other worker threads serving user requests.
|
||||
Buffering files to disk also means spending more time in I/O routines and mode switches, which are expensive operations.
|
||||
|
||||
The following diagram shows how GitLab handled such a request prior to putting optimizations in place.
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph "load balancers"
|
||||
LB(Proxy)
|
||||
end
|
||||
|
||||
subgraph "Shared storage"
|
||||
nfs(NFS)
|
||||
end
|
||||
|
||||
subgraph "redis cluster"
|
||||
r(persisted redis)
|
||||
end
|
||||
LB-- 1 -->Workhorse
|
||||
|
||||
subgraph "web or API fleet"
|
||||
Workhorse-- 2 -->rails
|
||||
end
|
||||
rails-- "3 (write files)" -->nfs
|
||||
rails-- "4 (schedule a job)" -->r
|
||||
|
||||
subgraph sidekiq
|
||||
s(sidekiq)
|
||||
end
|
||||
s-- "5 (fetch a job)" -->r
|
||||
s-- "6 (read files)" -->nfs
|
||||
```
|
||||
|
||||
We went through two major iterations of our uploads architecture to improve on these problems:
|
||||
|
||||
1. [Moving disk buffering to Workhorse.](#moving-disk-buffering-to-workhorse)
|
||||
1. [Uploading to Object Storage from Workhorse.](#moving-to-object-storage-and-direct-uploads)
|
||||
|
||||
### Moving disk buffering to Workhorse
|
||||
|
||||
To address the performance issues resulting from buffering files in Ruby, we moved this logic to Workhorse instead,
|
||||
our reverse proxy fronting the GitLab Rails application.
|
||||
Workhorse is written in Go, and is much better at dealing with stream processing and I/O than Rails.
|
||||
|
||||
There are two parts to this implementation:
|
||||
|
||||
1. In Workhorse, a request handler detects `multipart/form-data` content in an incoming user request.
|
||||
If such a request is detected, Workhorse hijacks the request body before forwarding it to Rails.
|
||||
Workhorse writes all files to disk, rewrites the multipart form fields to point to the new locations, signs the
|
||||
request, then forwards it to Rails.
|
||||
1. In Rails, a [custom multipart Rack middleware](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/middleware/multipart.rb)
|
||||
identifies any signed multipart requests coming from Workhorse and prepares the `params` hash Rails
|
||||
would expect, now pointing to the files cached by Workhorse. This makes it a drop-in replacement for `Rack::Multipart`.
|
||||
|
||||
The diagram below shows how GitLab handles such a request today:
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph "load balancers"
|
||||
LB(HA Proxy)
|
||||
end
|
||||
|
||||
subgraph "Shared storage"
|
||||
nfs(NFS)
|
||||
end
|
||||
|
||||
subgraph "redis cluster"
|
||||
r(persisted redis)
|
||||
end
|
||||
LB-- 1 -->Workhorse
|
||||
|
||||
subgraph "web or API fleet"
|
||||
Workhorse-- "3 (without files)" -->rails
|
||||
end
|
||||
Workhorse -- "2 (write files)" -->nfs
|
||||
rails-- "4 (schedule a job)" -->r
|
||||
|
||||
subgraph sidekiq
|
||||
s(sidekiq)
|
||||
end
|
||||
s-- "5 (fetch a job)" -->r
|
||||
s-- "6 (read files)" -->nfs
|
||||
```
|
||||
|
||||
While this "one-size-fits-all" solution greatly improves performance for multipart uploads without compromising
|
||||
developer ergonomics, it severely limits GitLab [availability](#availability-challenges)
|
||||
and [scalability](#scalability-challenges).
|
||||
|
||||
#### Availability challenges
|
||||
|
||||
Moving file buffering to Workhorse addresses the immediate performance problems stemming from Ruby not being good at
|
||||
handling large file uploads. However, a remaining issue of this solution is its reliance on attached storage,
|
||||
whether via ordinary hard drives or network attached storage like NFS.
|
||||
NFS is a [single point of failure](https://en.wikipedia.org/wiki/Single_point_of_failure), and is unsuitable for
|
||||
deploying GitLab in highly available, cloud native environments.
|
||||
|
||||
#### Scalability challenges
|
||||
|
||||
NFS is not a part of cloud native installations, such as those running in Kubernetes.
|
||||
In Kubernetes, machine boundaries translate to pods, and without network-attached storage, disk-buffered uploads
|
||||
must be written directly to the pod's file system.
|
||||
|
||||
Using disk buffering presents us with a scalability challenge here. If Workhorse can only
|
||||
write files to a pod's private file system, then these files are inaccessible outside of this particular pod.
|
||||
With disk buffering, a Rails controller will accept a file upload and enqueue it for upload in a Sidekiq
|
||||
background job. Therefore, Sidekiq requires access to these files.
|
||||
However, in a cloud native environment all Sidekiq instances run on separate pods, so they are
|
||||
not able to access files buffered to disk on a web server pod.
|
||||
|
||||
Therefore, all features that involve Sidekiq uploading disk-buffered files severely limit the scalability of GitLab.
|
||||
|
||||
## Moving to object storage and direct uploads
|
||||
|
||||
To address these availability and scalability problems,
|
||||
instead of buffering files to disk, we have added support for uploading files directly
|
||||
from Workhorse to a given destination. While it remains possible to upload to local or network-attached storage
|
||||
this way, you should use a highly available
|
||||
[object store](https://en.wikipedia.org/wiki/Object_storage),
|
||||
such as AWS S3, Google GCS, or Azure, for scalability reasons.
|
||||
|
||||
With direct uploads, Workhorse does not buffer files to disk. Instead, it first authorizes the request with
|
||||
the Rails application to find out where to upload it, then streams the file directly to its ultimate destination.
|
||||
|
||||
To learn more about how disk buffering and direct uploads are implemented, see:
|
||||
|
||||
- [How uploads work technically](implementation.md)
|
||||
- [Adding new uploads](working_with_uploads.md)
|
||||
<!-- This redirect file can be deleted after <2022-07-25>. -->
|
||||
<!-- Redirects that point to other docs in the same project expire in three months. -->
|
||||
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
|
||||
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
|
||||
|
|
|
@ -1,190 +1,11 @@
|
|||
---
|
||||
stage: none
|
||||
group: unassigned
|
||||
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
|
||||
redirect_to: 'index.md'
|
||||
remove_date: '2022-07-25'
|
||||
---
|
||||
|
||||
# Uploads guide: How uploads work technically
|
||||
This document was moved to [another location](index.md).
|
||||
|
||||
This page is for developers trying to better understand what kinds of uploads exist in GitLab and how they are implemented.
|
||||
|
||||
## Kinds of uploads and how to choose between them
|
||||
|
||||
We can identify three major use-cases for an upload:
|
||||
|
||||
1. **storage:** if we are uploading for storing a file (like artifacts, packages, or discussion attachments). In this case [direct upload](#direct-upload) is the proper level as it's the less resource-intensive operation. Additional information can be found on [File Storage in GitLab](../file_storage.md).
|
||||
1. **in-controller/synchronous processing:** if we allow processing **small files** synchronously, using [disk buffered upload](#disk-buffered-upload) may speed up development.
|
||||
1. **Sidekiq/asynchronous processing:** Asynchronous processing must implement [direct upload](#direct-upload), the reason being that it's the only way to support Cloud Native deployments without a shared NFS.
|
||||
|
||||
Selecting the proper acceleration is a tradeoff between speed of development and operational costs.
|
||||
|
||||
For more details about currently broken feature see [epic &1802](https://gitlab.com/groups/gitlab-org/-/epics/1802).
|
||||
|
||||
### Handling repository uploads
|
||||
|
||||
Some features involve Git repository uploads without using a regular Git client.
|
||||
Some examples are uploading a repository file from the web interface and [design management](../../user/project/issues/design_management.md).
|
||||
|
||||
Those uploads require the Rails controller to act as a Git client in lieu of the user.
|
||||
Those operations fall into the _in-controller/synchronous processing_ category, but we have no guarantees about the file size.
|
||||
|
||||
In the case of an LFS upload, the file pointer is committed synchronously, but the file upload to object storage is performed asynchronously with Sidekiq.
|
||||
|
||||
## Upload encodings
|
||||
|
||||
By upload encoding we mean how the file is included within the incoming request.
|
||||
|
||||
We have three kinds of file encoding in our uploads:
|
||||
|
||||
1. <i class="fa fa-check-circle"></i> **multipart**: `multipart/form-data` is the most common, a file is encoded as a part of a multipart encoded request.
|
||||
1. <i class="fa fa-check-circle"></i> **body**: some APIs upload files as the whole request body.
|
||||
1. <i class="fa fa-times-circle"></i> **JSON**: some JSON APIs upload files as base64-encoded strings. This requires a change to GitLab Workhorse,
|
||||
which is tracked [in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/325068).
|
||||
|
||||
## Uploading technologies
|
||||
|
||||
By uploading technologies we mean how all the involved services interact with each other.
|
||||
|
||||
GitLab supports 3 kinds of uploading technologies, here follows a brief description with a sequence diagram for each one. Diagrams are not meant to be exhaustive.
|
||||
|
||||
### Rack Multipart upload
|
||||
|
||||
This is the default kind of upload, and it's the most expensive in terms of resources.
|
||||
|
||||
In this case, Workhorse is unaware of files being uploaded and acts as a regular proxy.
|
||||
|
||||
When a multipart request reaches the rails application, `Rack::Multipart` leaves behind temporary files in `/tmp` and uses valuable Ruby process time to copy files around.
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant c as Client
|
||||
participant w as Workhorse
|
||||
participant r as Rails
|
||||
|
||||
activate c
|
||||
c ->>+w: POST /some/url/upload
|
||||
w->>+r: POST /some/url/upload
|
||||
|
||||
r->>r: save the incoming file on /tmp
|
||||
r->>r: read the file for processing
|
||||
|
||||
r-->>-c: request result
|
||||
deactivate c
|
||||
deactivate w
|
||||
```
|
||||
|
||||
### Disk buffered upload
|
||||
|
||||
This kind of upload avoids wasting resources caused by handling upload writes to `/tmp` in rails.
|
||||
|
||||
This optimization is not active by default on REST API requests.
|
||||
|
||||
When enabled, Workhorse looks for files in multipart MIME requests, uploading
|
||||
any it finds to a temporary file on shared storage. The MIME data in the request
|
||||
is replaced with the path to the corresponding file before it is forwarded to
|
||||
Rails.
|
||||
|
||||
To prevent abuse of this feature, Workhorse signs the modified request with a
|
||||
special header, stating which entries it modified. Rails ignores any
|
||||
unsigned path entries.
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant c as Client
|
||||
participant w as Workhorse
|
||||
participant r as Rails
|
||||
participant s as NFS
|
||||
|
||||
activate c
|
||||
c ->>+w: POST /some/url/upload
|
||||
|
||||
w->>+s: save the incoming file on a temporary location
|
||||
s-->>-w: request result
|
||||
|
||||
w->>+r: POST /some/url/upload
|
||||
Note over w,r: file was replaced with its location<br>and other metadata
|
||||
|
||||
opt requires async processing
|
||||
r->>+redis: schedule a job
|
||||
redis-->>-r: job is scheduled
|
||||
end
|
||||
|
||||
r-->>-c: request result
|
||||
deactivate c
|
||||
w->>-w: cleanup
|
||||
|
||||
opt requires async processing
|
||||
activate sidekiq
|
||||
sidekiq->>+redis: fetch a job
|
||||
redis-->>-sidekiq: job
|
||||
|
||||
sidekiq->>+s: read file
|
||||
s-->>-sidekiq: file
|
||||
|
||||
sidekiq->>sidekiq: process file
|
||||
|
||||
deactivate sidekiq
|
||||
end
|
||||
```
|
||||
|
||||
### Direct upload
|
||||
|
||||
This is the more advanced acceleration technique we have in place.
|
||||
|
||||
Workhorse asks Rails for temporary pre-signed object storage URLs and directly uploads to object storage.
|
||||
|
||||
In this setup, an extra Rails route must be implemented in order to handle authorization. Examples of this can be found in:
|
||||
|
||||
- [`Projects::LfsStorageController`](https://gitlab.com/gitlab-org/gitlab/-/blob/cc723071ad337573e0360a879cbf99bc4fb7adb9/app/controllers/projects/lfs_storage_controller.rb)
|
||||
and [its routes](https://gitlab.com/gitlab-org/gitlab/-/blob/cc723071ad337573e0360a879cbf99bc4fb7adb9/config/routes/git_http.rb#L31-32).
|
||||
- [API endpoints for uploading packages](../packages.md#file-uploads).
|
||||
|
||||
Direct upload falls back to _disk buffered upload_ when `direct_upload` is disabled inside the [object storage setting](../../administration/uploads.md#object-storage-settings).
|
||||
The answer to the `/authorize` call contains only a file system path.
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant c as Client
|
||||
participant w as Workhorse
|
||||
participant r as Rails
|
||||
participant os as Object Storage
|
||||
|
||||
activate c
|
||||
c ->>+w: POST /some/url/upload
|
||||
|
||||
w ->>+r: POST /some/url/upload/authorize
|
||||
Note over w,r: this request has an empty body
|
||||
r-->>-w: presigned OS URL
|
||||
|
||||
w->>+os: PUT file
|
||||
    Note over w,os: file is stored in a temporary location. Rails selects the destination
|
||||
os-->>-w: request result
|
||||
|
||||
w->>+r: POST /some/url/upload
|
||||
Note over w,r: file was replaced with its location<br>and other metadata
|
||||
|
||||
r->>+os: move object to final destination
|
||||
os-->>-r: request result
|
||||
|
||||
opt requires async processing
|
||||
r->>+redis: schedule a job
|
||||
redis-->>-r: job is scheduled
|
||||
end
|
||||
|
||||
r-->>-c: request result
|
||||
deactivate c
|
||||
w->>-w: cleanup
|
||||
|
||||
opt requires async processing
|
||||
activate sidekiq
|
||||
sidekiq->>+redis: fetch a job
|
||||
redis-->>-sidekiq: job
|
||||
|
||||
sidekiq->>+os: get object
|
||||
os-->>-sidekiq: file
|
||||
|
||||
sidekiq->>sidekiq: process file
|
||||
|
||||
deactivate sidekiq
|
||||
end
|
||||
```
|
||||
<!-- This redirect file can be deleted after <2022-07-25>. -->
|
||||
<!-- Redirects that point to other docs in the same project expire in three months. -->
|
||||
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
|
||||
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
|
||||
|
|
|
@ -6,9 +6,159 @@ info: To determine the technical writer assigned to the Stage/Group associated w
|
|||
|
||||
# Uploads development guide
|
||||
|
||||
Uploads are an integral part of many GitLab features. To understand how GitLab handles uploads, refer to
|
||||
the following pages:
|
||||
Uploads are an integral part of many GitLab features. To understand how GitLab handles uploads, this page
|
||||
provides an overview of the key mechanisms for transferring files to a storage destination.
|
||||
|
||||
- [Why GitLab uses custom upload logic.](background.md)
|
||||
- [How uploads work technically.](implementation.md)
|
||||
- [How to add new uploads.](working_with_uploads.md)
|
||||
GitLab uploads are configured by feature. All features that involve uploads provide the same configuration options,
|
||||
but they can be configured independently of one another. For example, Git LFS uploads can be configured
|
||||
independently of CI/CD build artifact uploads, but they both offer the same set of settings keys. These settings
|
||||
govern how an upload is processed, which can have a dramatic impact on performance and scalability.
|
||||
|
||||
This page summarizes the upload settings that are important in deciding how such files are handled. The sections
|
||||
that follow then describe each of these mechanisms in more detail.
|
||||
|
||||
## How upload settings drive upload flow
|
||||
|
||||
Before we examine individual upload strategies in more detail, let's examine a high-level
|
||||
breakdown of which upload settings map to each of these strategies.
|
||||
|
||||
Upload settings themselves are documented in [Uploads administration](../../administration/uploads.md).
|
||||
Here, we focus on how these settings drive the internals of GitLab upload logic.
|
||||
At the top level, we distinguish between two **destinations** for uploaded files:
|
||||
|
||||
- [**Local storage**](#local-storage) - Files are stored on a volume attached to the web server node.
|
||||
- [**Object storage**](#object-storage) - Files are stored in a remote object store bucket.
|
||||
|
||||
In this table, `x.y.z` specifies the path taken through `gitlab.yml`:
|
||||
|
||||
| Setting | Value | Behavior |
|
||||
| -------------------------------------- | ------- | ------------------------------- |
|
||||
| `<feature>.object_store.enabled` | `false` | Files are stored locally in `<feature>.storage_path` |
|
||||
| `<feature>.object_store.enabled` | `true` | Files are stored remotely in `<feature>.object_store.remote_directory` |
|
||||
|
||||
When using object storage, administrators can control how those files are moved into the respective bucket.
|
||||
This move can happen in one of these ways:
|
||||
|
||||
- [Rails controller upload](#rails-controller-upload).
|
||||
- [Background upload](#background-upload).
|
||||
- [Direct upload](#direct-upload).
|
||||
|
||||
These strategies activate as per the following `<feature>.object_store.*` settings:
|
||||
|
||||
| | `background_upload` = `false` | `background_upload` = `true` |
|
||||
| ------------------------- | ----------------------------- | ------------------------------- |
|
||||
| `direct_upload` = `false` | Controller upload | Background upload |
|
||||
| `direct_upload` = `true` | Direct upload | Direct upload (takes precedence)|
|
||||
|
||||
Individual Sidekiq workers might also store files in object storage, which is not something we cover here.
|
||||
More importantly, `background_upload` does not imply _all files are uploaded by Sidekiq._
|
||||
Sidekiq workers that store files in object storage could still exist when this setting is `false`.
|
||||
Those cases are never user-initiated uploads, but they might occur in response to another user-initiated
|
||||
action, such as exporting a GitLab repository.
|
||||
|
||||
Finally, Workhorse assists most user-initiated uploads using an upload buffering mechanism to keep slow work out of Rails controllers.
|
||||
This mechanism is explained in [Workhorse assisted uploads](#workhorse-assisted-uploads),
|
||||
as it runs orthogonal to much of what we discuss beforehand.
|
||||
|
||||
We now look at each case in more detail.
|
||||
|
||||
## Local storage
|
||||
|
||||
Local storage is the simplest path an upload can take. It was how GitLab treated uploads in its early days.
|
||||
It assumes a storage volume (like a disk or network attached storage) is accessible
|
||||
to the Rails application at `storage_path`. This file path is relative to the Rails root directory and,
|
||||
like any upload setting, configurable per feature.
|
||||
|
||||
When a client sends a file upload, Workhorse first buffers the file to disk, a mechanism explained in more
|
||||
detail in [Workhorse assisted uploads](#workhorse-assisted-uploads). When the request reaches the Rails
|
||||
application, the file already exists on local storage, so Rails merely has to move it to the specified
|
||||
directory to finalize the transaction.
|
||||
|
||||
Local storage cannot be used with cloud-native GitLab (CNG) installations. It is therefore not used for
|
||||
GitLab SaaS either.
|
||||
|
||||
## Object storage
|
||||
|
||||
To provide horizontally scalable storage, you must use an object store provider such as:
|
||||
|
||||
- Amazon AWS.
|
||||
- Google Cloud Storage (GCS).
|
||||
- Azure Cloud Storage.
|
||||
|
||||
Using object storage provides two main benefits:
|
||||
|
||||
- Ease of adding more storage capacity: cloud providers do this for you automatically.
|
||||
- Enabling horizontal scaling of your GitLab installation: multiple GitLab application servers can access the same data
|
||||
when it is stored in object storage.
|
||||
|
||||
CNG installations, including GitLab SaaS, always use object storage (GCS in the case of GitLab SaaS).
|
||||
|
||||
A challenge with uploading to a remote object store is that it includes an outgoing HTTP request from
|
||||
GitLab to the object store provider. As mentioned above, there are three different strategies available for how
|
||||
this HTTP request is sent.
|
||||
|
||||
- [Rails controller upload](#rails-controller-upload).
|
||||
- [Background upload](#background-upload).
|
||||
- [Direct upload](#direct-upload).
|
||||
|
||||
### Rails controller upload
|
||||
|
||||
When neither background upload nor direct upload are available, Rails uploads the file to object storage
|
||||
as part of the controller `create` action. Which controller is responsible depends on the kind of file uploaded.
|
||||
|
||||
A Rails controller upload is very similar to uploading to local storage. The main difference: Rails must
|
||||
send an HTTP request to the object store. This happens via the [CarrierWave Fog](https://github.com/carrierwaveuploader/carrierwave#fog)
|
||||
uploader.
|
||||
|
||||
As with local storage, this strategy benefits from [Workhorse assistance](#workhorse-assisted-uploads) to
|
||||
keep some of the costly I/O work out of Ruby and Rails. Direct upload does a better job at this because it also keeps the HTTP PUT requests to object storage outside Puma.
|
||||
|
||||
This strategy is only suitable for small file uploads, as it is subject to Puma's 60-second request timeout.
|
||||
|
||||
### Background upload
|
||||
|
||||
WARNING:
|
||||
This strategy is deprecated in GitLab 14.9 and later, and is scheduled to [be removed in GitLab 15.0](https://gitlab.com/gitlab-org/gitlab/-/issues/26600).
|
||||
|
||||
With background uploads enabled:
|
||||
|
||||
1. Files are uploaded as if they were to reside in local storage.
|
||||
1. When Rails saves the upload metadata and the transaction completes, a Sidekiq job is scheduled.
|
||||
1. The Sidekiq job transfers the file to the object store bucket.
|
||||
- If the job completes, the upload record is updated to reflect the file's new location.
|
||||
- If the job fails or gets lost, the upload stays in local storage and has the lifecycle of a normal local storage upload.
|
||||
|
||||
As Rails and Sidekiq must cooperate to move the file to its final destination, it requires shared
|
||||
storage and as such is unsuitable for CNG installations. We do not use background upload in GitLab SaaS.
|
||||
|
||||
As background upload is an extension of local storage, it benefits from the same [Workhorse assistance](#workhorse-assisted-uploads) to
|
||||
keep costly I/O work out of Ruby and Rails.
|
||||
|
||||
### Direct upload
|
||||
|
||||
Direct upload is the recommended way to move large files into object storage in CNG installations like GitLab SaaS.
|
||||
|
||||
With direct upload enabled, Workhorse:
|
||||
|
||||
1. Authorizes the request with Rails.
|
||||
1. Establishes a connection with the object store itself to transfer the file to a temporary location.
|
||||
1. When the transfer is complete, Workhorse finalizes the request with Rails. Rails issues an object store copy operation to put the file in its final location.
|
||||
1. Completes the upload by deleting the temporary file in object storage.
|
||||
|
||||
This strategy is a different form of [Workhorse assistance](#workhorse-assisted-uploads). It does not rely on shared storage that is accessible by both Workhorse and Puma.
|
||||
|
||||
Of all existing upload strategies, direct upload is best able to handle large (gigabyte) uploads. However, because Puma still does an object storage copy operation, which takes time proportional to the size of the upload, there remains a possibility of hitting Puma timeouts.
|
||||
|
||||
## Workhorse assisted uploads
|
||||
|
||||
Most uploads receive assistance from Workhorse in some way.
|
||||
|
||||
- Often, Workhorse buffers the upload to a temporary file. Workhorse adds metadata to the request to tell
|
||||
Puma the name and location of the temporary file. This requires shared temporary storage between Workhorse and Puma.
|
||||
All GitLab installations (including CNG) have this shared temporary storage.
|
||||
- Workhorse sometimes pre-processes the file. For example, for CI artifact uploads, Workhorse creates a separate index
|
||||
of the contents of the ZIP file. By doing this in Workhorse we bypass the Puma request timeout.
|
||||
Compared to Sidekiq background processing, this has the advantage that the user does not see an intermediate state
|
||||
where GitLab accepts the file but has not yet processed it.
|
||||
- With direct upload, Workhorse can both pre-process the file and upload it to object storage.
|
||||
Uploading a large file to object storage takes time; by doing this in Workhorse we avoid the Puma request timeout.
|
||||
|
|
|
@ -6,7 +6,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
|
|||
|
||||
# Uploads guide: Adding new uploads
|
||||
|
||||
In this section, we describe how to add a new upload route [accelerated](implementation.md#uploading-technologies) by Workhorse for [body and multipart](implementation.md#upload-encodings) encoded uploads.
|
||||
Here, we describe how to add a new upload route [accelerated](index.md#workhorse-assisted-uploads) by Workhorse.
|
||||
|
||||
Upload routes belong to one of these categories:
|
||||
|
||||
|
@ -15,31 +15,31 @@ Upload routes belong to one of these categories:
|
|||
1. GraphQL API: uploads handled by a GraphQL resolve function.
|
||||
|
||||
WARNING:
|
||||
GraphQL uploads do not support [direct upload](implementation.md#direct-upload) yet. Depending on the use case, the feature may not work on installations without NFS (like GitLab.com or Kubernetes installations). Uploading to object storage inside the GraphQL resolve function may result in timeout errors. For more details please follow [issue #280819](https://gitlab.com/gitlab-org/gitlab/-/issues/280819).
|
||||
GraphQL uploads do not support [direct upload](index.md#direct-upload). Depending on the use case, the feature may not work on installations without NFS (like GitLab.com or Kubernetes installations). Uploading to object storage inside the GraphQL resolve function may result in timeout errors. For more details, follow [issue #280819](https://gitlab.com/gitlab-org/gitlab/-/issues/280819).
|
||||
|
||||
## Update Workhorse for the new route
|
||||
|
||||
For both the Rails controller and Grape API uploads, Workhorse has to be updated in order to get the
|
||||
For both the Rails controller and Grape API uploads, Workhorse must be updated to get the
|
||||
support for the new upload route.
|
||||
|
||||
1. Open a new issue in the [Workhorse tracker](https://gitlab.com/gitlab-org/gitlab-workhorse/-/issues/new) describing precisely the new upload route:
|
||||
- The route's URL.
|
||||
- The [upload encoding](implementation.md#upload-encodings).
|
||||
- The upload encoding.
|
||||
- If possible, provide a dump of the upload request.
|
||||
1. Implement and get the MR merged for this issue above.
|
||||
1. Ask the Maintainers of [Workhorse](https://gitlab.com/gitlab-org/gitlab-workhorse) to create a new release. You can do that in the MR
|
||||
directly during the maintainer review or ask for it in the `#workhorse` Slack channel.
|
||||
1. Ask the Maintainers of [Workhorse](https://gitlab.com/gitlab-org/gitlab-workhorse) to create a new release. You can do that in the merge request
|
||||
directly during the maintainer review, or ask for it in the `#workhorse` Slack channel.
|
||||
1. Bump the [Workhorse version file](https://gitlab.com/gitlab-org/gitlab/-/blob/master/GITLAB_WORKHORSE_VERSION)
|
||||
to the version you have from the previous points, or bump it in the same merge request that contains
|
||||
the Rails changes (see [Implementing the new route with a Rails controller](#implementing-the-new-route-with-a-rails-controller) or [Implementing the new route with a Grape API endpoint](#implementing-the-new-route-with-a-grape-api-endpoint) below).
|
||||
the Rails changes. Refer to [Implementing the new route with a Rails controller](#implementing-the-new-route-with-a-rails-controller) or [Implementing the new route with a Grape API endpoint](#implementing-the-new-route-with-a-grape-api-endpoint) below.
|
||||
|
||||
## Implementing the new route with a Rails controller
|
||||
|
||||
For a Rails controller upload, we usually have a [multipart](implementation.md#upload-encodings) upload and there are a
|
||||
For a Rails controller upload, we usually have a `multipart/form-data` upload and there are a
|
||||
few things to do:
|
||||
|
||||
1. The upload is available under the parameter name you're using. For example, it could be an `artifact`
|
||||
or a nested parameter such as `user[avatar]`. Let's say that we have the upload under the
|
||||
or a nested parameter such as `user[avatar]`. If you have the upload under the
|
||||
`file` parameter, reading `params[:file]` should get you an [`UploadedFile`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/uploaded_file.rb) instance.
|
||||
1. Generally speaking, it's a good idea to check if the instance is from the [`UploadedFile`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/uploaded_file.rb) class. For example, see how we checked
|
||||
[that the parameter is indeed an `UploadedFile`](https://gitlab.com/gitlab-org/gitlab/-/commit/ea30fe8a71bf16ba07f1050ab4820607b5658719#51c0cc7a17b7f12c32bc41cfab3649ff2739b0eb_79_77).
|
||||
|
@ -53,7 +53,7 @@ builds automatically for you.
|
|||
|
||||
## Implementing the new route with a Grape API endpoint
|
||||
|
||||
For a Grape API upload, we can have [body or a multipart](implementation.md#upload-encodings) upload. Things are slightly more complicated: two endpoints are needed. One for the
|
||||
For a Grape API upload, we can have a body or multipart upload. Things are slightly more complicated: two endpoints are needed. One for the
|
||||
Workhorse pre-upload authorization and one for accepting the upload metadata from Workhorse:
|
||||
|
||||
1. Implement an endpoint with the URL + `/authorize` suffix that will:
|
||||
|
@ -70,8 +70,8 @@ use `requires :file, type: ::API::Validations::Types::WorkhorseFile`.
|
|||
- Check that the request is coming from Workhorse with the `require_gitlab_workhorse!` from the
|
||||
[API helpers](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/api/helpers.rb).
|
||||
- Check the user permissions.
|
||||
- The remaining code of the processing. This is where the code must be reading the parameter (for
|
||||
our example, it would be `params[:file]`).
|
||||
- The remaining code of the processing. In this step, the code must read the parameter. For
|
||||
our example, it would be `params[:file]`.
|
||||
|
||||
WARNING:
|
||||
**Do not** call `UploadedFile#from_params` directly! Do not build an [`UploadedFile`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/uploaded_file.rb)
|
||||
|
@ -124,40 +124,40 @@ Therefore, document new uploads here by slotting them into the following tables:
|
|||
|
||||
### CarrierWave integration
|
||||
|
||||
| File | Carrierwave usage | Categorized |
|
||||
| File | CarrierWave usage | Categorized |
|
||||
|---------------------------------------------------------|----------------------------------------------------------------------------------|---------------------|
|
||||
| `app/models/project.rb` | `include Avatarable` | :white_check_mark: |
|
||||
| `app/models/projects/topic.rb` | `include Avatarable` | :white_check_mark: |
|
||||
| `app/models/group.rb` | `include Avatarable` | :white_check_mark: |
|
||||
| `app/models/user.rb` | `include Avatarable` | :white_check_mark: |
|
||||
| `app/models/terraform/state_version.rb` | `include FileStoreMounter` | :white_check_mark: |
|
||||
| `app/models/ci/job_artifact.rb` | `include FileStoreMounter` | :white_check_mark: |
|
||||
| `app/models/ci/pipeline_artifact.rb` | `include FileStoreMounter` | :white_check_mark: |
|
||||
| `app/models/pages_deployment.rb` | `include FileStoreMounter` | :white_check_mark: |
|
||||
| `app/models/lfs_object.rb` | `include FileStoreMounter` | :white_check_mark: |
|
||||
| `app/models/dependency_proxy/blob.rb` | `include FileStoreMounter` | :white_check_mark: |
|
||||
| `app/models/dependency_proxy/manifest.rb` | `include FileStoreMounter` | :white_check_mark: |
|
||||
| `app/models/packages/composer/cache_file.rb` | `include FileStoreMounter` | :white_check_mark: |
|
||||
| `app/models/packages/package_file.rb` | `include FileStoreMounter` | :white_check_mark: |
|
||||
| `app/models/concerns/packages/debian/component_file.rb` | `include FileStoreMounter` | :white_check_mark: |
|
||||
| `app/models/project.rb` | `include Avatarable` | **{check-circle}** Yes |
|
||||
| `app/models/projects/topic.rb` | `include Avatarable` | **{check-circle}** Yes |
|
||||
| `app/models/group.rb` | `include Avatarable` | **{check-circle}** Yes |
|
||||
| `app/models/user.rb` | `include Avatarable` | **{check-circle}** Yes |
|
||||
| `app/models/terraform/state_version.rb` | `include FileStoreMounter` | **{check-circle}** Yes |
|
||||
| `app/models/ci/job_artifact.rb` | `include FileStoreMounter` | **{check-circle}** Yes |
|
||||
| `app/models/ci/pipeline_artifact.rb` | `include FileStoreMounter` | **{check-circle}** Yes |
|
||||
| `app/models/pages_deployment.rb` | `include FileStoreMounter` | **{check-circle}** Yes |
|
||||
| `app/models/lfs_object.rb` | `include FileStoreMounter` | **{check-circle}** Yes |
|
||||
| `app/models/dependency_proxy/blob.rb` | `include FileStoreMounter` | **{check-circle}** Yes |
|
||||
| `app/models/dependency_proxy/manifest.rb` | `include FileStoreMounter` | **{check-circle}** Yes |
|
||||
| `app/models/packages/composer/cache_file.rb` | `include FileStoreMounter` | **{check-circle}** Yes |
|
||||
| `app/models/packages/package_file.rb` | `include FileStoreMounter` | **{check-circle}** Yes |
|
||||
| `app/models/concerns/packages/debian/component_file.rb` | `include FileStoreMounter` | **{check-circle}** Yes |
|
||||
| `ee/app/models/issuable_metric_image.rb` | `include FileStoreMounter` | |
|
||||
| `ee/app/models/vulnerabilities/remediation.rb` | `include FileStoreMounter` | |
|
||||
| `ee/app/models/vulnerabilities/export.rb` | `include FileStoreMounter` | |
|
||||
| `app/models/packages/debian/project_distribution.rb` | `include Packages::Debian::Distribution` | :white_check_mark: |
|
||||
| `app/models/packages/debian/group_distribution.rb` | `include Packages::Debian::Distribution` | :white_check_mark: |
|
||||
| `app/models/packages/debian/project_component_file.rb` | `include Packages::Debian::ComponentFile` | :white_check_mark: |
|
||||
| `app/models/packages/debian/group_component_file.rb` | `include Packages::Debian::ComponentFile` | :white_check_mark: |
|
||||
| `app/models/merge_request_diff.rb` | `mount_uploader :external_diff, ExternalDiffUploader` | :white_check_mark: |
|
||||
| `app/models/note.rb` | `mount_uploader :attachment, AttachmentUploader` | :white_check_mark: |
|
||||
| `app/models/appearance.rb` | `mount_uploader :logo, AttachmentUploader` | :white_check_mark: |
|
||||
| `app/models/appearance.rb` | `mount_uploader :header_logo, AttachmentUploader` | :white_check_mark: |
|
||||
| `app/models/appearance.rb` | `mount_uploader :favicon, FaviconUploader` | :white_check_mark: |
|
||||
| `app/models/packages/debian/project_distribution.rb` | `include Packages::Debian::Distribution` | **{check-circle}** Yes |
|
||||
| `app/models/packages/debian/group_distribution.rb` | `include Packages::Debian::Distribution` | **{check-circle}** Yes |
|
||||
| `app/models/packages/debian/project_component_file.rb` | `include Packages::Debian::ComponentFile` | **{check-circle}** Yes |
|
||||
| `app/models/packages/debian/group_component_file.rb` | `include Packages::Debian::ComponentFile` | **{check-circle}** Yes |
|
||||
| `app/models/merge_request_diff.rb` | `mount_uploader :external_diff, ExternalDiffUploader` | **{check-circle}** Yes |
|
||||
| `app/models/note.rb` | `mount_uploader :attachment, AttachmentUploader` | **{check-circle}** Yes |
|
||||
| `app/models/appearance.rb` | `mount_uploader :logo, AttachmentUploader` | **{check-circle}** Yes |
|
||||
| `app/models/appearance.rb` | `mount_uploader :header_logo, AttachmentUploader` | **{check-circle}** Yes |
|
||||
| `app/models/appearance.rb` | `mount_uploader :favicon, FaviconUploader` | **{check-circle}** Yes |
|
||||
| `app/models/project.rb` | `mount_uploader :bfg_object_map, AttachmentUploader` | |
|
||||
| `app/models/import_export_upload.rb` | `mount_uploader :import_file, ImportExportUploader` | :white_check_mark: |
|
||||
| `app/models/import_export_upload.rb` | `mount_uploader :export_file, ImportExportUploader` | :white_check_mark: |
|
||||
| `app/models/import_export_upload.rb` | `mount_uploader :import_file, ImportExportUploader` | **{check-circle}** Yes |
|
||||
| `app/models/import_export_upload.rb` | `mount_uploader :export_file, ImportExportUploader` | **{check-circle}** Yes |
|
||||
| `app/models/ci/deleted_object.rb` | `mount_uploader :file, DeletedObjectUploader` | |
|
||||
| `app/models/design_management/action.rb` | `mount_uploader :image_v432x230, DesignManagement::DesignV432x230Uploader` | :white_check_mark: |
|
||||
| `app/models/concerns/packages/debian/distribution.rb` | `mount_uploader :signed_file, Packages::Debian::DistributionReleaseFileUploader` | :white_check_mark: |
|
||||
| `app/models/bulk_imports/export_upload.rb` | `mount_uploader :export_file, ExportUploader` | :white_check_mark: |
|
||||
| `app/models/design_management/action.rb` | `mount_uploader :image_v432x230, DesignManagement::DesignV432x230Uploader` | **{check-circle}** Yes |
|
||||
| `app/models/concerns/packages/debian/distribution.rb` | `mount_uploader :signed_file, Packages::Debian::DistributionReleaseFileUploader` | **{check-circle}** Yes |
|
||||
| `app/models/bulk_imports/export_upload.rb` | `mount_uploader :export_file, ExportUploader` | **{check-circle}** Yes |
|
||||
| `ee/app/models/user_permission_export_upload.rb` | `mount_uploader :file, AttachmentUploader` | |
|
||||
| `app/models/ci/secure_file.rb` | `include FileStoreMounter` | |
|
||||
|
|
|
@ -128,6 +128,25 @@ relative URL in the `authBackend` setting:
|
|||
gitlab-workhorse -authBackend http://localhost:8080/gitlab
|
||||
```
|
||||
|
||||
## TLS support
|
||||
|
||||
You can configure a TLS listener for incoming requests.
|
||||
Paths to the files containing a certificate and matching private key for the server must be provided:
|
||||
|
||||
```toml
|
||||
[[listeners]]
|
||||
network = "tcp"
|
||||
addr = "localhost:3443"
|
||||
[listeners.tls]
|
||||
certificate = "/path/to/certificate"
|
||||
key = "/path/to/private/key"
|
||||
min_version = "tls1.2"
|
||||
max_version = "tls1.3"
|
||||
```
|
||||
|
||||
The `certificate` file should contain the concatenation
|
||||
of the server's certificate, any intermediates, and the CA's certificate.
|
||||
|
||||
## Interaction of authBackend and authSocket
|
||||
|
||||
The interaction between `authBackend` and `authSocket` can be confusing.
|
||||
|
|
|
@ -28,7 +28,7 @@ Sign in to the AliCloud platform and create an application on it. AliCloud gener
|
|||
Select **Save**.
|
||||
|
||||
1. Add OAuth scopes in the application details page:
|
||||
|
||||
|
||||
1. Under the **Application Name** column, select the name of the application you created. The application's details page opens.
|
||||
1. Under the **Application OAuth Scopes** tab, select **Add OAuth Scopes**.
|
||||
1. Select the **aliuid** and **profile** checkboxes.
|
||||
|
|
|
@ -67,5 +67,5 @@ Use [feature flags](../operations/feature_flags.md) to control and strategically
|
|||
|
||||
## Deploy to Google Cloud
|
||||
|
||||
GitLab [Cloud Seed](../cloud_seed/index.md) is an open-source Incubation Engineering program that
|
||||
GitLab [Cloud Seed](../cloud_seed/index.md) is an open-source Incubation Engineering program that
|
||||
enables you to set up deployment credentials and deploy your application to Google Cloud Run with minimal friction.
|
||||
|
|
|
@ -30,6 +30,25 @@ For removal reviewers (Technical Writers only):
|
|||
|
||||
## 15.0
|
||||
|
||||
### Container Network and Host Security
|
||||
|
||||
WARNING:
|
||||
This feature was changed or removed in 15.0
|
||||
as a [breaking change](https://docs.gitlab.com/ee/development/contributing/#breaking-changes).
|
||||
Before updating GitLab, review the details carefully to determine if you need to make any
|
||||
changes to your code, settings, or workflow.
|
||||
|
||||
All functionality related to the Container Network Security and Container Host Security categories was deprecated in GitLab 14.8 and is scheduled for removal in GitLab 15.0. Users who need a replacement for this functionality are encouraged to evaluate the following open source projects as potential solutions that can be installed and managed outside of GitLab: [AppArmor](https://gitlab.com/apparmor/apparmor), [Cilium](https://github.com/cilium/cilium), [Falco](https://github.com/falcosecurity/falco), [FluentD](https://github.com/fluent/fluentd), [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/). To integrate these technologies with GitLab, add the desired Helm charts in your copy of the [Cluster Management Project Template](https://docs.gitlab.com/ee/user/clusters/management_project_template.html). Deploy these Helm charts in production by calling commands through the GitLab [Secure CI/CD Tunnel](https://docs.gitlab.com/ee/user/clusters/agent/repository.html#run-kubectl-commands-using-the-cicd-tunnel).
|
||||
|
||||
As part of this change, the following capabilities within GitLab are scheduled for removal in GitLab 15.0:
|
||||
|
||||
- The **Security & Compliance > Threat Monitoring** page.
|
||||
- The Network Policy security policy type, as found on the **Security & Compliance > Policies** page.
|
||||
- The ability to manage integrations with the following technologies through GitLab: AppArmor, Cilium, Falco, FluentD, and Pod Security Policies.
|
||||
- All APIs related to the above functionality.
|
||||
|
||||
For additional context, or to provide feedback regarding this change, please reference our [deprecation issue](https://gitlab.com/groups/gitlab-org/-/epics/7476).
|
||||
|
||||
### Container registry authentication with htpasswd
|
||||
|
||||
WARNING:
|
||||
|
@ -42,6 +61,23 @@ The Container Registry supports [authentication](https://gitlab.com/gitlab-org/c
|
|||
|
||||
Since it isn't used in the context of GitLab (the product), `htpasswd` authentication will be deprecated in GitLab 14.9 and removed in GitLab 15.0.
|
||||
|
||||
### Vulnerability Check
|
||||
|
||||
WARNING:
|
||||
This feature was changed or removed in 15.0
|
||||
as a [breaking change](https://docs.gitlab.com/ee/development/contributing/#breaking-changes).
|
||||
Before updating GitLab, review the details carefully to determine if you need to make any
|
||||
changes to your code, settings, or workflow.
|
||||
|
||||
The vulnerability check feature was deprecated in GitLab 14.8 and is scheduled for removal in GitLab 15.0. We encourage you to migrate to the new security approvals feature instead. You can do so by navigating to **Security & Compliance > Policies** and creating a new Scan Result Policy.
|
||||
|
||||
The new security approvals feature is similar to vulnerability check. For example, both can require approvals for MRs that contain security vulnerabilities. However, security approvals improve the previous experience in several ways:
|
||||
|
||||
- Users can choose who is allowed to edit security approval rules. An independent security or compliance team can therefore manage rules in a way that prevents development project maintainers from modifying the rules.
|
||||
- Multiple rules can be created and chained together to allow for filtering on different severity thresholds for each scanner type.
|
||||
- A two-step approval process can be enforced for any desired changes to security approval rules.
|
||||
- A single set of security policies can be applied to multiple development projects to allow for ease in maintaining a single, centralized ruleset.
|
||||
|
||||
## 14.9
|
||||
|
||||
### Integrated error tracking disabled by default
|
||||
|
|
|
@ -190,7 +190,7 @@ sudo gitlab-rake gitlab:background_migrations:finalize[CopyColumnUsingBackground
|
|||
|
||||
In GitLab 14.8, the `BackfillNamespaceIdForNamespaceRoute` batched background migration job
|
||||
may fail to complete. When retried, a `500 Server Error` is returned. This issue was
|
||||
[resolved](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/82387) in GitLab 14.9.
|
||||
[resolved](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/82387) in GitLab 14.9.
|
||||
|
||||
To resolve this issue, [upgrade GitLab](../../../update/index.md) from 14.8 to 14.9.
|
||||
You can ignore the failed batch migration until after you update to GitLab 14.9.
|
||||
|
|
|
@ -149,7 +149,7 @@ As an administrator you can set either a global or namespace-specific limit on t
|
|||
|
||||
## Archive jobs
|
||||
|
||||
Archiving jobs is useful for reducing the CI/CD footprint on the system by removing some
|
||||
Archiving jobs is useful for reducing the CI/CD footprint on the system by removing some
|
||||
of the capabilities of the jobs (metadata stored in the database needed to run the job),
|
||||
but persisting the traces and artifacts for auditing purposes.
|
||||
|
||||
|
|
|
@ -45,7 +45,7 @@ tier. Users can continue to access the features in a paid tier without sharing u
|
|||
|
||||
- [Email from GitLab](../email_from_gitlab.md).
|
||||
|
||||
#### Features available in 14.4 and later
|
||||
### Features available in 14.4 and later
|
||||
|
||||
- [Repository size limit](../settings/account_and_limit_settings.md#repository-size-limit).
|
||||
- [Restrict group access by IP address](../../group/index.md#restrict-group-access-by-ip-address).
|
||||
|
@ -53,7 +53,7 @@ tier. Users can continue to access the features in a paid tier without sharing u
|
|||
NOTE:
|
||||
Registration is not yet required for participation, but may be added in a future milestone.
|
||||
|
||||
#### Enable Registration Features
|
||||
### Enable registration features
|
||||
|
||||
1. Sign in as a user with administrator access.
|
||||
1. On the top bar, select **Menu > Admin**.
|
||||
|
|
|
@ -139,7 +139,7 @@ OpenAPI 2.x lets you specify the accepted media types globally or per operation,
|
|||
- In [GitLab 14.10 and later](https://gitlab.com/gitlab-org/gitlab/-/issues/333304), the default behavior is to select one of the supported media types to use. The first supported media type is chosen from the list. This behavior is configurable.
|
||||
- In GitLab 14.9 and earlier, the default behavior is to perform testing using all supported media types. This means if two media types are listed (for example, `application/json` and `application/xml`), tests are performed using JSON, and then the same tests using XML.
|
||||
|
||||
Testing the same operation (for example, `POST /user`) using different media types (for example, `application/json` and `application/xml`) is not always desirable.
|
||||
Testing the same operation (for example, `POST /user`) using different media types (for example, `application/json` and `application/xml`) is not always desirable.
|
||||
For example, if the target application executes the same code regardless of the request content type, it will take longer to finish the test session, and it may report duplicate vulnerabilities related to the request body depending on the target app.
|
||||
|
||||
The environment variable `FUZZAPI_OPENAPI_ALL_MEDIA_TYPES` lets you specify whether or not to use all supported media types instead of one when generating requests for a given operation. When the environmental variable `FUZZAPI_OPENAPI_ALL_MEDIA_TYPES` is set to any value, API Fuzzing will try to generate requests for all supported media types instead of one in a given operation. This will cause testing to take longer as testing is repeated for each provided media type.
|
||||
|
@ -1087,7 +1087,7 @@ You can provide the following properties to exclude specific parameters during t
|
|||
- `body-json`: Use this property to exclude specific JSON nodes from a request that uses the media type `application/json`. The property's value is an array, each entry of the array is a [JSON Path](https://goessner.net/articles/JsonPath/) expression.
|
||||
- `body-xml`: Use this property to exclude specific XML nodes from a request that uses media type `application/xml`. The property's value is an array, each entry of the array is a [XPath v2](https://www.w3.org/TR/xpath20/) expression.
|
||||
|
||||
The following JSON document is an example of the expected structure to exclude parameters.
|
||||
The following JSON document is an example of the expected structure to exclude parameters.
|
||||
|
||||
```json
|
||||
{
|
||||
|
@ -1155,11 +1155,11 @@ To exclude the `password` field in a request that uses `application/x-www-form-u
|
|||
|
||||
The exclude parameters uses `body-form` when the request uses a content type `application/x-www-form-urlencoded`.
|
||||
|
||||
##### Excluding a specific JSON nodes using JSON Path
|
||||
##### Excluding a specific JSON nodes using JSON Path
|
||||
|
||||
To exclude the `schema` property in the root object, set the `body-json` property's value to an array with the JSON Path expression `[ "$.schema" ]`.
|
||||
|
||||
The JSON Path expression uses special syntax to identify JSON nodes: `$` refers to the root of the JSON document, `.` refers to the current object (in our case the root object), and the text `schema` refers to a property name. Thus, the JSON path expression `$.schema` refers to a property `schema` in the root object.
|
||||
The JSON Path expression uses special syntax to identify JSON nodes: `$` refers to the root of the JSON document, `.` refers to the current object (in our case the root object), and the text `schema` refers to a property name. Thus, the JSON path expression `$.schema` refers to a property `schema` in the root object.
|
||||
For instance, the JSON document looks like this:
|
||||
|
||||
```json
|
||||
|
@ -1168,13 +1168,13 @@ For instance, the JSON document looks like this:
|
|||
}
|
||||
```
|
||||
|
||||
The exclude parameters uses `body-json` when the request uses a content type `application/json`. Each entry in `body-json` is expected to be a [JSON Path expression](https://goessner.net/articles/JsonPath/). In JSON Path, characters like `$`, `*`, `.` among others have special meaning.
|
||||
The exclude parameters uses `body-json` when the request uses a content type `application/json`. Each entry in `body-json` is expected to be a [JSON Path expression](https://goessner.net/articles/JsonPath/). In JSON Path, characters like `$`, `*`, `.` among others have special meaning.
|
||||
|
||||
##### Excluding multiple JSON nodes using JSON Path
|
||||
##### Excluding multiple JSON nodes using JSON Path
|
||||
|
||||
To exclude the property `password` on each entry of an array of `users` at the root level, set the `body-json` property's value to an array with the JSON Path expression `[ "$.users[*].password" ]`.
|
||||
|
||||
The JSON Path expression starts with `$` to refer to the root node and uses `.` to refer to the current node. Then, it uses `users` to refer to a property and the characters `[` and `]` to enclose the index in the array you want to use, instead of providing a number as an index you use `*` to specify any index. After the index reference, we find `.` which now refers to any given selected index in the array, preceded by a property name `password`.
|
||||
The JSON Path expression starts with `$` to refer to the root node and uses `.` to refer to the current node. Then, it uses `users` to refer to a property and the characters `[` and `]` to enclose the index in the array you want to use, instead of providing a number as an index you use `*` to specify any index. After the index reference, we find `.` which now refers to any given selected index in the array, preceded by a property name `password`.
|
||||
|
||||
For instance, the JSON document looks like this:
|
||||
|
||||
|
@ -1184,7 +1184,7 @@ For instance, the JSON document looks like this:
|
|||
}
|
||||
```
|
||||
|
||||
The exclude parameters uses `body-json` when the request uses a content type `application/json`. Each entry in `body-json` is expected to be a [JSON Path expression](https://goessner.net/articles/JsonPath/). In JSON Path characters like `$`, `*`, `.` among others have special meaning.
|
||||
The exclude parameters uses `body-json` when the request uses a content type `application/json`. Each entry in `body-json` is expected to be a [JSON Path expression](https://goessner.net/articles/JsonPath/). In JSON Path characters like `$`, `*`, `.` among others have special meaning.
|
||||
|
||||
##### Excluding an XML attribute
|
||||
|
||||
|
@ -1196,17 +1196,17 @@ For instance, the JSON document looks like this:
|
|||
|
||||
```json
|
||||
{
|
||||
"body-xml": [
|
||||
"body-xml": [
|
||||
"/credentials/@isEnabled"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be an [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath expressions, characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be an [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath expressions, characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
|
||||
##### Excluding an XML element's text
|
||||
|
||||
To exclude the text of the `username` element contained in root node `credentials`, set the `body-xml` property's value to an array with the XPath expression `[ "/credentials/username/text()" ]`.
|
||||
To exclude the text of the `username` element contained in root node `credentials`, set the `body-xml` property's value to an array with the XPath expression `[ "/credentials/username/text()" ]`.
|
||||
|
||||
In the XPath expression `/credentials/username/text()`, the first character `/` refers to the root XML node, and then after it indicates an XML element's name `credentials`. Similarly, the character `/` refers to the current element, followed by a new XML element's name `username`. Last part has a `/` that refers to the current element, and uses a XPath function called `text()` which identifies the text of the current element.
|
||||
|
||||
|
@ -1214,17 +1214,17 @@ For instance, the JSON document looks like this:
|
|||
|
||||
```json
|
||||
{
|
||||
"body-xml": [
|
||||
"body-xml": [
|
||||
"/credentials/username/text()"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be a [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath expressions characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be a [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath expressions characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
|
||||
##### Excluding an XML element
|
||||
|
||||
To exclude the element `username` contained in root node `credentials`, set the `body-xml` property's value to an array with the XPath expression `[ "/credentials/username" ]`.
|
||||
To exclude the element `username` contained in root node `credentials`, set the `body-xml` property's value to an array with the XPath expression `[ "/credentials/username" ]`.
|
||||
|
||||
In the XPath expression `/credentials/username`, the first character `/` refers to the root XML node, and then after it indicates an XML element's name `credentials`. Similarly, the character `/` refers to the current element, followed by a new XML element's name `username`.
|
||||
|
||||
|
@ -1232,7 +1232,7 @@ For instance, the JSON document looks like this:
|
|||
|
||||
```json
|
||||
{
|
||||
"body-xml": [
|
||||
"body-xml": [
|
||||
"/credentials/username"
|
||||
]
|
||||
}
|
||||
|
@ -1242,21 +1242,21 @@ The exclude parameters uses `body-xml` when the request uses a content type `app
|
|||
|
||||
##### Excluding an XML node with namespaces
|
||||
|
||||
To exclude a XML element `login` which is defined in namespace `s`, and contained in `credentials` root node, set the `body-xml` property's value to an array with the XPath expression `[ "/credentials/s:login" ]`.
|
||||
To exclude a XML element `login` which is defined in namespace `s`, and contained in `credentials` root node, set the `body-xml` property's value to an array with the XPath expression `[ "/credentials/s:login" ]`.
|
||||
|
||||
In the XPath expression `/credentials/s:login`, the first character `/` refers to the root XML node, and then after it indicates an XML element's name `credentials`. Similarly, the character `/` refers to the current element, followed by a new XML element's name `s:login`. Notice that name contains the character `:`, this character separates the namespace from the node name.
|
||||
In the XPath expression `/credentials/s:login`, the first character `/` refers to the root XML node, and then after it indicates an XML element's name `credentials`. Similarly, the character `/` refers to the current element, followed by a new XML element's name `s:login`. Notice that name contains the character `:`, this character separates the namespace from the node name.
|
||||
|
||||
The namespace name should have been defined in the XML document which is part of the body request. You may check the namespace in the specification document HAR, OpenAPI, or Postman Collection file.
|
||||
|
||||
```json
|
||||
{
|
||||
"body-xml": [
|
||||
"body-xml": [
|
||||
"/credentials/s:login"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be a [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath expressions characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be a [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath expressions characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
|
||||
#### Using a JSON string
|
||||
|
||||
|
@ -1294,7 +1294,7 @@ variables:
|
|||
FUZZAPI_EXCLUDE_PARAMETER_FILE: api-fuzzing-exclude-parameters.json
|
||||
```
|
||||
|
||||
The `api-fuzzing-exclude-parameters.json` is a JSON document that follows the structure of [exclude parameters document](#exclude-parameters-using-a-json-document).
|
||||
The `api-fuzzing-exclude-parameters.json` is a JSON document that follows the structure of [exclude parameters document](#exclude-parameters-using-a-json-document).
|
||||
|
||||
### Exclude URLS
|
||||
|
||||
|
@ -1348,7 +1348,7 @@ variables:
|
|||
|
||||
##### Excluding URL using regular expressions
|
||||
|
||||
In order to exclude exactly `https://target/api/v1/user/create` and `https://target/api/v2/user/create` or any other version (`v3`,`v4`, and more). We could use `https://target/api/v.*/user/create$`, in the previous regular expression `.` indicates any character and `*` indicates zero or more times, additionally `$` indicates that the URL should end there.
|
||||
In order to exclude exactly `https://target/api/v1/user/create` and `https://target/api/v2/user/create` or any other version (`v3`,`v4`, and more). We could use `https://target/api/v.*/user/create$`, in the previous regular expression `.` indicates any character and `*` indicates zero or more times, additionally `$` indicates that the URL should end there.
|
||||
|
||||
```yaml
|
||||
variables:
|
||||
|
|
|
@ -1041,7 +1041,7 @@ You can provide the following properties to exclude specific parameters during t
|
|||
- `body-json`: Use this property to exclude specific JSON nodes from a request that uses the media type `application/json`. The property's value is an array, each entry of the array is a [JSON Path](https://goessner.net/articles/JsonPath/) expression.
|
||||
- `body-xml`: Use this property to exclude specific XML nodes from a request that uses media type `application/xml`. The property's value is an array, each entry of the array is a [XPath v2](https://www.w3.org/TR/xpath20/) expression.
|
||||
|
||||
Thus, the following JSON document is an example of the expected structure to exclude parameters.
|
||||
Thus, the following JSON document is an example of the expected structure to exclude parameters.
|
||||
|
||||
```json
|
||||
{
|
||||
|
@ -1109,11 +1109,11 @@ To exclude the `password` field in a request that uses `application/x-www-form-u
|
|||
|
||||
The exclude parameters uses `body-form` when the request uses a content type `application/x-www-form-urlencoded`.
|
||||
|
||||
##### Excluding a specific JSON nodes using JSON Path
|
||||
##### Excluding a specific JSON nodes using JSON Path
|
||||
|
||||
To exclude the `schema` property in the root object, set the `body-json` property's value to an array with the JSON Path expression `[ "$.schema" ]`.
|
||||
|
||||
The JSON Path expression uses special syntax to identify JSON nodes: `$` refers to the root of the JSON document, `.` refers to the current object (in our case the root object), and the text `schema` refers to a property name. Thus, the JSON path expression `$.schema` refers to a property `schema` in the root object.
|
||||
The JSON Path expression uses special syntax to identify JSON nodes: `$` refers to the root of the JSON document, `.` refers to the current object (in our case the root object), and the text `schema` refers to a property name. Thus, the JSON path expression `$.schema` refers to a property `schema` in the root object.
|
||||
For instance, the JSON document looks like this:
|
||||
|
||||
```json
|
||||
|
@ -1122,13 +1122,13 @@ For instance, the JSON document looks like this:
|
|||
}
|
||||
```
|
||||
|
||||
The exclude parameters uses `body-json` when the request uses a content type `application/json`. Each entry in `body-json` is expected to be a [JSON Path expression](https://goessner.net/articles/JsonPath/). In JSON Path characters like `$`, `*`, `.` among others have special meaning.
|
||||
The exclude parameters uses `body-json` when the request uses a content type `application/json`. Each entry in `body-json` is expected to be a [JSON Path expression](https://goessner.net/articles/JsonPath/). In JSON Path characters like `$`, `*`, `.` among others have special meaning.
|
||||
|
||||
##### Excluding multiple JSON nodes using JSON Path
|
||||
##### Excluding multiple JSON nodes using JSON Path
|
||||
|
||||
To exclude the property `password` on each entry of an array of `users` at the root level, set the `body-json` property's value to an array with the JSON Path expression `[ "$.users[*].password" ]`.
|
||||
|
||||
The JSON Path expression starts with `$` to refer to the root node and uses `.` to refer to the current node. Then, it uses `users` to refer to a property and the characters `[` and `]` to enclose the index in the array you want to use, instead of providing a number as an index you use `*` to specify any index. After the index reference, we find `.` which now refers to any given selected index in the array, preceded by a property name `password`.
|
||||
The JSON Path expression starts with `$` to refer to the root node and uses `.` to refer to the current node. Then, it uses `users` to refer to a property and the characters `[` and `]` to enclose the index in the array you want to use, instead of providing a number as an index you use `*` to specify any index. After the index reference, we find `.` which now refers to any given selected index in the array, preceded by a property name `password`.
|
||||
|
||||
For instance, the JSON document looks like this:
|
||||
|
||||
|
@ -1138,7 +1138,7 @@ For instance, the JSON document looks like this:
|
|||
}
|
||||
```
|
||||
|
||||
The exclude parameters uses `body-json` when the request uses a content type `application/json`. Each entry in `body-json` is expected to be a [JSON Path expression](https://goessner.net/articles/JsonPath/). In JSON Path characters like `$`, `*`, `.` among others have special meaning.
|
||||
The exclude parameters uses `body-json` when the request uses a content type `application/json`. Each entry in `body-json` is expected to be a [JSON Path expression](https://goessner.net/articles/JsonPath/). In JSON Path characters like `$`, `*`, `.` among others have special meaning.
|
||||
|
||||
##### Excluding a XML attribute
|
||||
|
||||
|
@ -1150,17 +1150,17 @@ For instance, the JSON document looks like this:
|
|||
|
||||
```json
|
||||
{
|
||||
"body-xml": [
|
||||
"body-xml": [
|
||||
"/credentials/@isEnabled"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be a [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath expressions characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be a [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath expressions characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
|
||||
##### Excluding a XML text's element
|
||||
|
||||
To exclude the text of the `username` element contained in root node `credentials`, set the `body-xml` property's value to an array with the XPath expression `[ "/credentials/username/text()" ]`.
|
||||
To exclude the text of the `username` element contained in root node `credentials`, set the `body-xml` property's value to an array with the XPath expression `[ "/credentials/username/text()" ]`.
|
||||
|
||||
In the XPath expression `/credentials/username/text()`, the first character `/` refers to the root XML node, and then after it indicates an XML element's name `credentials`. Similarly, the character `/` refers to the current element, followed by a new XML element's name `username`. Last part has a `/` that refers to the current element, and uses a XPath function called `text()` which identifies the text of the current element.
|
||||
|
||||
|
@ -1168,17 +1168,17 @@ For instance, the JSON document looks like this:
|
|||
|
||||
```json
|
||||
{
|
||||
"body-xml": [
|
||||
"body-xml": [
|
||||
"/credentials/username/text()"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be a [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath expressions characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be a [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath expressions characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
|
||||
##### Excluding an XML element
|
||||
|
||||
To exclude the element `username` contained in root node `credentials`, set the `body-xml` property's value to an array with the XPath expression `[ "/credentials/username" ]`.
|
||||
To exclude the element `username` contained in root node `credentials`, set the `body-xml` property's value to an array with the XPath expression `[ "/credentials/username" ]`.
|
||||
|
||||
In the XPath expression `/credentials/username`, the first character `/` refers to the root XML node, and then after it indicates an XML element's name `credentials`. Similarly, the character `/` refers to the current element, followed by a new XML element's name `username`.
|
||||
|
||||
|
@ -1186,31 +1186,31 @@ For instance, the JSON document looks like this:
|
|||
|
||||
```json
|
||||
{
|
||||
"body-xml": [
|
||||
"body-xml": [
|
||||
"/credentials/username"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be a [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath expressions characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be a [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath expressions characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
|
||||
##### Excluding an XML node with namespaces
|
||||
|
||||
To exclude an XML element `login` which is defined in namespace `s`, and contained in `credentials` root node, set the `body-xml` property's value to an array with the XPath expression `[ "/credentials/s:login" ]`.
|
||||
To exclude an XML element `login` which is defined in namespace `s`, and contained in `credentials` root node, set the `body-xml` property's value to an array with the XPath expression `[ "/credentials/s:login" ]`.
|
||||
|
||||
In the XPath expression `/credentials/s:login`, the first character `/` refers to the root XML node, and then after it indicates an XML element's name `credentials`. Similarly, the character `/` refers to the current element, followed by a new XML element's name `s:login`. Notice that name contains the character `:`, this character separates the namespace from the node name.
|
||||
In the XPath expression `/credentials/s:login`, the first character `/` refers to the root XML node, and then after it indicates an XML element's name `credentials`. Similarly, the character `/` refers to the current element, followed by a new XML element's name `s:login`. Notice that name contains the character `:`, this character separates the namespace from the node name.
|
||||
|
||||
The namespace name should have been defined in the XML document which is part of the body request. You may check the namespace in the specification document HAR, OpenAPI, or Postman Collection file.
|
||||
|
||||
```json
|
||||
{
|
||||
"body-xml": [
|
||||
"body-xml": [
|
||||
"/credentials/s:login"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be an [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath, expressions characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
The exclude parameters uses `body-xml` when the request uses a content type `application/xml`. Each entry in `body-xml` is expected to be an [XPath v2 expression](https://www.w3.org/TR/xpath20/). In XPath, expressions characters like `@`, `/`, `:`, `[`, `]` among others have special meanings.
|
||||
|
||||
#### Using a JSON string
|
||||
|
||||
|
@ -1248,7 +1248,7 @@ variables:
|
|||
DAST_API_EXCLUDE_PARAMETER_FILE: dast-api-exclude-parameters.json
|
||||
```
|
||||
|
||||
The `dast-api-exclude-parameters.json` is a JSON document that follows the structure of [exclude parameters document](#exclude-parameters-using-a-json-document).
|
||||
The `dast-api-exclude-parameters.json` is a JSON document that follows the structure of [exclude parameters document](#exclude-parameters-using-a-json-document).
|
||||
|
||||
### Exclude URLS
|
||||
|
||||
|
@ -1302,7 +1302,7 @@ variables:
|
|||
|
||||
##### Excluding URL using regular expressions
|
||||
|
||||
In order to exclude exactly `https://target/api/v1/user/create` and `https://target/api/v2/user/create` or any other version (`v3`,`v4`, and more). We could use `https://target/api/v.*/user/create$`, in the previous regular expression `.` indicates any character and `*` indicates zero or more times, additionally `$` indicates that the URL should end there.
|
||||
In order to exclude exactly `https://target/api/v1/user/create` and `https://target/api/v2/user/create` or any other version (`v3`,`v4`, and more). We could use `https://target/api/v.*/user/create$`, in the previous regular expression `.` indicates any character and `*` indicates zero or more times, additionally `$` indicates that the URL should end there.
|
||||
|
||||
```yaml
|
||||
variables:
|
||||
|
|
|
@ -94,7 +94,7 @@ To authorize the agent to access all of the GitLab projects in a group or subgro
|
|||
|
||||
- The Kubernetes projects must be in the same group hierarchy as the project where the agent's configuration is.
|
||||
- You can install additional agents into the same cluster to accommodate additional hierarchies.
|
||||
- All of the subgroups of an authorized group also have access to the same agent (without being specified individually).
|
||||
- All of the subgroups of an authorized group also have access to the same agent (without being specified individually).
|
||||
- You can authorize up to 100 groups.
|
||||
|
||||
All the projects that belong to the group and its subgroups are now authorized to access the agent.
|
||||
|
|
|
@ -38,7 +38,11 @@ To link one epic to another:
|
|||
- **relates to**
|
||||
- **[blocks](#blocking-epics)**
|
||||
- **[is blocked by](#blocking-epics)**
|
||||
1. Enter the epic number or paste in the full URL of the epic.
|
||||
1. To enter the linked epic, either:
|
||||
|
||||
- Enter `&`, followed by the epic's number. For example, `&123`.
|
||||
- Enter `&`, followed by a word from the epic's title. For example, `&Deliver`.
|
||||
- Paste in the epic's full URL.
|
||||
|
||||
![Adding a related epic](img/related_epics_add_v14_9.png)
|
||||
|
||||
|
|
|
@ -279,7 +279,7 @@ To view the activity feed in Atom format, select the
|
|||
|
||||
Similar to how you [share a project with a group](../project/members/share_project_with_groups.md),
|
||||
you can share a group with another group. To invite a group, you must be a member of it. Members get direct access
|
||||
to the shared group. This includes members who inherited group membership from a parent group.
|
||||
to the shared group. This includes members who inherited group membership from a parent group.
|
||||
|
||||
To share a given group, for example, `Frontend` with another group, for example,
|
||||
`Engineering`:
|
||||
|
|
|
@ -144,7 +144,7 @@ The **Lead Time for Changes** metrics display below the **Filter results** text
|
|||
To view deployment metrics, you must have a
|
||||
[production environment configured](../../../ci/environments/index.md#deployment-tier-of-environments).
|
||||
|
||||
Value stream analytics shows the following deployment metrics for your group:
|
||||
Value stream analytics shows the following deployment metrics for your group:
|
||||
|
||||
- Deploys: The number of successful deployments in the date range.
|
||||
- Deployment Frequency: The average number of successful deployments per day in the date range.
|
||||
|
@ -179,7 +179,7 @@ In GitLab 13.8 and earlier, metrics are calculated based on when the deployment
|
|||
> - Filter by stop date toggle [added](https://gitlab.com/gitlab-org/gitlab/-/issues/352428) in GitLab 14.9
|
||||
> - Data refresh badge [added](https://gitlab.com/gitlab-org/gitlab/-/issues/341739) in GitLab 14.9
|
||||
|
||||
Plans for value stream analytics to filter items by stop event instead of start event are tracked in this [epic](https://gitlab.com/groups/gitlab-org/-/epics/6046). With the completion of this work, value stream analytics will only display items with a stop event in the date range.
|
||||
Plans for value stream analytics to filter items by stop event instead of start event are tracked in this [epic](https://gitlab.com/groups/gitlab-org/-/epics/6046). With the completion of this work, value stream analytics will only display items with a stop event in the date range.
|
||||
|
||||
To preview this functionality, you can use the **Filter by stop date** toggle to enable or disable this filter until the [default filtering mode is introduced](../../../update/deprecations.md#value-stream-analytics-filtering-calculation-change) and the toggle is removed.
|
||||
|
||||
|
|
|
@ -4,10 +4,15 @@ group: Configure
|
|||
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
|
||||
---
|
||||
|
||||
# Clusters health **(FREE)**
|
||||
# Clusters health (DEPRECATED) **(FREE)**
|
||||
|
||||
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/4701) in GitLab 10.6.
|
||||
> - [Moved](https://gitlab.com/gitlab-org/gitlab/-/issues/208224) from GitLab Ultimate to GitLab Free in 13.2.
|
||||
> - [Deprecated](https://gitlab.com/groups/gitlab-org/configure/-/epics/8) in GitLab 14.5.
|
||||
|
||||
WARNING:
|
||||
This feature was [deprecated](https://gitlab.com/groups/gitlab-org/configure/-/epics/8) in GitLab 14.5. However, you can **still use** Prometheus
|
||||
for Kubernetes clusters connected to GitLab by [enabling Prometheus manually](../../../project/integrations/prometheus.md#manual-configuration-of-prometheus).
|
||||
|
||||
When [the Prometheus cluster integration is enabled](../../../clusters/integrations.md#prometheus-cluster-integration), GitLab monitors the cluster's health. At the top of the cluster settings page, CPU and Memory utilization is displayed, along with the total amount available. Keeping an eye on cluster resources can be important, if the cluster runs out of memory pods may be shutdown or fail to start.
|
||||
|
||||
|
|
|
@ -91,8 +91,8 @@ Introduced in GitLab 13.6, the themes [Solarized](https://gitlab.com/gitlab-org/
|
|||
|
||||
## Diff colors
|
||||
|
||||
A diff compares the old/removed content with the new/added content (e.g. when
|
||||
[reviewing a merge request](../project/merge_requests/reviews/index.md#review-a-merge-request) or in a
|
||||
A diff compares the old/removed content with the new/added content (e.g. when
|
||||
[reviewing a merge request](../project/merge_requests/reviews/index.md#review-a-merge-request) or in a
|
||||
[Markdown inline diff](../markdown.md#inline-diff)).
|
||||
Typically, the colors red and green are used for removed and added lines in diffs.
|
||||
The exact colors depend on the selected [syntax highlighting theme](#syntax-highlighting-theme).
|
||||
|
|
|
@ -29,7 +29,7 @@ module API
|
|||
mutually_exclusive :maintainer_note, :maintainer_note
|
||||
mutually_exclusive :active, :paused
|
||||
end
|
||||
post '/', feature_category: :runner do
|
||||
post '/', urgency: :low, feature_category: :runner do
|
||||
attributes = attributes_for_keys(%i[description maintainer_note maintenance_note active paused locked run_untagged tag_list access_level maximum_timeout])
|
||||
.merge(get_runner_details_from_request)
|
||||
|
||||
|
@ -54,7 +54,7 @@ module API
|
|||
params do
|
||||
requires :token, type: String, desc: %q(Runner's authentication token)
|
||||
end
|
||||
delete '/', feature_category: :runner do
|
||||
delete '/', urgency: :low, feature_category: :runner do
|
||||
authenticate_runner!
|
||||
|
||||
destroy_conditionally!(current_runner) { ::Ci::Runners::UnregisterRunnerService.new(current_runner, params[:token]).execute }
|
||||
|
@ -66,7 +66,7 @@ module API
|
|||
params do
|
||||
requires :token, type: String, desc: %q(Runner's authentication token)
|
||||
end
|
||||
post '/verify', feature_category: :runner do
|
||||
post '/verify', urgency: :low, feature_category: :runner do
|
||||
authenticate_runner!
|
||||
status 200
|
||||
body "200"
|
||||
|
@ -78,7 +78,7 @@ module API
|
|||
params do
|
||||
requires :token, type: String, desc: 'The current authentication token of the runner'
|
||||
end
|
||||
post '/reset_authentication_token', feature_category: :runner do
|
||||
post '/reset_authentication_token', urgency: :low, feature_category: :runner do
|
||||
authenticate_runner!
|
||||
|
||||
current_runner.reset_token!
|
||||
|
|
|
@ -8,6 +8,7 @@ module API
|
|||
before { authenticate! }
|
||||
|
||||
feature_category :runner
|
||||
urgency :low
|
||||
|
||||
resource :runners do
|
||||
desc 'Get runners available for user' do
|
||||
|
|
|
@ -37,10 +37,6 @@ module Gitlab
|
|||
class V1_0 < ActiveRecord::Migration[6.1] # rubocop:disable Naming/ClassAndModuleCamelCase
|
||||
include LockRetriesConcern
|
||||
include Gitlab::Database::MigrationHelpers::V2
|
||||
end
|
||||
|
||||
class V2_0 < V1_0 # rubocop:disable Naming/ClassAndModuleCamelCase
|
||||
include Gitlab::Database::MigrationHelpers::RestrictGitlabSchema
|
||||
|
||||
# When running migrations, the `db:migrate` switches connection of
|
||||
# ActiveRecord::Base depending where the migration runs.
|
||||
|
@ -50,6 +46,10 @@ module Gitlab
|
|||
end
|
||||
end
|
||||
|
||||
class V2_0 < V1_0 # rubocop:disable Naming/ClassAndModuleCamelCase
|
||||
include Gitlab::Database::MigrationHelpers::RestrictGitlabSchema
|
||||
end
|
||||
|
||||
def self.[](version)
|
||||
version = version.to_s
|
||||
name = "V#{version.tr('.', '_')}"
|
||||
|
|
|
@ -22,6 +22,7 @@ module Gitlab
|
|||
observation.query_statistics = connection.execute(<<~SQL)
|
||||
SELECT query, calls, total_time, max_time, mean_time, rows
|
||||
FROM pg_stat_statements
|
||||
WHERE pg_get_userbyid(userid) = current_user
|
||||
ORDER BY total_time DESC
|
||||
SQL
|
||||
end
|
||||
|
|
|
@ -18,6 +18,10 @@ module Gitlab
|
|||
"#{self.subscriptions_url}/payment_forms/cc_validation"
|
||||
end
|
||||
|
||||
def self.payment_validation_form_id
|
||||
"payment_method_validation"
|
||||
end
|
||||
|
||||
def self.registration_validation_form_url
|
||||
"#{self.subscriptions_url}/payment_forms/cc_registration_validation"
|
||||
end
|
||||
|
@ -83,5 +87,6 @@ end
|
|||
Gitlab::SubscriptionPortal.prepend_mod
|
||||
Gitlab::SubscriptionPortal::SUBSCRIPTIONS_URL = Gitlab::SubscriptionPortal.subscriptions_url.freeze
|
||||
Gitlab::SubscriptionPortal::PAYMENT_FORM_URL = Gitlab::SubscriptionPortal.payment_form_url.freeze
|
||||
Gitlab::SubscriptionPortal::PAYMENT_VALIDATION_FORM_ID = Gitlab::SubscriptionPortal.payment_validation_form_id.freeze
|
||||
Gitlab::SubscriptionPortal::RENEWAL_SERVICE_EMAIL = Gitlab::SubscriptionPortal.renewal_service_email.freeze
|
||||
Gitlab::SubscriptionPortal::REGISTRATION_VALIDATION_FORM_URL = Gitlab::SubscriptionPortal.registration_validation_form_url.freeze
|
||||
|
|
|
@ -5781,6 +5781,9 @@ msgstr ""
|
|||
msgid "Billings|By reactivating your trial, you will receive an additional 30 days of %{planName}. Your trial can be only reactivated once."
|
||||
msgstr ""
|
||||
|
||||
msgid "Billings|Error validating card details"
|
||||
msgstr ""
|
||||
|
||||
msgid "Billings|Extend trial"
|
||||
msgstr ""
|
||||
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
require_relative '../../migration_helpers'
|
||||
|
||||
module RuboCop
|
||||
module Cop
|
||||
module Migration
|
||||
class MigrationRecord < RuboCop::Cop::Cop
|
||||
include MigrationHelpers
|
||||
|
||||
ENFORCED_SINCE = 2022_04_26_00_00_00
|
||||
|
||||
MSG = <<~MSG
|
||||
Don't inherit from ActiveRecord::Base but use MigrationRecord instead.
|
||||
See https://docs.gitlab.com/ee/development/database/migrations_for_multiple_databases.html#example-usage-of-activerecord-classes.
|
||||
MSG
|
||||
|
||||
def_node_search :inherits_from_active_record_base?, <<~PATTERN
|
||||
(class _ (const (const _ :ActiveRecord) :Base) _)
|
||||
PATTERN
|
||||
|
||||
def on_class(node)
|
||||
return unless relevant_migration?(node)
|
||||
return unless inherits_from_active_record_base?(node)
|
||||
|
||||
add_offense(node, location: :expression)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def relevant_migration?(node)
|
||||
in_migration?(node) && version(node) >= ENFORCED_SINCE
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -1,49 +0,0 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
require 'spec_helper'
|
||||
|
||||
RSpec.describe IssuesFinder::Params do
|
||||
describe '#include_hidden' do
|
||||
subject { described_class.new(params, user, IssuesFinder) }
|
||||
|
||||
context 'when param is not set' do
|
||||
let(:params) { {} }
|
||||
|
||||
context 'with an admin', :enable_admin_mode do
|
||||
let(:user) { create(:user, :admin) }
|
||||
|
||||
it 'returns true' do
|
||||
expect(subject.include_hidden?).to be_truthy
|
||||
end
|
||||
end
|
||||
|
||||
context 'with a regular user' do
|
||||
let(:user) { create(:user) }
|
||||
|
||||
it 'returns false' do
|
||||
expect(subject.include_hidden?).to be_falsey
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
context 'when param is set' do
|
||||
let(:params) { { include_hidden: true } }
|
||||
|
||||
context 'with an admin', :enable_admin_mode do
|
||||
let(:user) { create(:user, :admin) }
|
||||
|
||||
it 'returns true' do
|
||||
expect(subject.include_hidden?).to be_truthy
|
||||
end
|
||||
end
|
||||
|
||||
context 'with a regular user' do
|
||||
let(:user) { create(:user) }
|
||||
|
||||
it 'returns false' do
|
||||
expect(subject.include_hidden?).to be_falsey
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -12,52 +12,8 @@ RSpec.describe IssuesFinder do
|
|||
context 'scope: all' do
|
||||
let(:scope) { 'all' }
|
||||
|
||||
context 'include_hidden and public_only params' do
|
||||
let_it_be(:banned_user) { create(:user, :banned) }
|
||||
let_it_be(:hidden_issue) { create(:issue, project: project1, author: banned_user) }
|
||||
let_it_be(:confidential_issue) { create(:issue, project: project1, confidential: true) }
|
||||
|
||||
context 'when user is an admin', :enable_admin_mode do
|
||||
let(:user) { create(:user, :admin) }
|
||||
|
||||
it 'returns all issues' do
|
||||
expect(issues).to contain_exactly(issue1, issue2, issue3, issue4, issue5, hidden_issue, confidential_issue)
|
||||
end
|
||||
end
|
||||
|
||||
context 'when user is not an admin' do
|
||||
context 'when public_only is true' do
|
||||
let(:params) { { public_only: true } }
|
||||
|
||||
it 'returns public issues' do
|
||||
expect(issues).to contain_exactly(issue1, issue2, issue3, issue4, issue5)
|
||||
end
|
||||
end
|
||||
|
||||
context 'when public_only is false' do
|
||||
let(:params) { { public_only: false } }
|
||||
|
||||
it 'returns public and confidential issues' do
|
||||
expect(issues).to contain_exactly(issue1, issue2, issue3, issue4, issue5, confidential_issue)
|
||||
end
|
||||
end
|
||||
|
||||
context 'when public_only is not set' do
|
||||
it 'returns public and confidential issue' do
|
||||
expect(issues).to contain_exactly(issue1, issue2, issue3, issue4, issue5, confidential_issue)
|
||||
end
|
||||
end
|
||||
|
||||
context 'when ban_user_feature_flag is false' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it 'returns all issues' do
|
||||
expect(issues).to contain_exactly(issue1, issue2, issue3, issue4, issue5, hidden_issue, confidential_issue)
|
||||
end
|
||||
end
|
||||
end
|
||||
it 'returns all issues' do
|
||||
expect(issues).to contain_exactly(issue1, issue2, issue3, issue4, issue5)
|
||||
end
|
||||
|
||||
context 'user does not have read permissions' do
|
||||
|
@ -1148,64 +1104,132 @@ RSpec.describe IssuesFinder do
|
|||
end
|
||||
|
||||
describe '#with_confidentiality_access_check' do
|
||||
let(:user) { create(:user) }
|
||||
let(:guest) { create(:user) }
|
||||
|
||||
let_it_be(:authorized_user) { create(:user) }
|
||||
let_it_be(:banned_user) { create(:user, :banned) }
|
||||
let_it_be(:project) { create(:project, namespace: authorized_user.namespace) }
|
||||
let_it_be(:public_issue) { create(:issue, project: project) }
|
||||
let_it_be(:confidential_issue) { create(:issue, project: project, confidential: true) }
|
||||
let_it_be(:hidden_issue) { create(:issue, project: project, author: banned_user) }
|
||||
|
||||
shared_examples 'returns public, does not return confidential' do
|
||||
shared_examples 'returns public, does not return hidden or confidential' do
|
||||
it 'returns only public issues' do
|
||||
expect(subject).to include(public_issue)
|
||||
expect(subject).not_to include(confidential_issue, hidden_issue)
|
||||
end
|
||||
end
|
||||
|
||||
shared_examples 'returns public and confidential, does not return hidden' do
|
||||
it 'returns only public and confidential issues' do
|
||||
expect(subject).to include(public_issue, confidential_issue)
|
||||
expect(subject).not_to include(hidden_issue)
|
||||
end
|
||||
end
|
||||
|
||||
shared_examples 'returns public and hidden, does not return confidential' do
|
||||
it 'returns only public and hidden issues' do
|
||||
expect(subject).to include(public_issue, hidden_issue)
|
||||
expect(subject).not_to include(confidential_issue)
|
||||
end
|
||||
end
|
||||
|
||||
shared_examples 'returns public and confidential' do
|
||||
it 'returns public and confidential issues' do
|
||||
expect(subject).to include(public_issue, confidential_issue)
|
||||
shared_examples 'returns public, confidential, and hidden' do
|
||||
it 'returns all issues' do
|
||||
expect(subject).to include(public_issue, confidential_issue, hidden_issue)
|
||||
end
|
||||
end
|
||||
|
||||
subject { described_class.new(user, params).with_confidentiality_access_check }
|
||||
|
||||
context 'when no project filter is given' do
|
||||
let(:params) { {} }
|
||||
|
||||
context 'for an anonymous user' do
|
||||
it_behaves_like 'returns public, does not return confidential'
|
||||
subject { described_class.new(nil, params).with_confidentiality_access_check }
|
||||
|
||||
it_behaves_like 'returns public, does not return hidden or confidential'
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public and hidden, does not return confidential'
|
||||
end
|
||||
end
|
||||
|
||||
context 'for a user without project membership' do
|
||||
it_behaves_like 'returns public, does not return confidential'
|
||||
subject { described_class.new(user, params).with_confidentiality_access_check }
|
||||
|
||||
it_behaves_like 'returns public, does not return hidden or confidential'
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public and hidden, does not return confidential'
|
||||
end
|
||||
end
|
||||
|
||||
context 'for a guest user' do
|
||||
subject { described_class.new(guest, params).with_confidentiality_access_check }
|
||||
|
||||
before do
|
||||
project.add_guest(user)
|
||||
project.add_guest(guest)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public, does not return confidential'
|
||||
it_behaves_like 'returns public, does not return hidden or confidential'
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public and hidden, does not return confidential'
|
||||
end
|
||||
end
|
||||
|
||||
context 'for a project member with access to view confidential issues' do
|
||||
before do
|
||||
project.add_reporter(user)
|
||||
end
|
||||
subject { described_class.new(authorized_user, params).with_confidentiality_access_check }
|
||||
|
||||
it_behaves_like 'returns public and confidential'
|
||||
it_behaves_like 'returns public and confidential, does not return hidden'
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public, confidential, and hidden'
|
||||
end
|
||||
end
|
||||
|
||||
context 'for an admin' do
|
||||
let(:user) { create(:user, :admin) }
|
||||
let(:admin_user) { create(:user, :admin) }
|
||||
|
||||
subject { described_class.new(admin_user, params).with_confidentiality_access_check }
|
||||
|
||||
context 'when admin mode is enabled', :enable_admin_mode do
|
||||
it_behaves_like 'returns public and confidential'
|
||||
it_behaves_like 'returns public, confidential, and hidden'
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public, confidential, and hidden'
|
||||
end
|
||||
end
|
||||
|
||||
context 'when admin mode is disabled' do
|
||||
it_behaves_like 'returns public, does not return confidential'
|
||||
it_behaves_like 'returns public, does not return hidden or confidential'
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public and hidden, does not return confidential'
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@ -1214,9 +1238,17 @@ RSpec.describe IssuesFinder do
|
|||
let(:params) { { project_id: project.id } }
|
||||
|
||||
context 'for an anonymous user' do
|
||||
let(:user) { nil }
|
||||
subject { described_class.new(nil, params).with_confidentiality_access_check }
|
||||
|
||||
it_behaves_like 'returns public, does not return confidential'
|
||||
it_behaves_like 'returns public, does not return hidden or confidential'
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public and hidden, does not return confidential'
|
||||
end
|
||||
|
||||
it 'does not filter by confidentiality' do
|
||||
expect(Issue).not_to receive(:where).with(a_string_matching('confidential'), anything)
|
||||
|
@ -1225,7 +1257,17 @@ RSpec.describe IssuesFinder do
|
|||
end
|
||||
|
||||
context 'for a user without project membership' do
|
||||
it_behaves_like 'returns public, does not return confidential'
|
||||
subject { described_class.new(user, params).with_confidentiality_access_check }
|
||||
|
||||
it_behaves_like 'returns public, does not return hidden or confidential'
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public and hidden, does not return confidential'
|
||||
end
|
||||
|
||||
it 'filters by confidentiality' do
|
||||
expect(subject.to_sql).to match("issues.confidential")
|
||||
|
@ -1233,11 +1275,21 @@ RSpec.describe IssuesFinder do
|
|||
end
|
||||
|
||||
context 'for a guest user' do
|
||||
subject { described_class.new(guest, params).with_confidentiality_access_check }
|
||||
|
||||
before do
|
||||
project.add_guest(user)
|
||||
project.add_guest(guest)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public, does not return confidential'
|
||||
it_behaves_like 'returns public, does not return hidden or confidential'
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public and hidden, does not return confidential'
|
||||
end
|
||||
|
||||
it 'filters by confidentiality' do
|
||||
expect(subject.to_sql).to match("issues.confidential")
|
||||
|
@ -1245,18 +1297,40 @@ RSpec.describe IssuesFinder do
|
|||
end
|
||||
|
||||
context 'for a project member with access to view confidential issues' do
|
||||
before do
|
||||
project.add_reporter(user)
|
||||
subject { described_class.new(authorized_user, params).with_confidentiality_access_check }
|
||||
|
||||
it_behaves_like 'returns public and confidential, does not return hidden'
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public, confidential, and hidden'
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public and confidential'
|
||||
it 'does not filter by confidentiality' do
|
||||
expect(Issue).not_to receive(:where).with(a_string_matching('confidential'), anything)
|
||||
|
||||
subject
|
||||
end
|
||||
end
|
||||
|
||||
context 'for an admin' do
|
||||
let(:user) { create(:user, :admin) }
|
||||
let(:admin_user) { create(:user, :admin) }
|
||||
|
||||
subject { described_class.new(admin_user, params).with_confidentiality_access_check }
|
||||
|
||||
context 'when admin mode is enabled', :enable_admin_mode do
|
||||
it_behaves_like 'returns public and confidential'
|
||||
it_behaves_like 'returns public, confidential, and hidden'
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public, confidential, and hidden'
|
||||
end
|
||||
|
||||
it 'does not filter by confidentiality' do
|
||||
expect(Issue).not_to receive(:where).with(a_string_matching('confidential'), anything)
|
||||
|
@ -1266,7 +1340,19 @@ RSpec.describe IssuesFinder do
|
|||
end
|
||||
|
||||
context 'when admin mode is disabled' do
|
||||
it_behaves_like 'returns public, does not return confidential'
|
||||
it_behaves_like 'returns public, does not return hidden or confidential'
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it_behaves_like 'returns public and hidden, does not return confidential'
|
||||
end
|
||||
|
||||
it 'filters by confidentiality' do
|
||||
expect(subject.to_sql).to match("issues.confidential")
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -279,7 +279,7 @@ RSpec.describe ProjectsHelper do
|
|||
it 'returns message prompting user to set password or set up a PAT' do
|
||||
stub_application_setting(password_authentication_enabled_for_git?: true)
|
||||
|
||||
expect(helper.no_password_message).to eq('Your account is authenticated with SSO or SAML. To <a href="/help/gitlab-basics/start-using-git#pull-and-push" target="_blank" rel="noopener noreferrer">push and pull</a> over HTTP with Git using this account, you must <a href="/-/profile/password/edit">set a password</a> or <a href="/-/profile/personal_access_tokens">set up a Personal Access Token</a> to use instead of a password. For more information, see <a href="/help/gitlab-basics/start-using-git#clone-with-https" target="_blank" rel="noopener noreferrer">Clone with HTTPS</a>.')
|
||||
expect(helper.no_password_message).to eq('Your account is authenticated with SSO or SAML. To <a href="/help/topics/git/terminology#pull-and-push" target="_blank" rel="noopener noreferrer">push and pull</a> over HTTP with Git using this account, you must <a href="/-/profile/password/edit">set a password</a> or <a href="/-/profile/personal_access_tokens">set up a Personal Access Token</a> to use instead of a password. For more information, see <a href="/help/gitlab-basics/start-using-git#clone-with-https" target="_blank" rel="noopener noreferrer">Clone with HTTPS</a>.')
|
||||
end
|
||||
end
|
||||
|
||||
|
@ -287,7 +287,7 @@ RSpec.describe ProjectsHelper do
|
|||
it 'returns message prompting user to set up a PAT' do
|
||||
stub_application_setting(password_authentication_enabled_for_git?: false)
|
||||
|
||||
expect(helper.no_password_message).to eq('Your account is authenticated with SSO or SAML. To <a href="/help/gitlab-basics/start-using-git#pull-and-push" target="_blank" rel="noopener noreferrer">push and pull</a> over HTTP with Git using this account, you must <a href="/-/profile/personal_access_tokens">set up a Personal Access Token</a> to use instead of a password. For more information, see <a href="/help/gitlab-basics/start-using-git#clone-with-https" target="_blank" rel="noopener noreferrer">Clone with HTTPS</a>.')
|
||||
expect(helper.no_password_message).to eq('Your account is authenticated with SSO or SAML. To <a href="/help/topics/git/terminology#pull-and-push" target="_blank" rel="noopener noreferrer">push and pull</a> over HTTP with Git using this account, you must <a href="/-/profile/personal_access_tokens">set up a Personal Access Token</a> to use instead of a password. For more information, see <a href="/help/gitlab-basics/start-using-git#clone-with-https" target="_blank" rel="noopener noreferrer">Clone with HTTPS</a>.')
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -43,6 +43,7 @@ RSpec.describe Gitlab::Database::Migrations::Observers::QueryStatistics do
|
|||
<<~SQL
|
||||
SELECT query, calls, total_time, max_time, mean_time, rows
|
||||
FROM pg_stat_statements
|
||||
WHERE pg_get_userbyid(userid) = current_user
|
||||
ORDER BY total_time DESC
|
||||
SQL
|
||||
end
|
||||
|
|
|
@ -56,6 +56,7 @@ RSpec.describe ::Gitlab::SubscriptionPortal do
|
|||
where(:method_name, :result) do
|
||||
:default_subscriptions_url | 'https://customers.staging.gitlab.com'
|
||||
:payment_form_url | 'https://customers.staging.gitlab.com/payment_forms/cc_validation'
|
||||
:payment_validation_form_id | 'payment_method_validation'
|
||||
:registration_validation_form_url | 'https://customers.staging.gitlab.com/payment_forms/cc_registration_validation'
|
||||
:subscriptions_graphql_url | 'https://customers.staging.gitlab.com/graphql'
|
||||
:subscriptions_more_minutes_url | 'https://customers.staging.gitlab.com/buy_pipeline_minutes'
|
||||
|
|
|
@ -1241,24 +1241,12 @@ RSpec.describe Issue do
|
|||
end
|
||||
|
||||
describe '.public_only' do
|
||||
let_it_be(:banned_user) { create(:user, :banned) }
|
||||
let_it_be(:public_issue) { create(:issue, project: reusable_project) }
|
||||
let_it_be(:confidential_issue) { create(:issue, project: reusable_project, confidential: true) }
|
||||
let_it_be(:hidden_issue) { create(:issue, project: reusable_project, author: banned_user) }
|
||||
|
||||
it 'only returns public issues' do
|
||||
public_issue = create(:issue, project: reusable_project)
|
||||
create(:issue, project: reusable_project, confidential: true)
|
||||
|
||||
expect(described_class.public_only).to eq([public_issue])
|
||||
end
|
||||
|
||||
context 'when feature flag is disabled' do
|
||||
before do
|
||||
stub_feature_flags(ban_user_feature_flag: false)
|
||||
end
|
||||
|
||||
it 'returns public and hidden issues' do
|
||||
expect(described_class.public_only).to contain_exactly(public_issue, hidden_issue)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe '.confidential_only' do
|
||||
|
|
|
@ -121,7 +121,7 @@ RSpec.describe Clusters::ClusterPresenter do
|
|||
it do
|
||||
is_expected.to include('clusters-path': clusterable_presenter.index_path,
|
||||
'dashboard-endpoint': clusterable_presenter.metrics_dashboard_path(cluster),
|
||||
'documentation-path': help_page_path('user/project/clusters/index', anchor: 'monitoring-your-kubernetes-cluster'),
|
||||
'documentation-path': help_page_path('user/infrastructure/clusters/manage/clusters_health'),
|
||||
'add-dashboard-documentation-path': help_page_path('operations/metrics/dashboards/index.md', anchor: 'add-a-new-dashboard-to-your-project'),
|
||||
'empty-getting-started-svg-path': match_asset_path('/assets/illustrations/monitoring/getting_started.svg'),
|
||||
'empty-loading-svg-path': match_asset_path('/assets/illustrations/monitoring/loading.svg'),
|
||||
|
|
|
@ -0,0 +1,80 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
require 'fast_spec_helper'
|
||||
require_relative '../../../../rubocop/cop/migration/migration_record'
|
||||
|
||||
RSpec.describe RuboCop::Cop::Migration::MigrationRecord do
|
||||
subject(:cop) { described_class.new }
|
||||
|
||||
let(:migration) do
|
||||
<<~SOURCE
|
||||
class MyMigration < Gitlab::Database::Migration[2.0]
|
||||
class Project < ActiveRecord::Base
|
||||
end
|
||||
|
||||
def change
|
||||
end
|
||||
end
|
||||
SOURCE
|
||||
end
|
||||
|
||||
shared_examples 'a disabled cop' do
|
||||
it 'does not register any offenses' do
|
||||
expect_no_offenses(migration)
|
||||
end
|
||||
end
|
||||
|
||||
context 'outside of a migration' do
|
||||
it_behaves_like 'a disabled cop'
|
||||
end
|
||||
|
||||
context 'in migration' do
|
||||
before do
|
||||
allow(cop).to receive(:in_migration?).and_return(true)
|
||||
end
|
||||
|
||||
context 'in an old migration' do
|
||||
before do
|
||||
allow(cop).to receive(:version).and_return(described_class::ENFORCED_SINCE - 5)
|
||||
end
|
||||
|
||||
it_behaves_like 'a disabled cop'
|
||||
end
|
||||
|
||||
context 'that is recent' do
|
||||
before do
|
||||
allow(cop).to receive(:version).and_return(described_class::ENFORCED_SINCE)
|
||||
end
|
||||
|
||||
it 'adds an offense if inheriting from ActiveRecord::Base' do
|
||||
expect_offense(<<~RUBY)
|
||||
class Project < ActiveRecord::Base
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Don't inherit from ActiveRecord::Base but use MigrationRecord instead.[...]
|
||||
end
|
||||
RUBY
|
||||
end
|
||||
|
||||
context 'when migration inhertis from ::ActiveRecord::Base' do
|
||||
let(:migration) do
|
||||
<<~SOURCE
|
||||
class MyMigration < Gitlab::Database::Migration[2.0]
|
||||
class Project < ::ActiveRecord::Base
|
||||
end
|
||||
|
||||
def change
|
||||
end
|
||||
end
|
||||
SOURCE
|
||||
end
|
||||
|
||||
it 'adds an offense' do
|
||||
expect_offense(<<~RUBY)
|
||||
class Project < ::ActiveRecord::Base
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Don't inherit from ActiveRecord::Base but use MigrationRecord instead.[...]
|
||||
end
|
||||
RUBY
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -3,18 +3,12 @@
|
|||
require 'spec_helper'
|
||||
|
||||
RSpec.describe Groups::OpenIssuesCountService, :use_clean_rails_memory_store_caching do
|
||||
let_it_be(:group) { create(:group, :public) }
|
||||
let_it_be(:group) { create(:group, :public)}
|
||||
let_it_be(:project) { create(:project, :public, namespace: group) }
|
||||
let_it_be(:admin) { create(:user, :admin) }
|
||||
let_it_be(:user) { create(:user) }
|
||||
let_it_be(:banned_user) { create(:user, :banned) }
|
||||
|
||||
before do
|
||||
create(:issue, :opened, project: project)
|
||||
create(:issue, :opened, confidential: true, project: project)
|
||||
create(:issue, :opened, author: banned_user, project: project)
|
||||
create(:issue, :closed, project: project)
|
||||
end
|
||||
let_it_be(:issue) { create(:issue, :opened, project: project) }
|
||||
let_it_be(:confidential) { create(:issue, :opened, confidential: true, project: project) }
|
||||
let_it_be(:closed) { create(:issue, :closed, project: project) }
|
||||
|
||||
subject { described_class.new(group, user) }
|
||||
|
||||
|
@ -26,27 +20,17 @@ RSpec.describe Groups::OpenIssuesCountService, :use_clean_rails_memory_store_cac
|
|||
it 'uses the IssuesFinder to scope issues' do
|
||||
expect(IssuesFinder)
|
||||
.to receive(:new)
|
||||
.with(user, group_id: group.id, state: 'opened', non_archived: true, include_subgroups: true, public_only: true, include_hidden: false)
|
||||
.with(user, group_id: group.id, state: 'opened', non_archived: true, include_subgroups: true, public_only: true)
|
||||
|
||||
subject.count
|
||||
end
|
||||
end
|
||||
|
||||
describe '#count' do
|
||||
shared_examples 'counts public issues, does not count hidden or confidential' do
|
||||
it 'counts only public issues' do
|
||||
expect(subject.count).to eq(1)
|
||||
end
|
||||
|
||||
it 'uses PUBLIC_COUNT_WITHOUT_HIDDEN_KEY cache key' do
|
||||
expect(subject.cache_key).to include('group_open_public_issues_without_hidden_count')
|
||||
end
|
||||
end
|
||||
|
||||
context 'when user is nil' do
|
||||
let(:user) { nil }
|
||||
|
||||
it_behaves_like 'counts public issues, does not count hidden or confidential'
|
||||
it 'does not include confidential issues in the issue count' do
|
||||
expect(described_class.new(group).count).to eq(1)
|
||||
end
|
||||
end
|
||||
|
||||
context 'when user is provided' do
|
||||
|
@ -55,13 +39,9 @@ RSpec.describe Groups::OpenIssuesCountService, :use_clean_rails_memory_store_cac
|
|||
group.add_reporter(user)
|
||||
end
|
||||
|
||||
it 'includes confidential issues and does not include hidden issues in count' do
|
||||
it 'returns the right count with confidential issues' do
|
||||
expect(subject.count).to eq(2)
|
||||
end
|
||||
|
||||
it 'uses TOTAL_COUNT_WITHOUT_HIDDEN_KEY cache key' do
|
||||
expect(subject.cache_key).to include('group_open_issues_without_hidden_count')
|
||||
end
|
||||
end
|
||||
|
||||
context 'when user cannot read confidential issues' do
|
||||
|
@ -69,24 +49,8 @@ RSpec.describe Groups::OpenIssuesCountService, :use_clean_rails_memory_store_cac
|
|||
group.add_guest(user)
|
||||
end
|
||||
|
||||
it_behaves_like 'counts public issues, does not count hidden or confidential'
|
||||
end
|
||||
|
||||
context 'when user is an admin' do
|
||||
let(:user) { admin }
|
||||
|
||||
context 'when admin mode is enabled', :enable_admin_mode do
|
||||
it 'includes confidential and hidden issues in count' do
|
||||
expect(subject.count).to eq(3)
|
||||
end
|
||||
|
||||
it 'uses TOTAL_COUNT_KEY cache key' do
|
||||
expect(subject.cache_key).to include('group_open_issues_including_hidden_count')
|
||||
end
|
||||
end
|
||||
|
||||
context 'when admin mode is disabled' do
|
||||
it_behaves_like 'counts public issues, does not count hidden or confidential'
|
||||
it 'does not include confidential issues' do
|
||||
expect(subject.count).to eq(1)
|
||||
end
|
||||
end
|
||||
|
||||
|
@ -97,13 +61,11 @@ RSpec.describe Groups::OpenIssuesCountService, :use_clean_rails_memory_store_cac
|
|||
describe '#clear_all_cache_keys' do
|
||||
it 'calls `Rails.cache.delete` with the correct keys' do
|
||||
expect(Rails.cache).to receive(:delete)
|
||||
.with(['groups', 'open_issues_count_service', 1, group.id, described_class::PUBLIC_COUNT_WITHOUT_HIDDEN_KEY])
|
||||
.with(['groups', 'open_issues_count_service', 1, group.id, described_class::PUBLIC_COUNT_KEY])
|
||||
expect(Rails.cache).to receive(:delete)
|
||||
.with(['groups', 'open_issues_count_service', 1, group.id, described_class::TOTAL_COUNT_KEY])
|
||||
expect(Rails.cache).to receive(:delete)
|
||||
.with(['groups', 'open_issues_count_service', 1, group.id, described_class::TOTAL_COUNT_WITHOUT_HIDDEN_KEY])
|
||||
|
||||
described_class.new(group).clear_all_cache_keys
|
||||
subject.clear_all_cache_keys
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -279,7 +279,7 @@ RSpec.describe Issues::CloseService do
|
|||
|
||||
it 'verifies the number of queries' do
|
||||
recorded = ActiveRecord::QueryRecorder.new { close_issue }
|
||||
expected_queries = 32
|
||||
expected_queries = 30
|
||||
|
||||
expect(recorded.count).to be <= expected_queries
|
||||
expect(recorded.cached_count).to eq(0)
|
||||
|
|
|
@ -5,7 +5,6 @@ require 'spec_helper'
|
|||
RSpec.describe Projects::BatchOpenIssuesCountService do
|
||||
let!(:project_1) { create(:project) }
|
||||
let!(:project_2) { create(:project) }
|
||||
let!(:banned_user) { create(:user, :banned) }
|
||||
|
||||
let(:subject) { described_class.new([project_1, project_2]) }
|
||||
|
||||
|
@ -13,41 +12,32 @@ RSpec.describe Projects::BatchOpenIssuesCountService do
|
|||
before do
|
||||
create(:issue, project: project_1)
|
||||
create(:issue, project: project_1, confidential: true)
|
||||
create(:issue, project: project_1, author: banned_user)
|
||||
|
||||
create(:issue, project: project_2)
|
||||
create(:issue, project: project_2, confidential: true)
|
||||
create(:issue, project: project_2, author: banned_user)
|
||||
end
|
||||
|
||||
context 'when cache is clean', :aggregate_failures do
|
||||
context 'when cache is clean' do
|
||||
it 'refreshes cache keys correctly' do
|
||||
expect(get_cache_key(project_1)).to eq(nil)
|
||||
expect(get_cache_key(project_2)).to eq(nil)
|
||||
subject.refresh_cache_and_retrieve_data
|
||||
|
||||
subject.count_service.new(project_1).refresh_cache
|
||||
subject.count_service.new(project_2).refresh_cache
|
||||
# It does not update total issues cache
|
||||
expect(Rails.cache.read(get_cache_key(subject, project_1))).to eq(nil)
|
||||
expect(Rails.cache.read(get_cache_key(subject, project_2))).to eq(nil)
|
||||
|
||||
expect(get_cache_key(project_1)).to eq(1)
|
||||
expect(get_cache_key(project_2)).to eq(1)
|
||||
|
||||
expect(get_cache_key(project_1, true)).to eq(2)
|
||||
expect(get_cache_key(project_2, true)).to eq(2)
|
||||
|
||||
expect(get_cache_key(project_1, true, true)).to eq(3)
|
||||
expect(get_cache_key(project_2, true, true)).to eq(3)
|
||||
expect(Rails.cache.read(get_cache_key(subject, project_1, true))).to eq(1)
|
||||
expect(Rails.cache.read(get_cache_key(subject, project_1, true))).to eq(1)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def get_cache_key(project, with_confidential = false, with_hidden = false)
|
||||
def get_cache_key(subject, project, public_key = false)
|
||||
service = subject.count_service.new(project)
|
||||
|
||||
if with_confidential && with_hidden
|
||||
Rails.cache.read(service.cache_key(service.class::TOTAL_COUNT_KEY))
|
||||
elsif with_confidential
|
||||
Rails.cache.read(service.cache_key(service.class::TOTAL_COUNT_WITHOUT_HIDDEN_KEY))
|
||||
if public_key
|
||||
service.cache_key(service.class::PUBLIC_COUNT_KEY)
|
||||
else
|
||||
Rails.cache.read(service.cache_key(service.class::PUBLIC_COUNT_WITHOUT_HIDDEN_KEY))
|
||||
service.cache_key(service.class::TOTAL_COUNT_KEY)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -4,102 +4,89 @@ require 'spec_helper'
|
|||
|
||||
RSpec.describe Projects::OpenIssuesCountService, :use_clean_rails_memory_store_caching do
|
||||
let(:project) { create(:project) }
|
||||
let(:user) { create(:user) }
|
||||
let(:banned_user) { create(:user, :banned) }
|
||||
|
||||
subject { described_class.new(project, user) }
|
||||
subject { described_class.new(project) }
|
||||
|
||||
it_behaves_like 'a counter caching service'
|
||||
|
||||
before do
|
||||
create(:issue, :opened, project: project)
|
||||
create(:issue, :opened, confidential: true, project: project)
|
||||
create(:issue, :opened, author: banned_user, project: project)
|
||||
create(:issue, :closed, project: project)
|
||||
|
||||
described_class.new(project).refresh_cache
|
||||
end
|
||||
|
||||
describe '#count' do
|
||||
shared_examples 'counts public issues, does not count hidden or confidential' do
|
||||
it 'counts only public issues' do
|
||||
expect(subject.count).to eq(1)
|
||||
end
|
||||
|
||||
it 'uses PUBLIC_COUNT_WITHOUT_HIDDEN_KEY cache key' do
|
||||
expect(subject.cache_key).to include('project_open_public_issues_without_hidden_count')
|
||||
end
|
||||
end
|
||||
|
||||
context 'when user is nil' do
|
||||
let(:user) { nil }
|
||||
it 'does not include confidential issues in the issue count' do
|
||||
create(:issue, :opened, project: project)
|
||||
create(:issue, :opened, confidential: true, project: project)
|
||||
|
||||
it_behaves_like 'counts public issues, does not count hidden or confidential'
|
||||
expect(described_class.new(project).count).to eq(1)
|
||||
end
|
||||
end
|
||||
|
||||
context 'when user is provided' do
|
||||
let(:user) { create(:user) }
|
||||
|
||||
context 'when user can read confidential issues' do
|
||||
before do
|
||||
project.add_reporter(user)
|
||||
end
|
||||
|
||||
it 'includes confidential issues and does not include hidden issues in count' do
|
||||
expect(subject.count).to eq(2)
|
||||
it 'returns the right count with confidential issues' do
|
||||
create(:issue, :opened, project: project)
|
||||
create(:issue, :opened, confidential: true, project: project)
|
||||
|
||||
expect(described_class.new(project, user).count).to eq(2)
|
||||
end
|
||||
|
||||
it 'uses TOTAL_COUNT_WITHOUT_HIDDEN_KEY cache key' do
|
||||
expect(subject.cache_key).to include('project_open_issues_without_hidden_count')
|
||||
it 'uses total_open_issues_count cache key' do
|
||||
expect(described_class.new(project, user).cache_key_name).to eq('total_open_issues_count')
|
||||
end
|
||||
end
|
||||
|
||||
context 'when user cannot read confidential or hidden issues' do
|
||||
context 'when user cannot read confidential issues' do
|
||||
before do
|
||||
project.add_guest(user)
|
||||
end
|
||||
|
||||
it_behaves_like 'counts public issues, does not count hidden or confidential'
|
||||
end
|
||||
it 'does not include confidential issues' do
|
||||
create(:issue, :opened, project: project)
|
||||
create(:issue, :opened, confidential: true, project: project)
|
||||
|
||||
context 'when user is an admin' do
|
||||
let_it_be(:user) { create(:user, :admin) }
|
||||
|
||||
context 'when admin mode is enabled', :enable_admin_mode do
|
||||
it 'includes confidential and hidden issues in count' do
|
||||
expect(subject.count).to eq(3)
|
||||
end
|
||||
|
||||
it 'uses TOTAL_COUNT_KEY cache key' do
|
||||
expect(subject.cache_key).to include('project_open_issues_including_hidden_count')
|
||||
end
|
||||
expect(described_class.new(project, user).count).to eq(1)
|
||||
end
|
||||
|
||||
context 'when admin mode is disabled' do
|
||||
it_behaves_like 'counts public issues, does not count hidden or confidential'
|
||||
it 'uses public_open_issues_count cache key' do
|
||||
expect(described_class.new(project, user).cache_key_name).to eq('public_open_issues_count')
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe '#refresh_cache', :aggregate_failures do
|
||||
context 'when cache is empty' do
|
||||
it 'refreshes cache keys correctly' do
|
||||
expect(Rails.cache.read(described_class.new(project).cache_key(described_class::PUBLIC_COUNT_WITHOUT_HIDDEN_KEY))).to eq(1)
|
||||
expect(Rails.cache.read(described_class.new(project).cache_key(described_class::TOTAL_COUNT_WITHOUT_HIDDEN_KEY))).to eq(2)
|
||||
expect(Rails.cache.read(described_class.new(project).cache_key(described_class::TOTAL_COUNT_KEY))).to eq(3)
|
||||
end
|
||||
end
|
||||
|
||||
context 'when cache is outdated' do
|
||||
it 'refreshes cache keys correctly' do
|
||||
describe '#refresh_cache' do
|
||||
before do
|
||||
create(:issue, :opened, project: project)
|
||||
create(:issue, :opened, project: project)
|
||||
create(:issue, :opened, confidential: true, project: project)
|
||||
create(:issue, :opened, author: banned_user, project: project)
|
||||
end
|
||||
|
||||
described_class.new(project).refresh_cache
|
||||
context 'when cache is empty' do
|
||||
it 'refreshes cache keys correctly' do
|
||||
subject.refresh_cache
|
||||
|
||||
expect(Rails.cache.read(described_class.new(project).cache_key(described_class::PUBLIC_COUNT_WITHOUT_HIDDEN_KEY))).to eq(2)
|
||||
expect(Rails.cache.read(described_class.new(project).cache_key(described_class::TOTAL_COUNT_WITHOUT_HIDDEN_KEY))).to eq(4)
|
||||
expect(Rails.cache.read(described_class.new(project).cache_key(described_class::TOTAL_COUNT_KEY))).to eq(6)
|
||||
expect(Rails.cache.read(subject.cache_key(described_class::PUBLIC_COUNT_KEY))).to eq(2)
|
||||
expect(Rails.cache.read(subject.cache_key(described_class::TOTAL_COUNT_KEY))).to eq(3)
|
||||
end
|
||||
end
|
||||
|
||||
context 'when cache is outdated' do
|
||||
before do
|
||||
subject.refresh_cache
|
||||
end
|
||||
|
||||
it 'refreshes cache keys correctly' do
|
||||
create(:issue, :opened, project: project)
|
||||
create(:issue, :opened, confidential: true, project: project)
|
||||
|
||||
subject.refresh_cache
|
||||
|
||||
expect(Rails.cache.read(subject.cache_key(described_class::PUBLIC_COUNT_KEY))).to eq(3)
|
||||
expect(Rails.cache.read(subject.cache_key(described_class::TOTAL_COUNT_KEY))).to eq(5)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -20,3 +20,13 @@ URL = "unix:/home/git/gitlab/redis/redis.socket"
|
|||
[image_resizer]
|
||||
max_scaler_procs = 4 # Recommendation: CPUs / 2
|
||||
max_filesize = 250000
|
||||
|
||||
[[listeners]]
|
||||
network = "tcp"
|
||||
addr = "127.0.0.1:3443"
|
||||
|
||||
[listeners.tls]
|
||||
certificate = "/path/to/certificate"
|
||||
key = "/path/to/private/key"
|
||||
min_version = "tls1.2"
|
||||
max_version = "tls1.3"
|
||||
|
|
|
@ -39,6 +39,14 @@ password = "redis password"
|
|||
provider = "test provider"
|
||||
[image_resizer]
|
||||
max_scaler_procs = 123
|
||||
[[listeners]]
|
||||
network = "tcp"
|
||||
addr = "localhost:3443"
|
||||
[listeners.tls]
|
||||
certificate = "/path/to/certificate"
|
||||
key = "/path/to/private/key"
|
||||
min_version = "tls1.1"
|
||||
max_version = "tls1.2"
|
||||
`
|
||||
_, err = io.WriteString(f, data)
|
||||
require.NoError(t, err)
|
||||
|
@ -57,6 +65,15 @@ max_scaler_procs = 123
|
|||
require.Equal(t, []string{"127.0.0.1/8", "192.168.0.1/8"}, cfg.TrustedCIDRsForXForwardedFor)
|
||||
require.Equal(t, []string{"10.0.0.1/8"}, cfg.TrustedCIDRsForPropagation)
|
||||
require.Equal(t, 60*time.Second, cfg.ShutdownTimeout.Duration)
|
||||
|
||||
require.Len(t, cfg.Listeners, 1)
|
||||
listener := cfg.Listeners[0]
|
||||
require.Equal(t, "/path/to/certificate", listener.Tls.Certificate)
|
||||
require.Equal(t, "/path/to/private/key", listener.Tls.Key)
|
||||
require.Equal(t, "tls1.1", listener.Tls.MinVersion)
|
||||
require.Equal(t, "tls1.2", listener.Tls.MaxVersion)
|
||||
require.Equal(t, "tcp", listener.Network)
|
||||
require.Equal(t, "localhost:3443", listener.Addr)
|
||||
}
|
||||
|
||||
func TestConfigErrorHelp(t *testing.T) {
|
||||
|
|
|
@ -84,6 +84,19 @@ type ImageResizerConfig struct {
|
|||
MaxFilesize uint64 `toml:"max_filesize"`
|
||||
}
|
||||
|
||||
type TlsConfig struct {
|
||||
Certificate string `toml:"certificate"`
|
||||
Key string `toml:"key"`
|
||||
MinVersion string `toml:"min_version"`
|
||||
MaxVersion string `toml:"max_version"`
|
||||
}
|
||||
|
||||
type ListenerConfig struct {
|
||||
Network string `toml:"network"`
|
||||
Addr string `toml:"addr"`
|
||||
Tls *TlsConfig `toml:"tls"`
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Redis *RedisConfig `toml:"redis"`
|
||||
Backend *url.URL `toml:"-"`
|
||||
|
@ -106,6 +119,7 @@ type Config struct {
|
|||
ShutdownTimeout TomlDuration `toml:"shutdown_timeout"`
|
||||
TrustedCIDRsForXForwardedFor []string `toml:"trusted_cidrs_for_x_forwarded_for"`
|
||||
TrustedCIDRsForPropagation []string `toml:"trusted_cidrs_for_propagation"`
|
||||
Listeners []ListenerConfig `toml:"listeners"`
|
||||
}
|
||||
|
||||
var DefaultImageResizerConfig = ImageResizerConfig{
|
||||
|
|
|
@ -0,0 +1,107 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"syscall"
|
||||
|
||||
"gitlab.com/gitlab-org/labkit/log"
|
||||
|
||||
"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
|
||||
)
|
||||
|
||||
var tlsVersions = map[string]uint16{
|
||||
"": 0, // Default value in tls.Config
|
||||
"tls1.0": tls.VersionTLS10,
|
||||
"tls1.1": tls.VersionTLS11,
|
||||
"tls1.2": tls.VersionTLS12,
|
||||
"tls1.3": tls.VersionTLS13,
|
||||
}
|
||||
|
||||
type Server struct {
|
||||
Handler http.Handler
|
||||
Umask int
|
||||
ListenerConfigs []config.ListenerConfig
|
||||
Errors chan error
|
||||
|
||||
servers []*http.Server
|
||||
}
|
||||
|
||||
func (s *Server) Run() error {
|
||||
oldUmask := syscall.Umask(s.Umask)
|
||||
defer syscall.Umask(oldUmask)
|
||||
|
||||
for _, cfg := range s.ListenerConfigs {
|
||||
listener, err := s.newListener("upstream", cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("server.Run: failed creating a listener: %v", err)
|
||||
}
|
||||
|
||||
s.runUpstreamServer(listener)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) Close() error {
|
||||
return s.allServers(func(srv *http.Server) error { return srv.Close() })
|
||||
}
|
||||
|
||||
func (s *Server) Shutdown(ctx context.Context) error {
|
||||
return s.allServers(func(srv *http.Server) error { return srv.Shutdown(ctx) })
|
||||
}
|
||||
|
||||
func (s *Server) allServers(callback func(*http.Server) error) error {
|
||||
var resultErr error
|
||||
errC := make(chan error, len(s.servers))
|
||||
for _, server := range s.servers {
|
||||
server := server // Capture loop variable
|
||||
go func() { errC <- callback(server) }()
|
||||
}
|
||||
|
||||
for range s.servers {
|
||||
if err := <-errC; err != nil {
|
||||
resultErr = err
|
||||
}
|
||||
}
|
||||
|
||||
return resultErr
|
||||
}
|
||||
|
||||
func (s *Server) runUpstreamServer(listener net.Listener) {
|
||||
srv := &http.Server{
|
||||
Addr: listener.Addr().String(),
|
||||
Handler: s.Handler,
|
||||
}
|
||||
go func() {
|
||||
s.Errors <- srv.Serve(listener)
|
||||
}()
|
||||
|
||||
s.servers = append(s.servers, srv)
|
||||
}
|
||||
|
||||
func (s *Server) newListener(name string, cfg config.ListenerConfig) (net.Listener, error) {
|
||||
if cfg.Tls == nil {
|
||||
log.WithFields(log.Fields{"address": cfg.Addr, "network": cfg.Network}).Infof("Running %v server", name)
|
||||
|
||||
return net.Listen(cfg.Network, cfg.Addr)
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(cfg.Tls.Certificate, cfg.Tls.Key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{"address": cfg.Addr, "network": cfg.Network}).Infof("Running %v server with tls", name)
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
MinVersion: tlsVersions[cfg.Tls.MinVersion],
|
||||
MaxVersion: tlsVersions[cfg.Tls.MaxVersion],
|
||||
Certificates: []tls.Certificate{cert},
|
||||
}
|
||||
|
||||
return tls.Listen(cfg.Network, cfg.Addr, tlsConfig)
|
||||
}
|
|
@ -0,0 +1,165 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
|
||||
)
|
||||
|
||||
const (
|
||||
certFile = "testdata/localhost.crt"
|
||||
keyFile = "testdata/localhost.key"
|
||||
)
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
srv := defaultServer()
|
||||
|
||||
require.NoError(t, srv.Run())
|
||||
defer srv.Close()
|
||||
|
||||
require.Len(t, srv.servers, 2)
|
||||
|
||||
clients := buildClients(t, srv.servers)
|
||||
for url, client := range clients {
|
||||
resp, err := client.Get(url)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestShutdown(t *testing.T) {
|
||||
ready := make(chan bool)
|
||||
done := make(chan bool)
|
||||
statusCodes := make(chan int)
|
||||
|
||||
srv := defaultServer()
|
||||
srv.Handler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
ready <- true
|
||||
<-done
|
||||
rw.WriteHeader(200)
|
||||
})
|
||||
|
||||
require.NoError(t, srv.Run())
|
||||
defer srv.Close()
|
||||
|
||||
clients := buildClients(t, srv.servers)
|
||||
|
||||
for url, client := range clients {
|
||||
go func(url string, client *http.Client) {
|
||||
resp, err := client.Get(url)
|
||||
require.NoError(t, err)
|
||||
statusCodes <- resp.StatusCode
|
||||
}(url, client)
|
||||
}
|
||||
|
||||
for range clients {
|
||||
<-ready
|
||||
} // initiate requests
|
||||
|
||||
shutdownError := make(chan error)
|
||||
go func() {
|
||||
shutdownError <- srv.Shutdown(context.Background())
|
||||
}()
|
||||
|
||||
for url, client := range clients {
|
||||
require.Eventually(t, func() bool {
|
||||
_, err := client.Get(url)
|
||||
return err != nil
|
||||
}, time.Second, 10*time.Millisecond, "server must stop accepting new requests")
|
||||
}
|
||||
|
||||
for range clients {
|
||||
done <- true
|
||||
} // finish requests
|
||||
|
||||
require.NoError(t, <-shutdownError)
|
||||
require.ElementsMatch(t, []int{200, 200}, []int{<-statusCodes, <-statusCodes})
|
||||
}
|
||||
|
||||
func TestShutdown_withTimeout(t *testing.T) {
|
||||
ready := make(chan bool)
|
||||
done := make(chan bool)
|
||||
|
||||
srv := defaultServer()
|
||||
srv.Handler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
ready <- true
|
||||
<-done
|
||||
rw.WriteHeader(200)
|
||||
})
|
||||
|
||||
require.NoError(t, srv.Run())
|
||||
defer srv.Close()
|
||||
|
||||
clients := buildClients(t, srv.servers)
|
||||
|
||||
for url, client := range clients {
|
||||
go func(url string, client *http.Client) {
|
||||
client.Get(url)
|
||||
}(url, client)
|
||||
}
|
||||
|
||||
for range clients {
|
||||
<-ready
|
||||
} // initiate requets
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
err := srv.Shutdown(ctx)
|
||||
require.Error(t, err)
|
||||
require.EqualError(t, err, "context deadline exceeded")
|
||||
}
|
||||
|
||||
func defaultServer() Server {
|
||||
return Server{
|
||||
ListenerConfigs: []config.ListenerConfig{
|
||||
{
|
||||
Addr: "127.0.0.1:0",
|
||||
Network: "tcp",
|
||||
},
|
||||
{
|
||||
Addr: "127.0.0.1:0",
|
||||
Network: "tcp",
|
||||
Tls: &config.TlsConfig{
|
||||
Certificate: certFile,
|
||||
Key: keyFile,
|
||||
},
|
||||
},
|
||||
},
|
||||
Handler: http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.WriteHeader(200)
|
||||
}),
|
||||
Errors: make(chan error),
|
||||
}
|
||||
}
|
||||
|
||||
func buildClients(t *testing.T, servers []*http.Server) map[string]*http.Client {
|
||||
httpsClient := &http.Client{}
|
||||
certpool := x509.NewCertPool()
|
||||
|
||||
tlsCertificate, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
require.NoError(t, err)
|
||||
|
||||
certificate, err := x509.ParseCertificate(tlsCertificate.Certificate[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
certpool.AddCert(certificate)
|
||||
httpsClient.Transport = &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
RootCAs: certpool,
|
||||
},
|
||||
}
|
||||
|
||||
httpServer, httpsServer := servers[0], servers[1]
|
||||
return map[string]*http.Client{
|
||||
"http://" + httpServer.Addr: http.DefaultClient,
|
||||
"https://" + httpsServer.Addr: httpsClient,
|
||||
}
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIEjjCCAvagAwIBAgIQC2au+A/aGQ2Z21O0wVoEwjANBgkqhkiG9w0BAQsFADCB
|
||||
pTEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMT0wOwYDVQQLDDRpZ29y
|
||||
ZHJvemRvdkBJZ29ycy1NYWNCb29rLVByby0yLmxvY2FsIChJZ29yIERyb3pkb3Yp
|
||||
MUQwQgYDVQQDDDtta2NlcnQgaWdvcmRyb3pkb3ZASWdvcnMtTWFjQm9vay1Qcm8t
|
||||
Mi5sb2NhbCAoSWdvciBEcm96ZG92KTAeFw0yMjAzMDcwNDMxMjRaFw0yNDA2MDcw
|
||||
NDMxMjRaMGgxJzAlBgNVBAoTHm1rY2VydCBkZXZlbG9wbWVudCBjZXJ0aWZpY2F0
|
||||
ZTE9MDsGA1UECww0aWdvcmRyb3pkb3ZASWdvcnMtTWFjQm9vay1Qcm8tMi5sb2Nh
|
||||
bCAoSWdvciBEcm96ZG92KTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
|
||||
AMJ8ofGdcnenVRtNGViF4oxPv+CCFA6D2nfsjkJG8kmO6WW7VlbhJYxCMAuyFF1F
|
||||
b2UI2rrTFL8Aeq1KxeQzdrb3cpCquVH/UQ00G4ply28XVPRdbIyLQvOThMEeLL6v
|
||||
6gb4edL5oZmo/vWhdQxv0NGt282PAEt+bjnbdl28on8WVzmsw/m0nZ2BVWke+oUM
|
||||
krfsbyFaZj7aW8w0dNeK25ANy/Ldx55ENRDquphwYHDnpFOQpkHo5nPuoms5j2Sf
|
||||
GW3u3hgeFhRrFjqDstU3OKdA4AdHntDjl0gHm35w1m8PXiql/3EpkEMMx5ixQAqM
|
||||
cMZ7VVzy0HIjqsjdJZpzjx8CAwEAAaN2MHQwDgYDVR0PAQH/BAQDAgWgMBMGA1Ud
|
||||
JQQMMAoGCCsGAQUFBwMBMB8GA1UdIwQYMBaAFKTVZ2JsYLGJOP+UX0AwGO/81Kab
|
||||
MCwGA1UdEQQlMCOCCWxvY2FsaG9zdIcEfwAAAYcQAAAAAAAAAAAAAAAAAAAAATAN
|
||||
BgkqhkiG9w0BAQsFAAOCAYEAkGntoogSlhukGqTNbTXN9T/gXLtx9afWlgcBEafF
|
||||
MYQoJ1DOwXoYCQkMsxE0xWUyLDTpvjzfKkkyQwWzTwcYqRHOKafKYVSvENU5oaDY
|
||||
c2nk32SfkcF6bqJ50uBlFMEvKFExU1U+YSJhuEH/iqT9sSd52uwmnB0TJhSOc3J/
|
||||
1ZapKM2G71ezi8OyizwlwDJAwQ37CqrYS2slVO6Cy8zJ1l/ZsZ+kxRb+ME0LREI0
|
||||
J/rFTo9A6iyuXeBQ2jiRUrC6pmmbUQbVSjROx4RSmWoI/58/VnuZBY9P62OAOgUv
|
||||
pukfAbh3SUjN5++m4Py7WjP/y+L2ILPOFtxTY+CQPWQ5Hbff8iMB4NNfutdU1wSS
|
||||
CzXT1zWbU12kXod80wkMqWvNb3yU5spqXV6WYhOHiDIyqpPIqp5/i93Ck3Hd6/BQ
|
||||
DYlNOQsVHdSjWzNw9UubjpatiFqMK4hvJZE0haoLlmfDeZeqWk9oAuuCibLJGPg4
|
||||
TQri+lKgi0e76ynUr1zP1xUR
|
||||
-----END CERTIFICATE-----
|
|
@ -0,0 +1,28 @@
|
|||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDCfKHxnXJ3p1Ub
|
||||
TRlYheKMT7/gghQOg9p37I5CRvJJjullu1ZW4SWMQjALshRdRW9lCNq60xS/AHqt
|
||||
SsXkM3a293KQqrlR/1ENNBuKZctvF1T0XWyMi0Lzk4TBHiy+r+oG+HnS+aGZqP71
|
||||
oXUMb9DRrdvNjwBLfm4523ZdvKJ/Flc5rMP5tJ2dgVVpHvqFDJK37G8hWmY+2lvM
|
||||
NHTXituQDcvy3ceeRDUQ6rqYcGBw56RTkKZB6OZz7qJrOY9knxlt7t4YHhYUaxY6
|
||||
g7LVNzinQOAHR57Q45dIB5t+cNZvD14qpf9xKZBDDMeYsUAKjHDGe1Vc8tByI6rI
|
||||
3SWac48fAgMBAAECggEALuZXNyi8vdYAVAEXp51BsIxavQ0hQQ7S1DCbbagmLU7l
|
||||
Qb8XZwQMRfKAG5HqD0P7ROYJuRvF2PmIm9l4Nzuh2SV63yAMaJWlOgXizlEV6cg6
|
||||
mGMfFhVPI+XjEZ7xM1rAmMW6uwGv0ppKQXmZ/FHKjYXbh4qAi7QFaLZfqOMgXHzf
|
||||
C4nxf0xMzPP7rBnaxAGBRJWC+/UWxd1MVoHRjink4V/Tdy4zu+cEJ+2wuGawp4nz
|
||||
dEWYITzXMcBUKmZQHiOm+r58HpWK3mgXpJQBg3WqjR2iNa+ElyoPoGC6zu5Jd8Xg
|
||||
mMG2jHPFu+2F4UvymgxbKZqKHqcNjO7WMZRtIRiJgQKBgQDZGXUme0S5Bh8/y1us
|
||||
ltEfy4INFYJAejVxPwv7mRLtySqZLkWAPQTaSGgIk/XMTBYS3Ia9XD6Jl3zwo1qF
|
||||
R+y3ZkusGmk73o35kBxjc6purDei7CqMzwulbFTsUglDiF9T4X24bv1yK3lP2n8A
|
||||
Y6kLsscEC1wIEuwV5HFyQ2S9zwKBgQDlVepMrQ84FxQxN474LakwWLSkwo+6jS37
|
||||
61VPUqDUQpE4fGM6+F3fG+9YDMgvOVDneZ0MvzoiDRynbzF7K3k3fIBrYYbTRz7J
|
||||
p23BbTninzhrYTE/xd3LuFCZibCXA7nRa0QmYdXG4nUM2jjsjdR5AG7c/qJQDNun
|
||||
SXTbfM49sQKBgQCM9Jl6hbiGBTKO4gNAmJ9o7GIhCqEKKg6+23d1QNroZp9w23km
|
||||
nPeknjRltWN25MPENUiKc/Tqst/dAcLJHHzWSuXA9Vj0FTjLG0VDURsMRmbNMlci
|
||||
G1/tZNvyoAUBwu5Z8OMGt5F46j8WmL+yygI85TOQLavwVhDQ2gTKcnVbQwKBgQC0
|
||||
2VCf0KU8xS5eNYLgARn3jyw89VTkduq5S3aFzBIZ8LiWQ7j4yt0z0NKoq8O9QcSk
|
||||
FUocwDv2mEJtYwkxKTI46ExY4Zqxx/Aik47AxwKrzIVwYD+3G7DxMtMUkPkZzY1e
|
||||
MOmYHvS3FuPZE8lp+dqA5S+HxKF44Pria9HkOAJnsQKBgE853d9sR0DlJtEj64yu
|
||||
FX1rCle/UUODClktPgrwuM+xYutxOiEu6HUWHJI2yvWNk4oNL8Xd0IkR9NlwdatU
|
||||
E3+WDua+yYAsI9yWYn3+iqp+owNATkEDjWGivt0Onmgttt5kLHzPFCViIcgl32vv
|
||||
7V/plCsmgrS98xZHRrriTLvz
|
||||
-----END PRIVATE KEY-----
|
|
@ -22,6 +22,7 @@ import (
|
|||
"gitlab.com/gitlab-org/gitlab/workhorse/internal/queueing"
|
||||
"gitlab.com/gitlab-org/gitlab/workhorse/internal/redis"
|
||||
"gitlab.com/gitlab-org/gitlab/workhorse/internal/secret"
|
||||
"gitlab.com/gitlab-org/gitlab/workhorse/internal/server"
|
||||
"gitlab.com/gitlab-org/gitlab/workhorse/internal/upstream"
|
||||
)
|
||||
|
||||
|
@ -155,6 +156,7 @@ func buildConfig(arg0 string, args []string) (*bootConfig, *config.Config, error
|
|||
cfg.ShutdownTimeout = cfgFromFile.ShutdownTimeout
|
||||
cfg.TrustedCIDRsForXForwardedFor = cfgFromFile.TrustedCIDRsForXForwardedFor
|
||||
cfg.TrustedCIDRsForPropagation = cfgFromFile.TrustedCIDRsForPropagation
|
||||
cfg.Listeners = cfgFromFile.Listeners
|
||||
|
||||
return boot, cfg, nil
|
||||
}
|
||||
|
@ -177,14 +179,6 @@ func run(boot bootConfig, cfg config.Config) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Change the umask only around net.Listen()
|
||||
oldUmask := syscall.Umask(boot.listenUmask)
|
||||
listener, err := net.Listen(boot.listenNetwork, boot.listenAddr)
|
||||
syscall.Umask(oldUmask)
|
||||
if err != nil {
|
||||
return fmt.Errorf("main listener: %v", err)
|
||||
}
|
||||
|
||||
finalErrors := make(chan error)
|
||||
|
||||
// The profiler will only be activated by HTTP requests. HTTP
|
||||
|
@ -241,8 +235,19 @@ func run(boot bootConfig, cfg config.Config) error {
|
|||
done := make(chan os.Signal, 1)
|
||||
signal.Notify(done, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
server := http.Server{Handler: up}
|
||||
go func() { finalErrors <- server.Serve(listener) }()
|
||||
listenerFromBootConfig := config.ListenerConfig{
|
||||
Network: boot.listenNetwork,
|
||||
Addr: boot.listenAddr,
|
||||
}
|
||||
srv := &server.Server{
|
||||
Handler: up,
|
||||
Umask: boot.listenUmask,
|
||||
ListenerConfigs: append(cfg.Listeners, listenerFromBootConfig),
|
||||
Errors: finalErrors,
|
||||
}
|
||||
if err := srv.Run(); err != nil {
|
||||
return fmt.Errorf("running server: %v", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case err := <-finalErrors:
|
||||
|
@ -254,6 +259,6 @@ func run(boot bootConfig, cfg config.Config) error {
|
|||
defer cancel()
|
||||
|
||||
redis.Shutdown()
|
||||
return server.Shutdown(ctx)
|
||||
return srv.Shutdown(ctx)
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue