Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2022-08-26 21:10:03 +00:00
parent 4612d16c2d
commit 317dbfafea
64 changed files with 1194 additions and 752 deletions

View File

@@ -7,6 +7,8 @@
MERGE_REQUEST_URL: ${CI_MERGE_REQUEST_PROJECT_URL}/-/merge_requests/${CI_MERGE_REQUEST_IID}
before_script:
- apk update && apk add git curl bash
- echo "NOTIFY_CHANNEL is ${NOTIFY_CHANNEL}"
- echo "CI_PIPELINE_URL is ${CI_PIPELINE_URL}"
notify-update-gitaly:
extends:
@@ -19,8 +21,6 @@ notify-update-gitaly:
NOTIFY_CHANNEL: g_gitaly
GITALY_UPDATE_BRANCH: release-tools/update-gitaly
script:
- echo "NOTIFY_CHANNEL is ${NOTIFY_CHANNEL}"
- echo "CI_PIPELINE_URL is ${CI_PIPELINE_URL}"
- scripts/slack ${NOTIFY_CHANNEL} "☠️ \`${GITALY_UPDATE_BRANCH}\` failed! ☠️ See ${CI_PIPELINE_URL} (triggered from ${MERGE_REQUEST_URL})" ci_failing "GitLab QA Bot"
notify-security-pipeline:
@@ -30,7 +30,17 @@ notify-security-pipeline:
variables:
NOTIFY_CHANNEL: f_upcoming_release
script:
- echo "NOTIFY_CHANNEL is ${NOTIFY_CHANNEL}"
- echo "CI_PIPELINE_URL is ${CI_PIPELINE_URL}"
# <!subteam^S0127FU8PDE> mentions the `@release-managers` group
- scripts/slack ${NOTIFY_CHANNEL} "<!subteam^S0127FU8PDE> ☠️ Pipeline for merged result failed! ☠️ See ${CI_PIPELINE_URL} (triggered from ${MERGE_REQUEST_URL})" ci_failing "GitLab Release Tools Bot"
notify-pipeline-failure:
extends:
- .notify-slack
rules:
- if: '$NOTIFY_PIPELINE_FAILURE_CHANNEL'
when: on_failure
allow_failure: true
variables:
NOTIFY_CHANNEL: "${NOTIFY_PIPELINE_FAILURE_CHANNEL}"
script:
- scripts/slack ${NOTIFY_CHANNEL} "❌ \`${CI_COMMIT_REF_NAME}\` pipeline failed! See ${CI_PIPELINE_URL}" ci_failing "notify-pipeline-failure"
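A minimal opt-in sketch: the `notify-pipeline-failure` rule fires whenever `$NOTIFY_PIPELINE_FAILURE_CHANNEL` is set, so a pipeline subscribes simply by defining that variable (the channel name below is hypothetical):

```yaml
# Any pipeline that defines this variable opts into the
# notify-pipeline-failure job above when the pipeline fails.
variables:
  NOTIFY_PIPELINE_FAILURE_CHANNEL: "my_team_alerts"  # hypothetical channel
```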

View File

@@ -1520,6 +1520,12 @@
changes: ["vendor/gems/omniauth-gitlab/**/*"]
- <<: *if-merge-request-labels-run-all-rspec
.vendor:rules:omniauth-salesforce:
rules:
- <<: *if-merge-request
changes: ["vendor/gems/omniauth-salesforce/**/*"]
- <<: *if-merge-request-labels-run-all-rspec
.vendor:rules:devise-pbkdf2-encryptable:
rules:
- <<: *if-merge-request

View File

@@ -46,6 +46,14 @@ vendor omniauth-gitlab:
include: vendor/gems/omniauth-gitlab/.gitlab-ci.yml
strategy: depend
vendor omniauth-salesforce:
extends:
- .vendor:rules:omniauth-salesforce
needs: []
trigger:
include: vendor/gems/omniauth-salesforce/.gitlab-ci.yml
strategy: depend
vendor devise-pbkdf2-encryptable:
extends:
- .vendor:rules:devise-pbkdf2-encryptable

View File

@@ -54,7 +54,7 @@ gem 'omniauth-twitter', '~> 1.4'
gem 'omniauth_crowd', '~> 2.4.0', path: 'vendor/gems/omniauth_crowd' # See vendor/gems/omniauth_crowd/README.md
gem 'omniauth-authentiq', '~> 0.3.3'
gem 'gitlab-omniauth-openid-connect', '~> 0.9.0', require: 'omniauth_openid_connect'
gem 'omniauth-salesforce', '~> 1.0.5'
gem 'omniauth-salesforce', '~> 1.0.5', path: 'vendor/gems/omniauth-salesforce' # See gem README.md
gem 'omniauth-atlassian-oauth2', '~> 0.2.0'
gem 'rack-oauth2', '~> 1.21.2'
gem 'jwt', '~> 2.1.0'

View File

@@ -39,6 +39,13 @@ PATH
omniauth (~> 1.0)
omniauth-oauth2 (~> 1.7.1)
PATH
remote: vendor/gems/omniauth-salesforce
specs:
omniauth-salesforce (1.0.5)
omniauth (~> 1.0, < 3)
omniauth-oauth2 (~> 1.0)
PATH
remote: vendor/gems/omniauth_crowd
specs:
@@ -926,9 +933,6 @@ GEM
omniauth (>= 1.9, < 3)
omniauth-oauth2-generic (0.2.2)
omniauth-oauth2 (~> 1.0)
omniauth-salesforce (1.0.5)
omniauth (~> 1.0)
omniauth-oauth2 (~> 1.0)
omniauth-saml (1.10.0)
omniauth (~> 1.3, >= 1.3.2)
ruby-saml (~> 1.7)
@@ -1667,7 +1671,7 @@ DEPENDENCIES
omniauth-gitlab (~> 4.0.0)!
omniauth-google-oauth2 (~> 0.6.0)
omniauth-oauth2-generic (~> 0.2.2)
omniauth-salesforce (~> 1.0.5)
omniauth-salesforce (~> 1.0.5)!
omniauth-saml (~> 1.10)
omniauth-shibboleth (~> 1.3.0)
omniauth-twitter (~> 1.4)

View File

@@ -0,0 +1 @@
import '../show/index';

View File

@@ -50,7 +50,8 @@ export default {
props: {
registrationToken: {
type: String,
required: true,
required: false,
default: null,
},
groupFullPath: {
type: String,
@@ -191,6 +192,7 @@ export default {
/>
<registration-dropdown
v-if="registrationToken"
class="gl-ml-auto"
:registration-token="registrationToken"
:type="$options.GROUP_TYPE"

View File

@@ -11,6 +11,7 @@ class Groups::RunnersController < Groups::ApplicationController
def index
finder = Ci::RunnersFinder.new(current_user: current_user, params: { group: @group })
@group_runners_limited_count = finder.execute.except(:limit, :offset).page.total_count_with_limit(:all, limit: 1000)
@group_runner_registration_token = @group.runners_token if can?(current_user, :register_group_runners, group)
Gitlab::Tracking.event(self.class.name, 'index', user: current_user, namespace: @group)
end

View File

@@ -17,13 +17,13 @@ module Groups
def create_deploy_token
result = Groups::DeployTokens::CreateService.new(@group, current_user, deploy_token_params).execute
@new_deploy_token = result[:deploy_token]
if result[:status] == :success
@created_deploy_token = result[:deploy_token]
respond_to do |format|
format.json do
# IMPORTANT: It's a security risk to expose the token value more than just once here!
json = API::Entities::DeployTokenWithToken.represent(@new_deploy_token).as_json
json = API::Entities::DeployTokenWithToken.represent(@created_deploy_token).as_json
render json: json, status: result[:http_status]
end
format.html do
@@ -32,6 +32,7 @@ module Groups
end
end
else
@new_deploy_token = result[:deploy_token]
respond_to do |format|
format.json { render json: { message: result[:message] }, status: result[:http_status] }
format.html do

View File

@@ -34,13 +34,13 @@ module Projects
def create_deploy_token
result = Projects::DeployTokens::CreateService.new(@project, current_user, deploy_token_params).execute
@new_deploy_token = result[:deploy_token]
if result[:status] == :success
@created_deploy_token = result[:deploy_token]
respond_to do |format|
format.json do
# IMPORTANT: It's a security risk to expose the token value more than just once here!
json = API::Entities::DeployTokenWithToken.represent(@new_deploy_token).as_json
json = API::Entities::DeployTokenWithToken.represent(@created_deploy_token).as_json
render json: json, status: result[:http_status]
end
format.html do
@@ -49,6 +49,7 @@ module Projects
end
end
else
@new_deploy_token = result[:deploy_token]
respond_to do |format|
format.json { render json: { message: result[:message] }, status: result[:http_status] }
format.html do

View File

@@ -84,7 +84,6 @@ module Ci
def group_runners_data_attributes(group)
{
registration_token: group.runners_token,
group_id: group.id,
group_full_path: group.full_path,
runner_install_help_page: 'https://docs.gitlab.com/runner/install/',

View File

@@ -1,9 +1,9 @@
# frozen_string_literal: true
module DeployTokensHelper
def expand_deploy_tokens_section?(deploy_token)
deploy_token.persisted? ||
deploy_token.errors.present? ||
def expand_deploy_tokens_section?(new_deploy_token, created_deploy_token)
created_deploy_token ||
new_deploy_token.errors.present? ||
Rails.env.test?
end

View File

@@ -14,13 +14,13 @@ class GroupAccessTokenEntity < API::Entities::PersonalAccessToken
group_id: group.path)
end
expose :access_level do |token, options|
expose :role do |token, options|
group = options.fetch(:group)
next unless group
next unless token.user
group.member(token.user)&.access_level
group.member(token.user)&.human_access
end
end
# rubocop: enable Gitlab/NamespacedClass

View File

@@ -15,13 +15,13 @@ class ProjectAccessTokenEntity < API::Entities::PersonalAccessToken
project_id: project.path)
end
expose :access_level do |token, options|
expose :role do |token, options|
project = options.fetch(:project)
next unless project
next unless token.user
project.member(token.user)&.access_level
project.member(token.user)&.human_access
end
end
# rubocop: enable Gitlab/NamespacedClass

View File

@@ -1,3 +1,3 @@
- page_title s_('Runners|Runners')
#js-group-runners{ data: group_runners_data_attributes(@group).merge( { group_runners_limited_count: @group_runners_limited_count } ) }
#js-group-runners{ data: group_runners_data_attributes(@group).merge( { group_runners_limited_count: @group_runners_limited_count, registration_token: @group_runner_registration_token } ) }

View File

@@ -1,4 +1,4 @@
- expanded = expand_deploy_tokens_section?(@new_deploy_token)
- expanded = expand_deploy_tokens_section?(@new_deploy_token, @created_deploy_token)
%section.settings.no-animate#js-deploy-tokens{ class: ('expanded' if expanded), data: { qa_selector: 'deploy_tokens_settings_content' } }
.settings-header
@@ -8,11 +8,10 @@
%p
= description
.settings-content
- if @new_deploy_token.persisted?
= render 'shared/deploy_tokens/new_deploy_token', deploy_token: @new_deploy_token
- if @created_deploy_token
= render 'shared/deploy_tokens/new_deploy_token', deploy_token: @created_deploy_token
%h5.gl-mt-0
= s_('DeployTokens|New deploy token')
= render 'shared/deploy_tokens/form', group_or_project: group_or_project, token: @new_deploy_token, presenter: @deploy_tokens
%hr
= render 'shared/deploy_tokens/table', group_or_project: group_or_project, active_tokens: @deploy_tokens

View File

@@ -1,8 +0,0 @@
---
name: ci_docker_image_pull_policy
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/85588
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/363186
milestone: '15.1'
type: development
group: group::pipeline authoring
default_enabled: true

View File

@@ -1,8 +0,0 @@
---
name: tag_list_keyset_pagination
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/74239
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/345595
milestone: '14.5'
type: development
group: group::source code
default_enabled: true

View File

@@ -2308,11 +2308,11 @@ use Google Cloud's Kubernetes Engine (GKE) or AWS Elastic Kubernetes Service (EK
and CPU requirements should translate to most other providers. We hope to update this in the
future with further specific cloud provider details.
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|-----------------------------------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 4 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `c5.9xlarge` | 127.5 vCPU, 118 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
| Supporting services such as NGINX, Prometheus | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.75 vCPU, 25 GB memory |
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 4 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `c5.9xlarge` | 127.5 vCPU, 118 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
| Supporting services | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.75 vCPU, 25 GB memory |
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
@@ -2359,7 +2359,7 @@ card "Kubernetes via Helm Charts" as kubernetes {
collections "**Sidekiq** x4" as sidekiq #ff8dd1
}
card "**Supporting Services**" as support
card "**Supporting Services** x2" as support
}
card "**Internal Load Balancer**" as ilb #9370DB
@@ -2425,29 +2425,42 @@ documents how to apply the calculated configuration to the Helm Chart.
#### Webservice
Webservice pods typically need about 1 vCPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 vCPUs and 5 GB of memory using
Webservice pods typically need about 1 CPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 CPUs and 5 GB of memory using
the [recommended topology](#cluster-topology) because four worker processes
are created by default and each pod has other small processes running.
For 10,000 users we recommend a total Puma worker count of around 80.
With the [provided recommendations](#cluster-topology) this allows the deployment of up to 20
Webservice pods with 4 workers per pod and 5 pods per node. Expand available resources using
the ratio of 1 vCPU to 1.25 GB of memory _per each worker process_ for each additional
the ratio of 1 CPU to 1.25 GB of memory _per each worker process_ for each additional
Webservice pod.
For further information on resource usage, see the [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
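As an illustration of that ratio, a hedged Helm values sketch for the 10,000-user topology (key names assumed from the GitLab chart; verify against your chart version):

```yaml
gitlab:
  webservice:
    workerProcesses: 4    # 4 Puma workers per pod
    minReplicas: 20       # 20 pods x 4 workers = 80 workers total
    maxReplicas: 20
    resources:
      requests:
        cpu: 4            # 4 workers x 1 CPU
        memory: 5G        # 4 workers x 1.25 GB
```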
#### Sidekiq
Sidekiq pods should generally have 1 vCPU and 2 GB of memory.
Sidekiq pods should generally have 0.9 CPU and 2 GB of memory.
[The provided starting point](#cluster-topology) allows the deployment of up to
14 Sidekiq pods. Expand available resources using the 1 vCPU to 2GB memory
14 Sidekiq pods. Expand available resources using the 0.9 CPU to 2 GB memory
ratio for each additional pod.
For further information on resource usage, see the [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
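A corresponding sketch for Sidekiq under the same assumed chart keys:

```yaml
gitlab:
  sidekiq:
    minReplicas: 14       # the starting point above fits up to 14 pods
    maxReplicas: 14
    resources:
      requests:
        cpu: 900m         # 0.9 CPU per pod
        memory: 2G        # 2 GB per pod
```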
### Supporting
The Supporting Node Pool is designed to house all supporting deployments that don't need to be
on the Webservice and Sidekiq pools.
This includes various deployments related to the Cloud Provider's implementation and supporting
GitLab deployments such as NGINX or [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
If you wish to make any additional deployments, such as for Monitoring, it's recommended
to deploy these in this pool where possible and not in the Webservice or Sidekiq pools, as the Supporting pool has been designed
specifically to accommodate several additional deployments. However, if your deployments don't fit into the
pool as given, you can increase the node pool accordingly.
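For example, an extra deployment can be pinned to the Supporting pool with a node selector; the pool label below is hypothetical and depends on how your node pools are labeled:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: monitoring-agent           # hypothetical additional deployment
spec:
  replicas: 1
  selector:
    matchLabels: { app: monitoring-agent }
  template:
    metadata:
      labels: { app: monitoring-agent }
    spec:
      nodeSelector:
        workload: support          # assumed label on Supporting pool nodes
      containers:
        - name: agent
          image: example.org/agent:latest  # placeholder image
```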
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>

View File

@@ -2306,11 +2306,11 @@ use Google Cloud's Kubernetes Engine (GKE) or AWS Elastic Kubernetes Service (EK
and CPU requirements should translate to most other providers. We hope to update this in the
future with further specific cloud provider details.
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|-----------------------------------------------|-------|-------------------------|-----------------|-----------------|---------------------------------|
| Webservice | 7 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `c5.9xlarge` | 223 vCPU, 206.5 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
| Supporting services such as NGINX, Prometheus | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.75 vCPU, 25 GB memory |
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 7 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `c5.9xlarge` | 223 vCPU, 206.5 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
| Supporting services | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.75 vCPU, 25 GB memory |
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
@@ -2353,11 +2353,11 @@ card "Kubernetes via Helm Charts" as kubernetes {
card "**External Load Balancer**" as elb #6a9be7
together {
collections "**Webservice** x4" as gitlab #32CD32
collections "**Webservice** x7" as gitlab #32CD32
collections "**Sidekiq** x4" as sidekiq #ff8dd1
}
card "**Supporting Services**" as support
card "**Supporting Services** x2" as support
}
card "**Internal Load Balancer**" as ilb #9370DB
@@ -2423,29 +2423,42 @@ documents how to apply the calculated configuration to the Helm Chart.
#### Webservice
Webservice pods typically need about 1 vCPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 vCPUs and 5 GB of memory using
Webservice pods typically need about 1 CPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 CPUs and 5 GB of memory using
the [recommended topology](#cluster-topology) because four worker processes
are created by default and each pod has other small processes running.
For 25,000 users we recommend a total Puma worker count of around 140.
With the [provided recommendations](#cluster-topology) this allows the deployment of up to 35
Webservice pods with 4 workers per pod and 5 pods per node. Expand available resources using
the ratio of 1 vCPU to 1.25 GB of memory _per each worker process_ for each additional
the ratio of 1 CPU to 1.25 GB of memory _per each worker process_ for each additional
Webservice pod.
For further information on resource usage, see the [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
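The same per-worker arithmetic at 25,000 users, sketched with assumed GitLab chart keys:

```yaml
gitlab:
  webservice:
    workerProcesses: 4
    minReplicas: 35       # 35 pods x 4 workers = 140 workers total
    maxReplicas: 35
    resources:
      requests: { cpu: 4, memory: 5G }  # per pod: 4 x (1 CPU, 1.25 GB)
```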
#### Sidekiq
Sidekiq pods should generally have 1 vCPU and 2 GB of memory.
Sidekiq pods should generally have 0.9 CPU and 2 GB of memory.
[The provided starting point](#cluster-topology) allows the deployment of up to
14 Sidekiq pods. Expand available resources using the 1 vCPU to 2GB memory
14 Sidekiq pods. Expand available resources using the 0.9 CPU to 2 GB memory
ratio for each additional pod.
For further information on resource usage, see the [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
### Supporting
The Supporting Node Pool is designed to house all supporting deployments that don't need to be
on the Webservice and Sidekiq pools.
This includes various deployments related to the Cloud Provider's implementation and supporting
GitLab deployments such as NGINX or [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
If you wish to make any additional deployments, such as for Monitoring, it's recommended
to deploy these in this pool where possible and not in the Webservice or Sidekiq pools, as the Supporting pool has been designed
specifically to accommodate several additional deployments. However, if your deployments don't fit into the
pool as given, you can increase the node pool accordingly.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>

View File

@@ -1014,11 +1014,11 @@ use Google Cloud's Kubernetes Engine (GKE) or AWS Elastic Kubernetes Service (EK
and CPU requirements should translate to most other providers. We hope to update this in the
future with further specific cloud provider details.
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|-----------------------------------------------|-------|------------------------|-----------------|--------------|---------------------------------|
| Webservice | 3 | 8 vCPU, 7.2 GB memory | `n1-highcpu-8` | `c5.2xlarge` | 23.7 vCPU, 16.9 GB memory |
| Sidekiq | 1 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 3.9 vCPU, 11.8 GB memory |
| Supporting services such as NGINX, Prometheus | 2 | 2 vCPU, 7.5 GB memory | `n1-standard-2` | `m5.large` | 1.9 vCPU, 5.5 GB memory |
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|------------------------|-----------------|--------------|---------------------------------|
| Webservice | 3 | 8 vCPU, 7.2 GB memory | `n1-highcpu-8` | `c5.2xlarge` | 23.7 vCPU, 16.9 GB memory |
| Sidekiq | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.8 vCPU, 25.9 GB memory |
| Supporting services | 2 | 2 vCPU, 7.5 GB memory | `n1-standard-2` | `m5.large` | 1.9 vCPU, 5.5 GB memory |
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
@@ -1054,9 +1054,10 @@ card "Kubernetes via Helm Charts" as kubernetes {
together {
collections "**Webservice** x3" as gitlab #32CD32
card "**Sidekiq**" as sidekiq #ff8dd1
collections "**Supporting Services** x2" as support
collections "**Sidekiq** x2" as sidekiq #ff8dd1
}
collections "**Supporting Services** x2" as support
}
card "**Gitaly**" as gitaly #FF8C00
@@ -1087,29 +1088,42 @@ documents how to apply the calculated configuration to the Helm Chart.
#### Webservice
Webservice pods typically need about 1 vCPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 vCPUs and 5 GB of memory using
Webservice pods typically need about 1 CPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 CPUs and 5 GB of memory using
the [recommended topology](#cluster-topology) because four worker processes
are created by default and each pod has other small processes running.
For 2,000 users we recommend a total Puma worker count of around 12.
With the [provided recommendations](#cluster-topology) this allows the deployment of up to 3
Webservice pods with 4 workers per pod and 1 pod per node. Expand available resources using
the ratio of 1 vCPU to 1.25 GB of memory _per each worker process_ for each additional
the ratio of 1 CPU to 1.25 GB of memory _per each worker process_ for each additional
Webservice pod.
For further information on resource usage, see the [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
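Sketched for the 2,000-user topology (assumed chart keys; one pod per node):

```yaml
gitlab:
  webservice:
    workerProcesses: 4
    minReplicas: 3        # 3 pods x 4 workers = 12 workers total
    maxReplicas: 3
    resources:
      requests: { cpu: 4, memory: 5G }
```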
#### Sidekiq
Sidekiq pods should generally have 1 vCPU and 2 GB of memory.
Sidekiq pods should generally have 0.9 CPU and 2 GB of memory.
[The provided starting point](#cluster-topology) allows the deployment of up to
4 Sidekiq pods. Expand available resources using the 1 vCPU to 2 GB memory
4 Sidekiq pods. Expand available resources using the 0.9 CPU to 2 GB memory
ratio for each additional pod.
For further information on resource usage, see the [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
### Supporting
The Supporting Node Pool is designed to house all supporting deployments that don't need to be
on the Webservice and Sidekiq pools.
This includes various deployments related to the Cloud Provider's implementation and supporting
GitLab deployments such as NGINX or [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
If you wish to make any additional deployments, such as for Monitoring, it's recommended
to deploy these in this pool where possible and not in the Webservice or Sidekiq pools, as the Supporting pool has been designed
specifically to accommodate several additional deployments. However, if your deployments don't fit into the
pool as given, you can increase the node pool accordingly.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>

View File

@@ -2268,11 +2268,11 @@ use Google Cloud's Kubernetes Engine (GKE) or AWS Elastic Kubernetes Service (EK
and CPU requirements should translate to most other providers. We hope to update this in the
future with further specific cloud provider details.
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|-----------------------------------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 2 | 16 vCPU, 14.4 GB memory | `n1-highcpu-16` | `c5.4xlarge` | 31.8 vCPU, 24.8 GB memory |
| Sidekiq | 3 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 11.8 vCPU, 38.9 GB memory |
| Supporting services such as NGINX, Prometheus | 2 | 2 vCPU, 7.5 GB memory | `n1-standard-2` | `m5.large` | 3.9 vCPU, 11.8 GB memory |
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 2 | 16 vCPU, 14.4 GB memory | `n1-highcpu-16` | `c5.4xlarge` | 31.8 vCPU, 24.8 GB memory |
| Sidekiq | 3 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 11.8 vCPU, 38.9 GB memory |
| Supporting services | 2 | 2 vCPU, 7.5 GB memory | `n1-standard-2` | `m5.large` | 3.9 vCPU, 11.8 GB memory |
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
@@ -2314,11 +2314,11 @@ card "Kubernetes via Helm Charts" as kubernetes {
card "**External Load Balancer**" as elb #6a9be7
together {
collections "**Webservice** x4" as gitlab #32CD32
collections "**Sidekiq** x4" as sidekiq #ff8dd1
collections "**Webservice** x2" as gitlab #32CD32
collections "**Sidekiq** x3" as sidekiq #ff8dd1
}
card "**Supporting Services**" as support
card "**Supporting Services** x2" as support
}
card "**Internal Load Balancer**" as ilb #9370DB
@@ -2381,29 +2381,42 @@ documents how to apply the calculated configuration to the Helm Chart.
#### Webservice
Webservice pods typically need about 1 vCPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 vCPUs and 5 GB of memory using
Webservice pods typically need about 1 CPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 CPUs and 5 GB of memory using
the [recommended topology](#cluster-topology) because four worker processes
are created by default and each pod has other small processes running.
For 3,000 users we recommend a total Puma worker count of around 16.
With the [provided recommendations](#cluster-topology) this allows the deployment of up to 4
Webservice pods with 4 workers per pod and 2 pods per node. Expand available resources using
the ratio of 1 vCPU to 1.25 GB of memory _per each worker process_ for each additional
the ratio of 1 CPU to 1.25 GB of memory _per each worker process_ for each additional
Webservice pod.
For further information on resource usage, see the [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
#### Sidekiq
Sidekiq pods should generally have 1 vCPU and 2 GB of memory.
Sidekiq pods should generally have 0.9 CPU and 2 GB of memory.
[The provided starting point](#cluster-topology) allows the deployment of up to
8 Sidekiq pods. Expand available resources using the 1 vCPU to 2GB memory
8 Sidekiq pods. Expand available resources using the 0.9 CPU to 2 GB memory
ratio for each additional pod.
For further information on resource usage, see the [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
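Sketched as Helm values under the same assumed chart keys:

```yaml
gitlab:
  sidekiq:
    minReplicas: 8        # the starting point fits up to 8 pods
    maxReplicas: 8
    resources:
      requests: { cpu: 900m, memory: 2G }  # 0.9 CPU : 2 GB per pod
```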
### Supporting
The Supporting Node Pool is designed to house all supporting deployments that don't need to be
on the Webservice and Sidekiq pools.
This includes various deployments related to the Cloud Provider's implementation and supporting
GitLab deployments such as NGINX or [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
If you wish to make any additional deployments, such as for Monitoring, it's recommended
to deploy these in this pool where possible and not in the Webservice or Sidekiq pools, as the Supporting pool has been designed
specifically to accommodate several additional deployments. However, if your deployments don't fit into the
pool as given, you can increase the node pool accordingly.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>

View File

@@ -2322,11 +2322,11 @@ use Google Cloud's Kubernetes Engine (GKE) or AWS Elastic Kubernetes Service (EK
and CPU requirements should translate to most other providers. We hope to update this in the
future with further specific cloud provider details.
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|-----------------------------------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 16 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `m5.8xlarge` | 510 vCPU, 472 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
| Supporting services such as NGINX, Prometheus | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.75 vCPU, 25 GB memory |
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 16 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `m5.8xlarge` | 510 vCPU, 472 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
| Supporting services | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.75 vCPU, 25 GB memory |
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
@@ -2369,11 +2369,11 @@ card "Kubernetes via Helm Charts" as kubernetes {
card "**External Load Balancer**" as elb #6a9be7
together {
collections "**Webservice** x4" as gitlab #32CD32
collections "**Webservice** x16" as gitlab #32CD32
collections "**Sidekiq** x4" as sidekiq #ff8dd1
}
card "**Supporting Services**" as support
card "**Supporting Services** x2" as support
}
card "**Internal Load Balancer**" as ilb #9370DB
@@ -2439,29 +2439,42 @@ documents how to apply the calculated configuration to the Helm Chart.
#### Webservice
Webservice pods typically need about 1 vCPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 vCPUs and 5 GB of memory using
Webservice pods typically need about 1 CPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 CPUs and 5 GB of memory using
the [recommended topology](#cluster-topology) because four worker processes
are created by default and each pod has other small processes running.
For 50,000 users we recommend a total Puma worker count of around 320.
With the [provided recommendations](#cluster-topology) this allows the deployment of up to 80
Webservice pods with 4 workers per pod and 5 pods per node. Expand available resources using
the ratio of 1 vCPU to 1.25 GB of memory _per each worker process_ for each additional
the ratio of 1 CPU to 1.25 GB of memory _per each worker process_ for each additional
Webservice pod.
For further information on resource usage, see the [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
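At 50,000 users the same ratio scales to (assumed chart keys):

```yaml
gitlab:
  webservice:
    workerProcesses: 4
    minReplicas: 80       # 80 pods x 4 workers = 320 workers total
    maxReplicas: 80
    resources:
      requests: { cpu: 4, memory: 5G }
```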
#### Sidekiq
Sidekiq pods should generally have 1 vCPU and 2 GB of memory.
Sidekiq pods should generally have 0.9 CPU and 2 GB of memory.
[The provided starting point](#cluster-topology) allows the deployment of up to
14 Sidekiq pods. Expand available resources using the 1 vCPU to 2GB memory
14 Sidekiq pods. Expand available resources using the 0.9 CPU to 2 GB memory
ratio for each additional pod.
For further information on resource usage, see the [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
### Supporting
The Supporting Node Pool is designed to house all supporting deployments that don't need to be
on the Webservice and Sidekiq pools.
This includes various deployments related to the Cloud Provider's implementation and supporting
GitLab deployments such as NGINX or [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
If you wish to make any additional deployments, such as for Monitoring, it's recommended
to deploy these in this pool where possible and not in the Webservice or Sidekiq pools, as the Supporting pool has been designed
specifically to accommodate several additional deployments. However, if your deployments don't fit into the
pool as given, you can increase the node pool accordingly.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>

View File

@@ -2289,11 +2289,11 @@ card "Kubernetes via Helm Charts" as kubernetes {
card "**External Load Balancer**" as elb #6a9be7
together {
collections "**Webservice** x4" as gitlab #32CD32
collections "**Sidekiq** x4" as sidekiq #ff8dd1
collections "**Webservice** x5" as gitlab #32CD32
collections "**Sidekiq** x3" as sidekiq #ff8dd1
}
card "**Supporting Services**" as support
card "**Supporting Services** x2" as support
}
card "**Internal Load Balancer**" as ilb #9370DB
@@ -2356,29 +2356,42 @@ documents how to apply the calculated configuration to the Helm Chart.
#### Webservice
Webservice pods typically need about 1 vCPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 vCPUs and 5 GB of memory using
Webservice pods typically need about 1 CPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 CPUs and 5 GB of memory using
the [recommended topology](#cluster-topology) because four worker processes
are created by default and each pod has other small processes running.
For 5,000 users we recommend a total Puma worker count of around 40.
With the [provided recommendations](#cluster-topology) this allows the deployment of up to 10
Webservice pods with 4 workers per pod and 2 pods per node. Expand available resources using
the ratio of 1 vCPU to 1.25 GB of memory _per each worker process_ for each additional
the ratio of 1 CPU to 1.25 GB of memory _per each worker process_ for each additional
Webservice pod.
For further information on resource usage, see the [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
#### Sidekiq
Sidekiq pods should generally have 1 vCPU and 2 GB of memory.
Sidekiq pods should generally have 0.9 CPU and 2 GB of memory.
[The provided starting point](#cluster-topology) allows the deployment of up to
8 Sidekiq pods. Expand available resources using the 1 vCPU to 2GB memory
8 Sidekiq pods. Expand available resources using the 0.9 CPU to 2 GB memory
ratio for each additional pod.
For further information on resource usage, see the [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
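As Helm values, under the same assumed keys:

```yaml
gitlab:
  sidekiq:
    minReplicas: 8        # up to 8 pods per the starting point
    maxReplicas: 8
    resources:
      requests: { cpu: 900m, memory: 2G }
```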
### Supporting
The Supporting Node Pool is designed to house all supporting deployments that don't need to be
on the Webservice and Sidekiq pools.
This includes various deployments related to the Cloud Provider's implementation and supporting
GitLab deployments such as NGINX or [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
If you wish to make any additional deployments, such as for Monitoring, it's recommended
to deploy these in this pool where possible and not in the Webservice or Sidekiq pools, as the Supporting pool has been designed
specifically to accommodate several additional deployments. However, if your deployments don't fit into the
pool as given, you can increase the node pool accordingly.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>

View File

@@ -1896,13 +1896,10 @@ image:
#### `image:pull_policy`
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/21619) in GitLab 15.1 [with a flag](../../administration/feature_flags.md) named `ci_docker_image_pull_policy`. Disabled by default.
> - [Enabled on GitLab.com and self-managed](https://gitlab.com/gitlab-org/gitlab/-/issues/363186) in GitLab 15.2.
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/363186) in GitLab 15.4. [Feature flag `ci_docker_image_pull_policy`](https://gitlab.com/gitlab-org/gitlab/-/issues/363186) removed.
> - Requires GitLab Runner 15.1 or later.
FLAG:
On self-managed GitLab, by default this feature is not available. To make it available,
ask an administrator to [enable the feature flag](../../administration/feature_flags.md) named `ci_docker_image_pull_policy`.
The feature is not ready for production use.
The pull policy that the runner uses to fetch the Docker image.
**Keyword type**: Job keyword. You can use it only as part of a job or in the [`default` section](#default).
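For example, mirroring the syntax this keyword accepts (image names illustrative):

```yaml
job1:
  script: echo "A single pull policy."
  image:
    name: ruby:3.0
    pull_policy: if-not-present

job2:
  script: echo "Multiple pull policies."
  image:
    name: ruby:3.0
    pull_policy: [always, if-not-present]
```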
@@ -3642,12 +3639,9 @@ in that container.
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/21619) in GitLab 15.1 [with a flag](../../administration/feature_flags.md) named `ci_docker_image_pull_policy`. Disabled by default.
> - [Enabled on GitLab.com and self-managed](https://gitlab.com/gitlab-org/gitlab/-/issues/363186) in GitLab 15.2.
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/363186) in GitLab 15.4. [Feature flag `ci_docker_image_pull_policy`](https://gitlab.com/gitlab-org/gitlab/-/issues/363186) removed.
> - Requires GitLab Runner 15.1 or later.
FLAG:
On self-managed GitLab, by default this feature is available. To hide the feature,
ask an administrator to [disable the feature flag](../../administration/feature_flags.md) named `ci_docker_image_pull_policy`.
The pull policy that the runner uses to fetch the Docker image.
**Keyword type**: Job keyword. You can use it only as part of a job or in the [`default` section](#default).
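The service form takes the same values; a sketch grounded in the service specs elsewhere in this commit:

```yaml
job1:
  script: echo "A service with a pull policy."
  services:
    - name: postgres:11.9
      pull_policy: [always, if-not-present]
```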

View File

@@ -8,7 +8,7 @@ module API
expose :name, :entrypoint
expose :ports, using: Entities::Ci::JobRequest::Port
expose :pull_policy, if: ->(_) { ::Feature.enabled?(:ci_docker_image_pull_policy) }
expose :pull_policy
end
end
end

View File

@@ -8,7 +8,7 @@ module API
expose :name, :entrypoint
expose :ports, using: Entities::Ci::JobRequest::Port
expose :pull_policy, if: ->(_) { ::Feature.enabled?(:ci_docker_image_pull_policy) }
expose :pull_policy
expose :alias, :command
expose :variables
end

View File

@@ -11,10 +11,7 @@ module Gitlab
include ::Gitlab::Ci::Config::Entry::Imageable
validations do
validates :config, allowed_keys: IMAGEABLE_ALLOWED_KEYS,
if: :ci_docker_image_pull_policy_enabled?
validates :config, allowed_keys: IMAGEABLE_LEGACY_ALLOWED_KEYS,
unless: :ci_docker_image_pull_policy_enabled?
validates :config, allowed_keys: IMAGEABLE_ALLOWED_KEYS
end
def value
@@ -25,7 +22,7 @@ module Gitlab
name: @config[:name],
entrypoint: @config[:entrypoint],
ports: (ports_value if ports_defined?),
pull_policy: (ci_docker_image_pull_policy_enabled? ? pull_policy_value : nil)
pull_policy: pull_policy_value
}.compact
else
{}

View File

@@ -13,7 +13,6 @@ module Gitlab
include ::Gitlab::Config::Entry::Configurable
IMAGEABLE_ALLOWED_KEYS = %i[name entrypoint ports pull_policy].freeze
IMAGEABLE_LEGACY_ALLOWED_KEYS = %i[name entrypoint ports].freeze
included do
include ::Gitlab::Config::Entry::Validatable
@@ -47,10 +46,6 @@ module Gitlab
opt(:with_image_ports)
end
def ci_docker_image_pull_policy_enabled?
::Feature.enabled?(:ci_docker_image_pull_policy)
end
def skip_config_hash_validation?
true
end

View File

@@ -11,14 +11,9 @@ module Gitlab
include ::Gitlab::Ci::Config::Entry::Imageable
ALLOWED_KEYS = %i[command alias variables].freeze
LEGACY_ALLOWED_KEYS = %i[command alias variables].freeze
validations do
validates :config, allowed_keys: ALLOWED_KEYS + IMAGEABLE_ALLOWED_KEYS,
if: :ci_docker_image_pull_policy_enabled?
validates :config, allowed_keys: LEGACY_ALLOWED_KEYS + IMAGEABLE_LEGACY_ALLOWED_KEYS,
unless: :ci_docker_image_pull_policy_enabled?
validates :config, allowed_keys: ALLOWED_KEYS + IMAGEABLE_ALLOWED_KEYS
validates :command, array_of_strings: true, allow_nil: true
validates :alias, type: String, allow_nil: true
validates :alias, type: String, presence: true, unless: ->(record) { record.ports.blank? }
@@ -43,7 +38,7 @@ module Gitlab
{ name: @config }
elsif hash?
@config.merge(
pull_policy: (pull_policy_value if ci_docker_image_pull_policy_enabled?)
pull_policy: pull_policy_value
).compact
else
{}

View File

@@ -38,7 +38,7 @@ module Gitlab
if finder.is_a?(BranchesFinder)
Feature.enabled?(:branch_list_keyset_pagination, project)
elsif finder.is_a?(TagsFinder)
Feature.enabled?(:tag_list_keyset_pagination, project)
true
elsif finder.is_a?(::Repositories::TreeFinder)
Feature.enabled?(:repository_tree_gitaly_pagination, project)
else
@@ -52,7 +52,7 @@ module Gitlab
if finder.is_a?(BranchesFinder)
Feature.enabled?(:branch_list_keyset_pagination, project)
elsif finder.is_a?(TagsFinder)
Feature.enabled?(:tag_list_keyset_pagination, project)
true
elsif finder.is_a?(::Repositories::TreeFinder)
Feature.enabled?(:repository_tree_gitaly_pagination, project)
else

View File

@@ -176,13 +176,13 @@
"url-loader": "^4.1.1",
"uuid": "8.1.0",
"visibilityjs": "^1.2.4",
"vue": "^2.7.10",
"vue": "2.6.14",
"vue-apollo": "^3.0.7",
"vue-loader": "^15.10",
"vue-loader": "15.9.6",
"vue-observe-visibility": "^1.0.0",
"vue-resize": "^1.0.1",
"vue-router": "3.4.9",
"vue-template-compiler": "^2.7.10",
"vue-template-compiler": "2.6.14",
"vue-virtual-scroll-list": "^1.4.7",
"vuedraggable": "^2.23.0",
"vuex": "^3.6.2",

View File

@@ -1,12 +1,7 @@
# frozen_string_literal: true
module QA
# TODO: remove feature flag upon rollout completion
# FF rollout issue: https://gitlab.com/gitlab-org/gitlab/-/issues/363186
RSpec.describe 'Verify', :runner, feature_flag: {
name: 'ci_docker_image_pull_policy',
scope: :global
} do
RSpec.describe 'Verify', :runner do
describe 'Pipeline with image:pull_policy' do
let(:runner_name) { "qa-runner-#{Faker::Alphanumeric.alphanumeric(number: 8)}" }
let(:job_name) { "test-job-#{pull_policies.join('-')}" }
@@ -27,10 +22,6 @@ module QA
end
before do
Runtime::Feature.enable(:ci_docker_image_pull_policy)
# Give the feature some time to switch
sleep(30)
update_runner_policy(allowed_policies)
add_ci_file
Flow::Login.sign_in
@@ -39,8 +30,6 @@ module QA
end
after do
Runtime::Feature.disable(:ci_docker_image_pull_policy)
runner.remove_via_api!
end

View File

@@ -391,4 +391,36 @@ describe('GroupRunnersApp', () => {
});
});
});
describe('when user has permission to register group runner', () => {
beforeEach(() => {
createComponent({
propsData: {
registrationToken: mockRegistrationToken,
groupFullPath: mockGroupFullPath,
groupRunnersLimitedCount: mockGroupRunnersCount,
},
});
});
it('shows the register group runner button', () => {
expect(findRegistrationDropdown().exists()).toBe(true);
});
});
describe('when user has no permission to register group runner', () => {
beforeEach(() => {
createComponent({
propsData: {
registrationToken: null,
groupFullPath: mockGroupFullPath,
groupRunnersLimitedCount: mockGroupRunnersCount,
},
});
});
it('does not show the register group runner button', () => {
expect(findRegistrationDropdown().exists()).toBe(false);
});
});
});

View File

@@ -131,17 +131,32 @@ RSpec.describe Ci::RunnersHelper do
describe '#group_runners_data_attributes' do
let(:group) { create(:group) }
it 'returns group data to render a runner list' do
expect(helper.group_runners_data_attributes(group)).to include(
registration_token: group.runners_token,
group_id: group.id,
group_full_path: group.full_path,
runner_install_help_page: 'https://docs.gitlab.com/runner/install/',
online_contact_timeout_secs: 7200,
stale_timeout_secs: 7889238,
empty_state_svg_path: start_with('/assets/illustrations/pipelines_empty'),
empty_state_filtered_svg_path: start_with('/assets/illustrations/magnifying-glass')
)
context 'when user can register group runners' do
before do
allow(helper).to receive(:can?).with(user, :register_group_runners, group).and_return(true)
end
it 'returns group data to render a runner list' do
expect(helper.group_runners_data_attributes(group)).to include(
group_id: group.id,
group_full_path: group.full_path,
runner_install_help_page: 'https://docs.gitlab.com/runner/install/',
online_contact_timeout_secs: 7200,
stale_timeout_secs: 7889238,
empty_state_svg_path: start_with('/assets/illustrations/pipelines_empty'),
empty_state_filtered_svg_path: start_with('/assets/illustrations/magnifying-glass')
)
end
end
context 'when user cannot register group runners' do
before do
allow(helper).to receive(:can?).with(user, :register_group_runners, group).and_return(false)
end
it 'returns empty registration token' do
expect(helper.group_runners_data_attributes(group)).not_to include(registration_token: group.runners_token)
end
end
end

View File

@@ -0,0 +1,80 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'load_balancing', :delete, :reestablished_active_record_base do
subject(:initialize_load_balancer) do
load Rails.root.join('config/initializers/load_balancing.rb')
end
context 'for a clustered puma worker' do
let!(:group) { create(:group, name: 'my group') }
before do
# Setup host-based load balancing
# Patch in our load balancer config, simply pointing at the test database twice
allow(Gitlab::Database::LoadBalancing::Configuration).to receive(:for_model) do |base_model|
db_host = base_model.connection_pool.db_config.host
Gitlab::Database::LoadBalancing::Configuration.new(base_model, [db_host, db_host])
end
# Pretend we are in clustered environment
allow(Gitlab::Cluster::LifecycleEvents).to receive(:in_clustered_puma?).and_return(true)
# Stub out middleware call, as not idempotent
allow(Gitlab::Application.instance.middleware).to receive(:use)
end
after do
# reset load balancing to original state
allow(Gitlab::Database::LoadBalancing::Configuration).to receive(:for_model).and_call_original
allow(Gitlab::Cluster::LifecycleEvents).to receive(:in_clustered_puma?).and_call_original
load Rails.root.join('config/initializers/load_balancing.rb')
end
def simulate_puma_worker
pid = Process.fork do
# We call this in config/puma.rb
Gitlab::Cluster::LifecycleEvents.do_worker_start
yield
end
Process.waitpid(pid)
expect($?).to be_success
end
it 'makes a query to a replica successfully' do
# Clear any previous sticky writes
::Gitlab::Database::LoadBalancing::Session.clear_session
initialize_load_balancer
process_read, process_write = IO.pipe
simulate_puma_worker do
process_read.close
group = Group.find_by_name('my group')
process_write.write group.name
end
process_write.close
expect(process_read.read).to eq(group.name)
end
it 'makes a query to the primary successfully' do
initialize_load_balancer
expect do
simulate_puma_worker do
Group.touch_all
end
group.reload
end.to change(group, :updated_at)
end
end
end

View File

@@ -32,14 +32,4 @@ RSpec.describe API::Entities::Ci::JobRequest::Image do
it 'returns the pull policy' do
expect(subject[:pull_policy]).to eq(['if-not-present'])
end
context 'when the FF ci_docker_image_pull_policy is disabled' do
before do
stub_feature_flags(ci_docker_image_pull_policy: false)
end
it 'does not return the pull policy' do
expect(subject).not_to have_key(:pull_policy)
end
end
end

View File

@@ -40,12 +40,4 @@ RSpec.describe API::Entities::Ci::JobRequest::Service do
expect(subject[:ports]).to be_nil
end
end
context 'when the FF ci_docker_image_pull_policy is disabled' do
before do
stub_feature_flags(ci_docker_image_pull_policy: false)
end
it { is_expected.not_to have_key(:pull_policy) }
end
end

View File

@@ -4,8 +4,6 @@ require 'spec_helper'
RSpec.describe Gitlab::Ci::Config::Entry::Image do
before do
stub_feature_flags(ci_docker_image_pull_policy: true)
entry.compose!
end
@@ -129,18 +127,6 @@ RSpec.describe Gitlab::Ci::Config::Entry::Image do
it 'is valid' do
expect(entry).to be_valid
end
context 'when the feature flag ci_docker_image_pull_policy is disabled' do
before do
stub_feature_flags(ci_docker_image_pull_policy: false)
entry.compose!
end
it 'is not valid' do
expect(entry).not_to be_valid
expect(entry.errors).to include('image config contains unknown keys: pull_policy')
end
end
end
describe '#value' do
@@ -150,19 +136,6 @@ RSpec.describe Gitlab::Ci::Config::Entry::Image do
pull_policy: ['if-not-present']
)
end
context 'when the feature flag ci_docker_image_pull_policy is disabled' do
before do
stub_feature_flags(ci_docker_image_pull_policy: false)
entry.compose!
end
it 'is not valid' do
expect(entry.value).to eq(
name: 'image:1.0'
)
end
end
end
end
end

View File

@@ -4,7 +4,6 @@ require 'spec_helper'
RSpec.describe Gitlab::Ci::Config::Entry::Service do
before do
stub_feature_flags(ci_docker_image_pull_policy: true)
entry.compose!
end
@@ -149,18 +148,6 @@ RSpec.describe Gitlab::Ci::Config::Entry::Service do
it 'is valid' do
expect(entry).to be_valid
end
context 'when the feature flag ci_docker_image_pull_policy is disabled' do
before do
stub_feature_flags(ci_docker_image_pull_policy: false)
entry.compose!
end
it 'is not valid' do
expect(entry).not_to be_valid
expect(entry.errors).to include('service config contains unknown keys: pull_policy')
end
end
end
describe '#value' do
@@ -170,18 +157,6 @@ RSpec.describe Gitlab::Ci::Config::Entry::Service do
pull_policy: ['if-not-present']
)
end
context 'when the feature flag ci_docker_image_pull_policy is disabled' do
before do
stub_feature_flags(ci_docker_image_pull_policy: false)
end
it 'is not valid' do
expect(entry.value).to eq(
name: 'postgresql:9.5'
)
end
end
end
end
end

View File

@@ -997,18 +997,6 @@ module Gitlab
scheduling_type: :stage
})
end
context 'when the feature flag ci_docker_image_pull_policy is disabled' do
before do
stub_feature_flags(ci_docker_image_pull_policy: false)
end
it { is_expected.not_to be_valid }
it "returns no job" do
expect(processor.jobs).to eq({})
end
end
end
context 'when a service has pull_policy' do
@@ -1042,18 +1030,6 @@ module Gitlab
scheduling_type: :stage
})
end
context 'when the feature flag ci_docker_image_pull_policy is disabled' do
before do
stub_feature_flags(ci_docker_image_pull_policy: false)
end
it { is_expected.not_to be_valid }
it "returns no job" do
expect(processor.jobs).to eq({})
end
end
end
end

File diff suppressed because it is too large

View File

@@ -833,22 +833,6 @@ RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
'image' => { 'name' => 'ruby', 'pull_policy' => ['if-not-present'], 'entrypoint' => nil, 'ports' => [] }
)
end
context 'when the FF ci_docker_image_pull_policy is disabled' do
before do
stub_feature_flags(ci_docker_image_pull_policy: false)
end
it 'returns the image without pull policy' do
request_job
expect(response).to have_gitlab_http_status(:created)
expect(json_response).to include(
'id' => job.id,
'image' => { 'name' => 'ruby', 'entrypoint' => nil, 'ports' => [] }
)
end
end
end
context 'when service has pull_policy' do
@@ -873,23 +857,6 @@ RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
'ports' => [], 'pull_policy' => ['if-not-present'], 'variables' => [] }]
)
end
context 'when the FF ci_docker_image_pull_policy is disabled' do
before do
stub_feature_flags(ci_docker_image_pull_policy: false)
end
it 'returns the service without pull policy' do
request_job
expect(response).to have_gitlab_http_status(:created)
expect(json_response).to include(
'id' => job.id,
'services' => [{ 'alias' => nil, 'command' => nil, 'entrypoint' => nil, 'name' => 'postgres:11.9',
'ports' => [], 'variables' => [] }]
)
end
end
end
describe 'a job with excluded artifacts' do

View File

@@ -17,10 +17,6 @@ RSpec.describe API::Tags do
end
describe 'GET /projects/:id/repository/tags', :use_clean_rails_memory_store_caching do
before do
stub_feature_flags(tag_list_keyset_pagination: false)
end
let(:route) { "/projects/#{project_id}/repository/tags" }
context 'sorting' do
@@ -154,50 +150,44 @@
end
end
context 'with keyset pagination on', :aggregate_errors do
before do
stub_feature_flags(tag_list_keyset_pagination: true)
end
context 'with keyset pagination option', :aggregate_errors do
let(:base_params) { { pagination: 'keyset' } }
context 'with keyset pagination option' do
let(:base_params) { { pagination: 'keyset' } }
context 'with gitaly pagination params' do
context 'with high limit' do
let(:params) { base_params.merge(per_page: 100) }
context 'with gitaly pagination params' do
context 'with high limit' do
let(:params) { base_params.merge(per_page: 100) }
it 'returns all repository tags' do
get api(route, user), params: params
it 'returns all repository tags' do
get api(route, user), params: params
expect(response).to have_gitlab_http_status(:ok)
expect(response).to match_response_schema('public_api/v4/tags')
expect(response.headers).not_to include('Link')
tag_names = json_response.map { |x| x['name'] }
expect(tag_names).to match_array(project.repository.tag_names)
end
expect(response).to have_gitlab_http_status(:ok)
expect(response).to match_response_schema('public_api/v4/tags')
expect(response.headers).not_to include('Link')
tag_names = json_response.map { |x| x['name'] }
expect(tag_names).to match_array(project.repository.tag_names)
end
end
context 'with low limit' do
let(:params) { base_params.merge(per_page: 2) }
context 'with low limit' do
let(:params) { base_params.merge(per_page: 2) }
it 'returns limited repository tags' do
get api(route, user), params: params
it 'returns limited repository tags' do
get api(route, user), params: params
expect(response).to have_gitlab_http_status(:ok)
expect(response).to match_response_schema('public_api/v4/tags')
expect(response.headers).to include('Link')
tag_names = json_response.map { |x| x['name'] }
expect(tag_names).to match_array(%w(v1.1.0 v1.1.1))
end
expect(response).to have_gitlab_http_status(:ok)
expect(response).to match_response_schema('public_api/v4/tags')
expect(response.headers).to include('Link')
tag_names = json_response.map { |x| x['name'] }
expect(tag_names).to match_array(%w(v1.1.0 v1.1.1))
end
end
context 'with missing page token' do
let(:params) { base_params.merge(page_token: 'unknown') }
context 'with missing page token' do
let(:params) { base_params.merge(page_token: 'unknown') }
it_behaves_like '422 response' do
let(:request) { get api(route, user), params: params }
let(:message) { 'Invalid page token: refs/tags/unknown' }
end
it_behaves_like '422 response' do
let(:request) { get api(route, user), params: params }
let(:message) { 'Invalid page token: refs/tags/unknown' }
end
end
end

View File

@@ -27,7 +27,7 @@ RSpec.describe GroupAccessTokenEntity do
scopes: token.scopes,
user_id: token.user_id,
revoke_path: expected_revoke_path,
access_level: ::Gitlab::Access::DEVELOPER
role: 'Developer'
))
expect(json).not_to include(:token)
@@ -48,7 +48,7 @@
scopes: token.scopes,
user_id: token.user_id,
revoke_path: expected_revoke_path,
access_level: nil
role: nil
))
expect(json).not_to include(:token)

View File

@@ -28,7 +28,7 @@ RSpec.describe ProjectAccessTokenEntity do
scopes: token.scopes,
user_id: token.user_id,
revoke_path: expected_revoke_path,
access_level: ::Gitlab::Access::DEVELOPER
role: 'Developer'
))
expect(json).not_to include(:token)
@@ -52,7 +52,7 @@
scopes: token.scopes,
user_id: token.user_id,
revoke_path: expected_revoke_path,
access_level: nil
role: nil
))
expect(json).not_to include(:token)

View File

@@ -344,6 +344,7 @@ module GitalySetup
def spawn_gitaly(toml = nil)
check_gitaly_config!
spawning_process = Process.pid
pids = []
if toml
@@ -364,6 +365,8 @@
# running until `make test` cleans it up.
next if ENV['GITALY_PID_FILE']
next if Process.pid != spawning_process
pids.each { |pid| stop(pid) }
end
rescue StandardError

View File

@@ -23,7 +23,7 @@ module TestEnv
'merged-target' => '21751bf',
'markdown' => '0ed8c6c',
'lfs' => '55bc176',
'master' => 'b83d6e3',
'master' => 'b83d6e391c22777fca1ed3012fce84f633d7fed0',
'merge-test' => '5937ac0',
"'test'" => 'e56497b',
'orphaned-branch' => '45127a9',

View File

@@ -30,6 +30,27 @@ RSpec.shared_examples 'a deploy token in settings' do
expect(page).to have_selector("input[name='deploy-token-user'][value='deployer']")
expect(page).to have_selector("input[name='deploy-token'][readonly='readonly']")
end
expect(find("input#deploy_token_name").value).to eq nil
expect(find("input#deploy_token_read_repository").checked?).to eq false
end
context "with form errors" do
before do
visit page_path
fill_in "deploy_token_name", with: "new_deploy_key"
fill_in "deploy_token_username", with: "deployer"
click_button "Create deploy token"
end
it "shows form errors" do
expect(page).to have_text("Scopes can't be blank")
end
it "keeps form inputs" do
expect(find("input#deploy_token_name").value).to eq "new_deploy_key"
expect(find("input#deploy_token_username").value).to eq "deployer"
end
end
context 'when User#time_display_relative is false', :js do
View File
@ -0,0 +1,28 @@
workflow:
rules:
- if: $CI_MERGE_REQUEST_ID
.rspec:
cache:
key: omniauth-salesforce
paths:
- vendor/gems/omniauth-salesforce/vendor/ruby
before_script:
- cd vendor/gems/omniauth-salesforce
- ruby -v # Print out ruby version for debugging
- gem install bundler --no-document # Bundler is not installed with the image
- bundle config set --local path 'vendor' # Install dependencies into ./vendor/ruby
- bundle config set with 'development'
- bundle config set --local frozen 'true' # Disallow Gemfile.lock changes on CI
- bundle config # Show bundler configuration
- bundle install -j $(nproc)
script:
- bundle exec rspec
rspec-2.7:
image: "ruby:2.7"
extends: .rspec
rspec-3.0:
image: "ruby:3.0"
extends: .rspec
vendor/gems/omniauth-salesforce/Gemfile vendored Executable file
View File
@ -0,0 +1,12 @@
source 'https://rubygems.org'
# Specify your gem's dependencies in omniauth-salesforce.gemspec
gemspec
group :development, :test do
gem 'guard'
gem 'guard-rspec'
gem 'guard-bundler'
gem 'rb-fsevent'
gem 'growl'
end
View File
@ -0,0 +1,121 @@
PATH
remote: .
specs:
omniauth-salesforce (1.0.5)
omniauth (~> 1.0, < 3)
omniauth-oauth2 (~> 1.0)
GEM
remote: https://rubygems.org/
specs:
addressable (2.8.1)
public_suffix (>= 2.0.2, < 6.0)
coderay (1.1.3)
crack (0.4.5)
rexml
diff-lcs (1.5.0)
docile (1.4.0)
faraday (2.5.2)
faraday-net_http (>= 2.0, < 3.1)
ruby2_keywords (>= 0.0.4)
faraday-net_http (3.0.0)
ffi (1.15.5)
formatador (1.1.0)
growl (1.0.3)
guard (2.18.0)
formatador (>= 0.2.4)
listen (>= 2.7, < 4.0)
lumberjack (>= 1.0.12, < 2.0)
nenv (~> 0.1)
notiffany (~> 0.0)
pry (>= 0.13.0)
shellany (~> 0.0)
thor (>= 0.18.1)
guard-bundler (3.0.0)
bundler (>= 2.1, < 3)
guard (~> 2.2)
guard-compat (~> 1.1)
guard-compat (1.2.1)
guard-rspec (4.7.3)
guard (~> 2.1)
guard-compat (~> 1.1)
rspec (>= 2.99.0, < 4.0)
hashdiff (1.0.1)
hashie (5.0.0)
jwt (2.4.1)
listen (3.7.1)
rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10)
lumberjack (1.2.8)
method_source (1.0.0)
multi_xml (0.6.0)
nenv (0.3.0)
notiffany (0.1.3)
nenv (~> 0.1)
shellany (~> 0.0)
oauth2 (2.0.7)
faraday (>= 0.17.3, < 3.0)
jwt (>= 1.0, < 3.0)
multi_xml (~> 0.5)
rack (>= 1.2, < 3)
rash_alt (>= 0.4, < 1)
version_gem (~> 1.1)
omniauth (1.9.2)
hashie (>= 3.4.6)
rack (>= 1.6.2, < 3)
omniauth-oauth2 (1.7.3)
oauth2 (>= 1.4, < 3)
omniauth (>= 1.9, < 3)
pry (0.14.1)
coderay (~> 1.1)
method_source (~> 1.0)
public_suffix (5.0.0)
rack (2.2.4)
rack-test (2.0.2)
rack (>= 1.3)
rash_alt (0.4.12)
hashie (>= 3.4)
rb-fsevent (0.11.1)
rb-inotify (0.10.1)
ffi (~> 1.0)
rexml (3.2.5)
rspec (2.99.0)
rspec-core (~> 2.99.0)
rspec-expectations (~> 2.99.0)
rspec-mocks (~> 2.99.0)
rspec-core (2.99.2)
rspec-expectations (2.99.2)
diff-lcs (>= 1.1.3, < 2.0)
rspec-mocks (2.99.4)
ruby2_keywords (0.0.5)
shellany (0.0.1)
simplecov (0.21.2)
docile (~> 1.1)
simplecov-html (~> 0.11)
simplecov_json_formatter (~> 0.1)
simplecov-html (0.12.3)
simplecov_json_formatter (0.1.4)
thor (1.2.1)
version_gem (1.1.0)
webmock (3.18.1)
addressable (>= 2.8.0)
crack (>= 0.3.2)
hashdiff (>= 0.4.0, < 2.0.0)
PLATFORMS
ruby
DEPENDENCIES
growl
guard
guard-bundler
guard-rspec
omniauth-salesforce!
rack-test
rb-fsevent
rspec (~> 2.7)
simplecov
webmock
BUNDLED WITH
2.3.20
vendor/gems/omniauth-salesforce/Guardfile vendored Executable file
View File
@ -0,0 +1,10 @@
guard 'rspec', :version => 2 do
watch(%r{^spec/.+_spec\.rb$})
watch(%r{^lib/(.+)\.rb$}) { |m| "spec/#{m[1]}_spec.rb" }
watch('spec/spec_helper.rb') { "spec" }
end
guard 'bundler' do
watch('Gemfile')
watch('omniauth-salesforce.gemspec')
end
View File
@ -0,0 +1,5 @@
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/gems/omniauth-salesforce/README.md vendored Executable file
View File
@ -0,0 +1,60 @@
# omniauth-salesforce
This is a fork of [omniauth-salesforce](https://github.com/realdoug/omniauth-salesforce) to support:
1. OmniAuth v1 and v2. OmniAuth v2 disables GET requests by default
and defaults to POST. GitLab has already patched v1 to use POST,
but other dependencies still need to be updated:
https://gitlab.com/gitlab-org/gitlab/-/issues/30073.
There is active discussion with the gem owner (via email) about adding some GitLab employees as gem
authors so that they can push changes. If that happens, the updated/canonical gem
should be used instead of this vendored fork.
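For context, a minimal sketch of the v2 behavior described above (not part of this gem; `allowed_request_methods` is OmniAuth v2 configuration):
```ruby
# OmniAuth v2 defaults to POST-only request phases; GET must be
# re-enabled explicitly and is discouraged for CSRF reasons.
OmniAuth.config.allowed_request_methods = %i[post]
```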
[OmniAuth](https://github.com/intridea/omniauth) Strategy for [salesforce.com](https://salesforce.com).
Note: This is a fork of the [original](https://github.com/richardvanhook/omniauth-salesforce) project and is now the main repository for the omniauth-salesforce gem.
## See it in action
[http://omniauth-salesforce-example.herokuapp.com](http://omniauth-salesforce-example.herokuapp.com)
[Source for above app](https://github.com/richardvanhook/omniauth-salesforce-example)
## Basic Usage
```ruby
require "sinatra"
require "omniauth"
require "omniauth-salesforce"
class MyApplication < Sinatra::Base
use Rack::Session::Cookie
use OmniAuth::Builder do
provider :salesforce, ENV['SALESFORCE_KEY'], ENV['SALESFORCE_SECRET']
end
end
```
## Including other sites
```ruby
use OmniAuth::Builder do
provider :salesforce,
ENV['SALESFORCE_KEY'],
ENV['SALESFORCE_SECRET']
provider OmniAuth::Strategies::SalesforceSandbox,
ENV['SALESFORCE_SANDBOX_KEY'],
ENV['SALESFORCE_SANDBOX_SECRET']
provider OmniAuth::Strategies::SalesforcePreRelease,
ENV['SALESFORCE_PRERELEASE_KEY'],
ENV['SALESFORCE_PRERELEASE_SECRET']
provider OmniAuth::Strategies::DatabaseDotCom,
ENV['DATABASE_DOT_COM_KEY'],
ENV['DATABASE_DOT_COM_SECRET']
end
```
## Resources
* [Article: Digging Deeper into OAuth 2.0 on Force.com](http://wiki.developerforce.com/index.php/Digging_Deeper_into_OAuth_2.0_on_Force.com)
vendor/gems/omniauth-salesforce/Rakefile vendored Executable file
View File
@ -0,0 +1,12 @@
#!/usr/bin/env rake
require "bundler/gem_tasks"
require 'rspec/core/rake_task'
desc 'Default: run specs.'
task :default => :spec
desc "Run specs"
RSpec::Core::RakeTask.new
desc 'Run specs'
task :default => :spec
View File
@ -0,0 +1,2 @@
require "omniauth-salesforce/version"
require 'omniauth/strategies/salesforce'
View File
@ -0,0 +1,5 @@
module OmniAuth
module Salesforce
VERSION = "1.0.5"
end
end
View File
@ -0,0 +1,97 @@
require 'omniauth-oauth2'
require 'openssl'
require 'base64'
module OmniAuth
module Strategies
class Salesforce < OmniAuth::Strategies::OAuth2
MOBILE_USER_AGENTS = 'webos|ipod|iphone|ipad|android|blackberry|mobile'
option :client_options, {
:site => 'https://login.salesforce.com',
:authorize_url => '/services/oauth2/authorize',
:token_url => '/services/oauth2/token'
}
option :authorize_options, [
:scope,
:display,
:immediate,
:state,
:prompt
]
def request_phase
req = Rack::Request.new(@env)
options.update(req.params)
ua = req.user_agent.to_s
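# If the caller did not pass :display, infer it from the user agent:
# mobile browsers get Salesforce's 'touch' UI, everything else 'page'.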
if !options.has_key?(:display)
mobile_request = ua.downcase =~ Regexp.new(MOBILE_USER_AGENTS)
options[:display] = mobile_request ? 'touch' : 'page'
end
super
end
def auth_hash
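# Salesforce signs (id + issued_at) with the consumer secret; recompute
# the HMAC-SHA256 and compare it to the 'signature' param it returned.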
signed_value = access_token.params['id'] + access_token.params['issued_at']
raw_expected_signature = OpenSSL::HMAC.digest('sha256', options.client_secret.to_s, signed_value)
expected_signature = Base64.strict_encode64 raw_expected_signature
signature = access_token.params['signature']
fail! "Salesforce user id did not match signature!" unless signature == expected_signature
super
end
uid { raw_info['id'] }
info do
{
'name' => raw_info['display_name'],
'email' => raw_info['email'],
'nickname' => raw_info['nick_name'],
'first_name' => raw_info['first_name'],
'last_name' => raw_info['last_name'],
'location' => '',
'description' => '',
'image' => raw_info['photos']['thumbnail'] + "?oauth_token=#{access_token.token}",
'phone' => '',
'urls' => raw_info['urls']
}
end
credentials do
hash = {'token' => access_token.token}
hash.merge!('instance_url' => access_token.params["instance_url"])
hash.merge!('refresh_token' => access_token.refresh_token) if access_token.refresh_token
hash
end
def raw_info
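# The token response's 'id' param is the identity URL; POST to it with
# the access token sent in the Authorization header to fetch the profile.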
access_token.options[:mode] = :header
@raw_info ||= access_token.post(access_token['id']).parsed
end
extra do
raw_info.merge({
'instance_url' => access_token.params['instance_url'],
'pod' => access_token.params['instance_url'],
'signature' => access_token.params['signature'],
'issued_at' => access_token.params['issued_at']
})
end
end
class SalesforceSandbox < OmniAuth::Strategies::Salesforce
default_options[:client_options][:site] = 'https://test.salesforce.com'
end
class DatabaseDotCom < OmniAuth::Strategies::Salesforce
default_options[:client_options][:site] = 'https://login.database.com'
end
class SalesforcePreRelease < OmniAuth::Strategies::Salesforce
default_options[:client_options][:site] = 'https://prerellogin.pre.salesforce.com/'
end
end
end
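As a standalone illustration of the signature check above, a minimal sketch using hypothetical values in place of a live Salesforce token response:
```ruby
require 'openssl'
require 'base64'

# Hypothetical inputs; in the strategy these come from
# access_token.params and options.client_secret.
client_secret = 'consumer-secret'
id            = 'https://login.salesforce.com/id/ORGID/USERID'
issued_at     = '1331142541514'

# Same construction as auth_hash: HMAC-SHA256 over id + issued_at,
# Base64-encoded, then compared against the 'signature' param.
expected = Base64.strict_encode64(
  OpenSSL::HMAC.digest('sha256', client_secret, id + issued_at)
)
```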
View File
@ -0,0 +1,24 @@
# -*- encoding: utf-8 -*-
require File.expand_path('../lib/omniauth-salesforce/version', __FILE__)
Gem::Specification.new do |gem|
gem.authors = ["Richard Vanhook"]
gem.email = ["rvanhook@salesforce.com"]
gem.description = %q{OmniAuth strategy for salesforce.com.}
gem.summary = %q{OmniAuth strategy for salesforce.com.}
gem.homepage = "https://github.com/realdoug/omniauth-salesforce"
gem.files = Dir.glob("lib/**/*.*")
gem.test_files = Dir.glob("spec/**/**/*.*")
gem.name = "omniauth-salesforce"
gem.require_paths = ["lib"]
gem.version = OmniAuth::Salesforce::VERSION
gem.license = "MIT"
gem.add_dependency 'omniauth', '~> 1.0', '< 3'
gem.add_dependency 'omniauth-oauth2', '~> 1.0'
gem.add_development_dependency 'rspec', '~> 2.7'
gem.add_development_dependency 'rack-test'
gem.add_development_dependency 'simplecov'
gem.add_development_dependency 'webmock'
end
View File
@ -0,0 +1,219 @@
require 'spec_helper'
describe OmniAuth::Strategies::Salesforce do
strategy = nil
before do
OmniAuth.config.test_mode = true
rack_app = []
rack_app.stub :call
strategy = OmniAuth::Strategies::Salesforce.new rack_app, 'Consumer Key', 'Consumer Secret'
end
describe "request_phase" do
env = nil
before do
env = {
'rack.session' => {},
'HTTP_USER_AGENT' => 'unknown',
'REQUEST_METHOD' => 'GET',
'rack.input' => '',
'rack.url_scheme' => 'http',
'SERVER_NAME' => 'server.example',
'QUERY_STRING' => 'code=xxxx',
'SCRIPT_NAME' => '',
'SERVER_PORT' => 80
}
end
context "when using a mobile browser" do
user_agents = {
:Pre => "Mozilla/5.0 (webOS/1.4.0; U; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Version/1.0 Safari/532.2 Pre/1.1",
:iPod => "Mozilla/5.0 (iPod; U; CPU like Mac OS X; en) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/4A93 Safari/419.3",
:iPhone => "Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543 Safari/419.3",
:iPad => "Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10",
:Nexus => "Mozilla/5.0 (Linux; U; Android 2.2; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
:myTouch => "Mozilla/5.0 (Linux; U; Android 1.6; en-us; WOWMobile myTouch 3G Build/unknown) AppleWebKit/528.5+ (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
:Storm => "BlackBerry9530/4.7.0.148 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/105",
:Torch => "Mozilla/5.0 (BlackBerry; U; BlackBerry 9810; en-US) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.0.0 Mobile Safari/534.11+",
:generic_mobile => "some mobile device"
}
user_agents.each_pair do |name, agent|
context "with the user agent from a #{name.to_s}" do
before do
env['HTTP_USER_AGENT'] = agent
strategy.call!(env)
strategy.request_phase
end
subject {strategy.options}
it "sets the :display option to 'touch'" do
subject[:display].should == 'touch'
end
end
end
end
context "when using a desktop browser" do
user_agents = {
:Chrome => "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.21 (KHTML, like Gecko) Chrome/19.0.1042.0 Safari/535.21",
:Safari => "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; de-at) AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1",
:IE => "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
:anything_else => "unknown"
}
user_agents.each_pair do |name, agent|
context "with the user agent from #{name.to_s}" do
before do
env['HTTP_USER_AGENT'] = agent
strategy.call!(env)
strategy.request_phase
end
subject {strategy.options}
it "sets the :display option to 'page'" do
subject[:display].should == 'page'
end
end
end
end
end
describe "callback phase" do
raw_info = nil
before do
raw_info = {
'id' => 'salesforce id',
'display_name' => 'display name',
'email' => 'email',
'nick_name' => 'nick name',
'first_name' => 'first name',
'last_name' => 'last name',
'photos' => {'thumbnail' => '/thumbnail/url'},
'urls'=> {
"enterprise" => "https://salesforce.example/services",
"metadata" => "https://salesforce.example/services"
}
}
client = OAuth2::Client.new 'id', 'secret', {:site => 'example.com'}
access_token = OAuth2::AccessToken.from_hash client, {
'access_token' => 'token',
'instance_url' => 'http://instance.salesforce.example',
'signature' => 'invalid',
'issued_at' => '1296458209517'
}
strategy.stub(:raw_info) { raw_info }
strategy.stub(:access_token) { access_token }
end
describe "uid" do
it "sets the id" do
strategy.uid.should == raw_info['id']
end
end
describe "info" do
subject { strategy.info }
it "returns an info hash" do
subject.should_not be_nil
end
it "sets name" do
subject['name'].should == raw_info['display_name']
end
it "sets email" do
subject['email'].should == raw_info['email']
end
it "sets nickname" do
subject['nickname'].should == raw_info['nick_name']
end
it "sets first_name" do
subject['first_name'].should == raw_info['first_name']
end
it "sets last_name" do
subject['last_name'].should == raw_info['last_name']
end
it "sets location" do
subject['location'].should == ''
end
it "sets description" do
subject['description'].should == ''
end
it "sets image" do
subject['image'].should == raw_info['photos']['thumbnail'] + "?oauth_token=#{strategy.access_token.token}"
end
it "sets phone" do
subject['phone'].should == ''
end
it "sets urls" do
subject['urls'].should == raw_info['urls']
end
end
describe "credentials" do
subject { strategy.credentials }
it "sets token" do
subject['token'].should == strategy.access_token.token
end
it "sets instance_url" do
subject['instance_url'].should == strategy.access_token.params["instance_url"]
end
context "given a refresh token" do
it "sets refresh_token" do
subject['refresh_token'].should == strategy.access_token.refresh_token
end
end
context "when not given a refresh token" do
it "does not set a refresh token" do
subject['refresh_token'].should be_nil
end
end
end
describe "extra" do
subject { strategy.extra }
it "sets instance_url" do
subject['instance_url'].should == strategy.access_token.params['instance_url']
end
it "sets pod" do
subject['pod'].should == strategy.access_token.params['instance_url']
end
it "sets signature" do
subject['signature'].should == strategy.access_token.params['signature']
end
it "sets issued_at" do
subject['issued_at'].should == strategy.access_token.params['issued_at']
end
end
describe "user id validation" do
client_id = nil
issued_at = nil
signature = nil
instance_url = 'http://instance.salesforce.example'
before do
client_id = "https://login.salesforce.com/id/00Dd0000000d45TEBQ/005d0000000fyGPCCY"
issued_at = "1331142541514"
signature = Base64.strict_encode64(OpenSSL::HMAC.digest('sha256', strategy.options.client_secret.to_s, client_id + issued_at))
end
context "when the signature does not match" do
before do
access_token = OAuth2::AccessToken.from_hash strategy.access_token.client, {
'id' => 'forged client id',
'refresh_token' => 'anything',
'issued_at' => issued_at,
'instance_url' => 'http://instance.salesforce.example',
'signature' => signature
}
strategy.stub(:access_token) { access_token }
end
it "should call fail!" do
strategy.should_receive(:fail!)
strategy.auth_hash
end
end
context "when the signature does match" do
before do
access_token = OAuth2::AccessToken.from_hash strategy.access_token.client, {
'id' => client_id,
'refresh_token' => 'anything',
'issued_at' => issued_at,
'instance_url' => 'http://instance.salesforce.example',
'signature' => signature
}
strategy.stub(:access_token) { access_token }
end
it "should not fail" do
strategy.should_not_receive(:fail!)
strategy.auth_hash
end
end
end
end
end
View File
@ -0,0 +1,16 @@
$:.unshift File.expand_path('..', __FILE__)
$:.unshift File.expand_path('../../lib', __FILE__)
require 'simplecov'
SimpleCov.start
require 'rspec'
require 'rack/test'
require 'webmock/rspec'
require 'omniauth'
require 'omniauth-salesforce'
RSpec.configure do |config|
config.include WebMock::API
config.include Rack::Test::Methods
config.extend OmniAuth::Test::StrategyMacros, :type => :strategy
end
View File
@ -288,7 +288,7 @@ func configureRoutes(u *upstream) {
u.route("PUT", apiProjectPattern+`/packages/debian/`, requestBodyUploader),
// RPM Artifact Repository
u.route("POST", apiProjectPattern+`packages/rpm/`, requestBodyUploader),
u.route("POST", apiProjectPattern+`/packages/rpm/`, requestBodyUploader),
// Gem Artifact Repository
u.route("POST", apiProjectPattern+`/packages/rubygems/`, requestBodyUploader),
View File
@ -302,7 +302,7 @@
chalk "^2.0.0"
js-tokens "^4.0.0"
"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.16.7", "@babel/parser@^7.16.8", "@babel/parser@^7.18.4", "@babel/parser@^7.18.5":
"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.16.7", "@babel/parser@^7.16.8", "@babel/parser@^7.18.5":
version "7.18.11"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.18.11.tgz#68bb07ab3d380affa9a3f96728df07969645d2d9"
integrity sha512-9JKn5vN+hDt0Hdqn1PiJ2guflwP+B6Ga8qbDuoF0PzzVhrzsKIJo8yGqVk6CmMHiMei9w1C1Bp9IMJSIK+HPIQ==
@ -2214,15 +2214,6 @@
semver "^6.3.0"
tsutils "^3.17.1"
"@vue/compiler-sfc@2.7.10":
version "2.7.10"
resolved "https://registry.yarnpkg.com/@vue/compiler-sfc/-/compiler-sfc-2.7.10.tgz#3fe08e780053a3bbf41328c65ae5dfdee0385206"
integrity sha512-55Shns6WPxlYsz4WX7q9ZJBL77sKE1ZAYNYStLs6GbhIOMrNtjMvzcob6gu3cGlfpCR4bT7NXgyJ3tly2+Hx8Q==
dependencies:
"@babel/parser" "^7.18.4"
postcss "^8.4.14"
source-map "^0.6.1"
"@vue/component-compiler-utils@^3.1.0":
version "3.3.0"
resolved "https://registry.yarnpkg.com/@vue/component-compiler-utils/-/component-compiler-utils-3.3.0.tgz#f9f5fb53464b0c37b2c8d2f3fbfe44df60f61dc9"
@ -3985,11 +3976,6 @@ cssstyle@^2.3.0:
dependencies:
cssom "~0.3.6"
csstype@^3.1.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.0.tgz#4ddcac3718d787cf9df0d1b7d15033925c8f29f2"
integrity sha512-uX1KG+x9h5hIJsaKR9xHUeUraxf8IODOwq9JLNPq6BwB04a/xgpq3rcx47l5BZu5zBPlgD342tdke3Hom/nJRA==
custom-jquery-matchers@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/custom-jquery-matchers/-/custom-jquery-matchers-2.1.0.tgz#e5988fa9715c416b0986b372563f872d9e91e024"
@ -6304,7 +6290,7 @@ hastscript@^7.0.0:
property-information "^6.0.0"
space-separated-tokens "^2.0.0"
he@^1.2.0:
he@^1.1.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f"
integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==
@ -11928,10 +11914,10 @@ vue-hot-reload-api@^2.3.0:
resolved "https://registry.yarnpkg.com/vue-hot-reload-api/-/vue-hot-reload-api-2.3.4.tgz#532955cc1eb208a3d990b3a9f9a70574657e08f2"
integrity sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog==
vue-loader@^15.10:
version "15.10.0"
resolved "https://registry.yarnpkg.com/vue-loader/-/vue-loader-15.10.0.tgz#2a12695c421a2a2cc2138f05a949d04ed086e38b"
integrity sha512-VU6tuO8eKajrFeBzMssFUP9SvakEeeSi1BxdTH5o3+1yUyrldp8IERkSdXlMI2t4kxF2sqYUDsQY+WJBxzBmZg==
vue-loader@15.9.6:
version "15.9.6"
resolved "https://registry.yarnpkg.com/vue-loader/-/vue-loader-15.9.6.tgz#f4bb9ae20c3a8370af3ecf09b8126d38ffdb6b8b"
integrity sha512-j0cqiLzwbeImIC6nVIby2o/ABAWhlppyL/m5oJ67R5MloP0hj/DtFgb0Zmq3J9CG7AJ+AXIvHVnJAPBvrLyuDg==
dependencies:
"@vue/component-compiler-utils" "^3.1.0"
hash-sum "^1.0.2"
@ -11969,13 +11955,13 @@ vue-style-loader@^4.1.0:
hash-sum "^1.0.2"
loader-utils "^1.0.2"
vue-template-compiler@^2.7.10:
version "2.7.10"
resolved "https://registry.yarnpkg.com/vue-template-compiler/-/vue-template-compiler-2.7.10.tgz#9e20f35b2fdccacacf732dd7dedb49bf65f4556b"
integrity sha512-QO+8R9YRq1Gudm8ZMdo/lImZLJVUIAM8c07Vp84ojdDAf8HmPJc7XB556PcXV218k2AkKznsRz6xB5uOjAC4EQ==
vue-template-compiler@2.6.14:
version "2.6.14"
resolved "https://registry.yarnpkg.com/vue-template-compiler/-/vue-template-compiler-2.6.14.tgz#a2f0e7d985670d42c9c9ee0d044fed7690f4f763"
integrity sha512-ODQS1SyMbjKoO1JBJZojSw6FE4qnh9rIpUZn2EUT86FKizx9uH5z6uXiIrm4/Nb/gwxTi/o17ZDEGWAXHvtC7g==
dependencies:
de-indent "^1.0.2"
he "^1.2.0"
he "^1.1.0"
vue-template-es2015-compiler@^1.9.0:
version "1.9.1"
@ -11987,13 +11973,10 @@ vue-virtual-scroll-list@^1.4.7:
resolved "https://registry.yarnpkg.com/vue-virtual-scroll-list/-/vue-virtual-scroll-list-1.4.7.tgz#12ee26833885f5bb4d37dc058085ccf3ce5b5a74"
integrity sha512-R8bk+k7WMGGoFQ9xF0krGCAlZhQjbJOkDUX+YZD2J+sHQWTzDtmTLS6kiIJToOHK1d/8QPGiD8fd9w0lDP4arg==
vue@^2.7.10:
version "2.7.10"
resolved "https://registry.yarnpkg.com/vue/-/vue-2.7.10.tgz#ae516cc6c88e1c424754468844218fdd5e280f40"
integrity sha512-HmFC70qarSHPXcKtW8U8fgIkF6JGvjEmDiVInTkKZP0gIlEPhlVlcJJLkdGIDiNkIeA2zJPQTWJUI4iWe+AVfg==
dependencies:
"@vue/compiler-sfc" "2.7.10"
csstype "^3.1.0"
vue@2.6.14:
version "2.6.14"
resolved "https://registry.yarnpkg.com/vue/-/vue-2.6.14.tgz#e51aa5250250d569a3fbad3a8a5a687d6036e235"
integrity sha512-x2284lgYvjOMj3Za7kqzRcUSxBboHqtgRE2zlos1qWaOye5yUmHn42LB1250NJBLRwEcdrB0JRwyPTEPhfQjiQ==
vuedraggable@^2.23.0:
version "2.23.0"