Add latest changes from gitlab-org/gitlab@master

Author: GitLab Bot
Date: 2021-10-15 15:10:09 +00:00
parent e60f8421ac
commit 4d8132ee4f
38 changed files with 884 additions and 155 deletions

View File

@@ -68,7 +68,7 @@ GitLab is an open source project and we are very happy to accept community contr
## Install a development environment

To work on GitLab itself, we recommend setting up your development environment with [the GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit).
-If you do not use the GitLab Development Kit you need to install and setup all the dependencies yourself, this is a lot of work and error prone.
+If you do not use the GitLab Development Kit you need to install and configure all the dependencies yourself, this is a lot of work and error prone.
One small thing you also have to do when installing it yourself is to copy the example development Puma configuration file:

    cp config/puma.rb.example.development config/puma.rb

View File

@@ -20,7 +20,8 @@ class Admin::DashboardController < Admin::ApplicationController
      Gitlab::Redis::SharedState,
      Gitlab::Redis::Cache,
      Gitlab::Redis::TraceChunks,
-      Gitlab::Redis::RateLimiting
+      Gitlab::Redis::RateLimiting,
+      Gitlab::Redis::Sessions
    ].map(&:version).uniq
  end
  # rubocop: enable CodeReuse/ActiveRecord

View File

@@ -5,11 +5,15 @@ class Groups::DependencyProxyForContainersController < ::Groups::DependencyProxy
  include DependencyProxy::GroupAccess
  include SendFileUpload
  include ::PackagesHelper # for event tracking
+  include WorkhorseRequest

  before_action :ensure_group
-  before_action :ensure_token_granted!
+  before_action :ensure_token_granted!, only: [:blob, :manifest]
  before_action :ensure_feature_enabled!

+  before_action :verify_workhorse_api!, only: [:authorize_upload_blob, :upload_blob]
+  skip_before_action :verify_authenticity_token, only: [:authorize_upload_blob, :upload_blob]
+
  attr_reader :token

  feature_category :dependency_proxy
@@ -38,6 +42,8 @@ class Groups::DependencyProxyForContainersController < ::Groups::DependencyProxy
  end

  def blob
+    return blob_via_workhorse if Feature.enabled?(:dependency_proxy_workhorse, group, default_enabled: :yaml)
+
    result = DependencyProxy::FindOrCreateBlobService
      .new(group, image, token, params[:sha]).execute
@@ -50,11 +56,47 @@ class Groups::DependencyProxyForContainersController < ::Groups::DependencyProxy
    end
  end

+  def authorize_upload_blob
+    set_workhorse_internal_api_content_type
+
+    render json: DependencyProxy::FileUploader.workhorse_authorize(has_length: false)
+  end
+
+  def upload_blob
+    @group.dependency_proxy_blobs.create!(
+      file_name: blob_file_name,
+      file: params[:file],
+      size: params[:file].size
+    )
+
+    event_name = tracking_event_name(object_type: :blob, from_cache: false)
+    track_package_event(event_name, :dependency_proxy, namespace: group, user: auth_user)
+
+    head :ok
+  end
+
  private

+  def blob_via_workhorse
+    blob = @group.dependency_proxy_blobs.find_by_file_name(blob_file_name)
+
+    if blob.present?
+      event_name = tracking_event_name(object_type: :blob, from_cache: true)
+      track_package_event(event_name, :dependency_proxy, namespace: group, user: auth_user)
+
+      send_upload(blob.file)
+    else
+      send_dependency(token, DependencyProxy::Registry.blob_url(image, params[:sha]), blob_file_name)
+    end
+  end
+
+  def blob_file_name
+    @blob_file_name ||= params[:sha].sub('sha256:', '') + '.gz'
+  end
+
  def group
    strong_memoize(:group) do
-      Group.find_by_full_path(params[:group_id], follow_redirects: request.get?)
+      Group.find_by_full_path(params[:group_id], follow_redirects: true)
    end
  end

View File

@@ -17,6 +17,7 @@ class HealthController < ActionController::Base
    Gitlab::HealthChecks::Redis::SharedStateCheck,
    Gitlab::HealthChecks::Redis::TraceChunksCheck,
    Gitlab::HealthChecks::Redis::RateLimitingCheck,
+    Gitlab::HealthChecks::Redis::SessionsCheck,
    Gitlab::HealthChecks::GitalyCheck
  ].freeze

View File

@@ -41,6 +41,15 @@ module WorkhorseHelper
    head :ok
  end

+  def send_dependency(token, url, filename)
+    headers.store(*Gitlab::Workhorse.send_dependency(token, url))
+    headers['Content-Disposition'] =
+      ActionDispatch::Http::ContentDisposition.format(disposition: 'attachment', filename: filename)
+    headers['Content-Type'] = 'application/gzip'
+
+    head :ok
+  end
+
  def set_workhorse_internal_api_content_type
    headers['Content-Type'] = Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE
  end

View File

@@ -1,6 +1,7 @@
# frozen_string_literal: true

class DependencyProxy::FileUploader < GitlabUploader
+  extend Workhorse::UploadPath
  include ObjectStorage::Concern

  before :cache, :set_content_type

View File

@@ -593,7 +593,7 @@
  :feature_category: :continuous_integration
  :has_external_dependencies:
  :urgency: :low
-  :resource_boundary: :cpu
+  :resource_boundary: :unknown
  :weight: 1
  :idempotent:
  :tags: []

View File

@@ -13,7 +13,6 @@ class StuckCiJobsWorker # rubocop:disable Scalability/IdempotentWorker
  data_consistency :always

  feature_category :continuous_integration
-  worker_resource_boundary :cpu

  def perform
    Ci::StuckBuilds::DropRunningWorker.perform_in(20.minutes)

View File

@@ -78,6 +78,7 @@ An example configuration file for Redis is in this directory under the name
| `shared_state` | | Persistent application state |
| `trace_chunks` | `shared_state` | [CI trace chunks](https://docs.gitlab.com/ee/administration/job_logs.html#incremental-logging-architecture) |
| `rate_limiting` | `cache` | [Rate limiting](https://docs.gitlab.com/ee/user/admin_area/settings/user_and_ip_rate_limits.html) state |
+| `sessions` | `shared_state` | [Sessions](https://docs.gitlab.com/ee/development/session.html#redis) |

If no configuration is found, or no URL is found in the configuration
file, the default URL used is:
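
The fallback column above is resolved in code by each wrapper class's `config_fallback`: with no `sessions`-specific file or URL configured, the new store transparently reuses the `shared_state` connection settings. A minimal console sketch of that chain, inferred from the `Gitlab::Redis::Sessions` wrapper added later in this commit:

```ruby
# Illustrative only: with no config/redis.sessions.yml present,
# Sessions falls back to SharedState's configuration.
Gitlab::Redis::Sessions.config_fallback # => Gitlab::Redis::SharedState
```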

View File

@@ -25,6 +25,7 @@ module Gitlab
    require_dependency Rails.root.join('lib/gitlab/redis/shared_state')
    require_dependency Rails.root.join('lib/gitlab/redis/trace_chunks')
    require_dependency Rails.root.join('lib/gitlab/redis/rate_limiting')
+    require_dependency Rails.root.join('lib/gitlab/redis/sessions')
    require_dependency Rails.root.join('lib/gitlab/current_settings')
    require_dependency Rails.root.join('lib/gitlab/middleware/read_only')
    require_dependency Rails.root.join('lib/gitlab/middleware/basic_health_check')

View File

@@ -0,0 +1,8 @@
+---
+name: dependency_proxy_workhorse
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68157
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/339639
+milestone: '14.3'
+type: development
+group: group::source code
+default_enabled: false
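
For context, the controller change above gates the new code path on this flag via `Feature.enabled?(:dependency_proxy_workhorse, group, default_enabled: :yaml)`. A hedged sketch of toggling it during rollout from a Rails console (the group path is a placeholder):

```ruby
# Enable for one group first, then everywhere; disable to roll back.
Feature.enable(:dependency_proxy_workhorse, Group.find_by_full_path('my-group'))
Feature.enable(:dependency_proxy_workhorse)
Feature.disable(:dependency_proxy_workhorse)
```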

View File

@@ -18,3 +18,4 @@ Gitlab::Redis::Queues.with { nil }
Gitlab::Redis::SharedState.with { nil }
Gitlab::Redis::TraceChunks.with { nil }
Gitlab::Redis::RateLimiting.with { nil }
+Gitlab::Redis::Sessions.with { nil }

View File

@@ -146,5 +146,7 @@ scope format: false do
  constraints image: Gitlab::PathRegex.container_image_regex, sha: Gitlab::PathRegex.container_image_blob_sha_regex do
    get 'v2/*group_id/dependency_proxy/containers/*image/manifests/*tag' => 'groups/dependency_proxy_for_containers#manifest' # rubocop:todo Cop/PutGroupRoutesUnderScope
    get 'v2/*group_id/dependency_proxy/containers/*image/blobs/:sha' => 'groups/dependency_proxy_for_containers#blob' # rubocop:todo Cop/PutGroupRoutesUnderScope
+    post 'v2/*group_id/dependency_proxy/containers/*image/blobs/:sha/upload/authorize' => 'groups/dependency_proxy_for_containers#authorize_upload_blob' # rubocop:todo Cop/PutGroupRoutesUnderScope
+    post 'v2/*group_id/dependency_proxy/containers/*image/blobs/:sha/upload' => 'groups/dependency_proxy_for_containers#upload_blob' # rubocop:todo Cop/PutGroupRoutesUnderScope
  end
end
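
The two new POST routes are the callback endpoints Workhorse hits after streaming a blob from the upstream registry. A sketch of how Rails resolves one of them (group, image, and digest are placeholder values, and the exact hash shape is approximate):

```ruby
# Illustrative route check from a console.
Rails.application.routes.recognize_path(
  '/v2/my-group/dependency_proxy/containers/alpine/blobs/sha256:abc123/upload/authorize',
  method: :post
)
# => roughly { controller: 'groups/dependency_proxy_for_containers',
#              action: 'authorize_upload_blob', group_id: 'my-group',
#              image: 'alpine', sha: 'sha256:abc123' }
```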

View File

@@ -16,6 +16,7 @@ GitLab uses [Redis](https://redis.io) for the following distinct purposes:
- To store CI trace chunks.
- As a Pub/Sub queue backend for ActionCable.
- Rate limiting state storage.
+- Sessions.

In most environments (including the GDK), all of these point to the same
Redis instance.

View File

@@ -1,8 +1,7 @@
---
stage: Create
group: Source Code
-info: "To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments"
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
-type: reference, howto
---

# AsciiDoc **(FREE)**
@@ -85,7 +84,7 @@ I believe I shall--no, actually I won't.
**Macros**

```plaintext
-// where c=specialchars, q=quotes, a=attributes, r=replacements, m=macros, p=post_replacements, etc.
+// where c=specialchars, q=quotes, a=attributes, r=replacements, m=macros, p=post_replacements
The European icon:flag[role=blue] is blue & contains pass:[************] arranged in a icon:circle-o[role=yellow].
The pass:c[->] operator is often referred to as the stabby lambda.
Since `pass:[++]` has strong priority in AsciiDoc, you can rewrite pass:c,a,r[C++ => C{pp}].
@@ -151,7 +150,7 @@ This paragraph has a footnote.footnote:[This is the text of the footnote.]
** level 2
*** level 3
**** level 4
-***** etc.
+***** level 5
* back at level 1
+
Attach a block or paragraph to a list item using a list continuation (which you can enclose in an open block).
@@ -240,10 +239,10 @@ include::basics.adoc[]
include::https://example.org/installation.adoc[]
```

-To guarantee good system performance and prevent malicious documents causing
-problems, GitLab enforces a **maximum limit** on the number of include directives
-processed in any one document. Currently a total of 32 documents can be
-included, a number that is inclusive of transitive dependencies.
+To guarantee good system performance and prevent malicious documents from causing
+problems, GitLab enforces a maximum limit on the number of include directives
+processed in any one document. You can include up to 32 documents, which is
+inclusive of transitive dependencies.

### Blocks
@@ -428,7 +427,7 @@ If you're new to using Mermaid or need help identifying issues in your Mermaid c
the [Mermaid Live Editor](https://mermaid-js.github.io/mermaid-live-editor/) is a helpful tool
for creating and resolving issues within Mermaid diagrams.

-In order to generate a diagram or flowchart, you should write your text inside the `mermaid` block:
+To generate a diagram or flowchart, enter your text in a `mermaid` block:

```plaintext
[mermaid]
@@ -447,7 +446,7 @@ Kroki supports more than a dozen diagram libraries.
To make Kroki available in GitLab, a GitLab administrator needs to enable it first.
Read more in the [Kroki integration](../administration/integration/kroki.md) page.

-Once Kroki is enabled, you can create a wide variety of diagrams in AsciiDoc and Markdown documents.
+After Kroki is enabled, you can create diagrams in AsciiDoc and Markdown documents.
Here's an example using a GraphViz diagram:

**AsciiDoc**
@@ -476,7 +475,7 @@ digraph G {
To make PlantUML available in GitLab, a GitLab administrator needs to enable it first.
Read more in [PlantUML & GitLab](../administration/integration/plantuml.md).

-Once enabled, you should write your text inside the `plantuml` block:
+After PlantUML is enabled, enter your text in a `plantuml` block:

```plaintext
[plantuml]

View File

@@ -199,3 +199,18 @@ NOTE:
The special handling for the `gitlab-deploy-token` deploy token is not
implemented for group deploy tokens. To make the group-level deploy token available for
CI/CD jobs, the `CI_DEPLOY_USER` and `CI_DEPLOY_PASSWORD` variables should be set under **Settings** to the name and token of the group deploy token respectively.
+
+## Troubleshooting
+
+### Group deploy tokens and LFS
+
+A bug
+[prevents Group Deploy Tokens from cloning LFS objects](https://gitlab.com/gitlab-org/gitlab/-/issues/235398).
+If you receive `404 Not Found` errors together with the following error,
+use a Project Deploy Token to work around the bug:
+
+```plaintext
+api error: Repository or object not found:
+https://<URL-with-token>.git/info/lfs/objects/batch
+Check that it exists and that you have proper access to it
+```

View File

@@ -22,7 +22,8 @@ module Gitlab
        ::Gitlab::HealthChecks::Redis::QueuesCheck.check_up &&
        ::Gitlab::HealthChecks::Redis::SharedStateCheck.check_up &&
        ::Gitlab::HealthChecks::Redis::TraceChunksCheck.check_up &&
-        ::Gitlab::HealthChecks::Redis::RateLimitingCheck.check_up
+        ::Gitlab::HealthChecks::Redis::RateLimitingCheck.check_up &&
+        ::Gitlab::HealthChecks::Redis::SessionsCheck.check_up
      end
    end
  end

View File

@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+module Gitlab
+  module HealthChecks
+    module Redis
+      class SessionsCheck
+        extend SimpleAbstractCheck
+
+        class << self
+          def check_up
+            check
+          end
+
+          private
+
+          def metric_prefix
+            'redis_sessions_ping'
+          end
+
+          def successful?(result)
+            result == 'PONG'
+          end
+
+          # rubocop: disable CodeReuse/ActiveRecord
+          def check
+            catch_timeout 10.seconds do
+              Gitlab::Redis::Sessions.with(&:ping)
+            end
+          end
+          # rubocop: enable CodeReuse/ActiveRecord
+        end
+      end
+    end
+  end
+end
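
Like the other Redis probes, this check is exercised through its class method; a minimal sketch based on the `check` implementation above:

```ruby
# Illustrative probe call, e.g. from a console:
Gitlab::HealthChecks::Redis::SessionsCheck.check_up
# => 'PONG' when the sessions Redis responds within the 10-second timeout
```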

View File

@@ -10,8 +10,9 @@ module Gitlab
    SharedState = Class.new(RedisBase).enable_redis_cluster_validation
    TraceChunks = Class.new(RedisBase).enable_redis_cluster_validation
    RateLimiting = Class.new(RedisBase).enable_redis_cluster_validation
+    Sessions = Class.new(RedisBase).enable_redis_cluster_validation

-    STORAGES = [ActionCable, Cache, Queues, SharedState, TraceChunks, RateLimiting].freeze
+    STORAGES = [ActionCable, Cache, Queues, SharedState, TraceChunks, RateLimiting, Sessions].freeze

    # Milliseconds represented in seconds (from 1 millisecond to 2 seconds).
    QUERY_TIME_BUCKETS = [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2].freeze

View File

@@ -158,6 +158,7 @@ module Gitlab
          ::Gitlab.config.uploads.storage_path,
          ::JobArtifactUploader.workhorse_upload_path,
          ::LfsObjectUploader.workhorse_upload_path,
+          ::DependencyProxy::FileUploader.workhorse_upload_path,
          File.join(Rails.root, 'public/uploads/tmp')
        ] + package_allowed_paths
      end

View File

@@ -0,0 +1,12 @@
+# frozen_string_literal: true
+
+module Gitlab
+  module Redis
+    class Sessions < ::Gitlab::Redis::Wrapper
+      # The data we store on Sessions used to be stored on SharedState.
+      def self.config_fallback
+        SharedState
+      end
+    end
+  end
+end
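
Callers use the same `.with` block API as the other Redis wrappers (the connection-warming initializer earlier in this commit already does). A minimal usage sketch with placeholder key and value:

```ruby
# Illustrative read/write through the new sessions store.
Gitlab::Redis::Sessions.with do |redis|
  redis.set('session:example', 'value', ex: 60)
  redis.get('session:example') # => "value"
end
```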

View File

@@ -170,6 +170,18 @@ module Gitlab
        ]
      end

+      def send_dependency(token, url)
+        params = {
+          'Header' => { Authorization: ["Bearer #{token}"] },
+          'Url' => url
+        }
+
+        [
+          SEND_DATA_HEADER,
+          "send-dependency:#{encode(params)}"
+        ]
+      end
+
      def channel_websocket(channel)
        details = {
          'Channel' => {
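
The pair returned by `send_dependency` becomes a response header that Workhorse intercepts; `SEND_DATA_HEADER` resolves to `Gitlab-Workhorse-Send-Data` (the Go test later in this commit sets the same header) and `encode` base64-encodes the JSON params. A sketch with placeholder token and URL:

```ruby
# Illustrative output of the new helper.
header, value = Gitlab::Workhorse.send_dependency('abcd1234', 'https://registry.example.com/blob')
header # => "Gitlab-Workhorse-Send-Data"
value  # => "send-dependency:eyJIZWFkZXIiOnsi..." (truncated base64 JSON)
```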

View File

@@ -5,6 +5,7 @@ require 'spec_helper'
RSpec.describe Groups::DependencyProxyForContainersController do
  include HttpBasicAuthHelpers
  include DependencyProxyHelpers
+  include WorkhorseHelpers

  let_it_be(:user) { create(:user) }
  let_it_be_with_reload(:group) { create(:group, :private) }
@@ -242,16 +243,9 @@ RSpec.describe Groups::DependencyProxyForContainersController do
  end

  describe 'GET #blob' do
-    let_it_be(:blob) { create(:dependency_proxy_blob) }
+    let(:blob) { create(:dependency_proxy_blob, group: group) }
    let(:blob_sha) { blob.file_name.sub('.gz', '') }
-    let(:blob_response) { { status: :success, blob: blob, from_cache: false } }
-
-    before do
-      allow_next_instance_of(DependencyProxy::FindOrCreateBlobService) do |instance|
-        allow(instance).to receive(:execute).and_return(blob_response)
-      end
-    end

    subject { get_blob }
@@ -264,40 +258,31 @@ RSpec.describe Groups::DependencyProxyForContainersController do
    it_behaves_like 'without permission'
    it_behaves_like 'feature flag disabled with private group'

-    context 'remote blob request fails' do
-      let(:blob_response) do
-        {
-          status: :error,
-          http_status: 400,
-          message: ''
-        }
-      end
-
-      before do
-        group.add_guest(user)
-      end
-
-      it 'proxies status from the remote blob request', :aggregate_failures do
-        subject
-
-        expect(response).to have_gitlab_http_status(:bad_request)
-        expect(response.body).to be_empty
-      end
-    end
-
    context 'a valid user' do
      before do
        group.add_guest(user)
      end

      it_behaves_like 'a successful blob pull'
-      it_behaves_like 'a package tracking event', described_class.name, 'pull_blob'
+      it_behaves_like 'a package tracking event', described_class.name, 'pull_blob_from_cache'

-      context 'with a cache entry' do
-        let(:blob_response) { { status: :success, blob: blob, from_cache: true } }
-
-        it_behaves_like 'returning response status', :success
-        it_behaves_like 'a package tracking event', described_class.name, 'pull_blob_from_cache'
+      context 'when cache entry does not exist' do
+        let(:blob_sha) { 'a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4' }
+
+        it 'returns Workhorse send-dependency instructions' do
+          subject
+
+          send_data_type, send_data = workhorse_send_data
+          header, url = send_data.values_at('Header', 'Url')
+
+          expect(send_data_type).to eq('send-dependency')
+          expect(header).to eq("Authorization" => ["Bearer abcd1234"])
+          expect(url).to eq(DependencyProxy::Registry.blob_url('alpine', blob_sha))
+          expect(response.headers['Content-Type']).to eq('application/gzip')
+          expect(response.headers['Content-Disposition']).to eq(
+            ActionDispatch::Http::ContentDisposition.format(disposition: 'attachment', filename: blob.file_name)
+          )
+        end
      end
    end
@@ -319,6 +304,74 @@ RSpec.describe Groups::DependencyProxyForContainersController do
        it_behaves_like 'a successful blob pull'
      end
    end

+    context 'when dependency_proxy_workhorse disabled' do
+      let(:blob_response) { { status: :success, blob: blob, from_cache: false } }
+
+      before do
+        stub_feature_flags(dependency_proxy_workhorse: false)
+
+        allow_next_instance_of(DependencyProxy::FindOrCreateBlobService) do |instance|
+          allow(instance).to receive(:execute).and_return(blob_response)
+        end
+      end
+
+      context 'remote blob request fails' do
+        let(:blob_response) do
+          {
+            status: :error,
+            http_status: 400,
+            message: ''
+          }
+        end
+
+        before do
+          group.add_guest(user)
+        end
+
+        it 'proxies status from the remote blob request', :aggregate_failures do
+          subject
+
+          expect(response).to have_gitlab_http_status(:bad_request)
+          expect(response.body).to be_empty
+        end
+      end
+
+      context 'a valid user' do
+        before do
+          group.add_guest(user)
+        end
+
+        it_behaves_like 'a successful blob pull'
+        it_behaves_like 'a package tracking event', described_class.name, 'pull_blob'
+
+        context 'with a cache entry' do
+          let(:blob_response) { { status: :success, blob: blob, from_cache: true } }
+
+          it_behaves_like 'returning response status', :success
+          it_behaves_like 'a package tracking event', described_class.name, 'pull_blob_from_cache'
+        end
+      end
+
+      context 'a valid deploy token' do
+        let_it_be(:user) { create(:deploy_token, :group, :dependency_proxy_scopes) }
+        let_it_be(:group_deploy_token) { create(:group_deploy_token, deploy_token: user, group: group) }
+
+        it_behaves_like 'a successful blob pull'
+
+        context 'pulling from a subgroup' do
+          let_it_be_with_reload(:parent_group) { create(:group) }
+          let_it_be_with_reload(:group) { create(:group, parent: parent_group) }
+
+          before do
+            parent_group.create_dependency_proxy_setting!(enabled: true)
+            group_deploy_token.update_column(:group_id, parent_group.id)
+          end
+
+          it_behaves_like 'a successful blob pull'
+        end
+      end
+    end
  end

  it_behaves_like 'not found when disabled'
@@ -328,6 +381,61 @@ RSpec.describe Groups::DependencyProxyForContainersController do
    end
  end

+  describe 'GET #authorize_upload_blob' do
+    let(:blob_sha) { 'a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4' }
+
+    subject(:authorize_upload_blob) do
+      request.headers.merge!(workhorse_internal_api_request_header)
+
+      get :authorize_upload_blob, params: { group_id: group.to_param, image: 'alpine', sha: blob_sha }
+    end
+
+    it_behaves_like 'without permission'
+
+    context 'with a valid user' do
+      before do
+        group.add_guest(user)
+      end
+
+      it 'sends Workhorse file upload instructions', :aggregate_failures do
+        authorize_upload_blob
+
+        expect(response.headers['Content-Type']).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
+        expect(json_response['TempPath']).to eq(DependencyProxy::FileUploader.workhorse_local_upload_path)
+      end
+    end
+  end
+
+  describe 'GET #upload_blob' do
+    let(:blob_sha) { 'a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4' }
+    let(:file) { fixture_file_upload("spec/fixtures/dependency_proxy/#{blob_sha}.gz", 'application/gzip') }
+
+    subject do
+      request.headers.merge!(workhorse_internal_api_request_header)
+
+      get :upload_blob, params: {
+        group_id: group.to_param,
+        image: 'alpine',
+        sha: blob_sha,
+        file: file
+      }
+    end
+
+    it_behaves_like 'without permission'
+
+    context 'with a valid user' do
+      before do
+        group.add_guest(user)
+
+        expect_next_found_instance_of(Group) do |instance|
+          expect(instance).to receive_message_chain(:dependency_proxy_blobs, :create!)
+        end
+      end
+
+      it_behaves_like 'a package tracking event', described_class.name, 'pull_blob'
+    end
+  end
+
  def enable_dependency_proxy
    group.create_dependency_proxy_setting!(enabled: true)
  end

View File

@@ -0,0 +1,108 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe 'Group Dependency Proxy for containers', :js do
+  include DependencyProxyHelpers
+
+  include_context 'file upload requests helpers'
+
+  let_it_be(:user) { create(:user) }
+  let_it_be(:group) { create(:group) }
+  let_it_be(:sha) { 'a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4' }
+  let_it_be(:content) { fixture_file_upload("spec/fixtures/dependency_proxy/#{sha}.gz").read }
+
+  let(:image) { 'alpine' }
+  let(:url) { capybara_url("/v2/#{group.full_path}/dependency_proxy/containers/#{image}/blobs/sha256:#{sha}") }
+  let(:token) { 'token' }
+  let(:headers) { { 'Authorization' => "Bearer #{build_jwt(user).encoded}" } }
+
+  subject do
+    HTTParty.get(url, headers: headers)
+  end
+
+  def run_server(handler)
+    default_server = Capybara.server
+
+    Capybara.server = Capybara.servers[:puma]
+    server = Capybara::Server.new(handler)
+    server.boot
+    server
+  ensure
+    Capybara.server = default_server
+  end
+
+  let_it_be(:external_server) do
+    handler = lambda do |env|
+      if env['REQUEST_PATH'] == '/token'
+        [200, {}, [{ token: 'token' }.to_json]]
+      else
+        [200, {}, [content]]
+      end
+    end
+
+    run_server(handler)
+  end
+
+  before do
+    stub_application_setting(allow_local_requests_from_web_hooks_and_services: true)
+    stub_config(dependency_proxy: { enabled: true })
+    group.add_developer(user)
+
+    stub_const("DependencyProxy::Registry::AUTH_URL", external_server.base_url)
+    stub_const("DependencyProxy::Registry::LIBRARY_URL", external_server.base_url)
+  end
+
+  shared_examples 'responds with the file' do
+    it 'sends file' do
+      expect(subject.code).to eq(200)
+      expect(subject.body).to eq(content)
+      expect(subject.headers.to_h).to include(
+        "content-type" => ["application/gzip"],
+        "content-disposition" => ["attachment; filename=\"#{sha}.gz\"; filename*=UTF-8''#{sha}.gz"],
+        "content-length" => ["32"]
+      )
+    end
+  end
+
+  shared_examples 'caches the file' do
+    it 'caches the file' do
+      expect { subject }.to change {
+        group.dependency_proxy_blobs.count
+      }.from(0).to(1)
+
+      expect(subject.code).to eq(200)
+      expect(group.dependency_proxy_blobs.first.file.read).to eq(content)
+    end
+  end
+
+  context 'fetching a blob' do
+    context 'when the blob is cached for the group' do
+      let!(:dependency_proxy_blob) { create(:dependency_proxy_blob, group: group) }
+
+      it_behaves_like 'responds with the file'
+
+      context 'dependency_proxy_workhorse feature flag disabled' do
+        before do
+          stub_feature_flags({ dependency_proxy_workhorse: false })
+        end
+
+        it_behaves_like 'responds with the file'
+      end
+    end
+  end
+
+  context 'when the blob must be downloaded' do
+    it_behaves_like 'responds with the file'
+    it_behaves_like 'caches the file'
+
+    context 'dependency_proxy_workhorse feature flag disabled' do
+      before do
+        stub_feature_flags({ dependency_proxy_workhorse: false })
+      end
+
+      it_behaves_like 'responds with the file'
+      it_behaves_like 'caches the file'
+    end
+  end
+end

View File

@@ -18,6 +18,7 @@ RSpec.describe Gitlab::HealthChecks::Probes::Collection do
      Gitlab::HealthChecks::Redis::SharedStateCheck,
      Gitlab::HealthChecks::Redis::TraceChunksCheck,
      Gitlab::HealthChecks::Redis::RateLimitingCheck,
+      Gitlab::HealthChecks::Redis::SessionsCheck,
      Gitlab::HealthChecks::GitalyCheck
    ]
  end
end end

View File

@@ -0,0 +1,8 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+require_relative '../simple_check_shared'
+
+RSpec.describe Gitlab::HealthChecks::Redis::SessionsCheck do
+  include_examples 'simple_check', 'redis_sessions_ping', 'RedisSessions', 'PONG'
+end

View File

@@ -77,7 +77,8 @@ RSpec.describe Gitlab::Instrumentation::Redis do
        details_row.merge(storage: 'Queues'),
        details_row.merge(storage: 'SharedState'),
        details_row.merge(storage: 'TraceChunks'),
-        details_row.merge(storage: 'RateLimiting'))
+        details_row.merge(storage: 'RateLimiting'),
+        details_row.merge(storage: 'Sessions'))
    end
  end
end
end end

View File

@@ -16,6 +16,7 @@ RSpec.describe Gitlab::Middleware::Multipart::Handler do
      ::Gitlab.config.uploads.storage_path,
      ::JobArtifactUploader.workhorse_upload_path,
      ::LfsObjectUploader.workhorse_upload_path,
+      ::DependencyProxy::FileUploader.workhorse_upload_path,
      File.join(Rails.root, 'public/uploads/tmp')
    ]
  end

View File

@@ -3,53 +3,5 @@
require 'spec_helper'

RSpec.describe Gitlab::Redis::RateLimiting do
-  let(:instance_specific_config_file) { "config/redis.rate_limiting.yml" }
-  let(:environment_config_file_name) { "GITLAB_REDIS_RATE_LIMITING_CONFIG_FILE" }
-  let(:cache_config_file) { nil }
-
-  before do
-    allow(Gitlab::Redis::Cache).to receive(:config_file_name).and_return(cache_config_file)
-  end
-
-  include_examples "redis_shared_examples"
-
-  describe '.config_file_name' do
-    subject { described_class.config_file_name }
-
-    let(:rails_root) { Dir.mktmpdir('redis_shared_examples') }
-
-    before do
-      # Undo top-level stub of config_file_name because we are testing that method now.
-      allow(described_class).to receive(:config_file_name).and_call_original
-
-      allow(described_class).to receive(:rails_root).and_return(rails_root)
-
-      FileUtils.mkdir_p(File.join(rails_root, 'config'))
-    end
-
-    after do
-      FileUtils.rm_rf(rails_root)
-    end
-
-    context 'when there is only a resque.yml' do
-      before do
-        FileUtils.touch(File.join(rails_root, 'config/resque.yml'))
-      end
-
-      it { expect(subject).to eq("#{rails_root}/config/resque.yml") }
-
-      context 'and there is a global env override' do
-        before do
-          stub_env('GITLAB_REDIS_CONFIG_FILE', 'global override')
-        end
-
-        it { expect(subject).to eq('global override') }
-
-        context 'and Cache has a different config file' do
-          let(:cache_config_file) { 'cache config file' }
-
-          it { expect(subject).to eq('cache config file') }
-        end
-      end
-    end
-  end
+  include_examples "redis_new_instance_shared_examples", 'rate_limiting', Gitlab::Redis::Cache
end

View File

@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::Redis::Sessions do
+  include_examples "redis_new_instance_shared_examples", 'sessions', Gitlab::Redis::SharedState
+end

View File

@@ -3,53 +3,5 @@
require 'spec_helper'

RSpec.describe Gitlab::Redis::TraceChunks do
-  let(:instance_specific_config_file) { "config/redis.trace_chunks.yml" }
-  let(:environment_config_file_name) { "GITLAB_REDIS_TRACE_CHUNKS_CONFIG_FILE" }
-  let(:shared_state_config_file) { nil }
-
-  before do
-    allow(Gitlab::Redis::SharedState).to receive(:config_file_name).and_return(shared_state_config_file)
-  end
-
-  include_examples "redis_shared_examples"
-
-  describe '.config_file_name' do
-    subject { described_class.config_file_name }
-
-    let(:rails_root) { Dir.mktmpdir('redis_shared_examples') }
-
-    before do
-      # Undo top-level stub of config_file_name because we are testing that method now.
-      allow(described_class).to receive(:config_file_name).and_call_original
-
-      allow(described_class).to receive(:rails_root).and_return(rails_root)
-
-      FileUtils.mkdir_p(File.join(rails_root, 'config'))
-    end
-
-    after do
-      FileUtils.rm_rf(rails_root)
-    end
-
-    context 'when there is only a resque.yml' do
-      before do
-        FileUtils.touch(File.join(rails_root, 'config/resque.yml'))
-      end
-
-      it { expect(subject).to eq("#{rails_root}/config/resque.yml") }
-
-      context 'and there is a global env override' do
-        before do
-          stub_env('GITLAB_REDIS_CONFIG_FILE', 'global override')
-        end
-
-        it { expect(subject).to eq('global override') }
-
-        context 'and SharedState has a different config file' do
-          let(:shared_state_config_file) { 'shared state config file' }
-
-          it { expect(subject).to eq('shared state config file') }
-        end
-      end
-    end
-  end
+  include_examples "redis_new_instance_shared_examples", 'trace_chunks', Gitlab::Redis::SharedState
end

View File

@@ -46,4 +46,12 @@ RSpec.configure do |config|
    redis_rate_limiting_cleanup!
  end

+  config.around(:each, :clean_gitlab_redis_sessions) do |example|
+    redis_sessions_cleanup!
+
+    example.run
+
+    redis_sessions_cleanup!
+  end
end

View File

@@ -27,4 +27,9 @@ module RedisHelpers
  def redis_rate_limiting_cleanup!
    Gitlab::Redis::RateLimiting.with(&:flushdb)
  end
+
+  # Usage: session state
+  def redis_sessions_cleanup!
+    Gitlab::Redis::Sessions.with(&:flushdb)
+  end
end

View File

@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.shared_examples "redis_new_instance_shared_examples" do |name, fallback_class|
+  let(:instance_specific_config_file) { "config/redis.#{name}.yml" }
+  let(:environment_config_file_name) { "GITLAB_REDIS_#{name.upcase}_CONFIG_FILE" }
+  let(:fallback_config_file) { nil }
+
+  before do
+    allow(fallback_class).to receive(:config_file_name).and_return(fallback_config_file)
+  end
+
+  include_examples "redis_shared_examples"
+
+  describe '.config_file_name' do
+    subject { described_class.config_file_name }
+
+    let(:rails_root) { Dir.mktmpdir('redis_shared_examples') }
+
+    before do
+      # Undo top-level stub of config_file_name because we are testing that method now.
+      allow(described_class).to receive(:config_file_name).and_call_original
+
+      allow(described_class).to receive(:rails_root).and_return(rails_root)
+
+      FileUtils.mkdir_p(File.join(rails_root, 'config'))
+    end
+
+    after do
+      FileUtils.rm_rf(rails_root)
+    end
+
+    context 'when there is only a resque.yml' do
+      before do
+        FileUtils.touch(File.join(rails_root, 'config/resque.yml'))
+      end
+
+      it { expect(subject).to eq("#{rails_root}/config/resque.yml") }
+
+      context 'and there is a global env override' do
+        before do
+          stub_env('GITLAB_REDIS_CONFIG_FILE', 'global override')
+        end
+
+        it { expect(subject).to eq('global override') }
+
+        context "and #{fallback_class.name.demodulize} has a different config file" do
+          let(:fallback_config_file) { 'fallback config file' }
+
+          it { expect(subject).to eq('fallback config file') }
+        end
+      end
+    end
+  end
+end

View File

@@ -0,0 +1,123 @@
+package dependencyproxy
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"time"
+
+	"gitlab.com/gitlab-org/labkit/correlation"
+	"gitlab.com/gitlab-org/labkit/log"
+	"gitlab.com/gitlab-org/labkit/tracing"
+
+	"gitlab.com/gitlab-org/gitlab/workhorse/internal/helper"
+	"gitlab.com/gitlab-org/gitlab/workhorse/internal/senddata"
+)
+
+// httpTransport defines a http.Transport with values
+// that are more restrictive than for http.DefaultTransport,
+// they define shorter TLS Handshake, and more aggressive connection closing
+// to prevent the connection hanging and reduce FD usage
+var httpTransport = tracing.NewRoundTripper(correlation.NewInstrumentedRoundTripper(&http.Transport{
+	Proxy: http.ProxyFromEnvironment,
+	DialContext: (&net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 10 * time.Second,
+	}).DialContext,
+	MaxIdleConns:          2,
+	IdleConnTimeout:       30 * time.Second,
+	TLSHandshakeTimeout:   10 * time.Second,
+	ExpectContinueTimeout: 10 * time.Second,
+	ResponseHeaderTimeout: 30 * time.Second,
+}))
+
+var httpClient = &http.Client{
+	Transport: httpTransport,
+}
+
+type Injector struct {
+	senddata.Prefix
+	uploadHandler http.Handler
+}
+
+type entryParams struct {
+	Url    string
+	Header http.Header
+}
+
+type nullResponseWriter struct {
+	header http.Header
+	status int
+}
+
+func (nullResponseWriter) Write(p []byte) (int, error) {
+	return len(p), nil
+}
+
+func (w *nullResponseWriter) Header() http.Header {
+	return w.header
+}
+
+func (w *nullResponseWriter) WriteHeader(status int) {
+	if w.status == 0 {
+		w.status = status
+	}
+}
+
+func NewInjector() *Injector {
+	return &Injector{Prefix: "send-dependency:"}
+}
+
+func (p *Injector) SetUploadHandler(uploadHandler http.Handler) {
+	p.uploadHandler = uploadHandler
+}
+
+func (p *Injector) Inject(w http.ResponseWriter, r *http.Request, sendData string) {
+	dependencyResponse, err := p.fetchUrl(r.Context(), sendData)
+	if err != nil {
+		helper.Fail500(w, r, err)
+		return
+	}
+	defer dependencyResponse.Body.Close()
+	if dependencyResponse.StatusCode >= 400 {
+		w.WriteHeader(dependencyResponse.StatusCode)
+		io.Copy(w, dependencyResponse.Body)
+		return
+	}
+
+	teeReader := io.TeeReader(dependencyResponse.Body, w)
+	saveFileRequest, err := http.NewRequestWithContext(r.Context(), "POST", r.URL.String()+"/upload", teeReader)
+	if err != nil {
+		helper.Fail500(w, r, fmt.Errorf("dependency proxy: failed to create request: %w", err))
+	}
+	saveFileRequest.Header = helper.HeaderClone(r.Header)
+	saveFileRequest.ContentLength = dependencyResponse.ContentLength
+
+	w.Header().Del("Content-Length")
+
+	nrw := &nullResponseWriter{header: make(http.Header)}
+	p.uploadHandler.ServeHTTP(nrw, saveFileRequest)
+
+	if nrw.status != http.StatusOK {
+		fields := log.Fields{"code": nrw.status}
+
+		helper.Fail500WithFields(nrw, r, fmt.Errorf("dependency proxy: failed to upload file"), fields)
+	}
+}
+
+func (p *Injector) fetchUrl(ctx context.Context, sendData string) (*http.Response, error) {
+	var params entryParams
+	if err := p.Unpack(&params, sendData); err != nil {
+		return nil, fmt.Errorf("dependency proxy: unpack sendData: %v", err)
+	}
+
+	r, err := http.NewRequestWithContext(ctx, "GET", params.Url, nil)
+	if err != nil {
+		return nil, fmt.Errorf("dependency proxy: failed to fetch dependency: %v", err)
+	}
+	r.Header = params.Header
+
+	return httpClient.Do(r)
+}

View File

@@ -0,0 +1,183 @@
+package dependencyproxy
+
+import (
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"gitlab.com/gitlab-org/gitlab/workhorse/internal/api"
+	"gitlab.com/gitlab-org/gitlab/workhorse/internal/testhelper"
+	"gitlab.com/gitlab-org/gitlab/workhorse/internal/upload"
+)
+
+type fakeUploadHandler struct {
+	request *http.Request
+	body    []byte
+	handler func(w http.ResponseWriter, r *http.Request)
+}
+
+func (f *fakeUploadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	f.request = r
+
+	f.body, _ = io.ReadAll(r.Body)
+
+	f.handler(w, r)
+}
+
+type errWriter struct{ writes int }
+
+func (w *errWriter) Header() http.Header { return nil }
+func (w *errWriter) WriteHeader(h int)   {}
+
+// First call of Write function succeeds while all the subsequent ones fail
+func (w *errWriter) Write(p []byte) (int, error) {
+	if w.writes > 0 {
+		return 0, fmt.Errorf("client error")
+	}
+
+	w.writes++
+
+	return len(p), nil
+}
+
+type fakePreAuthHandler struct{}
+
+func (f *fakePreAuthHandler) PreAuthorizeHandler(handler api.HandleFunc, _ string) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		handler(w, r, &api.Response{TempPath: "../../testdata/scratch"})
+	})
+}
+
+func TestInject(t *testing.T) {
+	contentLength := 32768 + 1
+	content := strings.Repeat("p", contentLength)
+
+	testCases := []struct {
+		desc                string
+		responseWriter      http.ResponseWriter
+		contentLength       int
+		handlerMustBeCalled bool
+	}{
+		{
+			desc:                "the uploading successfully finalized",
+			responseWriter:      httptest.NewRecorder(),
+			contentLength:       contentLength,
+			handlerMustBeCalled: true,
+		}, {
+			desc:                "a user failed to receive the response",
+			responseWriter:      &errWriter{},
+			contentLength:       contentLength,
+			handlerMustBeCalled: false,
+		}, {
+			desc:                "the origin resource server returns partial response",
+			responseWriter:      httptest.NewRecorder(),
+			contentLength:       contentLength + 1,
+			handlerMustBeCalled: false,
+		},
+	}
+	testhelper.ConfigureSecret()
+
+	for _, tc := range testCases {
+		originResourceServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.Header().Set("Content-Length", strconv.Itoa(tc.contentLength))
+			w.Write([]byte(content))
+		}))
+		defer originResourceServer.Close()
+
+		// BodyUploader expects http.Handler as its second param, we can create a stub function and verify that
+		// it's only called for successful requests
+		handlerIsCalled := false
+		handlerFunc := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { handlerIsCalled = true })
+
+		bodyUploader := upload.BodyUploader(&fakePreAuthHandler{}, handlerFunc, &upload.DefaultPreparer{})
+
+		injector := NewInjector()
+		injector.SetUploadHandler(bodyUploader)
+
+		r := httptest.NewRequest("GET", "/target", nil)
+		sendData := base64.StdEncoding.EncodeToString([]byte(`{"Token": "token", "Url": "` + originResourceServer.URL + `/url"}`))
+
+		injector.Inject(tc.responseWriter, r, sendData)
+
+		require.Equal(t, tc.handlerMustBeCalled, handlerIsCalled, "a partial file must not be saved")
+	}
+}
+
+func TestSuccessfullRequest(t *testing.T) {
+	content := []byte("result")
+	originResourceServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Length", strconv.Itoa(len(content)))
+		w.Write(content)
+	}))
+
+	uploadHandler := &fakeUploadHandler{
+		handler: func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(200)
+		},
+	}
+
+	injector := NewInjector()
+	injector.SetUploadHandler(uploadHandler)
+
+	response := makeRequest(injector, `{"Token": "token", "Url": "`+originResourceServer.URL+`/url"}`)
+
+	require.Equal(t, "/target/upload", uploadHandler.request.URL.Path)
+	require.Equal(t, int64(6), uploadHandler.request.ContentLength)
+	require.Equal(t, content, uploadHandler.body)
+
+	require.Equal(t, 200, response.Code)
+	require.Equal(t, string(content), response.Body.String())
+}
+
+func TestIncorrectSendData(t *testing.T) {
+	response := makeRequest(NewInjector(), "")
+
+	require.Equal(t, 500, response.Code)
+	require.Equal(t, "Internal server error\n", response.Body.String())
+}
+
+func TestIncorrectSendDataUrl(t *testing.T) {
+	response := makeRequest(NewInjector(), `{"Token": "token", "Url": "url"}`)
+
+	require.Equal(t, 500, response.Code)
+	require.Equal(t, "Internal server error\n", response.Body.String())
+}
+
+func TestFailedOriginServer(t *testing.T) {
+	originResourceServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(404)
+		w.Write([]byte("Not found"))
+	}))
+
+	uploadHandler := &fakeUploadHandler{
+		handler: func(w http.ResponseWriter, r *http.Request) {
+			require.FailNow(t, "the error response must not be uploaded")
+		},
+	}
+
+	injector := NewInjector()
+	injector.SetUploadHandler(uploadHandler)
+
+	response := makeRequest(injector, `{"Token": "token", "Url": "`+originResourceServer.URL+`/url"}`)
+
+	require.Equal(t, 404, response.Code)
+	require.Equal(t, "Not found", response.Body.String())
+}
+
+func makeRequest(injector *Injector, data string) *httptest.ResponseRecorder {
+	w := httptest.NewRecorder()
+	r := httptest.NewRequest("GET", "/target", nil)
+
+	sendData := base64.StdEncoding.EncodeToString([]byte(data))
+	injector.Inject(w, r, sendData)
+
+	return w
+}

View File

@@ -16,6 +16,7 @@ import (
	"gitlab.com/gitlab-org/gitlab/workhorse/internal/builds"
	"gitlab.com/gitlab-org/gitlab/workhorse/internal/channel"
	"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
+	"gitlab.com/gitlab-org/gitlab/workhorse/internal/dependencyproxy"
	"gitlab.com/gitlab-org/gitlab/workhorse/internal/git"
	"gitlab.com/gitlab-org/gitlab/workhorse/internal/helper"
	"gitlab.com/gitlab-org/gitlab/workhorse/internal/imageresizer"
@@ -171,7 +172,7 @@ func (ro *routeEntry) isMatch(cleanedPath string, req *http.Request) bool {
	return ok
}

-func buildProxy(backend *url.URL, version string, rt http.RoundTripper, cfg config.Config) http.Handler {
+func buildProxy(backend *url.URL, version string, rt http.RoundTripper, cfg config.Config, dependencyProxyInjector *dependencyproxy.Injector) http.Handler {
	proxier := proxypkg.NewProxy(backend, version, rt)

	return senddata.SendData(
@@ -184,6 +185,7 @@ func buildProxy(backend *url.URL, version string, rt http.RoundTripper, cfg conf
		artifacts.SendEntry,
		sendurl.SendURL,
		imageresizer.NewResizer(cfg),
+		dependencyProxyInjector,
	)
}

@@ -194,7 +196,8 @@ func buildProxy(backend *url.URL, version string, rt http.RoundTripper, cfg conf
func configureRoutes(u *upstream) {
	api := u.APIClient
	static := &staticpages.Static{DocumentRoot: u.DocumentRoot, Exclude: staticExclude}
-	proxy := buildProxy(u.Backend, u.Version, u.RoundTripper, u.Config)
+	dependencyProxyInjector := dependencyproxy.NewInjector()
+	proxy := buildProxy(u.Backend, u.Version, u.RoundTripper, u.Config, dependencyProxyInjector)
	cableProxy := proxypkg.NewProxy(u.CableBackend, u.Version, u.CableRoundTripper)

	assetsNotFoundHandler := NotFoundUnless(u.DevelopmentMode, proxy)
@@ -208,7 +211,7 @@ func configureRoutes(u *upstream) {
	}
	signingTripper := secret.NewRoundTripper(u.RoundTripper, u.Version)
-	signingProxy := buildProxy(u.Backend, u.Version, signingTripper, u.Config)
+	signingProxy := buildProxy(u.Backend, u.Version, signingTripper, u.Config, dependencyProxyInjector)

	preparers := createUploadPreparers(u.Config)
	uploadPath := path.Join(u.DocumentRoot, "uploads/tmp")
@@ -216,6 +219,8 @@ func configureRoutes(u *upstream) {
	ciAPIProxyQueue := queueing.QueueRequests("ci_api_job_requests", uploadAccelerateProxy, u.APILimit, u.APIQueueLimit, u.APIQueueTimeout)
	ciAPILongPolling := builds.RegisterHandler(ciAPIProxyQueue, redis.WatchKey, u.APICILongPollingDuration)

+	dependencyProxyInjector.SetUploadHandler(upload.BodyUploader(api, signingProxy, preparers.packages))
+
	// Serve static files or forward the requests
	defaultUpstream := static.ServeExisting(
		u.URLPrefix,

View File

@@ -950,3 +950,72 @@ func TestHealthChecksUnreachable(t *testing.T) {
		})
	}
}
+
+func TestDependencyProxyInjector(t *testing.T) {
+	token := "token"
+	bodyLength := 4096
+	expectedBody := strings.Repeat("p", bodyLength)
+
+	testCases := []struct {
+		desc           string
+		finalizeStatus int
+	}{
+		{
+			desc:           "user downloads the file when the request is successfully finalized",
+			finalizeStatus: 200,
+		}, {
+			desc:           "user downloads the file even when the request fails to be finalized",
+			finalizeStatus: 500,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			originResource := "/origin_resource"
+
+			originResourceServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				require.Equal(t, originResource, r.URL.String())
+
+				w.Header().Set("Content-Length", strconv.Itoa(bodyLength))
+
+				io.WriteString(w, expectedBody)
+			}))
+			defer originResourceServer.Close()
+
+			originResourceUrl := originResourceServer.URL + originResource
+
+			ts := testhelper.TestServerWithHandler(regexp.MustCompile(`.`), func(w http.ResponseWriter, r *http.Request) {
+				switch r.URL.String() {
+				case "/base":
+					params := `{"Url": "` + originResourceUrl + `", "Token": "` + token + `"}`
+					w.Header().Set("Gitlab-Workhorse-Send-Data", `send-dependency:`+base64.URLEncoding.EncodeToString([]byte(params)))
+				case "/base/upload/authorize":
+					w.Header().Set("Content-Type", api.ResponseContentType)
+					_, err := fmt.Fprintf(w, `{"TempPath":"%s"}`, scratchDir)
+					require.NoError(t, err)
+				case "/base/upload":
+					w.WriteHeader(tc.finalizeStatus)
+				default:
+					t.Fatalf("unexpected request: %s", r.URL)
+				}
+			})
+			defer ts.Close()
+
+			ws := startWorkhorseServer(ts.URL)
+			defer ws.Close()
+
+			resp, err := http.DefaultClient.Get(ws.URL + "/base")
+			require.NoError(t, err)
+			defer resp.Body.Close()
+
+			body, err := io.ReadAll(resp.Body)
+			require.NoError(t, err)
+			require.NoError(t, resp.Body.Close()) // Client closes connection
+			ws.Close()                            // Wait for server handler to return
+
+			require.Equal(t, 200, resp.StatusCode, "status code")
+			require.Equal(t, expectedBody, string(body), "response body")
+		})
+	}
+}