diff --git a/README.md b/README.md index 73d0ffc3d34..f5ec329cd9e 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ GitLab is an open source project and we are very happy to accept community contr ## Install a development environment To work on GitLab itself, we recommend setting up your development environment with [the GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit). -If you do not use the GitLab Development Kit you need to install and setup all the dependencies yourself, this is a lot of work and error prone. +If you do not use the GitLab Development Kit you need to install and configure all the dependencies yourself; this is a lot of work and error-prone. One small thing you also have to do when installing it yourself is to copy the example development Puma configuration file: cp config/puma.rb.example.development config/puma.rb diff --git a/app/controllers/admin/dashboard_controller.rb b/app/controllers/admin/dashboard_controller.rb index 086db1004a6..d12ccfc7423 100644 --- a/app/controllers/admin/dashboard_controller.rb +++ b/app/controllers/admin/dashboard_controller.rb @@ -20,7 +20,8 @@ class Admin::DashboardController < Admin::ApplicationController Gitlab::Redis::SharedState, Gitlab::Redis::Cache, Gitlab::Redis::TraceChunks, - Gitlab::Redis::RateLimiting + Gitlab::Redis::RateLimiting, + Gitlab::Redis::Sessions ].map(&:version).uniq end # rubocop: enable CodeReuse/ActiveRecord diff --git a/app/controllers/groups/dependency_proxy_for_containers_controller.rb b/app/controllers/groups/dependency_proxy_for_containers_controller.rb index f7dc552bd3e..e19b8ae35f8 100644 --- a/app/controllers/groups/dependency_proxy_for_containers_controller.rb +++ b/app/controllers/groups/dependency_proxy_for_containers_controller.rb @@ -5,11 +5,15 @@ class Groups::DependencyProxyForContainersController < ::Groups::DependencyProxy include DependencyProxy::GroupAccess include SendFileUpload include ::PackagesHelper # for event tracking + 
include WorkhorseRequest before_action :ensure_group - before_action :ensure_token_granted! + before_action :ensure_token_granted!, only: [:blob, :manifest] before_action :ensure_feature_enabled! + before_action :verify_workhorse_api!, only: [:authorize_upload_blob, :upload_blob] + skip_before_action :verify_authenticity_token, only: [:authorize_upload_blob, :upload_blob] + attr_reader :token feature_category :dependency_proxy @@ -38,6 +42,8 @@ class Groups::DependencyProxyForContainersController < ::Groups::DependencyProxy end def blob + return blob_via_workhorse if Feature.enabled?(:dependency_proxy_workhorse, group, default_enabled: :yaml) + result = DependencyProxy::FindOrCreateBlobService .new(group, image, token, params[:sha]).execute @@ -50,11 +56,47 @@ class Groups::DependencyProxyForContainersController < ::Groups::DependencyProxy end end + def authorize_upload_blob + set_workhorse_internal_api_content_type + + render json: DependencyProxy::FileUploader.workhorse_authorize(has_length: false) + end + + def upload_blob + @group.dependency_proxy_blobs.create!( + file_name: blob_file_name, + file: params[:file], + size: params[:file].size + ) + + event_name = tracking_event_name(object_type: :blob, from_cache: false) + track_package_event(event_name, :dependency_proxy, namespace: group, user: auth_user) + + head :ok + end + private + def blob_via_workhorse + blob = @group.dependency_proxy_blobs.find_by_file_name(blob_file_name) + + if blob.present? + event_name = tracking_event_name(object_type: :blob, from_cache: true) + track_package_event(event_name, :dependency_proxy, namespace: group, user: auth_user) + + send_upload(blob.file) + else + send_dependency(token, DependencyProxy::Registry.blob_url(image, params[:sha]), blob_file_name) + end + end + + def blob_file_name + @blob_file_name ||= params[:sha].sub('sha256:', '') + '.gz' + end + def group strong_memoize(:group) do - Group.find_by_full_path(params[:group_id], follow_redirects: request.get?) 
+ Group.find_by_full_path(params[:group_id], follow_redirects: true) end end diff --git a/app/controllers/health_controller.rb b/app/controllers/health_controller.rb index 20407a75534..071378f266e 100644 --- a/app/controllers/health_controller.rb +++ b/app/controllers/health_controller.rb @@ -17,6 +17,7 @@ class HealthController < ActionController::Base Gitlab::HealthChecks::Redis::SharedStateCheck, Gitlab::HealthChecks::Redis::TraceChunksCheck, Gitlab::HealthChecks::Redis::RateLimitingCheck, + Gitlab::HealthChecks::Redis::SessionsCheck, Gitlab::HealthChecks::GitalyCheck ].freeze diff --git a/app/helpers/workhorse_helper.rb b/app/helpers/workhorse_helper.rb index 8785c4cdcbb..4862282bc73 100644 --- a/app/helpers/workhorse_helper.rb +++ b/app/helpers/workhorse_helper.rb @@ -41,6 +41,15 @@ module WorkhorseHelper head :ok end + def send_dependency(token, url, filename) + headers.store(*Gitlab::Workhorse.send_dependency(token, url)) + headers['Content-Disposition'] = + ActionDispatch::Http::ContentDisposition.format(disposition: 'attachment', filename: filename) + headers['Content-Type'] = 'application/gzip' + + head :ok + end + def set_workhorse_internal_api_content_type headers['Content-Type'] = Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE end diff --git a/app/uploaders/dependency_proxy/file_uploader.rb b/app/uploaders/dependency_proxy/file_uploader.rb index 5154f180454..f0222d4cf06 100644 --- a/app/uploaders/dependency_proxy/file_uploader.rb +++ b/app/uploaders/dependency_proxy/file_uploader.rb @@ -1,6 +1,7 @@ # frozen_string_literal: true class DependencyProxy::FileUploader < GitlabUploader + extend Workhorse::UploadPath include ObjectStorage::Concern before :cache, :set_content_type diff --git a/app/workers/all_queues.yml b/app/workers/all_queues.yml index ebb4e777b44..c7ce2eb8d00 100644 --- a/app/workers/all_queues.yml +++ b/app/workers/all_queues.yml @@ -593,7 +593,7 @@ :feature_category: :continuous_integration :has_external_dependencies: :urgency: :low - 
:resource_boundary: :cpu + :resource_boundary: :unknown :weight: 1 :idempotent: :tags: [] diff --git a/app/workers/stuck_ci_jobs_worker.rb b/app/workers/stuck_ci_jobs_worker.rb index f8f1d8c60b3..72004f7568c 100644 --- a/app/workers/stuck_ci_jobs_worker.rb +++ b/app/workers/stuck_ci_jobs_worker.rb @@ -13,7 +13,6 @@ class StuckCiJobsWorker # rubocop:disable Scalability/IdempotentWorker data_consistency :always feature_category :continuous_integration - worker_resource_boundary :cpu def perform Ci::StuckBuilds::DropRunningWorker.perform_in(20.minutes) diff --git a/config/README.md b/config/README.md index 52f9a244bd0..f04758fcaeb 100644 --- a/config/README.md +++ b/config/README.md @@ -78,6 +78,7 @@ An example configuration file for Redis is in this directory under the name | `shared_state` | | Persistent application state | | `trace_chunks` | `shared_state` | [CI trace chunks](https://docs.gitlab.com/ee/administration/job_logs.html#incremental-logging-architecture) | | `rate_limiting` | `cache` | [Rate limiting](https://docs.gitlab.com/ee/user/admin_area/settings/user_and_ip_rate_limits.html) state | +| `sessions` | `shared_state` | [Sessions](https://docs.gitlab.com/ee/development/session.html#redis)| If no configuration is found, or no URL is found in the configuration file, the default URL used is: diff --git a/config/application.rb b/config/application.rb index 83a3033a40d..dba9550a3dc 100644 --- a/config/application.rb +++ b/config/application.rb @@ -25,6 +25,7 @@ module Gitlab require_dependency Rails.root.join('lib/gitlab/redis/shared_state') require_dependency Rails.root.join('lib/gitlab/redis/trace_chunks') require_dependency Rails.root.join('lib/gitlab/redis/rate_limiting') + require_dependency Rails.root.join('lib/gitlab/redis/sessions') require_dependency Rails.root.join('lib/gitlab/current_settings') require_dependency Rails.root.join('lib/gitlab/middleware/read_only') require_dependency Rails.root.join('lib/gitlab/middleware/basic_health_check') diff 
--git a/config/feature_flags/development/dependency_proxy_workhorse.yml b/config/feature_flags/development/dependency_proxy_workhorse.yml new file mode 100644 index 00000000000..a3545d32cd5 --- /dev/null +++ b/config/feature_flags/development/dependency_proxy_workhorse.yml @@ -0,0 +1,8 @@ +--- +name: dependency_proxy_workhorse +introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68157 +rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/339639 +milestone: '14.3' +type: development +group: group::source code +default_enabled: false diff --git a/config/initializers/7_redis.rb b/config/initializers/7_redis.rb index d6a2d070cc4..50f0fb92317 100644 --- a/config/initializers/7_redis.rb +++ b/config/initializers/7_redis.rb @@ -18,3 +18,4 @@ Gitlab::Redis::Queues.with { nil } Gitlab::Redis::SharedState.with { nil } Gitlab::Redis::TraceChunks.with { nil } Gitlab::Redis::RateLimiting.with { nil } +Gitlab::Redis::Sessions.with { nil } diff --git a/config/routes/group.rb b/config/routes/group.rb index ef31b639d33..803249f8861 100644 --- a/config/routes/group.rb +++ b/config/routes/group.rb @@ -146,5 +146,7 @@ scope format: false do constraints image: Gitlab::PathRegex.container_image_regex, sha: Gitlab::PathRegex.container_image_blob_sha_regex do get 'v2/*group_id/dependency_proxy/containers/*image/manifests/*tag' => 'groups/dependency_proxy_for_containers#manifest' # rubocop:todo Cop/PutGroupRoutesUnderScope get 'v2/*group_id/dependency_proxy/containers/*image/blobs/:sha' => 'groups/dependency_proxy_for_containers#blob' # rubocop:todo Cop/PutGroupRoutesUnderScope + post 'v2/*group_id/dependency_proxy/containers/*image/blobs/:sha/upload/authorize' => 'groups/dependency_proxy_for_containers#authorize_upload_blob' # rubocop:todo Cop/PutGroupRoutesUnderScope + post 'v2/*group_id/dependency_proxy/containers/*image/blobs/:sha/upload' => 'groups/dependency_proxy_for_containers#upload_blob' # rubocop:todo Cop/PutGroupRoutesUnderScope end end diff 
--git a/doc/development/redis.md b/doc/development/redis.md index 063e1b8d53d..fa07cebdc61 100644 --- a/doc/development/redis.md +++ b/doc/development/redis.md @@ -16,6 +16,7 @@ GitLab uses [Redis](https://redis.io) for the following distinct purposes: - To store CI trace chunks. - As a Pub/Sub queue backend for ActionCable. - Rate limiting state storage. +- Sessions. In most environments (including the GDK), all of these point to the same Redis instance. diff --git a/doc/user/asciidoc.md b/doc/user/asciidoc.md index 8313b20e795..cd166666ad6 100644 --- a/doc/user/asciidoc.md +++ b/doc/user/asciidoc.md @@ -1,8 +1,7 @@ --- stage: Create group: Source Code -info: "To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments" -type: reference, howto +info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments --- # AsciiDoc **(FREE)** @@ -85,7 +84,7 @@ I believe I shall--no, actually I won't. **Macros** ```plaintext -// where c=specialchars, q=quotes, a=attributes, r=replacements, m=macros, p=post_replacements, etc. +// where c=specialchars, q=quotes, a=attributes, r=replacements, m=macros, p=post_replacements The European icon:flag[role=blue] is blue & contains pass:[************] arranged in a icon:circle-o[role=yellow]. The pass:c[->] operator is often referred to as the stabby lambda. Since `pass:[++]` has strong priority in AsciiDoc, you can rewrite pass:c,a,r[C++ => C{pp}]. @@ -151,7 +150,7 @@ This paragraph has a footnote.footnote:[This is the text of the footnote.] ** level 2 *** level 3 **** level 4 -***** etc. +***** level 5 * back at level 1 + Attach a block or paragraph to a list item using a list continuation (which you can enclose in an open block). 
@@ -240,10 +239,10 @@ include::basics.adoc[] include::https://example.org/installation.adoc[] ``` -To guarantee good system performance and prevent malicious documents causing -problems, GitLab enforces a **maximum limit** on the number of include directives -processed in any one document. Currently a total of 32 documents can be -included, a number that is inclusive of transitive dependencies. +To guarantee good system performance and prevent malicious documents from causing +problems, GitLab enforces a maximum limit on the number of include directives +processed in any one document. You can include up to 32 documents, which is +inclusive of transitive dependencies. ### Blocks @@ -428,7 +427,7 @@ If you're new to using Mermaid or need help identifying issues in your Mermaid c the [Mermaid Live Editor](https://mermaid-js.github.io/mermaid-live-editor/) is a helpful tool for creating and resolving issues within Mermaid diagrams. -In order to generate a diagram or flowchart, you should write your text inside the `mermaid` block: +To generate a diagram or flowchart, enter your text in a `mermaid` block: ```plaintext [mermaid] @@ -447,7 +446,7 @@ Kroki supports more than a dozen diagram libraries. To make Kroki available in GitLab, a GitLab administrator needs to enable it first. Read more in the [Kroki integration](../administration/integration/kroki.md) page. -Once Kroki is enabled, you can create a wide variety of diagrams in AsciiDoc and Markdown documents. +After Kroki is enabled, you can create diagrams in AsciiDoc and Markdown documents. Here's an example using a GraphViz diagram: **AsciiDoc** @@ -476,7 +475,7 @@ digraph G { To make PlantUML available in GitLab, a GitLab administrator needs to enable it first. Read more in [PlantUML & GitLab](../administration/integration/plantuml.md). 
-Once enabled, you should write your text inside the `plantuml` block: +After PlantUML is enabled, enter your text in a `plantuml` block: ```plaintext [plantuml] diff --git a/doc/user/project/deploy_tokens/index.md b/doc/user/project/deploy_tokens/index.md index 29005b49dc2..483de3b21bd 100644 --- a/doc/user/project/deploy_tokens/index.md +++ b/doc/user/project/deploy_tokens/index.md @@ -199,3 +199,18 @@ NOTE: The special handling for the `gitlab-deploy-token` deploy token is not implemented for group deploy tokens. To make the group-level deploy token available for CI/CD jobs, the `CI_DEPLOY_USER` and `CI_DEPLOY_PASSWORD` variables should be set under **Settings** to the name and token of the group deploy token respectively. + +## Troubleshooting + +### Group deploy tokens and LFS + +A bug +[prevents Group Deploy Tokens from cloning LFS objects](https://gitlab.com/gitlab-org/gitlab/-/issues/235398). +If you receive `404 Not Found` errors and this error, +use a Project Deploy Token to work around the bug: + +```plaintext +api error: Repository or object not found: +https://.git/info/lfs/objects/batch +Check that it exists and that you have proper access to it +``` diff --git a/lib/gitlab/health_checks/redis/redis_check.rb b/lib/gitlab/health_checks/redis/redis_check.rb index 2fa39308b9a..25879c18f84 100644 --- a/lib/gitlab/health_checks/redis/redis_check.rb +++ b/lib/gitlab/health_checks/redis/redis_check.rb @@ -22,7 +22,8 @@ module Gitlab ::Gitlab::HealthChecks::Redis::QueuesCheck.check_up && ::Gitlab::HealthChecks::Redis::SharedStateCheck.check_up && ::Gitlab::HealthChecks::Redis::TraceChunksCheck.check_up && - ::Gitlab::HealthChecks::Redis::RateLimitingCheck.check_up + ::Gitlab::HealthChecks::Redis::RateLimitingCheck.check_up && + ::Gitlab::HealthChecks::Redis::SessionsCheck.check_up end end end diff --git a/lib/gitlab/health_checks/redis/sessions_check.rb b/lib/gitlab/health_checks/redis/sessions_check.rb new file mode 100644 index 00000000000..a0c5e177b4e --- 
/dev/null +++ b/lib/gitlab/health_checks/redis/sessions_check.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +module Gitlab + module HealthChecks + module Redis + class SessionsCheck + extend SimpleAbstractCheck + + class << self + def check_up + check + end + + private + + def metric_prefix + 'redis_sessions_ping' + end + + def successful?(result) + result == 'PONG' + end + + # rubocop: disable CodeReuse/ActiveRecord + def check + catch_timeout 10.seconds do + Gitlab::Redis::Sessions.with(&:ping) + end + end + # rubocop: enable CodeReuse/ActiveRecord + end + end + end + end +end diff --git a/lib/gitlab/instrumentation/redis.rb b/lib/gitlab/instrumentation/redis.rb index ea1d54ff867..4fee779c767 100644 --- a/lib/gitlab/instrumentation/redis.rb +++ b/lib/gitlab/instrumentation/redis.rb @@ -10,8 +10,9 @@ module Gitlab SharedState = Class.new(RedisBase).enable_redis_cluster_validation TraceChunks = Class.new(RedisBase).enable_redis_cluster_validation RateLimiting = Class.new(RedisBase).enable_redis_cluster_validation + Sessions = Class.new(RedisBase).enable_redis_cluster_validation - STORAGES = [ActionCable, Cache, Queues, SharedState, TraceChunks, RateLimiting].freeze + STORAGES = [ActionCable, Cache, Queues, SharedState, TraceChunks, RateLimiting, Sessions].freeze # Milliseconds represented in seconds (from 1 millisecond to 2 seconds). 
QUERY_TIME_BUCKETS = [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2].freeze diff --git a/lib/gitlab/middleware/multipart.rb b/lib/gitlab/middleware/multipart.rb index 49be3ffc839..a047015e54f 100644 --- a/lib/gitlab/middleware/multipart.rb +++ b/lib/gitlab/middleware/multipart.rb @@ -158,6 +158,7 @@ module Gitlab ::Gitlab.config.uploads.storage_path, ::JobArtifactUploader.workhorse_upload_path, ::LfsObjectUploader.workhorse_upload_path, + ::DependencyProxy::FileUploader.workhorse_upload_path, File.join(Rails.root, 'public/uploads/tmp') ] + package_allowed_paths end diff --git a/lib/gitlab/redis/sessions.rb b/lib/gitlab/redis/sessions.rb new file mode 100644 index 00000000000..3bf1eb6211d --- /dev/null +++ b/lib/gitlab/redis/sessions.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +module Gitlab + module Redis + class Sessions < ::Gitlab::Redis::Wrapper + # The data we store on Sessions used to be stored on SharedState. + def self.config_fallback + SharedState + end + end + end +end diff --git a/lib/gitlab/workhorse.rb b/lib/gitlab/workhorse.rb index 5780e4d6da8..c40aa2273aa 100644 --- a/lib/gitlab/workhorse.rb +++ b/lib/gitlab/workhorse.rb @@ -170,6 +170,18 @@ module Gitlab ] end + def send_dependency(token, url) + params = { + 'Header' => { Authorization: ["Bearer #{token}"] }, + 'Url' => url + } + + [ + SEND_DATA_HEADER, + "send-dependency:#{encode(params)}" + ] + end + def channel_websocket(channel) details = { 'Channel' => { diff --git a/spec/controllers/groups/dependency_proxy_for_containers_controller_spec.rb b/spec/controllers/groups/dependency_proxy_for_containers_controller_spec.rb index 7415c2860c8..fa402d556c7 100644 --- a/spec/controllers/groups/dependency_proxy_for_containers_controller_spec.rb +++ b/spec/controllers/groups/dependency_proxy_for_containers_controller_spec.rb @@ -5,6 +5,7 @@ require 'spec_helper' RSpec.describe Groups::DependencyProxyForContainersController do include HttpBasicAuthHelpers include 
DependencyProxyHelpers + include WorkhorseHelpers let_it_be(:user) { create(:user) } let_it_be_with_reload(:group) { create(:group, :private) } @@ -242,16 +243,9 @@ RSpec.describe Groups::DependencyProxyForContainersController do end describe 'GET #blob' do - let_it_be(:blob) { create(:dependency_proxy_blob) } + let(:blob) { create(:dependency_proxy_blob, group: group) } let(:blob_sha) { blob.file_name.sub('.gz', '') } - let(:blob_response) { { status: :success, blob: blob, from_cache: false } } - - before do - allow_next_instance_of(DependencyProxy::FindOrCreateBlobService) do |instance| - allow(instance).to receive(:execute).and_return(blob_response) - end - end subject { get_blob } @@ -264,40 +258,31 @@ RSpec.describe Groups::DependencyProxyForContainersController do it_behaves_like 'without permission' it_behaves_like 'feature flag disabled with private group' - context 'remote blob request fails' do - let(:blob_response) do - { - status: :error, - http_status: 400, - message: '' - } - end - - before do - group.add_guest(user) - end - - it 'proxies status from the remote blob request', :aggregate_failures do - subject - - expect(response).to have_gitlab_http_status(:bad_request) - expect(response.body).to be_empty - end - end - context 'a valid user' do before do group.add_guest(user) end it_behaves_like 'a successful blob pull' - it_behaves_like 'a package tracking event', described_class.name, 'pull_blob' + it_behaves_like 'a package tracking event', described_class.name, 'pull_blob_from_cache' - context 'with a cache entry' do - let(:blob_response) { { status: :success, blob: blob, from_cache: true } } + context 'when cache entry does not exist' do + let(:blob_sha) { 'a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4' } - it_behaves_like 'returning response status', :success - it_behaves_like 'a package tracking event', described_class.name, 'pull_blob_from_cache' + it 'returns Workhorse send-dependency instructions' do + subject + + 
send_data_type, send_data = workhorse_send_data + header, url = send_data.values_at('Header', 'Url') + + expect(send_data_type).to eq('send-dependency') + expect(header).to eq("Authorization" => ["Bearer abcd1234"]) + expect(url).to eq(DependencyProxy::Registry.blob_url('alpine', blob_sha)) + expect(response.headers['Content-Type']).to eq('application/gzip') + expect(response.headers['Content-Disposition']).to eq( + ActionDispatch::Http::ContentDisposition.format(disposition: 'attachment', filename: blob.file_name) + ) + end end end @@ -319,6 +304,74 @@ RSpec.describe Groups::DependencyProxyForContainersController do it_behaves_like 'a successful blob pull' end end + + context 'when dependency_proxy_workhorse disabled' do + let(:blob_response) { { status: :success, blob: blob, from_cache: false } } + + before do + stub_feature_flags(dependency_proxy_workhorse: false) + + allow_next_instance_of(DependencyProxy::FindOrCreateBlobService) do |instance| + allow(instance).to receive(:execute).and_return(blob_response) + end + end + + context 'remote blob request fails' do + let(:blob_response) do + { + status: :error, + http_status: 400, + message: '' + } + end + + before do + group.add_guest(user) + end + + it 'proxies status from the remote blob request', :aggregate_failures do + subject + + expect(response).to have_gitlab_http_status(:bad_request) + expect(response.body).to be_empty + end + end + + context 'a valid user' do + before do + group.add_guest(user) + end + + it_behaves_like 'a successful blob pull' + it_behaves_like 'a package tracking event', described_class.name, 'pull_blob' + + context 'with a cache entry' do + let(:blob_response) { { status: :success, blob: blob, from_cache: true } } + + it_behaves_like 'returning response status', :success + it_behaves_like 'a package tracking event', described_class.name, 'pull_blob_from_cache' + end + end + + context 'a valid deploy token' do + let_it_be(:user) { create(:deploy_token, :group, 
:dependency_proxy_scopes) } + let_it_be(:group_deploy_token) { create(:group_deploy_token, deploy_token: user, group: group) } + + it_behaves_like 'a successful blob pull' + + context 'pulling from a subgroup' do + let_it_be_with_reload(:parent_group) { create(:group) } + let_it_be_with_reload(:group) { create(:group, parent: parent_group) } + + before do + parent_group.create_dependency_proxy_setting!(enabled: true) + group_deploy_token.update_column(:group_id, parent_group.id) + end + + it_behaves_like 'a successful blob pull' + end + end + end end it_behaves_like 'not found when disabled' @@ -328,6 +381,61 @@ RSpec.describe Groups::DependencyProxyForContainersController do end end + describe 'GET #authorize_upload_blob' do + let(:blob_sha) { 'a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4' } + + subject(:authorize_upload_blob) do + request.headers.merge!(workhorse_internal_api_request_header) + + get :authorize_upload_blob, params: { group_id: group.to_param, image: 'alpine', sha: blob_sha } + end + + it_behaves_like 'without permission' + + context 'with a valid user' do + before do + group.add_guest(user) + end + + it 'sends Workhorse file upload instructions', :aggregate_failures do + authorize_upload_blob + + expect(response.headers['Content-Type']).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE) + expect(json_response['TempPath']).to eq(DependencyProxy::FileUploader.workhorse_local_upload_path) + end + end + end + + describe 'GET #upload_blob' do + let(:blob_sha) { 'a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4' } + let(:file) { fixture_file_upload("spec/fixtures/dependency_proxy/#{blob_sha}.gz", 'application/gzip') } + + subject do + request.headers.merge!(workhorse_internal_api_request_header) + + get :upload_blob, params: { + group_id: group.to_param, + image: 'alpine', + sha: blob_sha, + file: file + } + end + + it_behaves_like 'without permission' + + context 'with a valid user' do + before do + 
group.add_guest(user) + + expect_next_found_instance_of(Group) do |instance| + expect(instance).to receive_message_chain(:dependency_proxy_blobs, :create!) + end + end + + it_behaves_like 'a package tracking event', described_class.name, 'pull_blob' + end + end + def enable_dependency_proxy group.create_dependency_proxy_setting!(enabled: true) end diff --git a/spec/features/groups/dependency_proxy_for_containers_spec.rb b/spec/features/groups/dependency_proxy_for_containers_spec.rb new file mode 100644 index 00000000000..a4cd6d0f503 --- /dev/null +++ b/spec/features/groups/dependency_proxy_for_containers_spec.rb @@ -0,0 +1,108 @@ +# frozen_string_literal: true + +require 'spec_helper' + +RSpec.describe 'Group Dependency Proxy for containers', :js do + include DependencyProxyHelpers + + include_context 'file upload requests helpers' + + let_it_be(:user) { create(:user) } + let_it_be(:group) { create(:group) } + let_it_be(:sha) { 'a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4' } + let_it_be(:content) { fixture_file_upload("spec/fixtures/dependency_proxy/#{sha}.gz").read } + + let(:image) { 'alpine' } + let(:url) { capybara_url("/v2/#{group.full_path}/dependency_proxy/containers/#{image}/blobs/sha256:#{sha}") } + let(:token) { 'token' } + let(:headers) { { 'Authorization' => "Bearer #{build_jwt(user).encoded}" } } + + subject do + HTTParty.get(url, headers: headers) + end + + def run_server(handler) + default_server = Capybara.server + + Capybara.server = Capybara.servers[:puma] + server = Capybara::Server.new(handler) + server.boot + server + ensure + Capybara.server = default_server + end + + let_it_be(:external_server) do + handler = lambda do |env| + if env['REQUEST_PATH'] == '/token' + [200, {}, [{ token: 'token' }.to_json]] + else + [200, {}, [content]] + end + end + + run_server(handler) + end + + before do + stub_application_setting(allow_local_requests_from_web_hooks_and_services: true) + stub_config(dependency_proxy: { enabled: true }) + 
group.add_developer(user) + + stub_const("DependencyProxy::Registry::AUTH_URL", external_server.base_url) + stub_const("DependencyProxy::Registry::LIBRARY_URL", external_server.base_url) + end + + shared_examples 'responds with the file' do + it 'sends file' do + expect(subject.code).to eq(200) + expect(subject.body).to eq(content) + expect(subject.headers.to_h).to include( + "content-type" => ["application/gzip"], + "content-disposition" => ["attachment; filename=\"#{sha}.gz\"; filename*=UTF-8''#{sha}.gz"], + "content-length" => ["32"] + ) + end + end + + shared_examples 'caches the file' do + it 'caches the file' do + expect { subject }.to change { + group.dependency_proxy_blobs.count + }.from(0).to(1) + + expect(subject.code).to eq(200) + expect(group.dependency_proxy_blobs.first.file.read).to eq(content) + end + end + + context 'fetching a blob' do + context 'when the blob is cached for the group' do + let!(:dependency_proxy_blob) { create(:dependency_proxy_blob, group: group) } + + it_behaves_like 'responds with the file' + + context 'dependency_proxy_workhorse feature flag disabled' do + before do + stub_feature_flags({ dependency_proxy_workhorse: false }) + end + + it_behaves_like 'responds with the file' + end + end + end + + context 'when the blob must be downloaded' do + it_behaves_like 'responds with the file' + it_behaves_like 'caches the file' + + context 'dependency_proxy_workhorse feature flag disabled' do + before do + stub_feature_flags({ dependency_proxy_workhorse: false }) + end + + it_behaves_like 'responds with the file' + it_behaves_like 'caches the file' + end + end +end diff --git a/spec/lib/gitlab/health_checks/probes/collection_spec.rb b/spec/lib/gitlab/health_checks/probes/collection_spec.rb index 401ffee9c28..741c45d953c 100644 --- a/spec/lib/gitlab/health_checks/probes/collection_spec.rb +++ b/spec/lib/gitlab/health_checks/probes/collection_spec.rb @@ -18,6 +18,7 @@ RSpec.describe Gitlab::HealthChecks::Probes::Collection do 
Gitlab::HealthChecks::Redis::SharedStateCheck, Gitlab::HealthChecks::Redis::TraceChunksCheck, Gitlab::HealthChecks::Redis::RateLimitingCheck, + Gitlab::HealthChecks::Redis::SessionsCheck, Gitlab::HealthChecks::GitalyCheck ] end diff --git a/spec/lib/gitlab/health_checks/redis/sessions_check_spec.rb b/spec/lib/gitlab/health_checks/redis/sessions_check_spec.rb new file mode 100644 index 00000000000..82b3b33ec0a --- /dev/null +++ b/spec/lib/gitlab/health_checks/redis/sessions_check_spec.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +require 'spec_helper' +require_relative '../simple_check_shared' + +RSpec.describe Gitlab::HealthChecks::Redis::SessionsCheck do + include_examples 'simple_check', 'redis_sessions_ping', 'RedisSessions', 'PONG' +end diff --git a/spec/lib/gitlab/instrumentation/redis_spec.rb b/spec/lib/gitlab/instrumentation/redis_spec.rb index 0da44dfb8d8..900a079cdd2 100644 --- a/spec/lib/gitlab/instrumentation/redis_spec.rb +++ b/spec/lib/gitlab/instrumentation/redis_spec.rb @@ -77,7 +77,8 @@ RSpec.describe Gitlab::Instrumentation::Redis do details_row.merge(storage: 'Queues'), details_row.merge(storage: 'SharedState'), details_row.merge(storage: 'TraceChunks'), - details_row.merge(storage: 'RateLimiting')) + details_row.merge(storage: 'RateLimiting'), + details_row.merge(storage: 'Sessions')) end end end diff --git a/spec/lib/gitlab/middleware/multipart/handler_spec.rb b/spec/lib/gitlab/middleware/multipart/handler_spec.rb index aac3f00defe..53b59b042e2 100644 --- a/spec/lib/gitlab/middleware/multipart/handler_spec.rb +++ b/spec/lib/gitlab/middleware/multipart/handler_spec.rb @@ -16,6 +16,7 @@ RSpec.describe Gitlab::Middleware::Multipart::Handler do ::Gitlab.config.uploads.storage_path, ::JobArtifactUploader.workhorse_upload_path, ::LfsObjectUploader.workhorse_upload_path, + ::DependencyProxy::FileUploader.workhorse_upload_path, File.join(Rails.root, 'public/uploads/tmp') ] end diff --git a/spec/lib/gitlab/redis/rate_limiting_spec.rb 
b/spec/lib/gitlab/redis/rate_limiting_spec.rb index f15aa71a52d..e79c070df93 100644 --- a/spec/lib/gitlab/redis/rate_limiting_spec.rb +++ b/spec/lib/gitlab/redis/rate_limiting_spec.rb @@ -3,53 +3,5 @@ require 'spec_helper' RSpec.describe Gitlab::Redis::RateLimiting do - let(:instance_specific_config_file) { "config/redis.rate_limiting.yml" } - let(:environment_config_file_name) { "GITLAB_REDIS_RATE_LIMITING_CONFIG_FILE" } - let(:cache_config_file) { nil } - - before do - allow(Gitlab::Redis::Cache).to receive(:config_file_name).and_return(cache_config_file) - end - - include_examples "redis_shared_examples" - - describe '.config_file_name' do - subject { described_class.config_file_name } - - let(:rails_root) { Dir.mktmpdir('redis_shared_examples') } - - before do - # Undo top-level stub of config_file_name because we are testing that method now. - allow(described_class).to receive(:config_file_name).and_call_original - - allow(described_class).to receive(:rails_root).and_return(rails_root) - FileUtils.mkdir_p(File.join(rails_root, 'config')) - end - - after do - FileUtils.rm_rf(rails_root) - end - - context 'when there is only a resque.yml' do - before do - FileUtils.touch(File.join(rails_root, 'config/resque.yml')) - end - - it { expect(subject).to eq("#{rails_root}/config/resque.yml") } - - context 'and there is a global env override' do - before do - stub_env('GITLAB_REDIS_CONFIG_FILE', 'global override') - end - - it { expect(subject).to eq('global override') } - - context 'and Cache has a different config file' do - let(:cache_config_file) { 'cache config file' } - - it { expect(subject).to eq('cache config file') } - end - end - end - end + include_examples "redis_new_instance_shared_examples", 'rate_limiting', Gitlab::Redis::Cache end diff --git a/spec/lib/gitlab/redis/sessions_spec.rb b/spec/lib/gitlab/redis/sessions_spec.rb new file mode 100644 index 00000000000..7e239c08e9f --- /dev/null +++ b/spec/lib/gitlab/redis/sessions_spec.rb @@ -0,0 +1,7 @@ +# 
frozen_string_literal: true + +require 'spec_helper' + +RSpec.describe Gitlab::Redis::Sessions do + include_examples "redis_new_instance_shared_examples", 'sessions', Gitlab::Redis::SharedState +end diff --git a/spec/lib/gitlab/redis/trace_chunks_spec.rb b/spec/lib/gitlab/redis/trace_chunks_spec.rb index e974dc519d6..bb3c3089430 100644 --- a/spec/lib/gitlab/redis/trace_chunks_spec.rb +++ b/spec/lib/gitlab/redis/trace_chunks_spec.rb @@ -3,53 +3,5 @@ require 'spec_helper' RSpec.describe Gitlab::Redis::TraceChunks do - let(:instance_specific_config_file) { "config/redis.trace_chunks.yml" } - let(:environment_config_file_name) { "GITLAB_REDIS_TRACE_CHUNKS_CONFIG_FILE" } - let(:shared_state_config_file) { nil } - - before do - allow(Gitlab::Redis::SharedState).to receive(:config_file_name).and_return(shared_state_config_file) - end - - include_examples "redis_shared_examples" - - describe '.config_file_name' do - subject { described_class.config_file_name } - - let(:rails_root) { Dir.mktmpdir('redis_shared_examples') } - - before do - # Undo top-level stub of config_file_name because we are testing that method now. 
- allow(described_class).to receive(:config_file_name).and_call_original - - allow(described_class).to receive(:rails_root).and_return(rails_root) - FileUtils.mkdir_p(File.join(rails_root, 'config')) - end - - after do - FileUtils.rm_rf(rails_root) - end - - context 'when there is only a resque.yml' do - before do - FileUtils.touch(File.join(rails_root, 'config/resque.yml')) - end - - it { expect(subject).to eq("#{rails_root}/config/resque.yml") } - - context 'and there is a global env override' do - before do - stub_env('GITLAB_REDIS_CONFIG_FILE', 'global override') - end - - it { expect(subject).to eq('global override') } - - context 'and SharedState has a different config file' do - let(:shared_state_config_file) { 'shared state config file' } - - it { expect(subject).to eq('shared state config file') } - end - end - end - end + include_examples "redis_new_instance_shared_examples", 'trace_chunks', Gitlab::Redis::SharedState end diff --git a/spec/support/redis.rb b/spec/support/redis.rb index 946c8685741..421079af8e0 100644 --- a/spec/support/redis.rb +++ b/spec/support/redis.rb @@ -46,4 +46,12 @@ RSpec.configure do |config| redis_rate_limiting_cleanup! end + + config.around(:each, :clean_gitlab_redis_sessions) do |example| + redis_sessions_cleanup! + + example.run + + redis_sessions_cleanup! + end end diff --git a/spec/support/redis/redis_helpers.rb b/spec/support/redis/redis_helpers.rb index bf52da5d6f2..f27d873eb31 100644 --- a/spec/support/redis/redis_helpers.rb +++ b/spec/support/redis/redis_helpers.rb @@ -27,4 +27,9 @@ module RedisHelpers def redis_rate_limiting_cleanup! Gitlab::Redis::RateLimiting.with(&:flushdb) end + + # Usage: session state + def redis_sessions_cleanup! 
+ Gitlab::Redis::Sessions.with(&:flushdb) + end end diff --git a/spec/support/redis/redis_new_instance_shared_examples.rb b/spec/support/redis/redis_new_instance_shared_examples.rb new file mode 100644 index 00000000000..e9b1e3e4da1 --- /dev/null +++ b/spec/support/redis/redis_new_instance_shared_examples.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require 'spec_helper' + +RSpec.shared_examples "redis_new_instance_shared_examples" do |name, fallback_class| + let(:instance_specific_config_file) { "config/redis.#{name}.yml" } + let(:environment_config_file_name) { "GITLAB_REDIS_#{name.upcase}_CONFIG_FILE" } + let(:fallback_config_file) { nil } + + before do + allow(fallback_class).to receive(:config_file_name).and_return(fallback_config_file) + end + + include_examples "redis_shared_examples" + + describe '.config_file_name' do + subject { described_class.config_file_name } + + let(:rails_root) { Dir.mktmpdir('redis_shared_examples') } + + before do + # Undo top-level stub of config_file_name because we are testing that method now. 
+ allow(described_class).to receive(:config_file_name).and_call_original + + allow(described_class).to receive(:rails_root).and_return(rails_root) + FileUtils.mkdir_p(File.join(rails_root, 'config')) + end + + after do + FileUtils.rm_rf(rails_root) + end + + context 'when there is only a resque.yml' do + before do + FileUtils.touch(File.join(rails_root, 'config/resque.yml')) + end + + it { expect(subject).to eq("#{rails_root}/config/resque.yml") } + + context 'and there is a global env override' do + before do + stub_env('GITLAB_REDIS_CONFIG_FILE', 'global override') + end + + it { expect(subject).to eq('global override') } + + context "and #{fallback_class.name.demodulize} has a different config file" do + let(:fallback_config_file) { 'fallback config file' } + + it { expect(subject).to eq('fallback config file') } + end + end + end + end +end diff --git a/workhorse/internal/dependencyproxy/dependencyproxy.go b/workhorse/internal/dependencyproxy/dependencyproxy.go new file mode 100644 index 00000000000..cfb3045544f --- /dev/null +++ b/workhorse/internal/dependencyproxy/dependencyproxy.go @@ -0,0 +1,123 @@ +package dependencyproxy + +import ( + "context" + "fmt" + "io" + "net" + "net/http" + "time" + + "gitlab.com/gitlab-org/labkit/correlation" + "gitlab.com/gitlab-org/labkit/log" + "gitlab.com/gitlab-org/labkit/tracing" + + "gitlab.com/gitlab-org/gitlab/workhorse/internal/helper" + "gitlab.com/gitlab-org/gitlab/workhorse/internal/senddata" +) + +// httpTransport defines a http.Transport with values +// that are more restrictive than for http.DefaultTransport, +// they define shorter TLS Handshake, and more aggressive connection closing +// to prevent the connection hanging and reduce FD usage +var httpTransport = tracing.NewRoundTripper(correlation.NewInstrumentedRoundTripper(&http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 10 * time.Second, + }).DialContext, + MaxIdleConns: 2, + 
IdleConnTimeout: 30 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 10 * time.Second, + ResponseHeaderTimeout: 30 * time.Second, +})) + +var httpClient = &http.Client{ + Transport: httpTransport, +} + +type Injector struct { + senddata.Prefix + uploadHandler http.Handler +} + +type entryParams struct { + Url string + Header http.Header +} + +type nullResponseWriter struct { + header http.Header + status int +} + +func (nullResponseWriter) Write(p []byte) (int, error) { + return len(p), nil +} + +func (w *nullResponseWriter) Header() http.Header { + return w.header +} + +func (w *nullResponseWriter) WriteHeader(status int) { + if w.status == 0 { + w.status = status + } +} + +func NewInjector() *Injector { + return &Injector{Prefix: "send-dependency:"} +} + +func (p *Injector) SetUploadHandler(uploadHandler http.Handler) { + p.uploadHandler = uploadHandler +} + +func (p *Injector) Inject(w http.ResponseWriter, r *http.Request, sendData string) { + dependencyResponse, err := p.fetchUrl(r.Context(), sendData) + if err != nil { + helper.Fail500(w, r, err) + return + } + defer dependencyResponse.Body.Close() + if dependencyResponse.StatusCode >= 400 { + w.WriteHeader(dependencyResponse.StatusCode) + io.Copy(w, dependencyResponse.Body) + return + } + + teeReader := io.TeeReader(dependencyResponse.Body, w) + saveFileRequest, err := http.NewRequestWithContext(r.Context(), "POST", r.URL.String()+"/upload", teeReader) + if err != nil { + helper.Fail500(w, r, fmt.Errorf("dependency proxy: failed to create request: %w", err)) + } + saveFileRequest.Header = helper.HeaderClone(r.Header) + saveFileRequest.ContentLength = dependencyResponse.ContentLength + + w.Header().Del("Content-Length") + + nrw := &nullResponseWriter{header: make(http.Header)} + p.uploadHandler.ServeHTTP(nrw, saveFileRequest) + + if nrw.status != http.StatusOK { + fields := log.Fields{"code": nrw.status} + + helper.Fail500WithFields(nrw, r, fmt.Errorf("dependency proxy: failed 
to upload file"), fields) + } +} + +func (p *Injector) fetchUrl(ctx context.Context, sendData string) (*http.Response, error) { + var params entryParams + if err := p.Unpack(¶ms, sendData); err != nil { + return nil, fmt.Errorf("dependency proxy: unpack sendData: %v", err) + } + + r, err := http.NewRequestWithContext(ctx, "GET", params.Url, nil) + if err != nil { + return nil, fmt.Errorf("dependency proxy: failed to fetch dependency: %v", err) + } + r.Header = params.Header + + return httpClient.Do(r) +} diff --git a/workhorse/internal/dependencyproxy/dependencyproxy_test.go b/workhorse/internal/dependencyproxy/dependencyproxy_test.go new file mode 100644 index 00000000000..37e54c0b756 --- /dev/null +++ b/workhorse/internal/dependencyproxy/dependencyproxy_test.go @@ -0,0 +1,183 @@ +package dependencyproxy + +import ( + "encoding/base64" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab/workhorse/internal/api" + "gitlab.com/gitlab-org/gitlab/workhorse/internal/testhelper" + "gitlab.com/gitlab-org/gitlab/workhorse/internal/upload" +) + +type fakeUploadHandler struct { + request *http.Request + body []byte + handler func(w http.ResponseWriter, r *http.Request) +} + +func (f *fakeUploadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + f.request = r + + f.body, _ = io.ReadAll(r.Body) + + f.handler(w, r) +} + +type errWriter struct{ writes int } + +func (w *errWriter) Header() http.Header { return nil } +func (w *errWriter) WriteHeader(h int) {} + +// First call of Write function succeeds while all the subsequent ones fail +func (w *errWriter) Write(p []byte) (int, error) { + if w.writes > 0 { + return 0, fmt.Errorf("client error") + } + + w.writes++ + + return len(p), nil +} + +type fakePreAuthHandler struct{} + +func (f *fakePreAuthHandler) PreAuthorizeHandler(handler api.HandleFunc, _ string) http.Handler { + return http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + handler(w, r, &api.Response{TempPath: "../../testdata/scratch"}) + }) +} + +func TestInject(t *testing.T) { + contentLength := 32768 + 1 + content := strings.Repeat("p", contentLength) + + testCases := []struct { + desc string + responseWriter http.ResponseWriter + contentLength int + handlerMustBeCalled bool + }{ + { + desc: "the uploading successfully finalized", + responseWriter: httptest.NewRecorder(), + contentLength: contentLength, + handlerMustBeCalled: true, + }, { + desc: "a user failed to receive the response", + responseWriter: &errWriter{}, + contentLength: contentLength, + handlerMustBeCalled: false, + }, { + desc: "the origin resource server returns partial response", + responseWriter: httptest.NewRecorder(), + contentLength: contentLength + 1, + handlerMustBeCalled: false, + }, + } + testhelper.ConfigureSecret() + + for _, tc := range testCases { + originResourceServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Length", strconv.Itoa(tc.contentLength)) + w.Write([]byte(content)) + })) + defer originResourceServer.Close() + + // BodyUploader expects http.Handler as its second param, we can create a stub function and verify that + // it's only called for successful requests + handlerIsCalled := false + handlerFunc := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { handlerIsCalled = true }) + + bodyUploader := upload.BodyUploader(&fakePreAuthHandler{}, handlerFunc, &upload.DefaultPreparer{}) + + injector := NewInjector() + injector.SetUploadHandler(bodyUploader) + + r := httptest.NewRequest("GET", "/target", nil) + sendData := base64.StdEncoding.EncodeToString([]byte(`{"Token": "token", "Url": "` + originResourceServer.URL + `/url"}`)) + + injector.Inject(tc.responseWriter, r, sendData) + + require.Equal(t, tc.handlerMustBeCalled, handlerIsCalled, "a partial file must not be saved") + } +} + +func TestSuccessfullRequest(t 
*testing.T) { + content := []byte("result") + originResourceServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Length", strconv.Itoa(len(content))) + w.Write(content) + })) + + uploadHandler := &fakeUploadHandler{ + handler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + }, + } + + injector := NewInjector() + injector.SetUploadHandler(uploadHandler) + + response := makeRequest(injector, `{"Token": "token", "Url": "`+originResourceServer.URL+`/url"}`) + + require.Equal(t, "/target/upload", uploadHandler.request.URL.Path) + require.Equal(t, int64(6), uploadHandler.request.ContentLength) + + require.Equal(t, content, uploadHandler.body) + + require.Equal(t, 200, response.Code) + require.Equal(t, string(content), response.Body.String()) +} + +func TestIncorrectSendData(t *testing.T) { + response := makeRequest(NewInjector(), "") + + require.Equal(t, 500, response.Code) + require.Equal(t, "Internal server error\n", response.Body.String()) +} + +func TestIncorrectSendDataUrl(t *testing.T) { + response := makeRequest(NewInjector(), `{"Token": "token", "Url": "url"}`) + + require.Equal(t, 500, response.Code) + require.Equal(t, "Internal server error\n", response.Body.String()) +} + +func TestFailedOriginServer(t *testing.T) { + originResourceServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + w.Write([]byte("Not found")) + })) + + uploadHandler := &fakeUploadHandler{ + handler: func(w http.ResponseWriter, r *http.Request) { + require.FailNow(t, "the error response must not be uploaded") + }, + } + + injector := NewInjector() + injector.SetUploadHandler(uploadHandler) + + response := makeRequest(injector, `{"Token": "token", "Url": "`+originResourceServer.URL+`/url"}`) + + require.Equal(t, 404, response.Code) + require.Equal(t, "Not found", response.Body.String()) +} + +func makeRequest(injector *Injector, data string) 
*httptest.ResponseRecorder { + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/target", nil) + + sendData := base64.StdEncoding.EncodeToString([]byte(data)) + injector.Inject(w, r, sendData) + + return w +} diff --git a/workhorse/internal/upstream/routes.go b/workhorse/internal/upstream/routes.go index 07bbd57421e..d39ba845dc5 100644 --- a/workhorse/internal/upstream/routes.go +++ b/workhorse/internal/upstream/routes.go @@ -16,6 +16,7 @@ import ( "gitlab.com/gitlab-org/gitlab/workhorse/internal/builds" "gitlab.com/gitlab-org/gitlab/workhorse/internal/channel" "gitlab.com/gitlab-org/gitlab/workhorse/internal/config" + "gitlab.com/gitlab-org/gitlab/workhorse/internal/dependencyproxy" "gitlab.com/gitlab-org/gitlab/workhorse/internal/git" "gitlab.com/gitlab-org/gitlab/workhorse/internal/helper" "gitlab.com/gitlab-org/gitlab/workhorse/internal/imageresizer" @@ -171,7 +172,7 @@ func (ro *routeEntry) isMatch(cleanedPath string, req *http.Request) bool { return ok } -func buildProxy(backend *url.URL, version string, rt http.RoundTripper, cfg config.Config) http.Handler { +func buildProxy(backend *url.URL, version string, rt http.RoundTripper, cfg config.Config, dependencyProxyInjector *dependencyproxy.Injector) http.Handler { proxier := proxypkg.NewProxy(backend, version, rt) return senddata.SendData( @@ -184,6 +185,7 @@ func buildProxy(backend *url.URL, version string, rt http.RoundTripper, cfg conf artifacts.SendEntry, sendurl.SendURL, imageresizer.NewResizer(cfg), + dependencyProxyInjector, ) } @@ -194,7 +196,8 @@ func buildProxy(backend *url.URL, version string, rt http.RoundTripper, cfg conf func configureRoutes(u *upstream) { api := u.APIClient static := &staticpages.Static{DocumentRoot: u.DocumentRoot, Exclude: staticExclude} - proxy := buildProxy(u.Backend, u.Version, u.RoundTripper, u.Config) + dependencyProxyInjector := dependencyproxy.NewInjector() + proxy := buildProxy(u.Backend, u.Version, u.RoundTripper, u.Config, dependencyProxyInjector) 
cableProxy := proxypkg.NewProxy(u.CableBackend, u.Version, u.CableRoundTripper) assetsNotFoundHandler := NotFoundUnless(u.DevelopmentMode, proxy) @@ -208,7 +211,7 @@ func configureRoutes(u *upstream) { } signingTripper := secret.NewRoundTripper(u.RoundTripper, u.Version) - signingProxy := buildProxy(u.Backend, u.Version, signingTripper, u.Config) + signingProxy := buildProxy(u.Backend, u.Version, signingTripper, u.Config, dependencyProxyInjector) preparers := createUploadPreparers(u.Config) uploadPath := path.Join(u.DocumentRoot, "uploads/tmp") @@ -216,6 +219,8 @@ func configureRoutes(u *upstream) { ciAPIProxyQueue := queueing.QueueRequests("ci_api_job_requests", uploadAccelerateProxy, u.APILimit, u.APIQueueLimit, u.APIQueueTimeout) ciAPILongPolling := builds.RegisterHandler(ciAPIProxyQueue, redis.WatchKey, u.APICILongPollingDuration) + dependencyProxyInjector.SetUploadHandler(upload.BodyUploader(api, signingProxy, preparers.packages)) + // Serve static files or forward the requests defaultUpstream := static.ServeExisting( u.URLPrefix, diff --git a/workhorse/main_test.go b/workhorse/main_test.go index 435e1e0e15d..349e2d78109 100644 --- a/workhorse/main_test.go +++ b/workhorse/main_test.go @@ -950,3 +950,72 @@ func TestHealthChecksUnreachable(t *testing.T) { }) } } + +func TestDependencyProxyInjector(t *testing.T) { + token := "token" + bodyLength := 4096 + expectedBody := strings.Repeat("p", bodyLength) + + testCases := []struct { + desc string + finalizeStatus int + }{ + { + desc: "user downloads the file when the request is successfully finalized", + finalizeStatus: 200, + }, { + desc: "user downloads the file even when the request fails to be finalized", + finalizeStatus: 500, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + originResource := "/origin_resource" + + originResourceServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, originResource, r.URL.String()) + + 
w.Header().Set("Content-Length", strconv.Itoa(bodyLength)) + + io.WriteString(w, expectedBody) + })) + defer originResourceServer.Close() + + originResourceUrl := originResourceServer.URL + originResource + + ts := testhelper.TestServerWithHandler(regexp.MustCompile(`.`), func(w http.ResponseWriter, r *http.Request) { + switch r.URL.String() { + case "/base": + params := `{"Url": "` + originResourceUrl + `", "Token": "` + token + `"}` + w.Header().Set("Gitlab-Workhorse-Send-Data", `send-dependency:`+base64.URLEncoding.EncodeToString([]byte(params))) + case "/base/upload/authorize": + w.Header().Set("Content-Type", api.ResponseContentType) + _, err := fmt.Fprintf(w, `{"TempPath":"%s"}`, scratchDir) + require.NoError(t, err) + case "/base/upload": + w.WriteHeader(tc.finalizeStatus) + default: + t.Fatalf("unexpected request: %s", r.URL) + } + }) + defer ts.Close() + + ws := startWorkhorseServer(ts.URL) + defer ws.Close() + + resp, err := http.DefaultClient.Get(ws.URL + "/base") + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + require.NoError(t, resp.Body.Close()) // Client closes connection + ws.Close() // Wait for server handler to return + + require.Equal(t, 200, resp.StatusCode, "status code") + require.Equal(t, expectedBody, string(body), "response body") + }) + } +}