gitlab-org/gitlab-foss: app/controllers/projects/pipelines_controller.rb

# frozen_string_literal: true
class Projects::PipelinesController < Projects::ApplicationController
include ::Gitlab::Utils::StrongMemoize
include RedisTracking
include ProductAnalyticsTracking
include ProjectStatsRefreshConflictsGuard
include ZuoraCSP
urgency :low, [
:index, :new, :builds, :show, :failures, :create,
:stage, :retry, :dag, :cancel, :test_report,
:charts, :config_variables, :destroy, :status
]
before_action :disable_query_limiting, only: [:create, :retry]
before_action :pipeline, except: [:index, :new, :create, :charts, :config_variables]
before_action :set_pipeline_path, only: [:show]
before_action :authorize_read_pipeline!
before_action :authorize_read_build!, only: [:index, :show]
before_action :authorize_read_ci_cd_analytics!, only: [:charts]
before_action :authorize_create_pipeline!, only: [:new, :create, :config_variables]
before_action :authorize_update_pipeline!, only: [:retry, :cancel]
before_action :ensure_pipeline, only: [:show, :downloadable_artifacts]
before_action :reject_if_build_artifacts_size_refreshing!, only: [:destroy]
before_action do
push_frontend_feature_flag(:pipeline_tabs_vue, @project)
push_frontend_feature_flag(:run_pipeline_graphql, @project)
end
# Will be removed with https://gitlab.com/gitlab-org/gitlab/-/issues/225596
before_action :redirect_for_legacy_scope_filter, only: [:index], if: -> { request.format.html? }
around_action :allow_gitaly_ref_name_caching, only: [:index, :show]
track_custom_event :charts,
name: 'p_analytics_pipelines',
action: 'perform_analytics_usage_action',
label: 'redis_hll_counters.analytics.analytics_total_unique_counts_monthly',
destinations: %i[redis_hll snowplow]
track_redis_hll_event :charts, name: 'p_analytics_ci_cd_pipelines', if: -> { should_track_ci_cd_pipelines? }
track_redis_hll_event :charts, name: 'p_analytics_ci_cd_deployment_frequency', if: -> { should_track_ci_cd_deployment_frequency? }
track_redis_hll_event :charts, name: 'p_analytics_ci_cd_lead_time', if: -> { should_track_ci_cd_lead_time? }
track_redis_hll_event :charts, name: 'p_analytics_ci_cd_time_to_restore_service', if: -> { should_track_ci_cd_time_to_restore_service? }
track_redis_hll_event :charts, name: 'p_analytics_ci_cd_change_failure_rate', if: -> { should_track_ci_cd_change_failure_rate? }
wrap_parameters Ci::Pipeline
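
# Interval, in milliseconds, passed to Gitlab::PollingInterval.set_header for the polled JSON endpoints below.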
POLLING_INTERVAL = 10_000
feature_category :continuous_integration, [
:charts, :show, :config_variables, :stage, :cancel, :retry,
:builds, :dag, :failures, :status,
:index, :create, :new, :destroy
]
feature_category :code_testing, [:test_report]
feature_category :build_artifacts, [:downloadable_artifacts]
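
# Lists the project's pipelines filtered by index_params. The HTML format renders the
# pipelines dashboard; the JSON format feeds the polled table together with a limited count.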
def index
@pipelines = Ci::PipelinesFinder
.new(project, current_user, index_params)
.execute
.page(params[:page])
@pipelines_count = limited_pipelines_count(project)
respond_to do |format|
format.html do
enable_runners_availability_section_experiment
end
format.json do
Gitlab::PollingInterval.set_header(response, interval: POLLING_INTERVAL)
render json: {
pipelines: serialize_pipelines,
count: {
all: @pipelines_count
}
}
end
end
end
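
# Builds an unsaved pipeline on the project's default branch to back the "Run pipeline" form.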
def new
@pipeline = project.all_pipelines.new(ref: @project.default_branch)
end
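
# Creates a pipeline from the web form via Ci::CreatePipelineService; [ci skip] directives are
# ignored and invalid pipelines are not persisted (save_on_errors: false). Responds with the
# serialized pipeline or its errors and warnings.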
def create
service_response = Ci::CreatePipelineService
.new(project, current_user, create_params)
.execute(:web, ignore_skip_ci: true, save_on_errors: false)
@pipeline = service_response.payload
respond_to do |format|
format.html do
if service_response.success?
redirect_to project_pipeline_path(project, @pipeline)
else
render 'new', status: :bad_request
end
end
format.json do
if service_response.success?
render json: PipelineSerializer
.new(project: project, current_user: current_user)
.represent(@pipeline),
status: :created
else
render json: { errors: @pipeline.error_messages.map(&:content),
warnings: @pipeline.warning_messages(limit: ::Gitlab::Ci::Warnings::MAX_LIMIT).map(&:content),
total_warnings: @pipeline.warning_messages.length },
status: :bad_request
end
end
end
end
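
# Renders the pipeline details page (HTML) or the serialized pipeline (JSON) used by the polled graph view.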
def show
Gitlab::QueryLimiting.disable!('https://gitlab.com/gitlab-org/gitlab/-/issues/26657')
respond_to do |format|
format.html { render_show }
format.json do
Gitlab::PollingInterval.set_header(response, interval: POLLING_INTERVAL)
render json: PipelineSerializer
.new(project: @project, current_user: @current_user)
.represent(@pipeline, show_represent_params)
end
end
end
def destroy
::Ci::DestroyPipelineService.new(project, current_user).execute(pipeline)
redirect_to project_pipelines_path(project), status: :see_other
end
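
# With the pipeline_tabs_vue flag enabled, the tab actions below (builds, dag, failures, test_report)
# redirect to the corresponding tab of the show page; otherwise they render the legacy show view.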
def builds
if Feature.enabled?(:pipeline_tabs_vue, project)
redirect_to pipeline_path(@pipeline, tab: 'builds')
else
render_show
end
end
def dag
respond_to do |format|
format.html do
if Feature.enabled?(:pipeline_tabs_vue, project)
redirect_to pipeline_path(@pipeline, tab: 'dag')
else
render_show
end
end
format.json do
render json: Ci::DagPipelineSerializer
.new(project: @project, current_user: @current_user)
.represent(@pipeline)
end
end
end
def failures
if Feature.enabled?(:pipeline_tabs_vue, project)
redirect_to pipeline_path(@pipeline, tab: 'failures')
elsif @pipeline.failed_builds.present?
render_show
else
redirect_to pipeline_path(@pipeline)
end
end
def status
render json: PipelineSerializer
.new(project: @project, current_user: @current_user)
.represent_status(@pipeline)
end
def stage
@stage = pipeline.stage(params[:stage])
return not_found unless @stage
render json: StageSerializer
.new(project: @project, current_user: @current_user)
.represent(@stage, details: true, retried: params[:retried])
end
def retry
# Check for access before execution to allow for async execution while still returning access results
access_response = ::Ci::RetryPipelineService.new(@project, current_user).check_access(pipeline)
if access_response.error?
response = { json: { errors: [access_response.message] }, status: access_response.http_status }
else
response = { json: {}, status: :no_content }
::Ci::RetryPipelineWorker.perform_async(pipeline.id, current_user.id) # rubocop:disable CodeReuse/Worker
end
respond_to do |format|
format.json do
render response
end
end
end
def cancel
pipeline.cancel_running
respond_to do |format|
format.html do
redirect_back_or_default default: project_pipelines_path(project)
end
format.json { head :no_content }
end
end
def test_report
respond_to do |format|
format.html do
if Feature.enabled?(:pipeline_tabs_vue, project)
redirect_to pipeline_path(@pipeline, tab: 'test_report')
else
render_show
end
end
format.json do
render json: TestReportSerializer
.new(current_user: @current_user)
.represent(pipeline_test_report, project: project, details: true)
end
end
end
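
# Resolves CI config variables for the given SHA via Ci::ListConfigVariablesService, using the external
# CI config project when one is configured; responds with 204 No Content when the service returns no result.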
def config_variables
respond_to do |format|
format.json do
project = @project.uses_external_project_ci_config? ? @project.ci_config_external_project : @project
result = Ci::ListConfigVariablesService.new(project, current_user).execute(params[:sha])
result.nil? ? head(:no_content) : render(json: result)
end
end
end
def downloadable_artifacts
render json: Ci::DownloadableArtifactSerializer.new(
project: project,
current_user: current_user
).represent(@pipeline)
end
private
def serialize_pipelines
PipelineSerializer
.new(project: @project, current_user: @current_user)
.with_pagination(request, response)
.represent(@pipelines, disable_coverage: true, preload: true)
end
def render_show
@stages = @pipeline.stages
respond_to do |format|
format.html do
render 'show'
end
end
end
def show_represent_params
{ grouped: true, expanded: params[:expanded].to_a.map(&:to_i) }
end
def create_params
params.require(:pipeline).permit(:ref, variables_attributes: %i[key variable_type secret_value])
end
def ensure_pipeline
render_404 unless pipeline
end
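
# Permanently redirects legacy ?scope=running|pending requests to the equivalent ?status= filter.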
def redirect_for_legacy_scope_filter
return unless %w[running pending].include?(params[:scope])
redirect_to url_for(safe_params.except(:scope).merge(status: safe_params[:scope])), status: :moved_permanently
end
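
# Memoizes the requested pipeline: the latest pipeline on the given ref when `latest` is requested
# without an id, otherwise the pipeline found by id with builds and users preloaded, presented for
# the current user.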
# rubocop: disable CodeReuse/ActiveRecord
def pipeline
@pipeline ||= if params[:id].blank? && params[:latest]
latest_pipeline
else
project
.all_pipelines
.includes(builds: :tags, user: :status)
.find(params[:id])
.present(current_user: current_user)
end
end
# rubocop: enable CodeReuse/ActiveRecord
def set_pipeline_path
@pipeline_path ||= if params[:id].blank? && params[:latest]
latest_project_pipelines_path(@project, params['ref'])
else
project_pipeline_path(@project, @pipeline)
end
end
def latest_pipeline
@project.latest_pipeline(params['ref'])
&.present(current_user: current_user)
end
def disable_query_limiting
# Also see https://gitlab.com/gitlab-org/gitlab/-/issues/20785
Gitlab::QueryLimiting.disable!('https://gitlab.com/gitlab-org/gitlab/-/issues/20784')
end
def authorize_update_pipeline!
return access_denied! unless can?(current_user, :update_pipeline, @pipeline)
end
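
# Counts pipelines with a capped query so the dashboard never issues an unbounded COUNT(*) on
# projects with many pipelines; once the cap is reached the view renders the count as "1000+".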
def limited_pipelines_count(project, scope = nil)
finder = Ci::PipelinesFinder.new(project, current_user, index_params.merge(scope: scope))
view_context.limited_counter_with_delimiter(finder.execute)
end
def pipeline_test_report
strong_memoize(:pipeline_test_report) do
@pipeline.test_reports.tap do |reports|
reports.with_attachment! if params[:scope] == 'with_attachment'
end
end
end
def index_params
params.permit(:scope, :username, :ref, :status, :source)
end
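
# Enrolls the project's root namespace in the runners_availability_section experiment, but only for
# signed-in users who can create pipelines on a project that has no pipelines and no CI configuration yet.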
def enable_runners_availability_section_experiment
return unless current_user
return unless can?(current_user, :create_pipeline, project)
return if @pipelines_count.to_i > 0
return if helpers.has_gitlab_ci?(project)
experiment(:runners_availability_section, namespace: project.root_ancestor) do |e|
e.candidate {}
e.publish_to_database
end
end
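
# Chart-specific predicates used by the track_redis_hll_event declarations above to scope tracking
# to the chart selected via the ?chart= parameter.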
def should_track_ci_cd_pipelines?
params[:chart].blank? || params[:chart] == 'pipelines'
end
def should_track_ci_cd_deployment_frequency?
params[:chart] == 'deployment-frequency'
end
def should_track_ci_cd_lead_time?
params[:chart] == 'lead-time'
end
def should_track_ci_cd_time_to_restore_service?
params[:chart] == 'time-to-restore-service'
end
def should_track_ci_cd_change_failure_rate?
params[:chart] == 'change-failure-rate'
end
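
# Namespace and project sources consumed by the ProductAnalyticsTracking concern when recording the
# tracked charts event.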
def tracking_namespace_source
project.namespace
end
def tracking_project_source
project
end
end
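
# EE-specific behaviour is prepended when the corresponding extension module exists.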
Projects::PipelinesController.prepend_mod_with('Projects::PipelinesController')