# frozen_string_literal: true
2016-04-12 13:57:22 -04:00
class Projects::PipelinesController < Projects::ApplicationController
include ::Gitlab::Utils::StrongMemoize
include Analytics::UniqueVisitsHelper
before_action :whitelist_query_limiting, only: [:create, :retry]
before_action :pipeline, except: [:index, :new, :create, :charts]
before_action :set_pipeline_path, only: [:show]
2016-04-12 10:16:39 -04:00
before_action :authorize_read_pipeline!
before_action :authorize_read_build!, only: [:index]
2016-04-12 10:16:39 -04:00
before_action :authorize_create_pipeline!, only: [:new, :create]
before_action :authorize_update_pipeline!, only: [:retry, :cancel]
before_action do
push_frontend_feature_flag(:filter_pipelines_search, project, default_enabled: true)
push_frontend_feature_flag(:dag_pipeline_tab, project, default_enabled: true)
push_frontend_feature_flag(:pipelines_security_report_summary, project)
push_frontend_feature_flag(:new_pipeline_form)
end
before_action :ensure_pipeline, only: [:show]
2016-04-12 10:16:39 -04:00
# Will be removed with https://gitlab.com/gitlab-org/gitlab/-/issues/225596
before_action :redirect_for_legacy_scope_filter, only: [:index], if: -> { request.format.html? }
around_action :allow_gitaly_ref_name_caching, only: [:index, :show]
track_unique_visits :charts, target_id: 'p_analytics_pipelines'
wrap_parameters Ci::Pipeline
2017-05-06 12:45:46 -04:00
POLLING_INTERVAL = 10_000
# GET .../pipelines
#
# Lists the project's pipelines filtered by +index_params+, paginated
# 30 per page. The JSON variant sets a polling-interval header and
# includes a capped total count alongside the serialized page.
def index
  finder = Ci::PipelinesFinder.new(project, current_user, index_params)
  @pipelines = finder.execute.page(params[:page]).per(30)

  @pipelines_count = limited_pipelines_count(project)

  respond_to do |format|
    format.html
    format.json do
      Gitlab::PollingInterval.set_header(response, interval: POLLING_INTERVAL)

      render json: {
        pipelines: serialize_pipelines,
        count: { all: @pipelines_count }
      }
    end
  end
end
# GET .../pipelines/new
#
# Builds an unsaved pipeline on the project's default branch to back
# the "Run pipeline" form.
def new
  @pipeline = project.all_pipelines.new(ref: @project.default_branch)
end
# POST .../pipelines
#
# Creates a pipeline for the requested ref via Ci::CreatePipelineService
# (:web source, [skip ci] ignored, invalid pipelines not persisted).
# Responds with a redirect / :created payload on success, or the form /
# errors with :bad_request on failure.
def create
  @pipeline = Ci::CreatePipelineService
    .new(project, current_user, create_params)
    .execute(:web, ignore_skip_ci: true, save_on_errors: false)

  respond_to do |format|
    format.html do
      if @pipeline.created_successfully?
        redirect_to project_pipeline_path(project, @pipeline)
      else
        render 'new', status: :bad_request
      end
    end

    format.json do
      if @pipeline.created_successfully?
        serializer = PipelineSerializer.new(project: project, current_user: current_user)
        render json: serializer.represent(@pipeline), status: :created
      else
        render json: @pipeline.errors, status: :bad_request
      end
    end
  end
end
# GET .../pipelines/:id
#
# HTML renders the pipeline page; JSON returns the serialized pipeline
# (options from +show_represent_params+) with a polling-interval header.
def show
  Gitlab::QueryLimiting.whitelist('https://gitlab.com/gitlab-org/gitlab/-/issues/26657')

  respond_to do |format|
    format.html
    format.json do
      Gitlab::PollingInterval.set_header(response, interval: POLLING_INTERVAL)

      serializer = PipelineSerializer.new(project: @project, current_user: @current_user)
      render json: serializer.represent(@pipeline, show_represent_params)
    end
  end
end
# DELETE .../pipelines/:id
#
# Destroys the pipeline through Ci::DestroyPipelineService and sends
# the browser back to the pipelines index (303 so the follow-up is GET).
def destroy
  ::Ci::DestroyPipelineService.new(project, current_user).execute(pipeline)

  redirect_to project_pipelines_path(project), status: :see_other
end
# GET .../pipelines/:id/builds — renders the standard pipeline 'show'
# page (see render_show).
def builds
render_show
end
# GET .../pipelines/:id/dag
#
# HTML renders the pipeline page; JSON returns the DAG serialization of
# the pipeline via Ci::DagPipelineSerializer.
def dag
  respond_to do |format|
    format.html { render_show }
    format.json do
      serializer = Ci::DagPipelineSerializer.new(project: @project, current_user: @current_user)
      render json: serializer.represent(@pipeline)
    end
  end
end
# GET .../pipelines/:id/failures
#
# Renders the pipeline page when the pipeline has failed builds;
# otherwise redirects to the plain pipeline path.
def failures
  return render_show if @pipeline.failed_builds.present?

  redirect_to pipeline_path(@pipeline)
end
# GET .../pipelines/:id/status — compact JSON status payload for the
# pipeline (represent_status, not the full serialization).
def status
  serializer = PipelineSerializer.new(project: @project, current_user: @current_user)

  render json: serializer.represent_status(@pipeline)
end
# GET .../pipelines/:id/stage.json
#
# Serializes the legacy stage named by params[:stage] (with details and
# the retried flag); 404 when the pipeline has no such stage.
def stage
  @stage = pipeline.legacy_stage(params[:stage])
  return not_found unless @stage

  serializer = StageSerializer.new(project: @project, current_user: @current_user)
  render json: serializer.represent(@stage, details: true, retried: params[:retried])
end
# TODO: This endpoint is used by mini-pipeline-graph
# TODO: This endpoint should be migrated to `stage.json`
def stage_ajax
  found_stage = pipeline.legacy_stage(params[:stage])
  return not_found unless found_stage

  # The partial reads the stage from @stage.
  @stage = found_stage
  render json: { html: view_to_html_string('projects/pipelines/_stage') }
end
# POST .../pipelines/:id/retry — retries the pipeline's failed builds
# (retry_failed) on behalf of the current user.
def retry
  pipeline.retry_failed(current_user)

  respond_to do |format|
    format.html { redirect_back_or_default default: project_pipelines_path(project) }
    format.json { head :no_content }
  end
end
# POST .../pipelines/:id/cancel — cancels the pipeline's running jobs
# (cancel_running).
def cancel
  pipeline.cancel_running

  respond_to do |format|
    format.html { redirect_back_or_default default: project_pipelines_path(project) }
    format.json { head :no_content }
  end
end
# GET .../pipelines/charts
#
# Assembles the datasets for the CI/CD analytics page: week/month/year
# charts, pipeline-duration chart, and total/success/failed counts.
def charts
  @charts = {
    week: Gitlab::Ci::Charts::WeekChart.new(project),
    month: Gitlab::Ci::Charts::MonthChart.new(project),
    year: Gitlab::Ci::Charts::YearChart.new(project),
    pipeline_times: Gitlab::Ci::Charts::PipelineTime.new(project)
  }

  @counts = {
    total: @project.all_pipelines.count(:all),
    success: @project.all_pipelines.success.count(:all),
    failed: @project.all_pipelines.failed.count(:all)
  }
end
# GET .../pipelines/:id/test_report
#
# HTML renders the pipeline page; JSON returns the pipeline's test
# report, optionally restricted to cases with attachments (see
# pipeline_test_report).
def test_report
  respond_to do |format|
    format.html { render 'show' }

    format.json do
      render json: TestReportSerializer
        .new(current_user: @current_user)
        .represent(pipeline_test_report, project: project, details: true)
    end
  end
end
private
# Serializes @pipelines for the index JSON response, wiring pagination
# headers onto the current response.
def serialize_pipelines
  serializer = PipelineSerializer.new(project: @project, current_user: @current_user)

  serializer
    .with_pagination(request, response)
    .represent(@pipelines, disable_coverage: true, preload: true)
end
# Renders the pipeline 'show' template; shared by the tab-style actions
# (builds, dag, failures, ...) that reuse the same HTML page.
def render_show
  respond_to do |format|
    format.html { render 'show' }
  end
end
# Serializer options for `show`: grouped rendering plus the list of
# group indexes to expand, taken from params[:expanded].
def show_represent_params
  expanded_ids = params[:expanded].to_a.map(&:to_i)

  { grouped: true, expanded: expanded_ids }
end
# Strong parameters for `create`: the target ref and any manually
# supplied pipeline variables.
def create_params
  params.require(:pipeline).permit(:ref, variables_attributes: %i[key variable_type secret_value])
end
# 404s the request when no pipeline could be resolved (neither an
# explicit id nor a latest-for-ref lookup); guards `show`.
def ensure_pipeline
render_404 unless pipeline
end
# Permanently redirects legacy index URLs using ?scope=running|pending
# to the equivalent ?status= filter.
def redirect_for_legacy_scope_filter
return unless %w[running pending].include?(params[:scope])
redirect_to url_for(safe_params.except(:scope).merge(status: safe_params[:scope])), status: :moved_permanently
end
# rubocop: disable CodeReuse/ActiveRecord
# Memoized lookup of the pipeline addressed by the request.
#
# With no :id but with :latest, resolves the newest pipeline for the
# given ref; otherwise finds by id (raising RecordNotFound -> 404 on a
# miss) and wraps the record in its presenter.
def pipeline
  @pipeline ||=
    if params[:id].blank? && params[:latest]
      latest_pipeline
    else
      project
        .all_pipelines
        .includes(builds: :tags, user: :status)
        .find_by!(id: params[:id])
        .present(current_user: current_user)
    end
end
# rubocop: enable CodeReuse/ActiveRecord
# Memoizes the canonical path of the pipeline being shown, handling
# both the /latest route (per-ref) and the /:id route.
def set_pipeline_path
  @pipeline_path ||=
    if params[:id].blank? && params[:latest]
      latest_project_pipelines_path(@project, params['ref'])
    else
      project_pipeline_path(@project, @pipeline)
    end
end
# Newest pipeline for params['ref'], wrapped in its presenter, or nil
# when the ref has no pipelines.
def latest_pipeline
  @project.latest_pipeline_for_ref(params['ref'])&.present(current_user: current_user)
end
# Registers create/retry as known query-count offenders so
# Gitlab::QueryLimiting does not flag them.
def whitelist_query_limiting
# Also see https://gitlab.com/gitlab-org/gitlab-foss/issues/42343
Gitlab::QueryLimiting.whitelist('https://gitlab.com/gitlab-org/gitlab-foss/issues/42339')
end
# Denies access unless the current user holds :update_pipeline on this
# pipeline; guards retry/cancel.
def authorize_update_pipeline!
return access_denied! unless can?(current_user, :update_pipeline, @pipeline)
end
# Counts pipelines matching the current filters (optionally narrowed to
# +scope+), using the view helper's limited counter so large projects
# get a capped "N+" figure instead of an exact, expensive COUNT.
def limited_pipelines_count(project, scope = nil)
  finder = Ci::PipelinesFinder.new(project, current_user, index_params.merge(scope: scope))

  view_context.limited_counter_with_delimiter(finder.execute)
end
# Memoized test reports for the pipeline; with ?scope=with_attachment,
# restricts the reports to cases that have attachments.
def pipeline_test_report
  strong_memoize(:pipeline_test_report) do
    reports = @pipeline.test_reports
    reports.with_attachment! if params[:scope] == 'with_attachment'

    reports
  end
end
# Strong parameters shared by `index` and limited_pipelines_count.
def index_params
params.permit(:scope, :username, :ref, :status)
end
end
Projects::PipelinesController.prepend_if_ee('EE::Projects::PipelinesController')