2018-09-25 23:45:43 -04:00
|
|
|
# frozen_string_literal: true
|
|
|
|
|
2016-04-12 13:57:22 -04:00
|
|
|
class Projects::PipelinesController < Projects::ApplicationController
|
2019-10-08 05:06:09 -04:00
|
|
|
include ::Gitlab::Utils::StrongMemoize
|
2020-06-22 11:09:27 -04:00
|
|
|
include Analytics::UniqueVisitsHelper
|
2019-10-08 05:06:09 -04:00
|
|
|
|
2018-05-22 08:38:25 -04:00
|
|
|
before_action :whitelist_query_limiting, only: [:create, :retry]
|
2017-03-02 12:57:01 -05:00
|
|
|
before_action :pipeline, except: [:index, :new, :create, :charts]
|
2018-07-27 00:26:13 -04:00
|
|
|
before_action :set_pipeline_path, only: [:show]
|
2016-04-12 10:16:39 -04:00
|
|
|
before_action :authorize_read_pipeline!
|
2019-01-28 07:12:30 -05:00
|
|
|
before_action :authorize_read_build!, only: [:index]
|
2016-04-12 10:16:39 -04:00
|
|
|
before_action :authorize_create_pipeline!, only: [:new, :create]
|
|
|
|
before_action :authorize_update_pipeline!, only: [:retry, :cancel]
|
2019-09-16 20:06:11 -04:00
|
|
|
before_action do
|
2020-06-15 08:08:44 -04:00
|
|
|
push_frontend_feature_flag(:filter_pipelines_search, project, default_enabled: true)
|
2020-07-15 14:09:09 -04:00
|
|
|
push_frontend_feature_flag(:dag_pipeline_tab, project, default_enabled: true)
|
2020-05-20 20:08:06 -04:00
|
|
|
push_frontend_feature_flag(:pipelines_security_report_summary, project)
|
2020-07-22 20:09:43 -04:00
|
|
|
push_frontend_feature_flag(:new_pipeline_form)
|
2019-09-16 20:06:11 -04:00
|
|
|
end
|
2020-03-31 17:08:05 -04:00
|
|
|
before_action :ensure_pipeline, only: [:show]
|
2016-04-12 10:16:39 -04:00
|
|
|
|
2020-07-06 20:08:58 -04:00
|
|
|
# Will be removed with https://gitlab.com/gitlab-org/gitlab/-/issues/225596
|
|
|
|
before_action :redirect_for_legacy_scope_filter, only: [:index], if: -> { request.format.html? }
|
|
|
|
|
2019-04-04 16:22:11 -04:00
|
|
|
around_action :allow_gitaly_ref_name_caching, only: [:index, :show]
|
|
|
|
|
2020-06-22 11:09:27 -04:00
|
|
|
track_unique_visits :charts, target_id: 'p_analytics_pipelines'
|
|
|
|
|
2017-04-24 10:34:53 -04:00
|
|
|
wrap_parameters Ci::Pipeline
|
|
|
|
|
2017-05-06 12:45:46 -04:00
|
|
|
POLLING_INTERVAL = 10_000
|
|
|
|
|
2016-04-12 10:16:39 -04:00
|
|
|
def index
  # Paginated pipeline listing for the project, 30 rows per page.
  @pipelines = Ci::PipelinesFinder
    .new(project, current_user, index_params)
    .execute
    .page(params[:page])
    .per(30)

  # Capped counter (see #limited_pipelines_count) so large projects do not
  # pay for an exact COUNT over all pipelines.
  @pipelines_count = limited_pipelines_count(project)

  respond_to do |format|
    format.html
    format.json do
      Gitlab::PollingInterval.set_header(response, interval: POLLING_INTERVAL)

      render json: {
        pipelines: serialize_pipelines,
        count: { all: @pipelines_count }
      }
    end
  end
end
|
|
|
|
|
|
|
|
def new
  # Unsaved pipeline pre-filled with the default branch for the "Run
  # pipeline" form.
  @pipeline = project.all_pipelines.new(ref: @project.default_branch)
end
|
|
|
|
|
|
|
|
def create
  # Manually triggered (:web source) pipeline; skip-ci markers are ignored
  # and validation failures are surfaced instead of persisted.
  @pipeline = Ci::CreatePipelineService
    .new(project, current_user, create_params)
    .execute(:web, ignore_skip_ci: true, save_on_errors: false)

  respond_to do |format|
    format.html do
      if @pipeline.created_successfully?
        redirect_to project_pipeline_path(project, @pipeline)
      else
        render 'new', status: :bad_request
      end
    end

    format.json do
      if @pipeline.created_successfully?
        render json: PipelineSerializer
                       .new(project: project, current_user: current_user)
                       .represent(@pipeline),
               status: :created
      else
        render json: @pipeline.errors, status: :bad_request
      end
    end
  end
end
|
|
|
|
|
|
|
|
def show
  Gitlab::QueryLimiting.whitelist('https://gitlab.com/gitlab-org/gitlab/-/issues/26657')

  respond_to do |format|
    format.html
    format.json do
      Gitlab::PollingInterval.set_header(response, interval: POLLING_INTERVAL)

      render json: PipelineSerializer
        .new(project: @project, current_user: @current_user)
        .represent(@pipeline, show_represent_params)
    end
  end
end
|
|
|
|
|
2020-01-08 07:07:59 -05:00
|
|
|
def destroy
  ::Ci::DestroyPipelineService.new(project, current_user).execute(pipeline)

  # 303 so the follow-up request after DELETE is a GET.
  redirect_to project_pipelines_path(project), status: :see_other
end
|
|
|
|
|
2016-11-23 09:44:05 -05:00
|
|
|
def builds
  # Builds tab reuses the shared `show` HTML view.
  render_show
end
|
|
|
|
|
2020-05-05 17:09:42 -04:00
|
|
|
def dag
  respond_to do |format|
    format.html { render_show }
    format.json do
      render json: Ci::DagPipelineSerializer
        .new(project: @project, current_user: @current_user)
        .represent(@pipeline)
    end
  end
end
|
|
|
|
|
2017-04-15 21:33:01 -04:00
|
|
|
def failures
  # The failures tab only makes sense when at least one build failed;
  # otherwise send the user back to the pipeline page.
  if @pipeline.failed_builds.blank?
    redirect_to pipeline_path(@pipeline)
  else
    render_show
  end
end
|
|
|
|
|
2017-03-03 01:59:25 -05:00
|
|
|
def status
  # Lightweight status-only payload, used for polling.
  render json: PipelineSerializer
    .new(project: @project, current_user: @current_user)
    .represent_status(@pipeline)
end
|
|
|
|
|
2016-12-19 07:20:17 -05:00
|
|
|
def stage
  @stage = pipeline.legacy_stage(params[:stage])
  return not_found unless @stage

  render json: StageSerializer
    .new(project: @project, current_user: @current_user)
    .represent(@stage, details: true, retried: params[:retried])
end
|
|
|
|
|
2018-05-01 17:56:33 -04:00
|
|
|
# TODO: This endpoint is used by mini-pipeline-graph
# TODO: This endpoint should be migrated to `stage.json`
def stage_ajax
  @stage = pipeline.legacy_stage(params[:stage])
  return not_found unless @stage

  render json: { html: view_to_html_string('projects/pipelines/_stage') }
end
|
|
|
|
|
2016-04-12 10:16:39 -04:00
|
|
|
def retry
  # Delegates to Ci::Pipeline#retry_failed on behalf of the current user.
  pipeline.retry_failed(current_user)

  respond_to do |format|
    format.html { redirect_back_or_default default: project_pipelines_path(project) }
    format.json { head :no_content }
  end
end
|
|
|
|
|
|
|
|
def cancel
  # Delegates to Ci::Pipeline#cancel_running.
  pipeline.cancel_running

  respond_to do |format|
    format.html { redirect_back_or_default default: project_pipelines_path(project) }
    format.json { head :no_content }
  end
end
|
|
|
|
|
2017-03-02 12:57:01 -05:00
|
|
|
def charts
  # Pipeline trend charts over several time windows.
  @charts = {
    week: Gitlab::Ci::Charts::WeekChart.new(project),
    month: Gitlab::Ci::Charts::MonthChart.new(project),
    year: Gitlab::Ci::Charts::YearChart.new(project),
    pipeline_times: Gitlab::Ci::Charts::PipelineTime.new(project)
  }

  # Aggregate pipeline counters for the summary section.
  @counts = {
    total: @project.all_pipelines.count(:all),
    success: @project.all_pipelines.success.count(:all),
    failed: @project.all_pipelines.failed.count(:all)
  }
end
|
|
|
|
|
2019-10-08 05:06:09 -04:00
|
|
|
def test_report
  respond_to do |format|
    format.html { render 'show' }

    format.json do
      render json: TestReportSerializer
        .new(current_user: @current_user)
        .represent(pipeline_test_report, project: project, details: true)
    end
  end
end
|
|
|
|
|
2016-04-12 10:16:39 -04:00
|
|
|
private

# JSON representation of @pipelines for the index action, with pagination
# headers written to the response.
def serialize_pipelines
  PipelineSerializer
    .new(project: @project, current_user: @current_user)
    .with_pagination(request, response)
    .represent(@pipelines, disable_coverage: true, preload: true)
end
|
|
|
|
|
2017-04-15 21:33:01 -04:00
|
|
|
# Shared HTML renderer for the tabbed pipeline views (builds, failures, dag).
def render_show
  respond_to do |format|
    format.html { render 'show' }
  end
end
|
|
|
|
|
2019-01-30 06:09:55 -05:00
|
|
|
def show_represent_params
  # `expanded` may be absent (nil.to_a => []); coerce each value to an
  # integer before handing the options to the serializer.
  expanded_ids = params[:expanded].to_a.map(&:to_i)

  { grouped: true, expanded: expanded_ids }
end
|
|
|
|
|
2016-05-09 19:26:13 -04:00
|
|
|
# Strong parameters for #create: the ref plus nested pipeline variables.
def create_params
  params.require(:pipeline).permit(:ref, variables_attributes: %i[key variable_type secret_value])
end
|
|
|
|
|
2020-03-31 17:08:05 -04:00
|
|
|
# 404 when neither an id lookup nor the `latest` fallback found a pipeline.
def ensure_pipeline
  return if pipeline

  render_404
end
|
|
|
|
|
2020-07-06 20:08:58 -04:00
|
|
|
# Permanently redirects legacy `scope=running|pending` index URLs to the
# equivalent `status` filter.
def redirect_for_legacy_scope_filter
  return unless %w[running pending].include?(params[:scope])

  redirect_to url_for(safe_params.except(:scope).merge(status: safe_params[:scope])), status: :moved_permanently
end
|
|
|
|
|
2018-08-27 11:31:01 -04:00
|
|
|
# rubocop: disable CodeReuse/ActiveRecord
# Memoized pipeline lookup. With `latest` (and no id) it resolves the most
# recent pipeline for the requested ref; otherwise it loads by id, raising
# RecordNotFound (-> 404) when missing.
def pipeline
  @pipeline ||=
    if params[:id].blank? && params[:latest]
      latest_pipeline
    else
      project
        .all_pipelines
        .includes(builds: :tags, user: :status)
        .find_by!(id: params[:id])
        .present(current_user: current_user)
    end
end
# rubocop: enable CodeReuse/ActiveRecord
|
2016-04-13 11:05:17 -04:00
|
|
|
|
2018-07-27 00:26:13 -04:00
|
|
|
# Path used by the show view; mirrors the `latest` vs id branching in
# #pipeline so the rendered URL matches how the record was resolved.
def set_pipeline_path
  @pipeline_path ||=
    if params[:id].blank? && params[:latest]
      latest_project_pipelines_path(@project, params['ref'])
    else
      project_pipeline_path(@project, @pipeline)
    end
end
|
|
|
|
|
|
|
|
# Most recent pipeline for the requested ref, presented for the current
# user. May be nil when the ref has no pipelines — hence the safe navigation.
def latest_pipeline
  @project.latest_pipeline_for_ref(params['ref'])
          &.present(current_user: current_user)
end
|
|
|
|
|
2018-01-15 10:21:04 -05:00
|
|
|
# Exempts #create/#retry from the query-count limiter while the linked
# issue is open.
def whitelist_query_limiting
  # Also see https://gitlab.com/gitlab-org/gitlab-foss/issues/42343
  Gitlab::QueryLimiting.whitelist('https://gitlab.com/gitlab-org/gitlab-foss/issues/42339')
end
|
2018-05-15 04:18:22 -04:00
|
|
|
|
|
|
|
def authorize_update_pipeline!
  access_denied! unless can?(current_user, :update_pipeline, @pipeline)
end
|
# Counts pipelines for the given scope using a LIMITed subquery so large
# projects show "1000+" instead of running an exact (and expensive) COUNT.
def limited_pipelines_count(project, scope = nil)
  finder = Ci::PipelinesFinder.new(project, current_user, index_params.merge(scope: scope))

  view_context.limited_counter_with_delimiter(finder.execute)
end
|
2019-10-08 05:06:09 -04:00
|
|
|
|
|
|
|
# Memoized test report for the pipeline; when `scope=with_attachment` the
# reports relation is narrowed to entries with attachments.
def pipeline_test_report
  strong_memoize(:pipeline_test_report) do
    @pipeline.test_reports.tap do |reports|
      reports.with_attachment! if params[:scope] == 'with_attachment'
    end
  end
end
|
2020-05-06 20:11:11 -04:00
|
|
|
|
|
|
|
# Permitted filter parameters for the index listing and its counters.
def index_params
  params.permit(:scope, :username, :ref, :status)
end
|
2016-04-12 10:16:39 -04:00
|
|
|
end
|
2019-09-13 09:26:31 -04:00
|
|
|
|
|
|
|
# Allow the EE module (when present) to extend this controller.
Projects::PipelinesController.prepend_if_ee('EE::Projects::PipelinesController')
|