Add latest changes from gitlab-org/gitlab@master
parent 7b69070a74
commit d378fdaa60
70 changed files with 2822 additions and 108 deletions
@@ -374,7 +374,7 @@
     - "jest.config.{base,integration,unit}.js"
     - "config/helpers/**/*.js"
     - "vendor/assets/javascripts/**/*"
-    - "{,ee}/app/assets/**/*.graphql"
+    - "{,ee/,jh/}app/assets/**/*.graphql"

     ################
     # Shared rules #
@@ -1 +1 @@
-07a81867515f925c6108033d2bb05aebfcf14bd0
+5376d026d3b602a4227d767a10e8d7ef76208d33
@@ -132,7 +132,8 @@ export default {
   <div class="gl-display-flex gl-flex-direction-column gl-md-flex-direction-row">
     <path-navigation
       v-if="displayPathNavigation"
-      class="js-path-navigation gl-w-full gl-pb-2"
+      data-testid="vsa-path-navigation"
+      class="gl-w-full gl-pb-2"
       :loading="isLoading || isLoadingStage"
       :stages="pathNavigationData"
      :selected-stage="selectedStage"
@@ -61,7 +61,8 @@ export default {
 <template>
   <div class="gl-mt-3 gl-py-2 gl-px-3 bg-gray-light border-top border-bottom">
     <filter-bar
-      class="js-filter-bar filtered-search-box gl-display-flex gl-mb-2 gl-mr-3 gl-border-none"
+      data-testid="vsa-filter-bar"
+      class="filtered-search-box gl-display-flex gl-mb-2 gl-mr-3 gl-border-none"
       :group-path="groupPath"
     />
     <div
app/finders/packages/helm/packages_finder.rb (new file, 30 additions)
@@ -0,0 +1,30 @@
# frozen_string_literal: true

module Packages
  module Helm
    class PackagesFinder
      include ::Packages::FinderHelper

      MAX_PACKAGES_COUNT = 300

      def initialize(project, channel)
        @project = project
        @channel = channel
      end

      def execute
        if @channel.blank? || @project.blank?
          return ::Packages::Package.none
        end

        pkg_files = ::Packages::PackageFile.for_helm_with_channel(@project, @channel)

        # we use a subquery to get unique packages and at the same time
        # order + limit them.
        ::Packages::Package
          .limit_recent(MAX_PACKAGES_COUNT)
          .id_in(pkg_files.select(:package_id))
      end
    end
  end
end
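A minimal usage sketch for the new finder. The calling code is not part of this hunk; the project lookup and the `stable` channel below are illustrative assumptions.

```ruby
# Hypothetical caller - not part of this commit's diff.
project = Project.find_by_full_path('group/app')   # assumed lookup
packages = ::Packages::Helm::PackagesFinder.new(project, 'stable').execute

# Returns at most MAX_PACKAGES_COUNT (300) packages that have a file
# published to the requested Helm channel; blank inputs yield Package.none.
packages.each { |package| puts package.name }
```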
@@ -13,7 +13,10 @@ module Resolvers
     alias_method :list, :object

     def resolve(**args)
-      filter_params = item_filters(args[:filters]).merge(board_id: list.board.id, id: list.id)
+      filters = item_filters(args[:filters])
+      mutually_exclusive_milestone_args!(filters)
+
+      filter_params = filters.merge(board_id: list.board.id, id: list.id)
       service = ::Boards::Issues::ListService.new(list.board.resource_parent, context[:current_user], filter_params)
       pagination_connections = Gitlab::Graphql::Pagination::Keyset::Connection.new(service.execute)

@@ -26,5 +29,13 @@ module Resolvers
     def self.complexity_multiplier(args)
       0.005
     end
+
+    private
+
+    def mutually_exclusive_milestone_args!(filters)
+      if filters[:milestone_title] && filters[:milestone_wildcard_id]
+        raise ::Gitlab::Graphql::Errors::ArgumentError, 'Incompatible arguments: milestoneTitle, milestoneWildcardId.'
+      end
+    end
   end
 end
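A quick sketch of what the new guard does with a filters hash. The argument values below are made up; the keys match those used in the method above.

```ruby
# Hypothetical filters hash as built by item_filters(args[:filters]).
filters = { milestone_title: '14.3', milestone_wildcard_id: 'UPCOMING' }

# mutually_exclusive_milestone_args!(filters) would raise:
#   Gitlab::Graphql::Errors::ArgumentError,
#   "Incompatible arguments: milestoneTitle, milestoneWildcardId."
```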
@@ -24,6 +24,10 @@ module Types
                  as: :issue_types,
                  description: 'Filter by the given issue types.',
                  required: false
+
+        argument :milestone_wildcard_id, ::Types::MilestoneWildcardIdEnum,
+                 required: false,
+                 description: 'Filter by milestone ID wildcard.'
       end
     end
   end
@@ -93,6 +93,24 @@ class Packages::PackageFile < ApplicationRecord
   skip_callback :commit, :after, :remove_previously_stored_file, if: :execute_move_in_object_storage?
   after_commit :move_in_object_storage, if: :execute_move_in_object_storage?

+  # Returns the most recent package files for *each* of the given packages.
+  # The order is not guaranteed.
+  def self.most_recent_for(packages, extra_join: nil, extra_where: nil)
+    cte_name = :packages_cte
+    cte = Gitlab::SQL::CTE.new(cte_name, packages.select(:id))
+
+    package_files = ::Packages::PackageFile.limit_recent(1)
+                                           .where(arel_table[:package_id].eq(Arel.sql("#{cte_name}.id")))
+
+    package_files = package_files.joins(extra_join) if extra_join
+    package_files = package_files.where(extra_where) if extra_where
+
+    query = select('finder.*')
+            .from([Arel.sql(cte_name.to_s), package_files.arel.lateral.as('finder')])
+
+    query.with(cte.to_arel)
+  end
+
   def download_path
     Gitlab::Routing.url_helpers.download_project_package_file_path(project, self)
   end
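A usage sketch for `most_recent_for`. It mirrors the Helm index presenter call further down in this commit; the `channel` value is illustrative.

```ruby
# One most-recent package file per package, restricted to a Helm channel.
files = ::Packages::PackageFile.most_recent_for(
  project.packages,                      # any Packages::Package relation
  extra_join: :helm_file_metadatum,
  extra_where: { packages_helm_file_metadata: { channel: 'stable' } }
)
```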
@@ -8,11 +8,12 @@ module Packages
      API_VERSION = 'v1'
      CHANNEL = 'channel'
      INDEX_YAML_SUFFIX = "/#{CHANNEL}/index.yaml"
+     EMPTY_HASH = {}.freeze

-     def initialize(project, project_id_param, package_files)
-       @project = project
+     def initialize(project_id_param, channel, packages)
        @project_id_param = project_id_param
-       @package_files = package_files
+       @channel = channel
+       @packages = packages
      end

      def api_version

@@ -20,10 +21,12 @@ module Packages
      end

      def entries
-       files = @package_files.preload_helm_file_metadata
+       return EMPTY_HASH unless @channel.present?
+
        result = Hash.new { |h, k| h[k] = [] }

-       files.find_each do |package_file|
+       # this .each is safe as we have max 300 objects
+       most_recent_package_files.each do |package_file|
          name = package_file.helm_metadata['name']
          result[name] << package_file.helm_metadata.merge({
            'created' => package_file.created_at.utc.strftime('%Y-%m-%dT%H:%M:%S.%NZ'),

@@ -48,6 +51,16 @@ module Packages
          'contextPath' => path.delete_suffix(INDEX_YAML_SUFFIX)
        }
      end
+
+     private
+
+     def most_recent_package_files
+       ::Packages::PackageFile.most_recent_for(
+         @packages,
+         extra_join: :helm_file_metadatum,
+         extra_where: { packages_helm_file_metadata: { channel: @channel } }
+       ).preload_helm_file_metadata
+     end
    end
  end
end
@@ -2,8 +2,9 @@

 module ServicePing
   class SubmitService
-    PRODUCTION_URL = 'https://version.gitlab.com/usage_data'
-    STAGING_URL = 'https://gitlab-services-version-gitlab-com-staging.gs-staging.gitlab.org/usage_data'
+    PRODUCTION_BASE_URL = 'https://version.gitlab.com'
+    STAGING_BASE_URL = 'https://gitlab-services-version-gitlab-com-staging.gs-staging.gitlab.org'
+    USAGE_DATA_PATH = 'usage_data'

     METRICS = %w[leader_issues instance_issues percentage_issues leader_notes instance_notes
                  percentage_notes leader_milestones instance_milestones percentage_milestones

@@ -41,6 +42,10 @@ module ServicePing
       store_metrics(response)
     end

+    def url
+      URI.join(base_url, USAGE_DATA_PATH)
+    end
+
     private

     def submit_payload(usage_data)

@@ -81,12 +86,8 @@ module ServicePing
     end

     # See https://gitlab.com/gitlab-org/gitlab/-/issues/233615 for details
-    def url
-      if Rails.env.production?
-        PRODUCTION_URL
-      else
-        STAGING_URL
-      end
+    def base_url
+      Rails.env.production? ? PRODUCTION_BASE_URL : STAGING_BASE_URL
     end
   end
 end
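A small sketch of the new URL construction, using the constant values copied from the hunk above; the `URI.join` result is shown in a comment.

```ruby
require 'uri'

PRODUCTION_BASE_URL = 'https://version.gitlab.com'
USAGE_DATA_PATH = 'usage_data'

URI.join(PRODUCTION_BASE_URL, USAGE_DATA_PATH).to_s
# => "https://version.gitlab.com/usage_data" (same value as the old PRODUCTION_URL)
```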
@@ -2,7 +2,7 @@
 name: delete_branch_confirmation_modals
 introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/56782
 rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/329052
-milestone: '13.12'
+milestone: '14.3'
 type: development
 group: group::expansion
-default_enabled: false
+default_enabled: true
@@ -0,0 +1,17 @@
# frozen_string_literal: true

class AddPackageFileIdChannelIdxToPackagesHelmFileMetadata < ActiveRecord::Migration[6.1]
  include Gitlab::Database::MigrationHelpers

  disable_ddl_transaction!

  INDEX_NAME = 'index_packages_helm_file_metadata_on_pf_id_and_channel'

  def up
    add_concurrent_index :packages_helm_file_metadata, [:package_file_id, :channel], name: INDEX_NAME
  end

  def down
    remove_concurrent_index :packages_helm_file_metadata, [:package_file_id, :channel], name: INDEX_NAME
  end
end
@@ -0,0 +1,17 @@
# frozen_string_literal: true

class AddInstallableHelmPkgsIdxToPackages < ActiveRecord::Migration[6.1]
  include Gitlab::Database::MigrationHelpers

  disable_ddl_transaction!

  INDEX_NAME = 'idx_installable_helm_pkgs_on_project_id_id'

  def up
    add_concurrent_index :packages_packages, [:project_id, :id], name: INDEX_NAME
  end

  def down
    remove_concurrent_index :packages_packages, [:project_id, :id], name: INDEX_NAME
  end
end
@@ -0,0 +1,17 @@
# frozen_string_literal: true

class AddIndexPackageIdIdOnPackageFiles < Gitlab::Database::Migration[1.0]
  disable_ddl_transaction!

  INDEX_NAME = 'index_packages_package_files_on_package_id_id'

  def up
    disable_statement_timeout do
      execute "CREATE INDEX CONCURRENTLY #{INDEX_NAME} ON packages_package_files (package_id, id)" unless index_exists_by_name?(:package_package_files, INDEX_NAME)
    end
  end

  def down
    remove_concurrent_index_by_name :packages_package_files, INDEX_NAME
  end
end
@@ -0,0 +1,15 @@
# frozen_string_literal: true

class ChangeCiMinutesAdditionalPackTextLimit < Gitlab::Database::Migration[1.0]
  disable_ddl_transaction!

  def up
    remove_text_limit :ci_minutes_additional_packs, :purchase_xid
    add_text_limit :ci_minutes_additional_packs, :purchase_xid, 50
  end

  def down
    remove_text_limit :ci_minutes_additional_packs, :purchase_xid
    add_text_limit :ci_minutes_additional_packs, :purchase_xid, 32, validate: false
  end
end
@@ -0,0 +1,222 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
class FinalizeCiBuildsBigintConversion < Gitlab::Database::Migration[1.0]
|
||||
disable_ddl_transaction!
|
||||
|
||||
TABLE_NAME = 'ci_builds'
|
||||
PK_INDEX_NAME = 'index_ci_builds_on_converted_id'
|
||||
|
||||
SECONDARY_INDEXES = [
|
||||
{
|
||||
original_name: :index_ci_builds_on_commit_id_artifacts_expired_at_and_id,
|
||||
temporary_name: :index_ci_builds_on_commit_id_expire_at_and_converted_id,
|
||||
columns: [:commit_id, :artifacts_expire_at, :id_convert_to_bigint],
|
||||
options: {
|
||||
where: "type::text = 'Ci::Build'::text
|
||||
AND (retried = false OR retried IS NULL)
|
||||
AND (name::text = ANY (ARRAY['sast'::character varying::text,
|
||||
'secret_detection'::character varying::text,
|
||||
'dependency_scanning'::character varying::text,
|
||||
'container_scanning'::character varying::text,
|
||||
'dast'::character varying::text]))"
|
||||
}
|
||||
},
|
||||
{
|
||||
original_name: :index_ci_builds_on_project_id_and_id,
|
||||
temporary_name: :index_ci_builds_on_project_and_converted_id,
|
||||
columns: [:project_id, :id_convert_to_bigint],
|
||||
options: {}
|
||||
},
|
||||
{
|
||||
original_name: :index_ci_builds_on_runner_id_and_id_desc,
|
||||
temporary_name: :index_ci_builds_on_runner_id_and_converted_id_desc,
|
||||
columns: [:runner_id, :id_convert_to_bigint],
|
||||
options: { order: { id_convert_to_bigint: :desc } }
|
||||
},
|
||||
{
|
||||
original_name: :index_for_resource_group,
|
||||
temporary_name: :index_ci_builds_on_resource_group_and_converted_id,
|
||||
columns: [:resource_group_id, :id_convert_to_bigint],
|
||||
options: { where: 'resource_group_id IS NOT NULL' }
|
||||
},
|
||||
{
|
||||
original_name: :index_security_ci_builds_on_name_and_id_parser_features,
|
||||
temporary_name: :index_security_ci_builds_on_name_and_converted_id_parser,
|
||||
columns: [:name, :id_convert_to_bigint],
|
||||
options: {
|
||||
where: "(name::text = ANY (ARRAY['container_scanning'::character varying::text,
|
||||
'dast'::character varying::text,
|
||||
'dependency_scanning'::character varying::text,
|
||||
'license_management'::character varying::text,
|
||||
'sast'::character varying::text,
|
||||
'secret_detection'::character varying::text,
|
||||
'coverage_fuzzing'::character varying::text,
|
||||
'license_scanning'::character varying::text])
|
||||
) AND type::text = 'Ci::Build'::text"
|
||||
}
|
||||
}
|
||||
].freeze
|
||||
|
||||
MANUAL_INDEX_NAMES = {
|
||||
original_name: :index_ci_builds_runner_id_pending_covering,
|
||||
temporary_name: :index_ci_builds_runner_id_and_converted_id_pending_covering
|
||||
}.freeze
|
||||
|
||||
REFERENCING_FOREIGN_KEYS = [
|
||||
[:ci_build_needs, :build_id, :cascade, 'fk_rails_'],
|
||||
[:ci_build_pending_states, :build_id, :cascade, 'fk_rails_'],
|
||||
[:ci_build_report_results, :build_id, :cascade, 'fk_rails_'],
|
||||
[:ci_build_trace_chunks, :build_id, :cascade, 'fk_rails_'],
|
||||
[:ci_build_trace_metadata, :build_id, :cascade, 'fk_rails_'],
|
||||
[:ci_builds_runner_session, :build_id, :cascade, 'fk_rails_'],
|
||||
[:ci_builds_metadata, :build_id, :cascade, 'fk_'],
|
||||
[:ci_job_artifacts, :job_id, :cascade, 'fk_rails_'],
|
||||
[:ci_job_variables, :job_id, :cascade, 'fk_rails_'],
|
||||
[:ci_pending_builds, :build_id, :cascade, 'fk_rails_'],
|
||||
[:ci_resources, :build_id, :nullify, 'fk_'],
|
||||
[:ci_running_builds, :build_id, :cascade, 'fk_rails_'],
|
||||
[:ci_sources_pipelines, :source_job_id, :cascade, 'fk_'],
|
||||
[:ci_unit_test_failures, :build_id, :cascade, 'fk_'],
|
||||
[:dast_scanner_profiles_builds, :ci_build_id, :cascade, 'fk_'],
|
||||
[:dast_site_profiles_builds, :ci_build_id, :cascade, 'fk_'],
|
||||
[:pages_deployments, :ci_build_id, :nullify, 'fk_rails_'],
|
||||
[:requirements_management_test_reports, :build_id, :nullify, 'fk_rails_'],
|
||||
[:security_scans, :build_id, :cascade, 'fk_rails_'],
|
||||
[:terraform_state_versions, :ci_build_id, :nullify, 'fk_']
|
||||
].freeze
|
||||
|
||||
def up
|
||||
ensure_batched_background_migration_is_finished(
|
||||
job_class_name: 'CopyColumnUsingBackgroundMigrationJob',
|
||||
table_name: TABLE_NAME,
|
||||
column_name: 'id',
|
||||
job_arguments: [%w[id stage_id], %w[id_convert_to_bigint stage_id_convert_to_bigint]]
|
||||
)
|
||||
|
||||
# Remove this upfront since this table is being dropped, and doesn't need to be migrated
|
||||
if foreign_key_exists?(:dep_ci_build_trace_sections, TABLE_NAME, column: :build_id)
|
||||
remove_foreign_key(:dep_ci_build_trace_sections, TABLE_NAME, column: :build_id)
|
||||
end
|
||||
|
||||
# Remove this unexpected FK if it exists - https://gitlab.com/gitlab-com/gl-infra/production/-/issues/5531#note_676576081
|
||||
if foreign_key_exists?(:ci_resources, TABLE_NAME, column: :build_id, name: 'fk_rails_e169a8e3d5')
|
||||
remove_foreign_key(:ci_resources, TABLE_NAME, column: :build_id, name: 'fk_rails_e169a8e3d5')
|
||||
end
|
||||
|
||||
swap_columns
|
||||
end
|
||||
|
||||
def down
|
||||
swap_columns
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def swap_columns
|
||||
# Copy existing indexes from the original column to the new column
|
||||
create_indexes
|
||||
# Copy existing FKs from the original column to the new column
|
||||
create_referencing_foreign_keys
|
||||
|
||||
# Remove existing FKs from the referencing tables, so we don't have to lock on them when we drop the existing PK
|
||||
replace_referencing_foreign_keys
|
||||
|
||||
with_lock_retries(raise_on_exhaustion: true) do
|
||||
quoted_table_name = quote_table_name(TABLE_NAME)
|
||||
|
||||
# Swap the original and new column names
|
||||
temporary_name = 'id_tmp'
|
||||
execute "ALTER TABLE #{quoted_table_name} RENAME COLUMN #{quote_column_name(:id)} TO #{quote_column_name(temporary_name)}"
|
||||
execute "ALTER TABLE #{quoted_table_name} RENAME COLUMN #{quote_column_name(:id_convert_to_bigint)} TO #{quote_column_name(:id)}"
|
||||
execute "ALTER TABLE #{quoted_table_name} RENAME COLUMN #{quote_column_name(temporary_name)} TO #{quote_column_name(:id_convert_to_bigint)}"
|
||||
|
||||
# Reset the function so PG drops the plan cache for the incorrect integer type
|
||||
function_name = Gitlab::Database::UnidirectionalCopyTrigger.on_table(TABLE_NAME)
|
||||
.name([:id, :stage_id], [:id_convert_to_bigint, :stage_id_convert_to_bigint])
|
||||
execute "ALTER FUNCTION #{quote_table_name(function_name)} RESET ALL"
|
||||
|
||||
# Swap defaults of the two columns, and change ownership of the sequence to the new id
|
||||
execute "ALTER SEQUENCE ci_builds_id_seq OWNED BY #{TABLE_NAME}.id"
|
||||
change_column_default TABLE_NAME, :id, -> { "nextval('ci_builds_id_seq'::regclass)" }
|
||||
change_column_default TABLE_NAME, :id_convert_to_bigint, 0
|
||||
|
||||
# Swap the PK constraint from the original column to the new column
|
||||
# We deliberately don't CASCADE here because the old FKs should be removed already
|
||||
execute "ALTER TABLE #{quoted_table_name} DROP CONSTRAINT ci_builds_pkey"
|
||||
rename_index TABLE_NAME, PK_INDEX_NAME, 'ci_builds_pkey'
|
||||
execute "ALTER TABLE #{quoted_table_name} ADD CONSTRAINT ci_builds_pkey PRIMARY KEY USING INDEX ci_builds_pkey"
|
||||
|
||||
# Remove old column indexes and change new column indexes to have the original names
|
||||
rename_secondary_indexes # rubocop:disable Migration/WithLockRetriesDisallowedMethod
|
||||
end
|
||||
end
|
||||
|
||||
def create_indexes
|
||||
add_concurrent_index TABLE_NAME, :id_convert_to_bigint, unique: true, name: PK_INDEX_NAME
|
||||
|
||||
SECONDARY_INDEXES.each do |index_definition|
|
||||
options = index_definition[:options]
|
||||
options[:name] = index_definition[:temporary_name]
|
||||
|
||||
add_concurrent_index(TABLE_NAME, index_definition[:columns], options)
|
||||
end
|
||||
|
||||
unless index_name_exists?(TABLE_NAME, MANUAL_INDEX_NAMES[:temporary_name])
|
||||
execute(<<~SQL)
|
||||
CREATE INDEX CONCURRENTLY #{MANUAL_INDEX_NAMES[:temporary_name]}
|
||||
ON ci_builds (runner_id, id_convert_to_bigint) INCLUDE (project_id)
|
||||
WHERE status::text = 'pending'::text AND type::text = 'Ci::Build'::text
|
||||
SQL
|
||||
end
|
||||
end
|
||||
|
||||
def rename_secondary_indexes
|
||||
(SECONDARY_INDEXES + [MANUAL_INDEX_NAMES]).each do |index_definition|
|
||||
remove_index(TABLE_NAME, name: index_definition[:original_name]) # rubocop:disable Migration/RemoveIndex
|
||||
rename_index(TABLE_NAME, index_definition[:temporary_name], index_definition[:original_name])
|
||||
end
|
||||
end
|
||||
|
||||
def create_referencing_foreign_keys
|
||||
REFERENCING_FOREIGN_KEYS.each do |(from_table, column, on_delete, prefix)|
|
||||
# Don't attempt to create the FK if one already exists from the table to the new column
|
||||
# The check in `add_concurrent_foreign_key` already checks for this, but it looks for the foreign key
|
||||
# with the new name only (containing the `_tmp` suffix).
|
||||
#
|
||||
# Since we might partially rename FKs and re-run the migration, we also have to check and see if a FK exists
|
||||
# on those columns that might not match the `_tmp` name.
|
||||
next if foreign_key_exists?(from_table, TABLE_NAME, column: column, primary_key: :id_convert_to_bigint)
|
||||
|
||||
temporary_name = "#{concurrent_foreign_key_name(from_table, column, prefix: prefix)}_tmp"
|
||||
|
||||
add_concurrent_foreign_key(
|
||||
from_table,
|
||||
TABLE_NAME,
|
||||
column: column,
|
||||
target_column: :id_convert_to_bigint,
|
||||
name: temporary_name,
|
||||
on_delete: on_delete,
|
||||
reverse_lock_order: true)
|
||||
end
|
||||
end
|
||||
|
||||
def replace_referencing_foreign_keys
|
||||
REFERENCING_FOREIGN_KEYS.each do |(from_table, column, _, prefix)|
|
||||
existing_name = concurrent_foreign_key_name(from_table, column, prefix: prefix)
|
||||
|
||||
# Don't attempt to replace the FK unless it exists and points at the original column.
|
||||
# This could happen if the migration is re-run due to failing midway.
|
||||
next unless foreign_key_exists?(from_table, TABLE_NAME, column: column, primary_key: :id, name: existing_name)
|
||||
|
||||
with_lock_retries do
|
||||
# Explicitly lock table in order of parent, child to attempt to avoid deadlocks
|
||||
execute "LOCK TABLE #{TABLE_NAME}, #{from_table} IN ACCESS EXCLUSIVE MODE"
|
||||
|
||||
temporary_name = "#{existing_name}_tmp"
|
||||
|
||||
remove_foreign_key(from_table, TABLE_NAME, column: column, primary_key: :id, name: existing_name)
|
||||
rename_constraint(from_table, temporary_name, existing_name)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
db/schema_migrations/20210831134840 (new file, 1 addition)
@@ -0,0 +1 @@
6cad93bd4c086a60164c3cb5c42737194c4b7b20c1ea9f4c6d998b7c3a8918b5

db/schema_migrations/20210831135249 (new file, 1 addition)
@@ -0,0 +1 @@
78b14e92c91e7ccb11b7b37e89e8e55749cf109b0fa5ce06e4e396a2ac743f1c

db/schema_migrations/20210907211557 (new file, 1 addition)
@@ -0,0 +1 @@
387dcbda7c3b32050298d8a679361a17916a66d0ab686211f0d1a0dc708c4a74

db/schema_migrations/20210909184349 (new file, 1 addition)
@@ -0,0 +1 @@
6be3a6f8f748d8f02e2d42cd1a5103ec8bd5c17097d9e440b152685fc8d6ff83

db/schema_migrations/20210910141043 (new file, 1 addition)
@@ -0,0 +1 @@
3885d2fca4166e71610cd957f1c5a703118cbb5ba47a8d494eb4e017fe499a7d
@@ -11306,7 +11306,7 @@ CREATE TABLE ci_build_trace_metadata (
 );

 CREATE TABLE ci_builds (
-    id integer NOT NULL,
+    id_convert_to_bigint integer DEFAULT 0 NOT NULL,
     status character varying,
     finished_at timestamp without time zone,
     trace text,

@@ -11351,7 +11351,7 @@ CREATE TABLE ci_builds (
     waiting_for_resource_at timestamp with time zone,
     processed boolean,
     scheduling_type smallint,
-    id_convert_to_bigint bigint DEFAULT 0 NOT NULL,
+    id bigint NOT NULL,
     stage_id bigint,
     CONSTRAINT check_1e2fbd1b39 CHECK ((lock_version IS NOT NULL))
 );

@@ -11587,7 +11587,7 @@ CREATE TABLE ci_minutes_additional_packs (
     expires_at date,
     number_of_minutes integer NOT NULL,
     purchase_xid text,
-    CONSTRAINT check_d7ef254af0 CHECK ((char_length(purchase_xid) <= 32))
+    CONSTRAINT check_d7ef254af0 CHECK ((char_length(purchase_xid) <= 50))
 );

 CREATE SEQUENCE ci_minutes_additional_packs_id_seq

@@ -24007,6 +24007,8 @@ CREATE UNIQUE INDEX idx_environment_merge_requests_unique_index ON deployment_me

 CREATE INDEX idx_geo_con_rep_updated_events_on_container_repository_id ON geo_container_repository_updated_events USING btree (container_repository_id);

+CREATE INDEX idx_installable_helm_pkgs_on_project_id_id ON packages_packages USING btree (project_id, id);
+
 CREATE INDEX idx_installable_npm_pkgs_on_project_id_name_version_id ON packages_packages USING btree (project_id, name, version, id) WHERE ((package_type = 2) AND (status = 0));

 CREATE INDEX idx_issues_on_health_status_not_null ON issues USING btree (health_status) WHERE (health_status IS NOT NULL);

@@ -25817,6 +25819,8 @@ CREATE INDEX index_packages_events_on_package_id ON packages_events USING btree

 CREATE INDEX index_packages_helm_file_metadata_on_channel ON packages_helm_file_metadata USING btree (channel);

+CREATE INDEX index_packages_helm_file_metadata_on_pf_id_and_channel ON packages_helm_file_metadata USING btree (package_file_id, channel);
+
 CREATE INDEX index_packages_maven_metadata_on_package_id_and_path ON packages_maven_metadata USING btree (package_id, path);

 CREATE INDEX index_packages_maven_metadata_on_path ON packages_maven_metadata USING btree (path);

@@ -25837,6 +25841,8 @@ CREATE INDEX index_packages_package_files_on_file_store ON packages_package_file

 CREATE INDEX index_packages_package_files_on_package_id_and_file_name ON packages_package_files USING btree (package_id, file_name);

+CREATE INDEX index_packages_package_files_on_package_id_id ON packages_package_files USING btree (package_id, id);
+
 CREATE INDEX index_packages_package_files_on_verification_state ON packages_package_files USING btree (verification_state);

 CREATE INDEX index_packages_packages_on_creator_id ON packages_packages USING btree (creator_id);

@@ -27534,9 +27540,6 @@ ALTER TABLE ONLY releases
 ALTER TABLE ONLY geo_event_log
     ADD CONSTRAINT fk_4a99ebfd60 FOREIGN KEY (repositories_changed_event_id) REFERENCES geo_repositories_changed_events(id) ON DELETE CASCADE;

-ALTER TABLE ONLY dep_ci_build_trace_sections
-    ADD CONSTRAINT fk_4ebe41f502 FOREIGN KEY (build_id) REFERENCES ci_builds(id) ON DELETE CASCADE;
-
 ALTER TABLE ONLY alert_management_alerts
     ADD CONSTRAINT fk_51ab4b6089 FOREIGN KEY (prometheus_alert_id) REFERENCES prometheus_alerts(id) ON DELETE CASCADE;

@@ -17529,6 +17529,7 @@ Field that are available while modifying the custom mapping attributes for an HT
 | <a id="boardissueinputiterationwildcardid"></a>`iterationWildcardId` | [`IterationWildcardId`](#iterationwildcardid) | Filter by iteration ID wildcard. |
 | <a id="boardissueinputlabelname"></a>`labelName` | [`[String]`](#string) | Filter by label name. |
 | <a id="boardissueinputmilestonetitle"></a>`milestoneTitle` | [`String`](#string) | Filter by milestone title. |
+| <a id="boardissueinputmilestonewildcardid"></a>`milestoneWildcardId` | [`MilestoneWildcardId`](#milestonewildcardid) | Filter by milestone ID wildcard. |
 | <a id="boardissueinputmyreactionemoji"></a>`myReactionEmoji` | [`String`](#string) | Filter by reaction emoji applied by the current user. Wildcard values "NONE" and "ANY" are supported. |
 | <a id="boardissueinputnot"></a>`not` | [`NegatedBoardIssueInput`](#negatedboardissueinput) | List of negated arguments. |
 | <a id="boardissueinputreleasetag"></a>`releaseTag` | [`String`](#string) | Filter by release tag. |

@@ -17709,6 +17710,7 @@ Represents an escalation rule.
 | <a id="negatedboardissueinputiterationwildcardid"></a>`iterationWildcardId` | [`NegatedIterationWildcardId`](#negatediterationwildcardid) | Filter by iteration ID wildcard. |
 | <a id="negatedboardissueinputlabelname"></a>`labelName` | [`[String]`](#string) | Filter by label name. |
 | <a id="negatedboardissueinputmilestonetitle"></a>`milestoneTitle` | [`String`](#string) | Filter by milestone title. |
+| <a id="negatedboardissueinputmilestonewildcardid"></a>`milestoneWildcardId` | [`MilestoneWildcardId`](#milestonewildcardid) | Filter by milestone ID wildcard. |
 | <a id="negatedboardissueinputmyreactionemoji"></a>`myReactionEmoji` | [`String`](#string) | Filter by reaction emoji applied by the current user. Wildcard values "NONE" and "ANY" are supported. |
 | <a id="negatedboardissueinputreleasetag"></a>`releaseTag` | [`String`](#string) | Filter by release tag. |
 | <a id="negatedboardissueinputtypes"></a>`types` | [`[IssueType!]`](#issuetype) | Filter by the given issue types. |
@@ -177,11 +177,10 @@ This ensures that our list isn't mistakenly removed by another auto generation o
 the `.rubocop_todo.yml`. This also allows us greater visibility into the exceptions
 which are currently being resolved.

-One way to generate the initial list is to run the `todo` auto generation,
-with `exclude limit` set to a high number.
+One way to generate the initial list is to run the Rake task `rubocop:todo:generate`:

 ```shell
-bundle exec rubocop --auto-gen-config --auto-gen-only-exclude --exclude-limit=100000
+bundle exec rake rubocop:todo:generate
 ```

 You can then move the list from the freshly generated `.rubocop_todo.yml` for the Cop being actively
@@ -4,11 +4,17 @@ group: Database
 info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
 ---

-# Adding foreign key constraint to an existing column
+# Add a foreign key constraint to an existing column

-Foreign keys help ensure consistency between related database tables. The current database review process **always** encourages you to add [foreign keys](../foreign_keys.md) when creating tables that reference records from other tables.
+Foreign keys ensure consistency between related database tables. The current database review process **always** encourages you to add [foreign keys](../foreign_keys.md) when creating tables that reference records from other tables.

-Starting with Rails version 4, Rails includes migration helpers to add foreign key constraints to database tables. Before Rails 4, the only way for ensuring some level of consistency was the [`dependent`](https://guides.rubyonrails.org/association_basics.html#options-for-belongs-to-dependent) option within the association definition. Ensuring data consistency on the application level could fail in some unfortunate cases, so we might end up with inconsistent data in the table. This is mostly affecting older tables, where we simply didn't have the framework support to ensure consistency on the database level. These data inconsistencies can easily cause unexpected application behavior or bugs.
+Starting with Rails version 4, Rails includes migration helpers to add foreign key constraints
+to database tables. Before Rails 4, the only way for ensuring some level of consistency was the
+[`dependent`](https://guides.rubyonrails.org/association_basics.html#options-for-belongs-to-dependent)
+option in the association definition. Ensuring data consistency on the application level could fail
+in some unfortunate cases, so we might end up with inconsistent data in the table. This mostly affects
+older tables, where we didn't have the framework support to ensure consistency on the database level.
+These data inconsistencies can cause unexpected application behavior or bugs.

 Adding a foreign key to an existing database column requires database structure changes and potential data changes. In case the table is in use, we should always assume that there is inconsistent data.

@@ -45,7 +51,7 @@ class Email < ActiveRecord::Base
 end
 ```

-Problem: when the user is removed, the email records related to the removed user will stay in the `emails` table:
+Problem: when the user is removed, the email records related to the removed user stays in the `emails` table:

 ```ruby
 user = User.find(1)

@@ -83,11 +89,13 @@ Avoid using the `add_foreign_key` constraint more than once per migration file,

 #### Data migration to fix existing records

-The approach here depends on the data volume and the cleanup strategy. If we can easily find "invalid" records by doing a simple database query and the record count is not that high, then the data migration can be executed within a Rails migration.
+The approach here depends on the data volume and the cleanup strategy. If we can find "invalid"
+records by doing a database query and the record count is not high, then the data migration can
+be executed in a Rails migration.

 In case the data volume is higher (>1000 records), it's better to create a background migration. If unsure, please contact the database team for advice.

-Example for cleaning up records in the `emails` table within a database migration:
+Example for cleaning up records in the `emails` table in a database migration:

 ```ruby
 class RemoveRecordsWithoutUserFromEmailsTable < Gitlab::Database::Migration[1.0]

@@ -112,7 +120,7 @@ end

 ### Validate the foreign key

-Validating the foreign key will scan the whole table and make sure that each relation is correct.
+Validating the foreign key scans the whole table and makes sure that each relation is correct.

 NOTE:
 When using [background migrations](../background_migrations.md), foreign key validation should happen in the next GitLab release.
@@ -6,7 +6,10 @@ info: To determine the technical writer assigned to the Stage/Group associated w

 # Constraints naming conventions

-The most common option is to let Rails pick the name for database constraints and indexes or let PostgreSQL use the defaults (when applicable). However, when needing to define custom names in Rails or working in Go applications where no ORM is used, it is important to follow strict naming conventions to improve consistency and discoverability.
+The most common option is to let Rails pick the name for database constraints and indexes or let
+PostgreSQL use the defaults (when applicable). However, when defining custom names in Rails, or
+working in Go applications where no ORM is used, it is important to follow strict naming conventions
+to improve consistency and discoverability.

 The table below describes the naming conventions for custom PostgreSQL constraints.
 The intent is not to retroactively change names in existing databases but rather ensure consistency of future changes.
@@ -19,7 +19,7 @@ Database reviewers are domain experts who have substantial experience with datab
 A database review is required whenever an application update [touches the database](../database_review.md#general-process).

 The database reviewer is tasked with reviewing the database specific updates and
-making sure that any queries or modifications will perform without issues
+making sure that any queries or modifications perform without issues
 at the scale of GitLab.com.

 For more information on the database review process, check the [database review guidelines](../database_review.md).

@@ -72,7 +72,7 @@ topics and use cases. The most frequently required during database reviewing are
 - [Avoiding downtime in migrations](../avoiding_downtime_in_migrations.md).
 - [SQL guidelines](../sql.md) for working with SQL queries.

-## How to apply for becoming a database maintainer
+## How to apply to become a database maintainer

 Once a database reviewer feels confident on switching to a database maintainer,
 they can update their [team profile](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/master/data/team.yml)
doc/development/database/efficient_in_operator_queries.md (new file, 948 additions)
@@ -0,0 +1,948 @@
---
stage: Enablement
group: Database
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---

# Efficient `IN` operator queries

This document describes a technique for building efficient ordered database queries with the `IN`
SQL operator and the usage of a GitLab utility module to help apply the technique.

NOTE:
The described technique makes heavy use of
[keyset pagination](pagination_guidelines.md#keyset-pagination).
It's advised to get familiar with the topic first.

## Motivation

In GitLab, many domain objects like `Issue` live under nested hierarchies of projects and groups.
To fetch nested database records for domain objects at the group-level,
we often perform queries with the `IN` SQL operator.
We are usually interested in ordering the records by some attributes
and limiting the number of records using `ORDER BY` and `LIMIT` clauses for performance.
Pagination may be used to fetch subsequent records.

Example tasks requiring querying nested domain objects from the group level:

- Show first 20 issues by creation date or due date from the group `gitlab-org`.
- Show first 20 merge_requests by merged at date from the group `gitlab-com`.

Unfortunately, ordered group-level queries typically perform badly
as their executions require heavy I/O, memory, and computations.
Let's do an in-depth examination of executing one such query.

### Performance problems with `IN` queries

Consider the task of fetching the twenty oldest created issues
from the group `gitlab-org` with the following query:

```sql
SELECT "issues".*
FROM "issues"
WHERE "issues"."project_id" IN
    (SELECT "projects"."id"
     FROM "projects"
     WHERE "projects"."namespace_id" IN
         (SELECT traversal_ids[array_length(traversal_ids, 1)] AS id
          FROM "namespaces"
          WHERE (traversal_ids @> ('{9970}'))))
ORDER BY "issues"."created_at" ASC,
         "issues"."id" ASC
LIMIT 20
```

NOTE:
|
||||
For pagination, ordering by the `created_at` column is not enough,
|
||||
we must add the `id` column as a
|
||||
[tie-breaker](pagination_performance_guidelines.md#tie-breaker-column).
|
||||
|
||||
The execution of the query can be largely broken down into three steps:
|
||||
|
||||
1. The database accesses both `namespaces` and `projects` tables
|
||||
to find all projects from all groups in the group hierarchy.
|
||||
1. The database retrieves `issues` records for each project causing heavy disk I/O.
|
||||
Ideally, an appropriate index configuration should optimize this process.
|
||||
1. The database sorts the `issues` rows in memory by `created_at` and returns `LIMIT 20` rows to
|
||||
the end-user. For large groups, this final step requires both large memory and CPU resources.
|
||||
|
||||
<details>
|
||||
<summary>Expand this sentence to see the execution plan for this DB query.</summary>
|
||||
<pre><code>
|
||||
Limit (cost=90170.07..90170.12 rows=20 width=1329) (actual time=967.597..967.607 rows=20 loops=1)
|
||||
Buffers: shared hit=239127 read=3060
|
||||
I/O Timings: read=336.879
|
||||
-> Sort (cost=90170.07..90224.02 rows=21578 width=1329) (actual time=967.596..967.603 rows=20 loops=1)
|
||||
Sort Key: issues.created_at, issues.id
|
||||
Sort Method: top-N heapsort Memory: 74kB
|
||||
Buffers: shared hit=239127 read=3060
|
||||
I/O Timings: read=336.879
|
||||
-> Nested Loop (cost=1305.66..89595.89 rows=21578 width=1329) (actual time=4.709..797.659 rows=241534 loops=1)
|
||||
Buffers: shared hit=239121 read=3060
|
||||
I/O Timings: read=336.879
|
||||
-> HashAggregate (cost=1305.10..1360.22 rows=5512 width=4) (actual time=4.657..5.370 rows=1528 loops=1)
|
||||
Group Key: projects.id
|
||||
Buffers: shared hit=2597
|
||||
-> Nested Loop (cost=576.76..1291.32 rows=5512 width=4) (actual time=2.427..4.244 rows=1528 loops=1)
|
||||
Buffers: shared hit=2597
|
||||
-> HashAggregate (cost=576.32..579.06 rows=274 width=25) (actual time=2.406..2.447 rows=265 loops=1)
|
||||
Group Key: namespaces.traversal_ids[array_length(namespaces.traversal_ids, 1)]
|
||||
Buffers: shared hit=334
|
||||
-> Bitmap Heap Scan on namespaces (cost=141.62..575.63 rows=274 width=25) (actual time=1.933..2.330 rows=265 loops=1)
|
||||
Recheck Cond: (traversal_ids @> '{9970}'::integer[])
|
||||
Heap Blocks: exact=243
|
||||
Buffers: shared hit=334
|
||||
-> Bitmap Index Scan on index_namespaces_on_traversal_ids (cost=0.00..141.55 rows=274 width=0) (actual time=1.897..1.898 rows=265 loops=1)
|
||||
Index Cond: (traversal_ids @> '{9970}'::integer[])
|
||||
Buffers: shared hit=91
|
||||
-> Index Only Scan using index_projects_on_namespace_id_and_id on projects (cost=0.44..2.40 rows=20 width=8) (actual time=0.004..0.006 rows=6 loops=265)
|
||||
Index Cond: (namespace_id = (namespaces.traversal_ids)[array_length(namespaces.traversal_ids, 1)])
|
||||
Heap Fetches: 51
|
||||
Buffers: shared hit=2263
|
||||
-> Index Scan using index_issues_on_project_id_and_iid on issues (cost=0.57..10.57 rows=544 width=1329) (actual time=0.114..0.484 rows=158 loops=1528)
|
||||
Index Cond: (project_id = projects.id)
|
||||
Buffers: shared hit=236524 read=3060
|
||||
I/O Timings: read=336.879
|
||||
Planning Time: 7.750 ms
|
||||
Execution Time: 967.973 ms
|
||||
(36 rows)
|
||||
</code></pre>
|
||||
</details>
|
||||
|
||||
The performance of the query depends on the number of rows in the database.
|
||||
On average, we can say the following:
|
||||
|
||||
- Number of groups in the group-hierarchy: less than 1 000
|
||||
- Number of projects: less than 5 000
|
||||
- Number of issues: less than 100 000
|
||||
|
||||
From the list, it's apparent that the number of `issues` records has
|
||||
the largest impact on the performance.
|
||||
As per normal usage, we can say that the number of issue records grows
|
||||
at a faster rate than the `namespaces` and the `projects` records.
|
||||
|
||||
This problem affects most of our group-level features where records are listed
|
||||
in a specific order, such as group-level issues, merge requests pages, and APIs.
|
||||
For very large groups the database queries can easily time out, causing HTTP 500 errors.
|
||||
|
||||
## Optimizing ordered `IN` queries
|
||||
|
||||
In the talk
|
||||
["How to teach an elephant to dance rock'n'roll"](https://www.youtube.com/watch?v=Ha38lcjVyhQ),
|
||||
Maxim Boguk demonstrated a technique to optimize a special class of ordered `IN` queries,
|
||||
such as our ordered group-level queries.
|
||||
|
||||
A typical ordered `IN` query may look like this:
|
||||
|
||||
```sql
|
||||
SELECT t.* FROM t
|
||||
WHERE t.fkey IN (value_set)
|
||||
ORDER BY t.pkey
|
||||
LIMIT N;
|
||||
```
|
||||
|
||||
Here's the key insight used in the technique: we need at most `|value_set| + N` record lookups,
|
||||
rather than retrieving all records satisfying the condition `t.fkey IN value_set` (`|value_set|`
|
||||
is the number of values in `value_set`).
|
||||
|
||||
We adopted and generalized the technique for use in GitLab by implementing utilities in the
|
||||
`Gitlab::Pagination::Keyset::InOperatorOptimization` class to facilitate building efficient `IN`
|
||||
queries.
|
||||
|
||||
### Requirements
|
||||
|
||||
The technique is not a drop-in replacement for the existing group-level queries using `IN` operator.
|
||||
The technique can only optimize `IN` queries that satisfy the following requirements:
|
||||
|
||||
- `LIMIT` is present, which usually means that the query is paginated
|
||||
(offset or keyset pagination).
|
||||
- The column used with the `IN` query and the columns in the `ORDER BY`
|
||||
clause are covered with a database index. The columns in the index must be
|
||||
in the following order: `column_for_the_in_query`, `order by column 1`, and
|
||||
`order by column 2`.
|
||||
- The columns in the `ORDER BY` clause are distinct
|
||||
(the combination of the columns uniquely identifies one particular column in the table).
|
||||
|
||||
WARNING:
|
||||
This technique will not improve the performance of the `COUNT(*)` queries.
|
||||
|
||||
## The `InOperatorOptimization` module
|
||||
|
||||
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67352) in GitLab 14.3.
|
||||
|
||||
The `Gitlab::Pagination::Keyset::InOperatorOptimization` module implements utilities for applying a generalized version of
|
||||
the efficient `IN` query technique described in the previous section.
|
||||
|
||||
To build optimized, ordered `IN` queries that meet [the requirements](#requirements),
|
||||
use the utility class `QueryBuilder` from the module.
|
||||
|
||||
NOTE:
|
||||
The generic keyset pagination module introduced in the merge request
|
||||
[51481](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/51481)
|
||||
plays a fundamental role in the generalized implementation of the technique
|
||||
in `Gitlab::Pagination::Keyset::InOperatorOptimization`.
|
||||
|
||||
### Basic usage of `QueryBuilder`
|
||||
|
||||
To illustrate a basic usage, we will build a query that
|
||||
fetches 20 issues with the oldest `created_at` from the group `gitlab-org`.
|
||||
|
||||
The following ActiveRecord query would produce a query similar to
|
||||
[the unoptimized query](#performance-problems-with-in-queries) that we examined earlier:
|
||||
|
||||
```ruby
|
||||
scope = Issue
|
||||
.where(project_id: Group.find(9970).all_projects.select(:id)) # `gitlab-org` group and its subgroups
|
||||
.order(:created_at, :id)
|
||||
.limit(20)
|
||||
```
|
||||
|
||||
Instead, use the query builder `InOperatorOptimization::QueryBuilder` to produce an optimized
|
||||
version:
|
||||
|
||||
```ruby
|
||||
scope = Issue.order(:created_at, :id)
|
||||
array_scope = Group.find(9970).all_projects.select(:id)
|
||||
array_mapping_scope = -> (id_expression) { Issue.where(Issue.arel_table[:project_id].eq(id_expression)) }
|
||||
finder_query = -> (created_at_expression, id_expression) { Issue.where(Issue.arel_table[:id].eq(id_expression)) }
|
||||
|
||||
Gitlab::Pagination::Keyset::InOperatorOptimization::QueryBuilder.new(
|
||||
scope: scope,
|
||||
array_scope: array_scope,
|
||||
array_mapping_scope: array_mapping_scope,
|
||||
finder_query: finder_query
|
||||
).execute.limit(20)
|
||||
```
|
||||
|
||||
- `scope` represents the original `ActiveRecord::Relation` object without the `IN` query. The
|
||||
relation should define an order which must be supported by the
|
||||
[keyset pagination library](keyset_pagination.md#usage).
|
||||
- `array_scope` contains the `ActiveRecord::Relation` object, which represents the original
|
||||
`IN (subquery)`. The select values must contain the columns by which the subquery is "connected"
|
||||
to the main query: the `id` of the project record.
|
||||
- `array_mapping_scope` defines a lambda returning an `ActiveRecord::Relation` object. The lambda
|
||||
matches (`=`) single select values from the `array_scope`. The lambda yields as many
|
||||
arguments as the select values defined in the `array_scope`. The arguments are Arel SQL expressions.
|
||||
- `finder_query` loads the actual record row from the database. It must also be a lambda, where
|
||||
the order by column expressions is available for locating the record. In this example, the
|
||||
yielded values are `created_at` and `id` SQL expressions. Finding a record is very fast via the
|
||||
primary key, so we don't use the `created_at` value.
|
||||
|
||||
The following database index on the `issues` table must be present
|
||||
to make the query execute efficiently:
|
||||
|
||||
```sql
|
||||
"idx_issues_on_project_id_and_created_at_and_id" btree (project_id, created_at, id)
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>Expand this sentence to see the SQL query.</summary>
|
||||
<pre><code>
|
||||
SELECT "issues".*
|
||||
FROM
|
||||
(WITH RECURSIVE "array_cte" AS MATERIALIZED
|
||||
(SELECT "projects"."id"
|
||||
FROM "projects"
|
||||
WHERE "projects"."namespace_id" IN
|
||||
(SELECT traversal_ids[array_length(traversal_ids, 1)] AS id
|
||||
FROM "namespaces"
|
||||
WHERE (traversal_ids @> ('{9970}')))),
|
||||
"recursive_keyset_cte" AS ( -- initializer row start
|
||||
(SELECT NULL::issues AS records,
|
||||
array_cte_id_array,
|
||||
issues_created_at_array,
|
||||
issues_id_array,
|
||||
0::bigint AS COUNT
|
||||
FROM
|
||||
(SELECT ARRAY_AGG("array_cte"."id") AS array_cte_id_array,
|
||||
ARRAY_AGG("issues"."created_at") AS issues_created_at_array,
|
||||
ARRAY_AGG("issues"."id") AS issues_id_array
|
||||
FROM
|
||||
(SELECT "array_cte"."id"
|
||||
FROM array_cte) array_cte
|
||||
LEFT JOIN LATERAL
|
||||
(SELECT "issues"."created_at",
|
||||
"issues"."id"
|
||||
FROM "issues"
|
||||
WHERE "issues"."project_id" = "array_cte"."id"
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC
|
||||
LIMIT 1) issues ON TRUE
|
||||
WHERE "issues"."created_at" IS NOT NULL
|
||||
AND "issues"."id" IS NOT NULL) array_scope_lateral_query
|
||||
LIMIT 1)
|
||||
-- initializer row finished
|
||||
UNION ALL
|
||||
(SELECT
|
||||
-- result row start
|
||||
(SELECT issues -- record finder query as the first column
|
||||
FROM "issues"
|
||||
WHERE "issues"."id" = recursive_keyset_cte.issues_id_array[position]
|
||||
LIMIT 1),
|
||||
array_cte_id_array,
|
||||
recursive_keyset_cte.issues_created_at_array[:position_query.position-1]||next_cursor_values.created_at||recursive_keyset_cte.issues_created_at_array[position_query.position+1:],
|
||||
recursive_keyset_cte.issues_id_array[:position_query.position-1]||next_cursor_values.id||recursive_keyset_cte.issues_id_array[position_query.position+1:],
|
||||
recursive_keyset_cte.count + 1
|
||||
-- result row finished
|
||||
FROM recursive_keyset_cte,
|
||||
LATERAL
|
||||
-- finding the cursor values of the next record start
|
||||
(SELECT created_at,
|
||||
id,
|
||||
position
|
||||
FROM UNNEST(issues_created_at_array, issues_id_array) WITH
|
||||
ORDINALITY AS u(created_at, id, position)
|
||||
WHERE created_at IS NOT NULL
|
||||
AND id IS NOT NULL
|
||||
ORDER BY "created_at" ASC, "id" ASC
|
||||
LIMIT 1) AS position_query,
|
||||
-- finding the cursor values of the next record end
|
||||
-- finding the next cursor values (next_cursor_values_query) start
|
||||
LATERAL
|
||||
(SELECT "record"."created_at",
|
||||
"record"."id"
|
||||
FROM (
|
||||
VALUES (NULL,
|
||||
NULL)) AS nulls
|
||||
LEFT JOIN
|
||||
(SELECT "issues"."created_at",
|
||||
"issues"."id"
|
||||
FROM (
|
||||
(SELECT "issues"."created_at",
|
||||
"issues"."id"
|
||||
FROM "issues"
|
||||
WHERE "issues"."project_id" = recursive_keyset_cte.array_cte_id_array[position]
|
||||
AND recursive_keyset_cte.issues_created_at_array[position] IS NULL
|
||||
AND "issues"."created_at" IS NULL
|
||||
AND "issues"."id" > recursive_keyset_cte.issues_id_array[position]
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC)
|
||||
UNION ALL
|
||||
(SELECT "issues"."created_at",
|
||||
"issues"."id"
|
||||
FROM "issues"
|
||||
WHERE "issues"."project_id" = recursive_keyset_cte.array_cte_id_array[position]
|
||||
AND recursive_keyset_cte.issues_created_at_array[position] IS NOT NULL
|
||||
AND "issues"."created_at" IS NULL
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC)
|
||||
UNION ALL
|
||||
(SELECT "issues"."created_at",
|
||||
"issues"."id"
|
||||
FROM "issues"
|
||||
WHERE "issues"."project_id" = recursive_keyset_cte.array_cte_id_array[position]
|
||||
AND recursive_keyset_cte.issues_created_at_array[position] IS NOT NULL
|
||||
AND "issues"."created_at" > recursive_keyset_cte.issues_created_at_array[position]
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC)
|
||||
UNION ALL
|
||||
(SELECT "issues"."created_at",
|
||||
"issues"."id"
|
||||
FROM "issues"
|
||||
WHERE "issues"."project_id" = recursive_keyset_cte.array_cte_id_array[position]
|
||||
AND recursive_keyset_cte.issues_created_at_array[position] IS NOT NULL
|
||||
AND "issues"."created_at" = recursive_keyset_cte.issues_created_at_array[position]
|
||||
AND "issues"."id" > recursive_keyset_cte.issues_id_array[position]
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC)) issues
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC
|
||||
LIMIT 1) record ON TRUE
|
||||
LIMIT 1) AS next_cursor_values))
|
||||
-- finding the next cursor values (next_cursor_values_query) END
|
||||
SELECT (records).*
|
||||
FROM "recursive_keyset_cte" AS "issues"
|
||||
WHERE (COUNT <> 0)) issues -- filtering out the initializer row
|
||||
LIMIT 20
|
||||
</code></pre>
|
||||
</details>
|
||||
|
||||
### Using the `IN` query optimization
|
||||
|
||||
#### Adding more filters
|
||||
|
||||
In this example, let's add an extra filter by `milestone_id`.
|
||||
|
||||
Be careful when adding extra filters to the query. If the column is not covered by the same index,
|
||||
then the query might perform worse than the non-optimized query. The `milestone_id` column in the
|
||||
`issues` table is currently covered by a different index:
|
||||
|
||||
```sql
|
||||
"index_issues_on_milestone_id" btree (milestone_id)
|
||||
```
|
||||
|
||||
Adding the `miletone_id = X` filter to the `scope` argument or to the optimized scope causes bad performance.
|
||||
|
||||
Example (bad):
|
||||
|
||||
```ruby
|
||||
Gitlab::Pagination::Keyset::InOperatorOptimization::QueryBuilder.new(
|
||||
scope: scope,
|
||||
array_scope: array_scope,
|
||||
array_mapping_scope: array_mapping_scope,
|
||||
finder_query: finder_query
|
||||
).execute
|
||||
.where(milestone_id: 5)
|
||||
.limit(20)
|
||||
```
|
||||
|
||||
To address this concern, we could define another index:
|
||||
|
||||
```sql
|
||||
"idx_issues_on_project_id_and_milestone_id_and_created_at_and_id" btree (project_id, milestone_id, created_at, id)
|
||||
```
|
||||
|
||||
Adding more indexes to the `issues` table could significantly affect the performance of
|
||||
the `UPDATE` queries. In this case, it's better to rely on the original query. It means that if we
|
||||
want to use the optimization for the unfiltered page we need to add extra logic in the application code:
|
||||
|
||||
```ruby
|
||||
if optimization_possible? # no extra params or params covered with the same index as the ORDER BY clause
|
||||
run_optimized_query
|
||||
else
|
||||
run_normal_in_query
|
||||
end
|
||||
```
|
||||
|
||||
#### Multiple `IN` queries
|
||||
|
||||
Let's assume that we want to extend the group-level queries to include only incident and test case
|
||||
issue types.
|
||||
|
||||
The original ActiveRecord query would look like this:
|
||||
|
||||
```ruby
|
||||
scope = Issue
|
||||
.where(project_id: Group.find(9970).all_projects.select(:id)) # `gitlab-org` group and its subgroups
|
||||
.where(issue_type: [:incident, :test_case]) # 1, 2
|
||||
.order(:created_at, :id)
|
||||
.limit(20)
|
||||
```
|
||||
|
||||
To construct the array scope, we'll need to take the Cartesian product of the `project_id IN` and
|
||||
the `issue_type IN` queries. `issue_type` is an ActiveRecord enum, so we need to
|
||||
construct the following table:
|
||||
|
||||
| `project_id` | `issue_type_value` |
|
||||
| ------------ | ------------------ |
|
||||
| 2 | 1 |
|
||||
| 2 | 2 |
|
||||
| 5 | 1 |
|
||||
| 5 | 2 |
|
||||
| 10 | 1 |
|
||||
| 10 | 2 |
|
||||
| 9 | 1 |
|
||||
| 9 | 2 |
|
||||
|
||||
For the `issue_types` query we can construct a value list without querying a table:
|
||||
|
||||
```ruby
|
||||
value_list = Arel::Nodes::ValuesList.new([[Issue.issue_types[:incident]],[Issue.issue_types[:test_case]]])
|
||||
issue_type_values = Arel::Nodes::Grouping.new(value_list).as('issue_type_values (value)').to_sql
|
||||
|
||||
array_scope = Group
|
||||
.find(9970)
|
||||
.all_projects
|
||||
.from("#{Project.table_name}, #{issue_type_values}")
|
||||
.select(:id, :value)
|
||||
```
|
||||
|
||||
Building the `array_mapping_scope` query requires two arguments: `id` and `issue_type_value`:
|
||||
|
||||
```ruby
|
||||
array_mapping_scope = -> (id_expression, issue_type_value_expression) { Issue.where(Issue.arel_table[:project_id].eq(id_expression)).where(Issue.arel_table[:issue_type].eq(issue_type_value_expression)) }
|
||||
```
|
||||
|
||||
The `scope` and the `finder` queries don't change:
|
||||
|
||||
```ruby
|
||||
scope = Issue.order(:created_at, :id)
|
||||
finder_query = -> (created_at_expression, id_expression) { Issue.where(Issue.arel_table[:id].eq(id_expression)) }
|
||||
|
||||
Gitlab::Pagination::Keyset::InOperatorOptimization::QueryBuilder.new(
|
||||
scope: scope,
|
||||
array_scope: array_scope,
|
||||
array_mapping_scope: array_mapping_scope,
|
||||
finder_query: finder_query
|
||||
).execute.limit(20)
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>Expand this sentence to see the SQL query.</summary>
|
||||
<pre><code lang='sql'>
|
||||
SELECT "issues".*
|
||||
FROM
|
||||
(WITH RECURSIVE "array_cte" AS MATERIALIZED
|
||||
(SELECT "projects"."id", "value"
|
||||
FROM projects, (
|
||||
VALUES (1), (2)) AS issue_type_values (value)
|
||||
WHERE "projects"."namespace_id" IN
|
||||
(WITH RECURSIVE "base_and_descendants" AS (
|
||||
(SELECT "namespaces".*
|
||||
FROM "namespaces"
|
||||
WHERE "namespaces"."type" = 'Group'
|
||||
AND "namespaces"."id" = 9970)
|
||||
UNION
|
||||
(SELECT "namespaces".*
|
||||
FROM "namespaces", "base_and_descendants"
|
||||
WHERE "namespaces"."type" = 'Group'
|
||||
AND "namespaces"."parent_id" = "base_and_descendants"."id")) SELECT "id"
|
||||
FROM "base_and_descendants" AS "namespaces")),
|
||||
"recursive_keyset_cte" AS (
|
||||
(SELECT NULL::issues AS records,
|
||||
array_cte_id_array,
|
||||
array_cte_value_array,
|
||||
issues_created_at_array,
|
||||
issues_id_array,
|
||||
0::bigint AS COUNT
|
||||
FROM
|
||||
(SELECT ARRAY_AGG("array_cte"."id") AS array_cte_id_array,
|
||||
ARRAY_AGG("array_cte"."value") AS array_cte_value_array,
|
||||
ARRAY_AGG("issues"."created_at") AS issues_created_at_array,
|
||||
ARRAY_AGG("issues"."id") AS issues_id_array
|
||||
FROM
|
||||
(SELECT "array_cte"."id",
|
||||
"array_cte"."value"
|
||||
FROM array_cte) array_cte
|
||||
LEFT JOIN LATERAL
|
||||
(SELECT "issues"."created_at",
|
||||
"issues"."id"
|
||||
FROM "issues"
|
||||
WHERE "issues"."project_id" = "array_cte"."id"
|
||||
AND "issues"."issue_type" = "array_cte"."value"
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC
|
||||
LIMIT 1) issues ON TRUE
|
||||
WHERE "issues"."created_at" IS NOT NULL
|
||||
AND "issues"."id" IS NOT NULL) array_scope_lateral_query
|
||||
LIMIT 1)
|
||||
UNION ALL
|
||||
(SELECT
|
||||
(SELECT issues
|
||||
FROM "issues"
|
||||
WHERE "issues"."id" = recursive_keyset_cte.issues_id_array[POSITION]
|
||||
LIMIT 1), array_cte_id_array,
|
||||
array_cte_value_array,
|
||||
recursive_keyset_cte.issues_created_at_array[:position_query.position-1]||next_cursor_values.created_at||recursive_keyset_cte.issues_created_at_array[position_query.position+1:], recursive_keyset_cte.issues_id_array[:position_query.position-1]||next_cursor_values.id||recursive_keyset_cte.issues_id_array[position_query.position+1:], recursive_keyset_cte.count + 1
|
||||
FROM recursive_keyset_cte,
|
||||
LATERAL
|
||||
(SELECT created_at,
|
||||
id,
|
||||
POSITION
|
||||
FROM UNNEST(issues_created_at_array, issues_id_array) WITH
|
||||
ORDINALITY AS u(created_at, id, POSITION)
|
||||
WHERE created_at IS NOT NULL
|
||||
AND id IS NOT NULL
|
||||
ORDER BY "created_at" ASC, "id" ASC
|
||||
LIMIT 1) AS position_query,
|
||||
LATERAL
|
||||
(SELECT "record"."created_at",
|
||||
"record"."id"
|
||||
FROM (
|
||||
VALUES (NULL,
|
||||
NULL)) AS nulls
|
||||
LEFT JOIN
|
||||
(SELECT "issues"."created_at",
|
||||
"issues"."id"
|
||||
FROM (
|
||||
(SELECT "issues"."created_at",
|
||||
"issues"."id"
|
||||
FROM "issues"
|
||||
WHERE "issues"."project_id" = recursive_keyset_cte.array_cte_id_array[POSITION]
|
||||
AND "issues"."issue_type" = recursive_keyset_cte.array_cte_value_array[POSITION]
|
||||
AND recursive_keyset_cte.issues_created_at_array[POSITION] IS NULL
|
||||
AND "issues"."created_at" IS NULL
|
||||
AND "issues"."id" > recursive_keyset_cte.issues_id_array[POSITION]
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC)
|
||||
UNION ALL
|
||||
(SELECT "issues"."created_at",
|
||||
"issues"."id"
|
||||
FROM "issues"
|
||||
WHERE "issues"."project_id" = recursive_keyset_cte.array_cte_id_array[POSITION]
|
||||
AND "issues"."issue_type" = recursive_keyset_cte.array_cte_value_array[POSITION]
|
||||
AND recursive_keyset_cte.issues_created_at_array[POSITION] IS NOT NULL
|
||||
AND "issues"."created_at" IS NULL
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC)
|
||||
UNION ALL
|
||||
(SELECT "issues"."created_at",
|
||||
"issues"."id"
|
||||
FROM "issues"
|
||||
WHERE "issues"."project_id" = recursive_keyset_cte.array_cte_id_array[POSITION]
|
||||
AND "issues"."issue_type" = recursive_keyset_cte.array_cte_value_array[POSITION]
|
||||
AND recursive_keyset_cte.issues_created_at_array[POSITION] IS NOT NULL
|
||||
AND "issues"."created_at" > recursive_keyset_cte.issues_created_at_array[POSITION]
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC)
|
||||
UNION ALL
|
||||
(SELECT "issues"."created_at",
|
||||
"issues"."id"
|
||||
FROM "issues"
|
||||
WHERE "issues"."project_id" = recursive_keyset_cte.array_cte_id_array[POSITION]
|
||||
AND "issues"."issue_type" = recursive_keyset_cte.array_cte_value_array[POSITION]
|
||||
AND recursive_keyset_cte.issues_created_at_array[POSITION] IS NOT NULL
|
||||
AND "issues"."created_at" = recursive_keyset_cte.issues_created_at_array[POSITION]
|
||||
AND "issues"."id" > recursive_keyset_cte.issues_id_array[POSITION]
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC)) issues
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC
|
||||
LIMIT 1) record ON TRUE
|
||||
LIMIT 1) AS next_cursor_values)) SELECT (records).*
|
||||
FROM "recursive_keyset_cte" AS "issues"
|
||||
WHERE (COUNT <> 0)) issues
|
||||
LIMIT 20
|
||||
</code>
|
||||
</details>
|
||||
|
||||
NOTE:
|
||||
To make the query efficient, the following columns need to be covered with an index: `project_id`, `issue_type`, `created_at`, and `id`.
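
A covering index for these columns could look like the following sketch (the index name is hypothetical; verify the column order against the actual `ORDER BY` clause before adding it):

```sql
-- hypothetical covering index for the multiple `IN` example above
CREATE INDEX idx_issues_on_project_id_issue_type_created_at_id
  ON issues (project_id, issue_type, created_at, id);
```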
|
||||
|
||||
#### Batch iteration
|
||||
|
||||
Batch iteration over the records is possible via the keyset `Iterator` class.
|
||||
|
||||
```ruby
|
||||
scope = Issue.order(:created_at, :id)
|
||||
array_scope = Group.find(9970).all_projects.select(:id)
|
||||
array_mapping_scope = -> (id_expression) { Issue.where(Issue.arel_table[:project_id].eq(id_expression)) }
|
||||
finder_query = -> (created_at_expression, id_expression) { Issue.where(Issue.arel_table[:id].eq(id_expression)) }
|
||||
|
||||
opts = {
|
||||
in_operator_optimization_options: {
|
||||
array_scope: array_scope,
|
||||
array_mapping_scope: array_mapping_scope,
|
||||
finder_query: finder_query
|
||||
}
|
||||
}
|
||||
|
||||
Gitlab::Pagination::Keyset::Iterator.new(scope: scope, **opts).each_batch(of: 100) do |records|
|
||||
puts records.select(:id).map { |r| [r.id] }
|
||||
end
|
||||
```
|
||||
|
||||
#### Keyset pagination
|
||||
|
||||
The optimization works out of the box with GraphQL and the `keyset_paginate` helper method.
|
||||
Read more about [keyset pagination](database/keyset_pagination.md).
|
||||
|
||||
```ruby
|
||||
array_scope = Group.find(9970).all_projects.select(:id)
|
||||
array_mapping_scope = -> (id_expression) { Issue.where(Issue.arel_table[:project_id].eq(id_expression)) }
|
||||
finder_query = -> (created_at_expression, id_expression) { Issue.where(Issue.arel_table[:id].eq(id_expression)) }
|
||||
|
||||
opts = {
|
||||
in_operator_optimization_options: {
|
||||
array_scope: array_scope,
|
||||
array_mapping_scope: array_mapping_scope,
|
||||
finder_query: finder_query
|
||||
}
|
||||
}
|
||||
|
||||
issues = Issue
|
||||
.order(:created_at, :id)
|
||||
.keyset_paginate(per_page: 20, keyset_order_options: opts)
|
||||
.records
|
||||
```
|
||||
|
||||
#### Offset pagination with Kaminari
|
||||
|
||||
The `ActiveRecord` scope produced by the `InOperatorOptimization` class can be used in
|
||||
[offset-paginated](database/pagination_guidelines.md#offset-pagination)
|
||||
queries.
|
||||
|
||||
```ruby
|
||||
Gitlab::Pagination::Keyset::InOperatorOptimization::QueryBuilder
|
||||
.new(...)
|
||||
.execute
|
||||
.page(1)
|
||||
.per(20)
|
||||
.without_count
|
||||
```
|
||||
|
||||
## Generalized `IN` optimization technique
|
||||
|
||||
Let's dive into how `QueryBuilder` builds the optimized query
|
||||
to fetch the twenty oldest created issues from the group `gitlab-org`
|
||||
using the generalized `IN` optimization technique.
|
||||
|
||||
### Array CTE
|
||||
|
||||
As the first step, we use a common table expression (CTE) for collecting the `projects.id` values.
|
||||
This is done by wrapping the incoming `array_scope` ActiveRecord relation parameter with a CTE.
|
||||
|
||||
```sql
|
||||
WITH array_cte AS MATERIALIZED (
|
||||
SELECT "projects"."id"
|
||||
FROM "projects"
|
||||
WHERE "projects"."namespace_id" IN
|
||||
(SELECT traversal_ids[array_length(traversal_ids, 1)] AS id
|
||||
FROM "namespaces"
|
||||
WHERE (traversal_ids @> ('{9970}')))
|
||||
)
|
||||
```
|
||||
|
||||
This query produces the following result set with only one column (`projects.id`):
|
||||
|
||||
| ID |
|
||||
| --- |
|
||||
| 9 |
|
||||
| 2 |
|
||||
| 5 |
|
||||
| 10 |
|
||||
|
||||
### Array mapping
|
||||
|
||||
For each project (that is, each record storing a project ID in `array_cte`),
|
||||
we will fetch the cursor value identifying the first issue respecting the `ORDER BY` clause.
|
||||
|
||||
As an example, let's pick the first record `ID=9` from `array_cte`.
|
||||
The following query should fetch the cursor value `(created_at, id)` identifying
|
||||
the first issue record respecting the `ORDER BY` clause for the project with `ID=9`:
|
||||
|
||||
```sql
|
||||
SELECT "issues"."created_at", "issues"."id"
|
||||
FROM "issues"."project_id"=9
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC
|
||||
LIMIT 1;
|
||||
```
|
||||
|
||||
We will use `LATERAL JOIN` to loop over the records in the `array_cte` and find the
|
||||
cursor value for each project. The query would be built using the `array_mapping_scope` lambda
|
||||
function.
|
||||
|
||||
```sql
|
||||
SELECT ARRAY_AGG("array_cte"."id") AS array_cte_id_array,
|
||||
ARRAY_AGG("issues"."created_at") AS issues_created_at_array,
|
||||
ARRAY_AGG("issues"."id") AS issues_id_array
|
||||
FROM (
|
||||
SELECT "array_cte"."id" FROM array_cte
|
||||
) array_cte
|
||||
LEFT JOIN LATERAL
|
||||
(
|
||||
SELECT "issues"."created_at", "issues"."id"
|
||||
FROM "issues"
|
||||
WHERE "issues"."project_id" = "array_cte"."id"
|
||||
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC
|
||||
LIMIT 1
|
||||
) issues ON TRUE
|
||||
```
|
||||
|
||||
Since we have an index on `project_id`, `created_at`, and `id`,
|
||||
index-only scans should quickly locate all the cursor values.
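
As a sketch, such an index could be defined like this (hypothetical name, for illustration only):

```sql
-- hypothetical index supporting the per-project cursor lookups
CREATE INDEX idx_issues_on_project_id_created_at_id
  ON issues (project_id, created_at, id);
```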
|
||||
|
||||
This is how the query could be translated to Ruby:
|
||||
|
||||
```ruby
|
||||
created_at_values = []
|
||||
id_values = []
|
||||
project_ids.map do |project_id|
|
||||
created_at, id = Issue.where(project_id: project_id).order(:created_at, :id).pick(:created_at, :id) # N+1 but it's fast
|
||||
created_at_values << created_at
|
||||
id_values << id
|
||||
end
|
||||
```
|
||||
|
||||
This is what the result set would look like:
|
||||
|
||||
| `project_ids` | `created_at_values` | `id_values` |
|
||||
| ------------- | ------------------- | ----------- |
|
||||
| 2 | 2020-01-10 | 5 |
|
||||
| 5 | 2020-01-05 | 4 |
|
||||
| 10 | 2020-01-15 | 7 |
|
||||
| 9 | 2020-01-05 | 3 |
|
||||
|
||||
The table shows the cursor values (`created_at, id`) of the first record for each project
|
||||
respecting the `ORDER BY` clause.
|
||||
|
||||
At this point, we have the initial data. To start collecting the actual records from the database,
|
||||
we'll use a recursive CTE query where each recursion locates one row until
|
||||
the `LIMIT` is reached or no more data can be found.
|
||||
|
||||
Here's an outline of the steps we will take in the recursive CTE query
|
||||
(expressing the steps in SQL is non-trivial but will be explained next):
|
||||
|
||||
1. Sort the initial resultset according to the `ORDER BY` clause.
|
||||
1. Pick the top cursor to fetch the record; this is our first record. In the example,
|
||||
this cursor would be (`2020-01-05`, `3`) for `project_id=9`.
|
||||
1. We can use (`2020-01-05`, `3`) to fetch the next issue respecting the `ORDER BY` clause
|
||||
and the `project_id=9` filter. This produces an updated resultset.
|
||||
|
||||
| `project_ids` | `created_at_values` | `id_values` |
|
||||
| ------------- | ------------------- | ----------- |
|
||||
| 2 | 2020-01-10 | 5 |
|
||||
| 5 | 2020-01-05 | 4 |
|
||||
| 10 | 2020-01-15 | 7 |
|
||||
| **9** | **2020-01-06** | **6** |
|
||||
|
||||
1. Repeat steps 1 to 3 with the updated resultset until we have fetched `N=20` records.
|
||||
|
||||
### Initializing the recursive CTE query
|
||||
|
||||
For the initial recursive query, we'll need to produce exactly one row; we call this the
|
||||
initializer query (`initializer_query`).
|
||||
|
||||
Use the `ARRAY_AGG` function to compact the initial result set into a single row
|
||||
and use the row as the initial value for the recursive CTE query:
|
||||
|
||||
Example initializer row:
|
||||
|
||||
| `records` | `project_ids` | `created_at_values` | `id_values` | `Count` | `Position` |
|
||||
| -------------- | --------------- | ------------------- | ----------- | ------- | ---------- |
|
||||
| `NULL::issues` | `[9, 2, 5, 10]` | `[...]` | `[...]` | `0` | `NULL` |
|
||||
|
||||
- The `records` column contains our sorted database records, and the initializer query sets the
|
||||
first value to `NULL`, which is filtered out later.
|
||||
- The `count` column tracks the number of records found. We use this column to filter out the
|
||||
initializer row from the result set.
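
For the issues example, the initializer query is roughly the following (a simplified extract of the generated SQL, shown here for illustration):

```sql
SELECT NULL::issues AS records,
       array_cte_id_array,
       issues_created_at_array,
       issues_id_array,
       0::bigint AS count
FROM
  (SELECT ARRAY_AGG("array_cte"."id") AS array_cte_id_array,
          ARRAY_AGG("issues"."created_at") AS issues_created_at_array,
          ARRAY_AGG("issues"."id") AS issues_id_array
   FROM array_cte
   LEFT JOIN LATERAL
     (SELECT "issues"."created_at", "issues"."id"
      FROM "issues"
      WHERE "issues"."project_id" = "array_cte"."id"
      ORDER BY "issues"."created_at" ASC, "issues"."id" ASC
      LIMIT 1) issues ON TRUE) array_scope_lateral_query
LIMIT 1
```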
|
||||
|
||||
### Recursive portion of the CTE query
|
||||
|
||||
The result row is produced with the following steps:
|
||||
|
||||
1. [Order the keyset arrays.](#order-the-keyset-arrays)
|
||||
1. [Find the next cursor.](#find-the-next-cursor)
|
||||
1. [Produce a new row.](#produce-a-new-row)
|
||||
|
||||
#### Order the keyset arrays
|
||||
|
||||
Order the keyset arrays according to the original `ORDER BY` clause with `LIMIT 1` using the
|
||||
`UNNEST [] WITH ORDINALITY` table function. The function locates the "lowest" keyset cursor
|
||||
values and gives us the array position. These cursor values are used to locate the record.
|
||||
|
||||
NOTE:
|
||||
At this point, we haven't read anything from the database tables, because we relied on
|
||||
fast index-only scans.
|
||||
|
||||
| `project_ids` | `created_at_values` | `id_values` |
|
||||
| ------------- | ------------------- | ----------- |
|
||||
| 2 | 2020-01-10 | 5 |
|
||||
| 5 | 2020-01-05 | 4 |
|
||||
| 10 | 2020-01-15 | 7 |
|
||||
| 9 | 2020-01-05 | 3 |
|
||||
|
||||
The first row is the 4th one (`position = 4`), because it has the lowest `created_at` and
|
||||
`id` values. The `UNNEST` function also exposes the position using an extra column (note:
|
||||
PostgreSQL uses 1-based indexing).
|
||||
|
||||
Demonstration of the `UNNEST [] WITH ORDINALITY` table function:
|
||||
|
||||
```sql
|
||||
SELECT position FROM unnest('{2020-01-10, 2020-01-05, 2020-01-15, 2020-01-05}'::timestamp[], '{5, 4, 7, 3}'::int[])
|
||||
WITH ORDINALITY AS t(created_at, id, position) ORDER BY created_at ASC, id ASC LIMIT 1;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```sql
|
||||
position
|
||||
----------
|
||||
4
|
||||
(1 row)
|
||||
```
|
||||
|
||||
#### Find the next cursor
|
||||
|
||||
Now, let's find the next cursor values (`next_cursor_values_query`) for the project with `id = 9`.
|
||||
To do that, we build a keyset pagination SQL query. Find the next row after
|
||||
`created_at = 2020-01-05` and `id = 3`. Because we order by two database columns, there can be two
|
||||
cases:
|
||||
|
||||
- There are rows with `created_at = 2020-01-05` and `id > 3`.
|
||||
- There are rows with `created_at > 2020-01-05`.
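
Conceptually, the query looks like the following sketch (the library actually builds a `UNION`-optimized variant of this condition):

```sql
SELECT "issues"."created_at", "issues"."id"
FROM "issues"
WHERE "issues"."project_id" = 9
  AND (
    ("issues"."created_at" = '2020-01-05' AND "issues"."id" > 3) OR
    "issues"."created_at" > '2020-01-05'
  )
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC
LIMIT 1;
```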
|
||||
|
||||
Generating this query is done by the generic keyset pagination library. After the query is done,
|
||||
we have a temporary table with the next cursor values:
|
||||
|
||||
| `created_at` | ID |
|
||||
| ------------ | --- |
|
||||
| 2020-01-06 | 6 |
|
||||
|
||||
#### Produce a new row
|
||||
|
||||
As the final step, we need to produce a new row by manipulating the initializer row
|
||||
(`data_collector_query` method). Two things happen here:
|
||||
|
||||
- Read the full row from the DB and return it in the `records` columns. (`result_collector_columns`
|
||||
method)
|
||||
- Replace the cursor values at the current position with the results from the keyset query.
|
||||
|
||||
Reading the full row from the database is only one index scan by the primary key. We use the
|
||||
ActiveRecord query passed as the `finder_query`:
|
||||
|
||||
```sql
|
||||
(SELECT "issues".* FROM issues WHERE id = id_values[position] LIMIT 1)
|
||||
```
|
||||
|
||||
By adding parentheses, the result row can be put into the `records` column.
|
||||
|
||||
Replacing the cursor values at `position` can be done via standard PostgreSQL array operators:
|
||||
|
||||
```sql
|
||||
-- created_at_values column value
|
||||
created_at_values[:position-1]||next_cursor_values.created_at||created_at_values[position+1:]
|
||||
|
||||
-- id_values column value
|
||||
id_values[:position-1]||next_cursor_values.id||id_values[position+1:]
|
||||
```
|
||||
|
||||
The Ruby equivalent would be the following:
|
||||
|
||||
```ruby
|
||||
id_values[0..(position - 1)] + [next_cursor_values.id] + id_values[(position + 1)..-1]
|
||||
```
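
The array operators can be tried in isolation; for example, replacing the element at position 2 (PostgreSQL arrays are 1-based):

```sql
SELECT (ARRAY[10, 20, 30, 40])[:2-1] || 99 || (ARRAY[10, 20, 30, 40])[2+1:];
-- => {10,99,30,40}
```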
|
||||
|
||||
After this, the recursion starts again by finding the next lowest cursor value.
|
||||
|
||||
### Finalizing the query
|
||||
|
||||
For producing the final `issues` rows, we're going to wrap the query with another `SELECT` statement:
|
||||
|
||||
```sql
|
||||
SELECT "issues".*
|
||||
FROM (
|
||||
SELECT (records).* -- similar to ruby splat operator
|
||||
FROM recursive_keyset_cte
|
||||
WHERE recursive_keyset_cte.count <> 0 -- filter out the initializer row
|
||||
) AS issues
|
||||
```
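
The `(records).*` syntax expands a row-typed (composite) value into individual columns. A minimal standalone example:

```sql
-- select a whole issues row as a single composite value, then expand it
SELECT (record).id, (record).created_at
FROM (SELECT issues AS record FROM issues LIMIT 1) t;
```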
|
||||
|
||||
### Performance comparison
|
||||
|
||||
Assuming that we have the correct database index in place, we can compare the query performance by
|
||||
looking at the number of database rows accessed by the query.
|
||||
|
||||
- Number of groups: 100
|
||||
- Number of projects: 500
|
||||
- Number of issues (in the group hierarchy): 50 000
|
||||
|
||||
Standard `IN` query:
|
||||
|
||||
| Query | Entries read from index | Rows read from the table | Rows sorted in memory |
|
||||
| ------------------------ | ----------------------- | ------------------------ | --------------------- |
|
||||
| group hierarchy subquery | 100 | 0 | 0 |
|
||||
| project lookup query | 500 | 0 | 0 |
|
||||
| issue lookup query | 50 000 | 20 | 50 000 |
|
||||
|
||||
Optimized `IN` query:
|
||||
|
||||
| Query | Entries read from index | Rows read from the table | Rows sorted in memory |
|
||||
| ------------------------ | ----------------------- | ------------------------ | --------------------- |
|
||||
| group hierarchy subquery | 100 | 0 | 0 |
|
||||
| project lookup query | 500 | 0 | 0 |
|
||||
| issue lookup query | 519 | 20 | 10 000 |
|
||||
|
||||
The group and project queries don't use sorting; the necessary columns are read from database
|
||||
indexes. These values are accessed frequently so it's very likely that most of the data will be
|
||||
in PostgreSQL's buffer cache.
|
||||
|
||||
The optimized `IN` query reads a maximum of 519 entries (cursor values) from the index:
|
||||
|
||||
- 500 index-only scans for populating the arrays for each project. The cursor values of the first
|
||||
record will be here.
|
||||
- A maximum of 19 additional index-only scans for the consecutive records.
|
||||
|
||||
The optimized `IN` query sorts the array of per-project cursor values 20 times, which
|
||||
means we'll sort 20 x 500 rows. However, this might be a less memory-intensive task than
|
||||
sorting 10 000 rows at once.
|
||||
|
||||
Performance comparison for the `gitlab-org` group:
|
||||
|
||||
| Query | Number of 8K Buffers involved | Uncached execution time | Cached execution time |
|
||||
| -------------------- | ----------------------------- | ----------------------- | --------------------- |
|
||||
| `IN` query | 240833 | 1.2s | 660ms |
|
||||
| Optimized `IN` query | 9783 | 450ms | 22ms |
|
||||
|
||||
NOTE:
|
||||
Before taking measurements, the group lookup query was executed separately in order to make
|
||||
the group data available in the buffer cache. Since it's a frequently called query, it's going to
|
||||
hit many shared buffers during the query execution in the production environment.
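
Buffer counts and timings like the ones above can be collected with `EXPLAIN (ANALYZE, BUFFERS)`. A minimal sketch, assuming console access to a database with production-like data (the subquery is simplified; the real query uses the full group-hierarchy lookup):

```sql
EXPLAIN (ANALYZE, BUFFERS)
SELECT "issues".*
FROM "issues"
WHERE "issues"."project_id" IN (SELECT "projects"."id" FROM "projects" WHERE "projects"."namespace_id" = 9970)
ORDER BY "issues"."created_at" ASC, "issues"."id" ASC
LIMIT 20;
```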
|
|
@ -62,6 +62,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
|
|||
- [Query performance guidelines](../query_performance.md)
|
||||
- [Pagination guidelines](pagination_guidelines.md)
|
||||
- [Pagination performance guidelines](pagination_performance_guidelines.md)
|
||||
- [Efficient `IN` operator queries](efficient_in_operator_queries.md)
|
||||
|
||||
## Case studies
|
||||
|
||||
|
|
|
@ -36,7 +36,8 @@ Keyset pagination works without any configuration for simple ActiveRecord querie
|
|||
- Order by one column.
|
||||
- Order by two columns, where the last column is the primary key.
|
||||
|
||||
The library can detect nullable and non-distinct columns and based on these, it will add extra ordering using the primary key. This is necessary because keyset pagination expects distinct order by values:
|
||||
The library detects nullable and non-distinct columns and based on these, adds extra ordering
|
||||
using the primary key. This is necessary because keyset pagination expects distinct order by values:
|
||||
|
||||
```ruby
|
||||
Project.order(:created_at).keyset_paginate.records # ORDER BY created_at, id
|
||||
|
@ -79,7 +80,7 @@ cursor = paginator.cursor_for_next_page # encoded column attributes for the next
|
|||
paginator = Project.order(:name).keyset_paginate(cursor: cursor).records # loading the next page
|
||||
```
|
||||
|
||||
Since keyset pagination does not support page numbers, we are restricted to go to the following pages:
|
||||
Because keyset pagination does not support page numbers, we are restricted to going to the following pages:
|
||||
|
||||
- Next page
|
||||
- Previous page
|
||||
|
@ -111,7 +112,8 @@ In the HAML file, we can render the records:
|
|||
|
||||
The performance of the keyset pagination depends on the database index configuration and the number of columns we use in the `ORDER BY` clause.
|
||||
|
||||
In case we order by the primary key (`id`), then the generated queries will be efficient since the primary key is covered by a database index.
|
||||
In case we order by the primary key (`id`), then the generated queries are efficient because
|
||||
the primary key is covered by a database index.
|
||||
|
||||
When two or more columns are used in the `ORDER BY` clause, it's advised to check the generated database query and make sure that the correct index configuration is used. More information can be found on the [pagination guideline page](pagination_guidelines.md#index-coverage).
|
||||
|
||||
|
@ -149,7 +151,9 @@ puts paginator2.records.to_a # UNION query
|
|||
|
||||
## Complex order configuration
|
||||
|
||||
Common `ORDER BY` configurations will be handled by the `keyset_paginate` method automatically so no manual configuration is needed. There are a few edge cases where order object configuration is necessary:
|
||||
Common `ORDER BY` configurations are handled by the `keyset_paginate` method automatically
|
||||
so no manual configuration is needed. There are a few edge cases where order object
|
||||
configuration is necessary:
|
||||
|
||||
- `NULLS LAST` ordering.
|
||||
- Function-based ordering.
|
||||
|
@ -170,12 +174,13 @@ scope.keyset_paginate # raises: Gitlab::Pagination::Keyset::Paginator::Unsupport
|
|||
|
||||
The `keyset_paginate` method raises an error because the order value on the query is a custom SQL string and not an [`Arel`](https://www.rubydoc.info/gems/arel) AST node. The keyset library cannot automatically infer configuration values from these kinds of queries.
|
||||
|
||||
To make keyset pagination work, we need to configure custom order objects, to do so, we need to collect information about the order columns:
|
||||
To make keyset pagination work, we must configure custom order objects. To do so, we must
|
||||
collect information about the order columns:
|
||||
|
||||
- `relative_position` can have duplicated values since no unique index is present.
|
||||
- `relative_position` can have null values because we don't have a not null constraint on the column. For this, we need to determine where will we see NULL values, at the beginning of the resultset or the end (`NULLS LAST`).
|
||||
- Keyset pagination requires distinct order columns, so we'll need to add the primary key (`id`) to make the order distinct.
|
||||
- Jumping to the last page and paginating backwards actually reverses the `ORDER BY` clause. For this, we'll need to provide the reversed `ORDER BY` clause.
|
||||
- `relative_position` can have duplicated values because no unique index is present.
|
||||
- `relative_position` can have null values because we don't have a not null constraint on the column. For this, we must determine where we see NULL values, at the beginning of the result set, or the end (`NULLS LAST`).
|
||||
- Keyset pagination requires distinct order columns, so we must add the primary key (`id`) to make the order distinct.
|
||||
- Jumping to the last page and paginating backwards actually reverses the `ORDER BY` clause. For this, we must provide the reversed `ORDER BY` clause.
|
||||
|
||||
Example:
|
||||
|
||||
|
@ -206,7 +211,8 @@ scope.keyset_paginate.records # works
|
|||
|
||||
### Function-based ordering
|
||||
|
||||
In the following example, we multiply the `id` by 10 and ordering by that value. Since the `id` column is unique, we need to define only one column:
|
||||
In the following example, we multiply the `id` by 10 and order by that value. Because the `id`
|
||||
column is unique, we define only one column:
|
||||
|
||||
```ruby
|
||||
order = Gitlab::Pagination::Keyset::Order.build([
|
||||
|
@ -233,7 +239,8 @@ The `add_to_projections` flag tells the paginator to expose the column expressio
|
|||
|
||||
### `iid` based ordering
|
||||
|
||||
When ordering issues, the database ensures that we'll have distinct `iid` values within a project. Ordering by one column is enough to make the pagination work if the `project_id` filter is present:
|
||||
When ordering issues, the database ensures that we have distinct `iid` values in a project.
|
||||
Ordering by one column is enough to make the pagination work if the `project_id` filter is present:
|
||||
|
||||
```ruby
|
||||
order = Gitlab::Pagination::Keyset::Order.build([
|
||||
|
|
|
@ -421,14 +421,16 @@ is still validated.
|
|||
Again, make sure that those overrides are as short-lived as possible by tracking their removal in
|
||||
the appropriate issue.
|
||||
|
||||
#### Feature flags in queries
|
||||
#### Feature-flagged queries
|
||||
|
||||
Sometimes it may be helpful to have an entity in the GraphQL query behind a feature flag.
|
||||
One example is working on a feature where the backend has already been merged but the frontend
|
||||
has not. In this case, you may consider putting the GraphQL entity behind a feature flag to allow smaller
|
||||
merge requests to be created and merged.
|
||||
In cases where the backend is complete and the frontend is being implemented behind a feature flag,
|
||||
a couple of options are available to leverage the feature flag in the GraphQL queries.
|
||||
|
||||
To do this we can use the `@include` directive to exclude an entity if the `if` statement passes.
|
||||
##### The `@include` directive
|
||||
|
||||
The `@include` directive (or its opposite, `@skip`) can be used to control whether an entity should be
|
||||
included in the query. If the `@include` directive evaluates to `false`, the entity's resolver is
|
||||
not hit and the entity is excluded from the response. For example:
|
||||
|
||||
```graphql
|
||||
query getAuthorData($authorNameEnabled: Boolean = false) {
|
||||
|
@ -456,6 +458,34 @@ export default {
|
|||
};
|
||||
```
|
||||
|
||||
Note that, even if the directive evaluates to `false`, the guarded entity is sent to the backend and
|
||||
matched against the GraphQL schema. So this approach requires that the feature-flagged entity
|
||||
exists in the schema, even if the feature flag is disabled. When the feature flag is turned off, it
|
||||
is recommended that the resolver returns `null` at the very least.
|
||||
|
||||
##### Different versions of a query
|
||||
|
||||
There's another approach that involves duplicating the standard query, and it should be avoided. The copy includes the new entities
|
||||
while the original remains unchanged. It is up to the production code to trigger the right query
|
||||
based on the feature flag's status. For example:
|
||||
|
||||
```javascript
|
||||
export default {
|
||||
apollo: {
|
||||
user: {
|
||||
query() {
|
||||
return this.glFeatures.authorNameEnabled ? NEW_QUERY : ORIGINAL_QUERY;
|
||||
}
|
||||
}
|
||||
},
|
||||
};
|
||||
```
|
||||
|
||||
This approach is not recommended as it results in bigger merge requests and requires maintaining
|
||||
two similar queries for as long as the feature flag exists. This can be used in cases where the new
|
||||
GraphQL entities are not yet part of the schema, or if they are feature-flagged at the schema level
|
||||
(`new_entity: :feature_flag`).
|
||||
|
||||
### Manually triggering queries
|
||||
|
||||
Queries on a component's `apollo` property are made automatically when the component is created.
|
||||
|
|
|
@ -188,6 +188,17 @@ Alternatively you can use the following on each spec run,
|
|||
bundle exec spring rspec some_spec.rb
|
||||
```
|
||||
|
||||
## Generate initial RuboCop TODO list
|
||||
|
||||
One way to generate the initial list is to run the Rake task `rubocop:todo:generate`:
|
||||
|
||||
```shell
|
||||
bundle exec rake rubocop:todo:generate
|
||||
```
|
||||
|
||||
See [Resolving RuboCop exceptions](contributing/style_guides.md#resolving-rubocop-exceptions)
|
||||
on how to proceed from here.
|
||||
|
||||
## Compile Frontend Assets
|
||||
|
||||
You shouldn't ever need to compile frontend assets manually in development, but
|
||||
|
|
|
@ -170,6 +170,45 @@ Helm chart](https://gitlab.com/gitlab-org/charts/gitlab/), itself deployed with
|
|||
|
||||
See [Review Apps](../review_apps.md) for more details about Review Apps.
|
||||
|
||||
## Test reports
|
||||
|
||||
### Allure report
|
||||
|
||||
For additional test results visibility, tests that run on pipelines generate
|
||||
and host [Allure](https://github.com/allure-framework/allure2) test reports.
|
||||
|
||||
The `QA` framework is using the [Allure RSpec](https://github.com/allure-framework/allure-ruby/blob/master/allure-rspec/README.md)
|
||||
gem to generate source files for the `Allure` test report. An additional job
|
||||
in the pipeline:
|
||||
|
||||
- Fetches these source files from all test jobs.
|
||||
- Generates and uploads the report to the `GCS` bucket `gitlab-qa-allure-report` under the project `gitlab-qa-resources`.
|
||||
|
||||
A common CI template for report uploading is stored in
|
||||
[`allure-report.yml`](https://gitlab.com/gitlab-org/quality/pipeline-common/-/blob/master/ci/allure-report.yml).
|
||||
|
||||
#### Merge requests
|
||||
|
||||
When these tests are executed in the scope of merge requests, the `Allure` report is
|
||||
uploaded to the `GCS` bucket and a comment is added linking to their respective reports.
|
||||
|
||||
#### Scheduled pipelines
|
||||
|
||||
Scheduled pipelines for these tests contain a `generate-allure-report` job under the `Report` stage. They also output
|
||||
a link to the current test report.
|
||||
|
||||
#### Static report links
|
||||
|
||||
Each type of scheduled pipeline generates a static link for the latest test report according to its stage:
|
||||
|
||||
- [`master`](https://storage.googleapis.com/gitlab-qa-allure-reports/package-and-qa/master/index.html)
|
||||
- [`staging-full`](https://storage.googleapis.com/gitlab-qa-allure-reports/staging-full/master/index.html)
|
||||
- [`staging-sanity`](https://storage.googleapis.com/gitlab-qa-allure-reports/staging-sanity/master/index.html)
|
||||
- [`staging-sanity-no-admin`](https://storage.googleapis.com/gitlab-qa-allure-reports/staging-sanity-no-admin/master/index.html)
|
||||
- [`canary-sanity`](https://storage.googleapis.com/gitlab-qa-allure-reports/canary-sanity/master/index.html)
|
||||
- [`production`](https://storage.googleapis.com/gitlab-qa-allure-reports/production/master/index.html)
|
||||
- [`production-sanity`](https://storage.googleapis.com/gitlab-qa-allure-reports/production-sanity/master/index.html)
|
||||
|
||||
## How do I run the tests?
|
||||
|
||||
If you are not [testing code in a merge request](#testing-code-in-merge-requests),
|
||||
|
|
|
@ -246,7 +246,7 @@ A job is queued. When the job finishes, the subscription details are updated.
|
|||
#### Troubleshooting cloud licensing sync
|
||||
|
||||
If the sync job is not working, ensure you allow network traffic from your GitLab instance
|
||||
to IP address `104.46.106.135:443` (`customers.gitlab.com`).
|
||||
to IP address `104.18.26.123:443` (`customers.gitlab.com`).
|
||||
|
||||
## Obtain a subscription
|
||||
|
||||
|
|
|
@ -88,3 +88,7 @@ stages:
|
|||
include:
|
||||
- template: DAST.latest.gitlab-ci.yml
|
||||
```
|
||||
|
||||
## Lack of IPv6 support
|
||||
|
||||
Due to the underlying [ZAProxy engine not supporting IPv6](https://github.com/zaproxy/zaproxy/issues/3705), DAST is unable to scan or crawl IPv6-based applications.
|
||||
|
|
|
@ -90,6 +90,16 @@ some recorded videos with [live examples](#live-examples).
|
|||
used in Helm v3. So, the only way to integrate it with this Cluster Management Project is to actually uninstall this app and accept the
|
||||
chart version proposed in `applications/vault/values.yaml`.
|
||||
|
||||
- Cert-manager:
|
||||
- For users on Kubernetes version 1.20 or above, the deprecated cert-manager v0.10 is no longer valid and
|
||||
the upgrade includes a breaking change. So we suggest that you [backup and uninstall cert-manager v0.10](#backup-and-uninstall-cert-manager-v010),
|
||||
and install cert-manager v1.4 instead. To install this version, uncomment the `applications/cert-manager-1-4/helmfile.yaml`
|
||||
from the [`./helmfile.yaml`](management_project_template.md#the-main-helmfileyml-file).
|
||||
This triggers a pipeline to install the new version.
|
||||
- For users on Kubernetes versions lower than 1.20, you can stick to v0.10 by uncommenting
|
||||
`applications/cert-manager/helmfile.yaml`
|
||||
in your project's main Helmfile ([`./helmfile.yaml`](management_project_template.md#the-main-helmfileyml-file)).
|
||||
|
||||
1. After following all the previous steps, [run a pipeline manually](../../ci/pipelines/index.md#run-a-pipeline-manually)
|
||||
and watch the `apply` job logs to see if any of your applications were successfully detected, installed, and whether they got any
|
||||
unexpected updates.
|
||||
|
@ -104,6 +114,17 @@ some recorded videos with [live examples](#live-examples).
|
|||
After getting a successful pipeline, repeat these steps for any other deployed apps
|
||||
you want to manage with the Cluster Management Project.
|
||||
|
||||
## Backup and uninstall cert-manager v0.10
|
||||
|
||||
1. Follow the [official docs](https://docs.cert-manager.io/en/release-0.10/tasks/backup-restore-crds.html) on how to
|
||||
back up your cert-manager v0.10 data.
|
||||
1. Uninstall cert-manager by setting all the occurrences of `installed: true` to `installed: false` in the
|
||||
`applications/cert-manager/helmfile.yaml` file.
|
||||
1. Search for any left-over resources by executing the following command `kubectl get Issuers,ClusterIssuers,Certificates,CertificateRequests,Orders,Challenges,Secrets,ConfigMaps -n gitlab-managed-apps | grep certmanager`.
|
||||
1. For each of the resources found in the previous step, delete them with `kubectl delete -n gitlab-managed-apps {ResourceType} {ResourceName}`.
|
||||
For example, if you found a resource of type `ConfigMap` named `cert-manager-controller`, delete it by executing:
|
||||
`kubectl delete configmap -n gitlab-managed-apps cert-manager-controller`.
|
||||
|
||||
## Live examples
|
||||
|
||||
- [Migrating from scratch using a brand new cluster management project](https://youtu.be/jCUFGWT0jS0). Also covers Helm v2 apps migration.
|
||||
|
|
|
@ -6,16 +6,21 @@ info: To determine the technical writer assigned to the Stage/Group associated w
|
|||
|
||||
# Install cert-manager with a cluster management project
|
||||
|
||||
> [Introduced](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/merge_requests/5) in GitLab 14.0.
|
||||
> - [Introduced](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/merge_requests/5) in GitLab 14.0.
|
||||
> - Support for cert-manager v1.4 was [introduced](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/merge_requests/69405) in GitLab 14.3.
|
||||
|
||||
Assuming you already have a [Cluster management project](../../../../../user/clusters/management_project.md) created from a
|
||||
[management project template](../../../../../user/clusters/management_project_template.md), to install cert-manager you should
|
||||
uncomment this line from your `helmfile.yaml`:
|
||||
|
||||
```yaml
|
||||
- path: applications/cert-manager/helmfile.yaml
|
||||
- path: applications/cert-manager-1-4/helmfile.yaml
|
||||
```
|
||||
|
||||
NOTE:
|
||||
We kept the `- path: applications/cert-manager/helmfile.yaml` with cert-manager v0.10 to facilitate
|
||||
the [migration from GitLab Managed Apps to a cluster management project](../../../../clusters/migrating_from_gma_to_project_template.md).
|
||||
|
||||
cert-manager:
|
||||
|
||||
- Is installed by default into the `gitlab-managed-apps` namespace of your cluster.
|
||||
|
@ -24,7 +29,7 @@ cert-manager:
|
|||
email address to be specified. The email address is used by Let's Encrypt to
|
||||
contact you about expiring certificates and issues related to your account.
|
||||
|
||||
The following configuration in your `applications/cert-manager/helmfile.yaml` is required to install cert-manager:
|
||||
To install cert-manager in your cluster, configure your `applications/cert-manager-1-4/helmfile.yaml` to:
|
||||
|
||||
```yaml
|
||||
certManager:
|
||||
|
|
|
@ -89,6 +89,10 @@ upload:
|
|||
|
||||
## Install a package
|
||||
|
||||
NOTE:
|
||||
When requesting a package, GitLab considers only the 300 most recent packages created.
|
||||
For each package, only the most recent package file is returned.
|
||||
|
||||
To install the latest version of a chart, use the following command:
|
||||
|
||||
```shell
|
||||
|
|
|
@ -44,6 +44,7 @@ version. The range of supported versions is based on the evaluation of:
|
|||
GitLab supports the following Kubernetes versions, and you can upgrade your
|
||||
Kubernetes version to any supported version at any time:
|
||||
|
||||
- 1.20 (support ends on April 22, 2022)
|
||||
- 1.19 (support ends on February 22, 2022)
|
||||
- 1.18 (support ends on November 22, 2021)
|
||||
- 1.17 (support ends on September 22, 2021)
|
||||
|
|
|
@ -44,15 +44,10 @@ module API
|
|||
get ":channel/index.yaml" do
|
||||
authorize_read_package!(authorized_user_project)
|
||||
|
||||
package_files = Packages::Helm::PackageFilesFinder.new(
|
||||
authorized_user_project,
|
||||
params[:channel],
|
||||
order_by: 'created_at',
|
||||
sort: 'desc'
|
||||
).execute
|
||||
packages = Packages::Helm::PackagesFinder.new(authorized_user_project, params[:channel]).execute
|
||||
|
||||
env['api.format'] = :yaml
|
||||
present ::Packages::Helm::IndexPresenter.new(authorized_user_project, params[:id], package_files),
|
||||
present ::Packages::Helm::IndexPresenter.new(params[:id], params[:channel], packages),
|
||||
with: ::API::Entities::Helm::Index
|
||||
end
|
||||
|
||||
|
|
|
@ -13,18 +13,53 @@ module Gitlab
|
|||
extend ::Gitlab::Utils::Override
|
||||
include Gitlab::Git::RuggedImpl::UseRugged
|
||||
|
||||
TREE_SORT_ORDER = { tree: 0, blob: 1, commit: 2 }.freeze
|
||||
|
||||
override :tree_entries
|
||||
def tree_entries(repository, sha, path, recursive, pagination_params = nil)
|
||||
if use_rugged?(repository, :rugged_tree_entries)
|
||||
[
|
||||
execute_rugged_call(:tree_entries_with_flat_path_from_rugged, repository, sha, path, recursive),
|
||||
nil
|
||||
]
|
||||
entries = execute_rugged_call(:tree_entries_with_flat_path_from_rugged, repository, sha, path, recursive)
|
||||
|
||||
if pagination_params
|
||||
paginated_response(entries, pagination_params[:limit], pagination_params[:page_token].to_s)
|
||||
else
|
||||
[entries, nil]
|
||||
end
|
||||
else
|
||||
super
|
||||
end
|
||||
end
|
||||
|
||||
# Rugged version of TreePagination in Go: https://gitlab.com/gitlab-org/gitaly/-/merge_requests/3611
|
||||
def paginated_response(entries, limit, token)
|
||||
total_entries = entries.count
|
||||
|
||||
return [[], nil] if limit == 0 || limit.blank?
|
||||
|
||||
entries = Gitlab::Utils.stable_sort_by(entries) { |x| TREE_SORT_ORDER[x.type] }
|
||||
|
||||
if token.blank?
|
||||
index = 0
|
||||
else
|
||||
index = entries.index { |entry| entry.id == token }
|
||||
|
||||
raise Gitlab::Git::CommandError, "could not find starting OID: #{token}" if index.nil?
|
||||
|
||||
index += 1
|
||||
end
|
||||
|
||||
return [entries[index..], nil] if limit < 0
|
||||
|
||||
last_index = index + limit
|
||||
result = entries[index...last_index]
|
||||
|
||||
if last_index < total_entries
|
||||
cursor = Gitaly::PaginationCursor.new(next_cursor: result.last.id)
|
||||
end
|
||||
|
||||
[result, cursor]
|
||||
end
|
||||
|
||||
def tree_entries_with_flat_path_from_rugged(repository, sha, path, recursive)
|
||||
tree_entries_from_rugged(repository, sha, path, recursive).tap do |entries|
|
||||
# This was an optimization to reduce N+1 queries for Gitaly
|
||||
|
|
|
@ -173,6 +173,18 @@ module Gitlab
|
|||
distinct
|
||||
end
|
||||
|
||||
def order_direction_as_sql_string
|
||||
sql_string = ascending_order? ? +'ASC' : +'DESC'
|
||||
|
||||
if nulls_first?
|
||||
sql_string << ' NULLS FIRST'
|
||||
elsif nulls_last?
|
||||
sql_string << ' NULLS LAST'
|
||||
end
|
||||
|
||||
sql_string
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
attr_reader :reversed_order_expression, :nullable, :distinct
|
||||
|
|
|
@ -0,0 +1,59 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
module Gitlab
|
||||
module Pagination
|
||||
module Keyset
|
||||
module InOperatorOptimization
|
||||
class ArrayScopeColumns
|
||||
ARRAY_SCOPE_CTE_NAME = 'array_cte'
|
||||
|
||||
def initialize(columns)
|
||||
validate_columns!(columns)
|
||||
|
||||
array_scope_table = Arel::Table.new(ARRAY_SCOPE_CTE_NAME)
|
||||
@columns = columns.map do |column|
|
||||
ColumnData.new(column, "array_scope_#{column}", array_scope_table)
|
||||
end
|
||||
end
|
||||
|
||||
def array_scope_cte_name
|
||||
ARRAY_SCOPE_CTE_NAME
|
||||
end
|
||||
|
||||
def array_aggregated_columns
|
||||
columns.map(&:array_aggregated_column)
|
||||
end
|
||||
|
||||
def array_aggregated_column_names
|
||||
columns.map(&:array_aggregated_column_name)
|
||||
end
|
||||
|
||||
def arel_columns
|
||||
columns.map(&:arel_column)
|
||||
end
|
||||
|
||||
def array_lookup_expressions_by_position(table_name)
|
||||
columns.map do |column|
|
||||
Arel.sql("#{table_name}.#{column.array_aggregated_column_name}[position]")
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
attr_reader :columns
|
||||
|
||||
def validate_columns!(columns)
|
||||
if columns.blank?
|
||||
msg = <<~MSG
|
||||
No array columns were given.
|
||||
Make sure you explicitly select the columns in the array_scope parameter.
|
||||
Example: Project.select(:id)
|
||||
MSG
|
||||
raise StandardError, msg
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -0,0 +1,39 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
module Gitlab
|
||||
module Pagination
|
||||
module Keyset
|
||||
module InOperatorOptimization
|
||||
class ColumnData
|
||||
attr_reader :original_column_name, :as, :arel_table
|
||||
|
||||
def initialize(original_column_name, as, arel_table)
|
||||
@original_column_name = original_column_name.to_s
|
||||
@as = as.to_s
|
||||
@arel_table = arel_table
|
||||
end
|
||||
|
||||
def projection
|
||||
arel_column.as(as)
|
||||
end
|
||||
|
||||
def arel_column
|
||||
arel_table[original_column_name]
|
||||
end
|
||||
|
||||
def arel_column_as
|
||||
arel_table[as]
|
||||
end
|
||||
|
||||
def array_aggregated_column_name
|
||||
"#{arel_table.name}_#{original_column_name}_array"
|
||||
end
|
||||
|
||||
def array_aggregated_column
|
||||
Arel::Nodes::NamedFunction.new('ARRAY_AGG', [arel_column]).as(array_aggregated_column_name)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -0,0 +1,76 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
module Gitlab
|
||||
module Pagination
|
||||
module Keyset
|
||||
module InOperatorOptimization
|
||||
class OrderByColumns
|
||||
include Enumerable
|
||||
|
||||
# This class exposes collection methods for the order by columns
|
||||
#
|
||||
# Example: by modelling the `issues.created_at ASC, issues.id ASC` ORDER BY
|
||||
# SQL clause, this class will receive two ColumnOrderDefinition objects
|
||||
def initialize(columns, arel_table)
|
||||
@columns = columns.map do |column|
|
||||
ColumnData.new(column.attribute_name, "order_by_columns_#{column.attribute_name}", arel_table)
|
||||
end
|
||||
end
|
||||
|
||||
def arel_columns
|
||||
columns.map(&:arel_column)
|
||||
end
|
||||
|
||||
def array_aggregated_columns
|
||||
columns.map(&:array_aggregated_column)
|
||||
end
|
||||
|
||||
def array_aggregated_column_names
|
||||
columns.map(&:array_aggregated_column_name)
|
||||
end
|
||||
|
||||
def original_column_names
|
||||
columns.map(&:original_column_name)
|
||||
end
|
||||
|
||||
def original_column_names_as_arel_string
|
||||
columns.map { |c| Arel.sql(c.original_column_name) }
|
||||
end
|
||||
|
||||
def original_column_names_as_tmp_tamble
|
||||
temp_table = Arel::Table.new('record')
|
||||
original_column_names.map { |c| temp_table[c] }
|
||||
end
|
||||
|
||||
def cursor_values(table_name)
|
||||
columns.each_with_object({}) do |column, hash|
|
||||
hash[column.original_column_name] = Arel.sql("#{table_name}.#{column.array_aggregated_column_name}[position]")
|
||||
end
|
||||
end
|
||||
|
||||
def array_lookup_expressions_by_position(table_name)
|
||||
columns.map do |column|
|
||||
Arel.sql("#{table_name}.#{column.array_aggregated_column_name}[position]")
|
||||
end
|
||||
end
|
||||
|
||||
def replace_value_in_array_by_position_expressions
|
||||
columns.map do |column|
|
||||
name = "#{QueryBuilder::RECURSIVE_CTE_NAME}.#{column.array_aggregated_column_name}"
|
||||
new_value = "next_cursor_values.#{column.original_column_name}"
|
||||
"#{name}[:position_query.position-1]||#{new_value}||#{name}[position_query.position+1:]"
|
||||
end
|
||||
end
|
||||
|
||||
def each(&block)
|
||||
columns.each(&block)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
attr_reader :columns
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -0,0 +1,290 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
module Gitlab
|
||||
module Pagination
|
||||
module Keyset
|
||||
module InOperatorOptimization
|
||||
# rubocop: disable CodeReuse/ActiveRecord
|
||||
class QueryBuilder
|
||||
UnsupportedScopeOrder = Class.new(StandardError)
|
||||
|
||||
RECURSIVE_CTE_NAME = 'recursive_keyset_cte'
|
||||
RECORDS_COLUMN = 'records'
|
||||
|
||||
# This class optimizes slow database queries (PostgreSQL specific) where the
|
||||
# IN SQL operator is used with sorting.
|
||||
#
|
||||
# Arguments:
|
||||
# scope - ActiveRecord::Relation supporting keyset pagination
|
||||
# array_scope - ActiveRecord::Relation for the `IN` subselect
|
||||
# array_mapping_scope - Lambda for connecting scope with array_scope
|
||||
# finder_query - ActiveRecord::Relation for finding one row by the passed in cursor values
|
||||
# values - keyset cursor values (optional)
|
||||
#
|
||||
# Example ActiveRecord query: Issues in the namespace hierarchy
|
||||
# > scope = Issue
|
||||
# > .where(project_id: Group.find(9970).all_projects.select(:id))
|
||||
# > .order(:created_at, :id)
|
||||
# > .limit(20);
|
||||
#
|
||||
# Optimized version:
|
||||
#
|
||||
# > scope = Issue.where({}).order(:created_at, :id) # base scope
|
||||
# > array_scope = Group.find(9970).all_projects.select(:id)
|
||||
# > array_mapping_scope = -> (id_expression) { Issue.where(Issue.arel_table[:project_id].eq(id_expression)) }
|
||||
#
|
||||
# # finding the record by id is good enough, we can ignore the created_at_expression
|
||||
# > finder_query = -> (created_at_expression, id_expression) { Issue.where(Issue.arel_table[:id].eq(id_expression)) }
|
||||
#
|
||||
# > Gitlab::Pagination::Keyset::InOperatorOptimization::QueryBuilder.new(
|
||||
# > scope: scope,
|
||||
# > array_scope: array_scope,
|
||||
# > array_mapping_scope: array_mapping_scope,
|
||||
# > finder_query: finder_query
|
||||
# > ).execute.limit(20)
|
||||
def initialize(scope:, array_scope:, array_mapping_scope:, finder_query:, values: {})
|
||||
@scope, success = Gitlab::Pagination::Keyset::SimpleOrderBuilder.build(scope)
|
||||
|
||||
unless success
|
||||
error_message = <<~MSG
|
||||
The order on the scope does not support keyset pagination. You might need to define a custom Order object.\n
|
||||
See https://docs.gitlab.com/ee/development/database/keyset_pagination.html#complex-order-configuration\n
|
||||
Or the Gitlab::Pagination::Keyset::Order class for examples
|
||||
MSG
|
||||
raise(UnsupportedScopeOrder, error_message)
|
||||
end
|
||||
|
||||
@order = Gitlab::Pagination::Keyset::Order.extract_keyset_order_object(scope)
|
||||
@array_scope = array_scope
|
||||
@array_mapping_scope = array_mapping_scope
|
||||
@finder_query = finder_query
|
||||
@values = values
|
||||
@model = @scope.model
|
||||
@table_name = @model.table_name
|
||||
@arel_table = @model.arel_table
|
||||
end
|
||||
|
||||
def execute
|
||||
selector_cte = Gitlab::SQL::CTE.new(:array_cte, array_scope)
|
||||
|
||||
cte = Gitlab::SQL::RecursiveCTE.new(RECURSIVE_CTE_NAME, union_args: { remove_duplicates: false, remove_order: false })
|
||||
cte << initializer_query
|
||||
cte << data_collector_query
|
||||
|
||||
q = cte
|
||||
.apply_to(model.where({})
|
||||
.with(selector_cte.to_arel))
|
||||
.select(result_collector_final_projections)
|
||||
.where("count <> 0") # filter out the initializer row
|
||||
|
||||
model.from(q.arel.as(table_name))
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
attr_reader :array_scope, :scope, :order, :array_mapping_scope, :finder_query, :values, :model, :table_name, :arel_table
|
||||
|
||||
def initializer_query
|
||||
array_column_names = array_scope_columns.array_aggregated_column_names + order_by_columns.array_aggregated_column_names
|
||||
|
||||
projections = [
|
||||
*result_collector_initializer_columns,
|
||||
*array_column_names,
|
||||
'0::bigint AS count'
|
||||
]
|
||||
|
||||
model.select(projections).from(build_column_arrays_query).limit(1)
|
||||
end
|
||||
|
||||
# This query finds the first cursor values for each item in the array CTE.
|
||||
#
|
||||
# array_cte:
|
||||
#
|
||||
# |project_id|
|
||||
# |----------|
|
||||
# | 1|
|
||||
# | 2|
|
||||
# | 3|
|
||||
# | 4|
|
||||
#
|
||||
# For each project_id, find the first issues row by respecting the created_at, id order.
|
||||
#
|
||||
# The `array_mapping_scope` parameter defines how the `array_scope` and the `scope` can be combined.
|
||||
#
|
||||
# scope = Issue.where({}) # empty scope
|
||||
# array_mapping_scope = Issue.where(project_id: X)
|
||||
#
|
||||
# scope.merge(array_mapping_scope) # Issue.where(project_id: X)
|
||||
#
|
||||
# X will be replaced with a value from the `array_cte` temporary table.
|
||||
#
|
||||
# |created_at|id|
|
||||
# |----------|--|
|
||||
# |2020-01-15| 2|
|
||||
# |2020-01-07| 3|
|
||||
# |2020-01-07| 4|
|
||||
# |2020-01-10| 5|
|
||||
def build_column_arrays_query
|
||||
q = Arel::SelectManager.new
|
||||
.project(array_scope_columns.array_aggregated_columns + order_by_columns.array_aggregated_columns)
|
||||
.from(array_cte)
|
||||
.join(Arel.sql("LEFT JOIN LATERAL (#{initial_keyset_query.to_sql}) #{table_name} ON TRUE"))
|
||||
|
||||
order_by_columns.each { |column| q.where(column.arel_column.not_eq(nil)) }
|
||||
|
||||
q.as('array_scope_lateral_query')
|
||||
end
|
||||
|
||||
def array_cte
|
||||
Arel::SelectManager.new
|
||||
.project(array_scope_columns.arel_columns)
|
||||
.from(Arel.sql(array_scope_columns.array_scope_cte_name))
|
||||
.as(array_scope_columns.array_scope_cte_name)
|
||||
end
|
||||
|
||||
def initial_keyset_query
|
||||
keyset_scope = scope.merge(array_mapping_scope.call(*array_scope_columns.arel_columns))
|
||||
order
|
||||
.apply_cursor_conditions(keyset_scope, values, use_union_optimization: true)
|
||||
.reselect(*order_by_columns.arel_columns)
|
||||
.limit(1)
|
||||
end
|
||||
|
||||
def data_collector_query
|
||||
array_column_list = array_scope_columns.array_aggregated_column_names
|
||||
|
||||
order_column_value_arrays = order_by_columns.replace_value_in_array_by_position_expressions
|
||||
|
||||
select = [
|
||||
*result_collector_columns,
|
||||
*array_column_list,
|
||||
*order_column_value_arrays,
|
||||
"#{RECURSIVE_CTE_NAME}.count + 1"
|
||||
]
|
||||
|
||||
from = <<~SQL
|
||||
#{RECURSIVE_CTE_NAME},
|
||||
#{array_order_query.lateral.as('position_query').to_sql},
|
||||
#{ensure_one_row(next_cursor_values_query).lateral.as('next_cursor_values').to_sql}
|
||||
SQL
|
||||
|
||||
model.select(select).from(from)
|
||||
end
|
||||
|
||||
# NULL guard. This method ensures that NULL values are returned when the passed in scope returns 0 rows.
|
||||
# Example query: returns issues.id or NULL
|
||||
#
|
||||
# SELECT issues.id FROM (VALUES (NULL)) nulls (id)
|
||||
# LEFT JOIN (SELECT id FROM issues WHERE id = 1 LIMIT 1) issues ON TRUE
|
||||
# LIMIT 1
|
||||
def ensure_one_row(query)
|
||||
q = Arel::SelectManager.new
|
||||
q.projections = order_by_columns.original_column_names_as_tmp_tamble
|
||||
|
||||
null_values = [nil] * order_by_columns.count
|
||||
|
||||
from = Arel::Nodes::Grouping.new(Arel::Nodes::ValuesList.new([null_values])).as('nulls')
|
||||
|
||||
q.from(from)
|
||||
q.join(Arel.sql("LEFT JOIN (#{query.to_sql}) record ON TRUE"))
|
||||
q.limit = 1
|
||||
q
|
||||
end
|
||||
|
||||
# This subquery finds the cursor values for the next record by sorting the generated cursor arrays in memory and taking the first element.
|
||||
# It combines the cursor arrays (UNNEST) together and sorts them according to the originally defined ORDER BY clause.
|
||||
#
|
||||
# Example: issues in the group hierarchy with ORDER BY created_at, id
|
||||
#
|
||||
# |project_id| |created_at|id| # 2 arrays combined: issues_created_at_array, issues_id_array
|
||||
# |----------| |----------|--|
|
||||
# | 1| |2020-01-15| 2|
|
||||
# | 2| |2020-01-07| 3|
|
||||
# | 3| |2020-01-07| 4|
|
||||
# | 4| |2020-01-10| 5|
|
||||
#
|
||||
# The query will return the cursor values: (2020-01-07, 3) and the array position: 1
|
||||
# From the position, we can tell that the record belongs to the project with id 2.
|
||||
def array_order_query
|
||||
q = Arel::SelectManager.new
|
||||
.project([*order_by_columns.original_column_names_as_arel_string, Arel.sql('position')])
|
||||
.from("UNNEST(#{list(order_by_columns.array_aggregated_column_names)}) WITH ORDINALITY AS u(#{list(order_by_columns.original_column_names)}, position)")
|
||||
|
||||
order_by_columns.each { |column| q.where(Arel.sql(column.original_column_name).not_eq(nil)) } # ignore rows where all columns are NULL
|
||||
|
||||
q.order(Arel.sql(order_by_without_table_references)).take(1)
|
||||
end
|
||||
|
||||
# This subquery finds the next cursor values after the previously determined position (from array_order_query).
|
||||
# The current cursor values are passed in as SQL literals since the actual values are encoded into SQL arrays.
|
||||
#
|
||||
# Example: issues in the group hierarchy with ORDER BY created_at, id
|
||||
#
|
||||
# |project_id| |created_at|id| # 2 arrays combined: issues_created_at_array, issues_id_array
|
||||
# |----------| |----------|--|
|
||||
# | 1| |2020-01-15| 2|
|
||||
# | 2| |2020-01-07| 3|
|
||||
# | 3| |2020-01-07| 4|
|
||||
# | 4| |2020-01-10| 5|
|
||||
#
|
||||
# Assuming that the determined position is 1, the cursor values will be the following:
|
||||
# - Filter: project_id = 2
|
||||
# - created_at = 2020-01-07
|
||||
# - id = 3
|
||||
def next_cursor_values_query
|
||||
cursor_values = order_by_columns.cursor_values(RECURSIVE_CTE_NAME)
|
||||
array_mapping_scope_columns = array_scope_columns.array_lookup_expressions_by_position(RECURSIVE_CTE_NAME)
|
||||
|
||||
keyset_scope = scope
|
||||
.reselect(*order_by_columns.arel_columns)
|
||||
.merge(array_mapping_scope.call(*array_mapping_scope_columns))
|
||||
|
||||
order
|
||||
.apply_cursor_conditions(keyset_scope, cursor_values, use_union_optimization: true)
|
||||
.reselect(*order_by_columns.arel_columns)
|
||||
.limit(1)
|
||||
end
|
||||
|
||||
# Generates an ORDER BY clause by using the column position index and the original order clauses.
|
||||
# This method is used to sort the collected arrays in SQL.
|
||||
# Example: "issues".created_at DESC , "issues".id ASC => 1 DESC, 2 ASC
|
||||
def order_by_without_table_references
|
||||
order.column_definitions.each_with_index.map do |column_definition, i|
|
||||
"#{i + 1} #{column_definition.order_direction_as_sql_string}"
|
||||
end.join(", ")
|
||||
end
|
||||
|
||||
def result_collector_initializer_columns
|
||||
["NULL::#{table_name} AS #{RECORDS_COLUMN}"]
|
||||
end
|
||||
|
||||
def result_collector_columns
|
||||
query = finder_query
|
||||
.call(*order_by_columns.array_lookup_expressions_by_position(RECURSIVE_CTE_NAME))
|
||||
.select("#{table_name}")
|
||||
.limit(1)
|
||||
|
||||
["(#{query.to_sql})"]
|
||||
end
|
||||
|
||||
def result_collector_final_projections
|
||||
["(#{RECORDS_COLUMN}).*"]
|
||||
end
|
||||
|
||||
def array_scope_columns
|
||||
@array_scope_columns ||= ArrayScopeColumns.new(array_scope.select_values)
|
||||
end
|
||||
|
||||
def order_by_columns
|
||||
@order_by_columns ||= OrderByColumns.new(order.column_definitions, arel_table)
|
||||
end
|
||||
|
||||
def list(array)
|
||||
array.join(', ')
|
||||
end
|
||||
end
|
||||
# rubocop: enable CodeReuse/ActiveRecord
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
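The in-operator optimization above leans on PostgreSQL's UNNEST ... WITH ORDINALITY. As a minimal standalone sketch (literal arrays and a raw connection stand in for the aggregated cursor arrays carried by the recursive CTE; the values follow the worked example in the comments above), the ordering step can be reproduced from a Rails console:

sql = <<~SQL
  SELECT created_at, id, position
  FROM UNNEST(
    ARRAY['2020-01-15', '2020-01-07', '2020-01-07', '2020-01-10']::timestamptz[],
    ARRAY[2, 3, 4, 5]::bigint[]
  ) WITH ORDINALITY AS u(created_at, id, position)
  WHERE created_at IS NOT NULL AND id IS NOT NULL
  ORDER BY 1 ASC, 2 ASC -- the positional ORDER BY produced by order_by_without_table_references
  LIMIT 1
SQL

ActiveRecord::Base.connection.select_one(sql)
# => created_at 2020-01-07, id 3, position 2: the next cursor and its 1-based array position

The returned position is then used to index back into the array columns (for example issues_id_array[position]) to tell which array element, and therefore which project, supplied the next record.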
@@ -6,12 +6,13 @@ module Gitlab
|
|||
class Iterator
|
||||
UnsupportedScopeOrder = Class.new(StandardError)
|
||||
|
||||
def initialize(scope:, use_union_optimization: true)
|
||||
def initialize(scope:, use_union_optimization: true, in_operator_optimization_options: nil)
|
||||
@scope, success = Gitlab::Pagination::Keyset::SimpleOrderBuilder.build(scope)
|
||||
raise(UnsupportedScopeOrder, 'The order on the scope does not support keyset pagination') unless success
|
||||
|
||||
@order = Gitlab::Pagination::Keyset::Order.extract_keyset_order_object(scope)
|
||||
@use_union_optimization = use_union_optimization
|
||||
@use_union_optimization = in_operator_optimization_options ? false : use_union_optimization
|
||||
@in_operator_optimization_options = in_operator_optimization_options
|
||||
end
|
||||
|
||||
# rubocop: disable CodeReuse/ActiveRecord
|
||||
|
@@ -19,11 +20,10 @@ module Gitlab
|
|||
cursor_attributes = {}
|
||||
|
||||
loop do
|
||||
current_scope = scope.dup.limit(of)
|
||||
relation = order
|
||||
.apply_cursor_conditions(current_scope, cursor_attributes, { use_union_optimization: @use_union_optimization })
|
||||
.reorder(order)
|
||||
.limit(of)
|
||||
current_scope = scope.dup
|
||||
relation = order.apply_cursor_conditions(current_scope, cursor_attributes, keyset_options)
|
||||
relation = relation.reorder(order) unless @in_operator_optimization_options
|
||||
relation = relation.limit(of)
|
||||
|
||||
yield relation
|
||||
|
||||
|
@@ -38,6 +38,13 @@ module Gitlab
|
|||
private
|
||||
|
||||
attr_reader :scope, :order
|
||||
|
||||
def keyset_options
|
||||
{
|
||||
use_union_optimization: @use_union_optimization,
|
||||
in_operator_optimization_options: @in_operator_optimization_options
|
||||
}
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
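A hedged usage sketch of the new option (the group, scope, and batch size are placeholders; the option keys mirror the QueryBuilder spec added later in this commit):

scope = Issue.order(id: :desc)

iterator = Gitlab::Pagination::Keyset::Iterator.new(
  scope: scope,
  in_operator_optimization_options: {
    # the array of values to distribute over: all project ids in a (placeholder) group hierarchy
    array_scope: Project.where(namespace_id: some_group.self_and_descendants.select(:id)).select(:id),
    # how one array item (a project id) maps to the rows it owns
    array_mapping_scope: -> (id_expression) { Issue.where(Issue.arel_table[:project_id].eq(id_expression)) },
    # how to load the full row once the winning cursor is known
    finder_query: -> (id_expression) { Issue.where(Issue.arel_table[:id].eq(id_expression)) }
  }
)

iterator.each_batch(of: 100) do |records|
  records.each { |issue| process(issue) } # process is a placeholder
end

Note that passing in_operator_optimization_options disables the UNION optimization and skips the extra reorder, since the generated query already returns rows in the requested order.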
|
|
@@ -152,15 +152,24 @@ module Gitlab
|
|||
end
|
||||
|
||||
# rubocop: disable CodeReuse/ActiveRecord
|
||||
def apply_cursor_conditions(scope, values = {}, options = { use_union_optimization: false })
|
||||
def apply_cursor_conditions(scope, values = {}, options = { use_union_optimization: false, in_operator_optimization_options: nil })
|
||||
values ||= {}
|
||||
transformed_values = values.with_indifferent_access
|
||||
scope = apply_custom_projections(scope)
|
||||
scope = apply_custom_projections(scope.dup)
|
||||
|
||||
where_values = build_where_values(transformed_values)
|
||||
|
||||
if options[:use_union_optimization] && where_values.size > 1
|
||||
build_union_query(scope, where_values).reorder(self)
|
||||
elsif options[:in_operator_optimization_options]
|
||||
opts = options[:in_operator_optimization_options]
|
||||
|
||||
Gitlab::Pagination::Keyset::InOperatorOptimization::QueryBuilder.new(
|
||||
**{
|
||||
scope: scope.reorder(self),
|
||||
values: values
|
||||
}.merge(opts)
|
||||
).execute
|
||||
else
|
||||
scope.where(build_or_query(where_values)) # rubocop: disable CodeReuse/ActiveRecord
|
||||
end
|
||||
|
@@ -187,7 +196,7 @@ module Gitlab
|
|||
columns = Arel::Nodes::Grouping.new(column_definitions.map(&:column_expression))
|
||||
values = Arel::Nodes::Grouping.new(column_definitions.map do |column_definition|
|
||||
value = values[column_definition.attribute_name]
|
||||
Arel::Nodes.build_quoted(value, column_definition.column_expression)
|
||||
build_quoted(value, column_definition.column_expression)
|
||||
end)
|
||||
|
||||
if column_definitions.first.ascending_order?
|
||||
|
@@ -197,6 +206,12 @@ module Gitlab
|
|||
end
|
||||
end
|
||||
|
||||
def build_quoted(value, column_expression)
|
||||
return value if value.instance_of?(Arel::Nodes::SqlLiteral)
|
||||
|
||||
Arel::Nodes.build_quoted(value, column_expression)
|
||||
end
|
||||
|
||||
# Adds extra columns to the SELECT clause
|
||||
def apply_custom_projections(scope)
|
||||
additional_projections = column_definitions.select(&:add_to_projections).map do |column_definition|
|
||||
|
|
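For completeness, a lower-level sketch of the new branch in apply_cursor_conditions (all values are placeholders; the scope is normalized the same way the iterator does it):

scope, _success = Gitlab::Pagination::Keyset::SimpleOrderBuilder.build(Issue.order(id: :desc))
order = Gitlab::Pagination::Keyset::Order.extract_keyset_order_object(scope)

relation = order.apply_cursor_conditions(
  scope,
  { id: 1000 }, # cursor attributes from the previous page (placeholder id)
  {
    in_operator_optimization_options: {
      array_scope: Project.where(namespace_id: some_group.self_and_descendants.select(:id)).select(:id),
      array_mapping_scope: -> (id_expression) { Issue.where(Issue.arel_table[:project_id].eq(id_expression)) },
      finder_query: -> (id_expression) { Issue.where(Issue.arel_table[:id].eq(id_expression)) }
    }
  }
)

relation.limit(20) # already reordered by the QueryBuilder, so no extra reorder is needed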
|
@@ -4,4 +4,21 @@ unless Rails.env.production?
|
|||
require 'rubocop/rake_task'
|
||||
|
||||
RuboCop::RakeTask.new
|
||||
|
||||
namespace :rubocop do
|
||||
namespace :todo do
|
||||
desc 'Generate RuboCop todos'
|
||||
task :generate do
|
||||
require 'rubocop'
|
||||
|
||||
options = %w[
|
||||
--auto-gen-config
|
||||
--auto-gen-only-exclude
|
||||
--exclude-limit=100000
|
||||
]
|
||||
|
||||
RuboCop::CLI.new.run(options)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
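With this task in place, the todo files should be regenerable with bundle exec rake rubocop:todo:generate, which runs RuboCop::CLI in-process with the --auto-gen-config flags listed above.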
|
|
|
@@ -29908,9 +29908,6 @@ msgstr ""
|
|||
msgid "SecurityOrchestration|To widen your search, change filters above or select a different security policy project."
|
||||
msgstr ""
|
||||
|
||||
msgid "SecurityOrchestration|Unable to parse policy"
|
||||
msgstr ""
|
||||
|
||||
msgid "SecurityOrchestration|Update scan execution policies"
|
||||
msgstr ""
|
||||
|
||||
|
|
|
@@ -58,7 +58,7 @@ module QA
|
|||
it_behaves_like 'successful tag creation', :maintainer_user
|
||||
end
|
||||
|
||||
context 'when protected', quarantine: { issue: 'https://gitlab.com/gitlab-org/gitlab/-/issues/339727', type: :bug } do
|
||||
context 'when protected' do
|
||||
before do
|
||||
add_members_to_project(project)
|
||||
|
||||
|
|
|
@@ -63,9 +63,7 @@ function retrieve_tests_mapping() {
|
|||
local artifact_branch="master"
|
||||
local test_metadata_with_mapping_job_id
|
||||
|
||||
# FIXME: retrieving job id is failing https://gitlab.com/gitlab-org/gitlab/-/issues/340706
|
||||
# test_metadata_with_mapping_job_id=$(scripts/api/get_job_id.rb --endpoint "https://gitlab.com/api/v4" --project "${project_path}" -q "status=success" -q "ref=${artifact_branch}" -q "username=gitlab-bot" -Q "scope=success" --job-name "update-tests-metadata" --artifact-path "${RSPEC_PACKED_TESTS_MAPPING_PATH}.gz")
|
||||
test_metadata_with_mapping_job_id="1583877936"
|
||||
test_metadata_with_mapping_job_id=$(scripts/api/get_job_id.rb --endpoint "https://gitlab.com/api/v4" --project "${project_path}" -q "status=success" -q "ref=${artifact_branch}" -q "username=gitlab-bot" -Q "scope=success" --job-name "update-tests-metadata" --artifact-path "${RSPEC_PACKED_TESTS_MAPPING_PATH}.gz")
|
||||
|
||||
if [[ ! -f "${RSPEC_PACKED_TESTS_MAPPING_PATH}" ]]; then
|
||||
(scripts/api/download_job_artifact.rb --endpoint "https://gitlab.com/api/v4" --project "${project_path}" --job-id "${test_metadata_with_mapping_job_id}" --artifact-path "${RSPEC_PACKED_TESTS_MAPPING_PATH}.gz" && gzip -d "${RSPEC_PACKED_TESTS_MAPPING_PATH}.gz") || echo "{}" > "${RSPEC_PACKED_TESTS_MAPPING_PATH}"
|
||||
|
|
|
@@ -35,6 +35,7 @@ RSpec.describe 'Database schema' do
|
|||
cluster_providers_gcp: %w[gcp_project_id operation_id],
|
||||
compliance_management_frameworks: %w[group_id],
|
||||
commit_user_mentions: %w[commit_id],
|
||||
dep_ci_build_trace_sections: %w[build_id],
|
||||
deploy_keys_projects: %w[deploy_key_id],
|
||||
deployments: %w[deployable_id user_id],
|
||||
draft_notes: %w[discussion_id commit_id],
|
||||
|
|
|
@@ -2,8 +2,16 @@
|
|||
|
||||
FactoryBot.define do
|
||||
factory :helm_file_metadatum, class: 'Packages::Helm::FileMetadatum' do
|
||||
transient do
|
||||
description { nil }
|
||||
end
|
||||
|
||||
package_file { association(:helm_package_file, without_loaded_metadatum: true) }
|
||||
sequence(:channel) { |n| "#{FFaker::Lorem.word}-#{n}" }
|
||||
metadata { { 'name': package_file.package.name, 'version': package_file.package.version, 'apiVersion': 'v2' } }
|
||||
metadata do
|
||||
{ 'name': package_file.package.name, 'version': package_file.package.version, 'apiVersion': 'v2' }.tap do |defaults|
|
||||
defaults['description'] = description if description
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@@ -212,11 +212,12 @@ FactoryBot.define do
|
|||
package_name { package&.name || 'foo' }
|
||||
sequence(:package_version) { |n| package&.version || "v#{n}" }
|
||||
channel { 'stable' }
|
||||
description { nil }
|
||||
end
|
||||
|
||||
after :create do |package_file, evaluator|
|
||||
unless evaluator.without_loaded_metadatum
|
||||
create :helm_file_metadatum, package_file: package_file, channel: evaluator.channel
|
||||
create :helm_file_metadatum, package_file: package_file, channel: evaluator.channel, description: evaluator.description
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
74
spec/finders/packages/helm/packages_finder_spec.rb
Normal file
|
@@ -0,0 +1,74 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
require 'spec_helper'
|
||||
|
||||
RSpec.describe ::Packages::Helm::PackagesFinder do
|
||||
let_it_be(:project1) { create(:project) }
|
||||
let_it_be(:project2) { create(:project) }
|
||||
let_it_be(:helm_package) { create(:helm_package, project: project1) }
|
||||
let_it_be(:npm_package) { create(:npm_package, project: project1) }
|
||||
let_it_be(:npm_package2) { create(:npm_package, project: project2) }
|
||||
|
||||
let(:project) { project1 }
|
||||
let(:channel) { 'stable' }
|
||||
let(:finder) { described_class.new(project, channel) }
|
||||
|
||||
describe '#execute' do
|
||||
subject { finder.execute }
|
||||
|
||||
context 'with project' do
|
||||
context 'with channel' do
|
||||
it { is_expected.to eq([helm_package]) }
|
||||
|
||||
context 'ignores duplicate package files' do
|
||||
let_it_be(:package_file1) { create(:helm_package_file, package: helm_package) }
|
||||
let_it_be(:package_file2) { create(:helm_package_file, package: helm_package) }
|
||||
|
||||
it { is_expected.to eq([helm_package]) }
|
||||
|
||||
context 'let clients use select id' do
|
||||
subject { finder.execute.pluck_primary_key }
|
||||
|
||||
it { is_expected.to eq([helm_package.id]) }
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
context 'with a non-existent channel' do
|
||||
let(:channel) { 'alpha' }
|
||||
|
||||
it { is_expected.to be_empty }
|
||||
end
|
||||
|
||||
context 'with no channel' do
|
||||
let(:channel) { nil }
|
||||
|
||||
it { is_expected.to be_empty }
|
||||
end
|
||||
|
||||
context 'with no helm packages' do
|
||||
let(:project) { project2 }
|
||||
|
||||
it { is_expected.to be_empty }
|
||||
end
|
||||
end
|
||||
|
||||
context 'with no project' do
|
||||
let(:project) { nil }
|
||||
|
||||
it { is_expected.to be_empty }
|
||||
end
|
||||
|
||||
context 'when the limit is hit' do
|
||||
let_it_be(:helm_package2) { create(:helm_package, project: project1) }
|
||||
let_it_be(:helm_package3) { create(:helm_package, project: project1) }
|
||||
let_it_be(:helm_package4) { create(:helm_package, project: project1) }
|
||||
|
||||
before do
|
||||
stub_const("#{described_class}::MAX_PACKAGES_COUNT", 2)
|
||||
end
|
||||
|
||||
it { is_expected.to eq([helm_package4, helm_package3]) }
|
||||
end
|
||||
end
|
||||
end
|
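Taken together, these examples pin down the finder's contract. A hypothetical console usage (project and channel are placeholders):

finder = ::Packages::Helm::PackagesFinder.new(project, 'stable')
finder.execute                    # up to MAX_PACKAGES_COUNT unique packages, most recent first
finder.execute.pluck_primary_key  # callers may also select only the package ids, as exercised above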
|
@@ -17,11 +17,20 @@ RSpec.describe Resolvers::BoardListIssuesResolver do
|
|||
|
||||
# auth is handled by the parent object
|
||||
context 'when authorized' do
|
||||
let!(:issue1) { create(:issue, project: project, labels: [label], relative_position: 10) }
|
||||
let!(:issue2) { create(:issue, project: project, labels: [label, label2], relative_position: 12) }
|
||||
let!(:issue3) { create(:issue, project: project, labels: [label, label3], relative_position: 10) }
|
||||
let!(:issue1) { create(:issue, project: project, labels: [label], relative_position: 10, milestone: started_milestone) }
|
||||
let!(:issue2) { create(:issue, project: project, labels: [label, label2], relative_position: 12, milestone: started_milestone) }
|
||||
let!(:issue3) { create(:issue, project: project, labels: [label, label3], relative_position: 10, milestone: future_milestone) }
|
||||
let!(:issue4) { create(:issue, project: project, labels: [label], relative_position: nil) }
|
||||
|
||||
let(:wildcard_started) { 'STARTED' }
|
||||
let(:filters) { { milestone_title: ["started"], milestone_wildcard_id: wildcard_started } }
|
||||
|
||||
it 'raises a mutually exclusive filter error when milestone wildcard and title are provided' do
|
||||
expect do
|
||||
resolve_board_list_issues(args: { filters: filters })
|
||||
end.to raise_error(Gitlab::Graphql::Errors::ArgumentError)
|
||||
end
|
||||
|
||||
it 'returns issues in the correct order with non-nil relative positions', :aggregate_failures do
|
||||
# by relative_position and then ID
|
||||
result = resolve_board_list_issues
|
||||
|
@@ -36,6 +45,12 @@ RSpec.describe Resolvers::BoardListIssuesResolver do
|
|||
expect(result).to match_array([issue1, issue3, issue4])
|
||||
end
|
||||
|
||||
it 'finds only issues filtered by milestone wildcard' do
|
||||
result = resolve_board_list_issues(args: { filters: { milestone_wildcard_id: wildcard_started } })
|
||||
|
||||
expect(result).to match_array([issue1, issue2])
|
||||
end
|
||||
|
||||
it 'finds only issues matching search param' do
|
||||
result = resolve_board_list_issues(args: { filters: { search: issue1.title } })
|
||||
|
||||
|
@@ -73,6 +88,9 @@ RSpec.describe Resolvers::BoardListIssuesResolver do
|
|||
let(:board_parent) { user_project }
|
||||
let(:project) { user_project }
|
||||
|
||||
let_it_be(:started_milestone) { create(:milestone, project: user_project, title: 'started milestone', start_date: 1.day.ago, due_date: 1.day.from_now) }
|
||||
let_it_be(:future_milestone) { create(:milestone, project: user_project, title: 'future milestone', start_date: 1.day.from_now) }
|
||||
|
||||
it_behaves_like 'group and project board list issues resolver'
|
||||
end
|
||||
|
||||
|
@@ -86,6 +104,9 @@ RSpec.describe Resolvers::BoardListIssuesResolver do
|
|||
let(:board_parent) { group }
|
||||
let!(:project) { create(:project, :private, group: group) }
|
||||
|
||||
let_it_be(:started_milestone) { create(:milestone, group: group, title: 'started milestone', start_date: 1.day.ago, due_date: 1.day.from_now) }
|
||||
let_it_be(:future_milestone) { create(:milestone, group: group, title: 'future milestone', start_date: 1.day.from_now) }
|
||||
|
||||
it_behaves_like 'group and project board list issues resolver'
|
||||
end
|
||||
end
|
||||
|
|
|
@@ -189,12 +189,109 @@ RSpec.describe Gitlab::Git::Tree, :seed_helper do
|
|||
end
|
||||
|
||||
it_behaves_like :repo do
|
||||
context 'with pagination parameters' do
|
||||
let(:pagination_params) { { limit: 3, page_token: nil } }
|
||||
describe 'Pagination' do
|
||||
context 'with restrictive limit' do
|
||||
let(:pagination_params) { { limit: 3, page_token: nil } }
|
||||
|
||||
it 'does not support pagination' do
|
||||
expect(entries.count).to be >= 10
|
||||
expect(cursor).to be_nil
|
||||
it 'returns limited paginated list of tree objects' do
|
||||
expect(entries.count).to eq(3)
|
||||
expect(cursor.next_cursor).to be_present
|
||||
end
|
||||
end
|
||||
|
||||
context 'when limit is equal to number of entries' do
|
||||
let(:entries_count) { entries.count }
|
||||
|
||||
it 'returns all entries without a cursor' do
|
||||
result, cursor = Gitlab::Git::Tree.where(repository, sha, path, recursive, { limit: entries_count, page_token: nil })
|
||||
|
||||
expect(cursor).to be_nil
|
||||
expect(result.entries.count).to eq(entries_count)
|
||||
end
|
||||
end
|
||||
|
||||
context 'when limit is 0' do
|
||||
let(:pagination_params) { { limit: 0, page_token: nil } }
|
||||
|
||||
it 'returns empty result' do
|
||||
expect(entries).to eq([])
|
||||
expect(cursor).to be_nil
|
||||
end
|
||||
end
|
||||
|
||||
context 'when limit is missing' do
|
||||
let(:pagination_params) { { limit: nil, page_token: nil } }
|
||||
|
||||
it 'returns empty result' do
|
||||
expect(entries).to eq([])
|
||||
expect(cursor).to be_nil
|
||||
end
|
||||
end
|
||||
|
||||
context 'when limit is negative' do
|
||||
let(:entries_count) { entries.count }
|
||||
|
||||
it 'returns all entries' do
|
||||
result, cursor = Gitlab::Git::Tree.where(repository, sha, path, recursive, { limit: -1, page_token: nil })
|
||||
|
||||
expect(result.count).to eq(entries_count)
|
||||
expect(cursor).to be_nil
|
||||
end
|
||||
|
||||
context 'when token is provided' do
|
||||
let(:pagination_params) { { limit: 1000, page_token: nil } }
|
||||
let(:token) { entries.second.id }
|
||||
|
||||
it 'returns all entries after token' do
|
||||
result, cursor = Gitlab::Git::Tree.where(repository, sha, path, recursive, { limit: -1, page_token: token })
|
||||
|
||||
expect(result.count).to eq(entries.count - 2)
|
||||
expect(cursor).to be_nil
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
context 'when token does not exist' do
|
||||
let(:pagination_params) { { limit: 5, page_token: 'aabbccdd' } }
|
||||
|
||||
it 'raises a command error' do
|
||||
expect { entries }.to raise_error(Gitlab::Git::CommandError, 'could not find starting OID: aabbccdd')
|
||||
end
|
||||
end
|
||||
|
||||
context 'when limit is bigger than number of entries' do
|
||||
let(:pagination_params) { { limit: 1000, page_token: nil } }
|
||||
|
||||
it 'returns only available entries' do
|
||||
expect(entries.count).to be < 20
|
||||
expect(cursor).to be_nil
|
||||
end
|
||||
end
|
||||
|
||||
it 'returns all tree entries in specific order during cursor pagination' do
|
||||
collected_entries = []
|
||||
token = nil
|
||||
|
||||
expected_entries = entries
|
||||
|
||||
loop do
|
||||
result, cursor = Gitlab::Git::Tree.where(repository, sha, path, recursive, { limit: 5, page_token: token })
|
||||
|
||||
collected_entries += result.entries
|
||||
token = cursor&.next_cursor
|
||||
|
||||
break if token.blank?
|
||||
end
|
||||
|
||||
expect(collected_entries.map(&:path)).to match_array(expected_entries.map(&:path))
|
||||
|
||||
expected_order = [
|
||||
collected_entries.select(&:dir?).map(&:path),
|
||||
collected_entries.select(&:file?).map(&:path),
|
||||
collected_entries.select(&:submodule?).map(&:path)
|
||||
].flatten
|
||||
|
||||
expect(collected_entries.map(&:path)).to eq(expected_order)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
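The cursor loop in the last example is essentially how a caller drains the paginated listing. A sketch with placeholder arguments (repository, sha, path, recursive):

all_entries = []
token = nil

loop do
  page, cursor = Gitlab::Git::Tree.where(repository, sha, path, recursive, { limit: 100, page_token: token })
  all_entries.concat(page.entries)

  token = cursor&.next_cursor
  break if token.blank?
end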
|
|
@@ -185,4 +185,25 @@ RSpec.describe Gitlab::Pagination::Keyset::ColumnOrderDefinition do
|
|||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe "#order_direction_as_sql_string" do
|
||||
let(:nulls_last_order) do
|
||||
described_class.new(
|
||||
attribute_name: :name,
|
||||
column_expression: Project.arel_table[:name],
|
||||
order_expression: Gitlab::Database.nulls_last_order('merge_request_metrics.merged_at', :desc),
|
||||
reversed_order_expression: Gitlab::Database.nulls_first_order('merge_request_metrics.merged_at', :asc),
|
||||
order_direction: :desc,
|
||||
nullable: :nulls_last, # null values are always last
|
||||
distinct: false
|
||||
)
|
||||
end
|
||||
|
||||
it { expect(project_name_column.order_direction_as_sql_string).to eq('ASC') }
|
||||
it { expect(project_name_column.reverse.order_direction_as_sql_string).to eq('DESC') }
|
||||
it { expect(project_name_lower_column.order_direction_as_sql_string).to eq('DESC') }
|
||||
it { expect(project_name_lower_column.reverse.order_direction_as_sql_string).to eq('ASC') }
|
||||
it { expect(nulls_last_order.order_direction_as_sql_string).to eq('DESC NULLS LAST') }
|
||||
it { expect(nulls_last_order.reverse.order_direction_as_sql_string).to eq('ASC NULLS FIRST') }
|
||||
end
|
||||
end
|
||||
|
|
|
@@ -0,0 +1,19 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
require 'spec_helper'
|
||||
|
||||
RSpec.describe Gitlab::Pagination::Keyset::InOperatorOptimization::ArrayScopeColumns do
|
||||
let(:columns) { [:relative_position, :id] }
|
||||
|
||||
subject(:array_scope_columns) { described_class.new(columns) }
|
||||
|
||||
it 'builds array column names' do
|
||||
expect(array_scope_columns.array_aggregated_column_names).to eq(%w[array_cte_relative_position_array array_cte_id_array])
|
||||
end
|
||||
|
||||
context 'when no columns are given' do
|
||||
let(:columns) { [] }
|
||||
|
||||
it { expect { array_scope_columns }.to raise_error /No array columns were given/ }
|
||||
end
|
||||
end
|
|
@@ -0,0 +1,23 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
require 'spec_helper'
|
||||
|
||||
RSpec.describe Gitlab::Pagination::Keyset::InOperatorOptimization::ColumnData do
|
||||
subject(:column_data) { described_class.new('id', 'issue_id', Issue.arel_table) }
|
||||
|
||||
describe '#array_aggregated_column_name' do
|
||||
it { expect(column_data.array_aggregated_column_name).to eq('issues_id_array') }
|
||||
end
|
||||
|
||||
describe '#projection' do
|
||||
it 'returns the Arel projection for the column with a new alias' do
|
||||
expect(column_data.projection.to_sql).to eq('"issues"."id" AS issue_id')
|
||||
end
|
||||
end
|
||||
|
||||
it 'accepts symbols for original_column_name and as' do
|
||||
column_data = described_class.new(:id, :issue_id, Issue.arel_table)
|
||||
|
||||
expect(column_data.projection.to_sql).to eq('"issues"."id" AS issue_id')
|
||||
end
|
||||
end
|
|
@@ -0,0 +1,37 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
require 'spec_helper'
|
||||
|
||||
RSpec.describe Gitlab::Pagination::Keyset::InOperatorOptimization::OrderByColumns do
|
||||
let(:columns) do
|
||||
[
|
||||
Gitlab::Pagination::Keyset::ColumnOrderDefinition.new(
|
||||
attribute_name: :relative_position,
|
||||
order_expression: Issue.arel_table[:relative_position].desc
|
||||
),
|
||||
Gitlab::Pagination::Keyset::ColumnOrderDefinition.new(
|
||||
attribute_name: :id,
|
||||
order_expression: Issue.arel_table[:id].desc
|
||||
)
|
||||
]
|
||||
end
|
||||
|
||||
subject(:order_by_columns) { described_class.new(columns, Issue.arel_table) }
|
||||
|
||||
describe '#array_aggregated_column_names' do
|
||||
it { expect(order_by_columns.array_aggregated_column_names).to eq(%w[issues_relative_position_array issues_id_array]) }
|
||||
end
|
||||
|
||||
describe '#original_column_names' do
|
||||
it { expect(order_by_columns.original_column_names).to eq(%w[relative_position id]) }
|
||||
end
|
||||
|
||||
describe '#cursor_values' do
|
||||
it 'returns the keyset pagination cursor values from the column arrays as SQL expression' do
|
||||
expect(order_by_columns.cursor_values('tbl')).to eq({
|
||||
"id" => "tbl.issues_id_array[position]",
|
||||
"relative_position" => "tbl.issues_relative_position_array[position]"
|
||||
})
|
||||
end
|
||||
end
|
||||
end
|
|
@@ -0,0 +1,225 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
require 'spec_helper'
|
||||
|
||||
RSpec.describe Gitlab::Pagination::Keyset::InOperatorOptimization::QueryBuilder do
|
||||
let_it_be(:two_weeks_ago) { 2.weeks.ago }
|
||||
let_it_be(:three_weeks_ago) { 3.weeks.ago }
|
||||
let_it_be(:four_weeks_ago) { 4.weeks.ago }
|
||||
let_it_be(:five_weeks_ago) { 5.weeks.ago }
|
||||
|
||||
let_it_be(:top_level_group) { create(:group) }
|
||||
let_it_be(:sub_group_1) { create(:group, parent: top_level_group) }
|
||||
let_it_be(:sub_group_2) { create(:group, parent: top_level_group) }
|
||||
let_it_be(:sub_sub_group_1) { create(:group, parent: sub_group_2) }
|
||||
|
||||
let_it_be(:project_1) { create(:project, group: top_level_group) }
|
||||
let_it_be(:project_2) { create(:project, group: top_level_group) }
|
||||
|
||||
let_it_be(:project_3) { create(:project, group: sub_group_1) }
|
||||
let_it_be(:project_4) { create(:project, group: sub_group_2) }
|
||||
|
||||
let_it_be(:project_5) { create(:project, group: sub_sub_group_1) }
|
||||
|
||||
let_it_be(:issues) do
|
||||
[
|
||||
create(:issue, project: project_1, created_at: three_weeks_ago, relative_position: 5),
|
||||
create(:issue, project: project_1, created_at: two_weeks_ago),
|
||||
create(:issue, project: project_2, created_at: two_weeks_ago, relative_position: 15),
|
||||
create(:issue, project: project_2, created_at: two_weeks_ago),
|
||||
create(:issue, project: project_3, created_at: four_weeks_ago),
|
||||
create(:issue, project: project_4, created_at: five_weeks_ago, relative_position: 10),
|
||||
create(:issue, project: project_5, created_at: four_weeks_ago)
|
||||
]
|
||||
end
|
||||
|
||||
shared_examples 'correct ordering examples' do
|
||||
let(:iterator) do
|
||||
Gitlab::Pagination::Keyset::Iterator.new(
|
||||
scope: scope.limit(batch_size),
|
||||
in_operator_optimization_options: in_operator_optimization_options
|
||||
)
|
||||
end
|
||||
|
||||
it 'returns records in correct order' do
|
||||
all_records = []
|
||||
iterator.each_batch(of: batch_size) do |records|
|
||||
all_records.concat(records)
|
||||
end
|
||||
|
||||
expect(all_records).to eq(expected_order)
|
||||
end
|
||||
end
|
||||
|
||||
context 'when ordering by issues.id DESC' do
|
||||
let(:scope) { Issue.order(id: :desc) }
|
||||
let(:expected_order) { issues.sort_by(&:id).reverse }
|
||||
|
||||
let(:in_operator_optimization_options) do
|
||||
{
|
||||
array_scope: Project.where(namespace_id: top_level_group.self_and_descendants.select(:id)).select(:id),
|
||||
array_mapping_scope: -> (id_expression) { Issue.where(Issue.arel_table[:project_id].eq(id_expression)) },
|
||||
finder_query: -> (id_expression) { Issue.where(Issue.arel_table[:id].eq(id_expression)) }
|
||||
}
|
||||
end
|
||||
|
||||
context 'when iterating records one by one' do
|
||||
let(:batch_size) { 1 }
|
||||
|
||||
it_behaves_like 'correct ordering examples'
|
||||
end
|
||||
|
||||
context 'when iterating records with LIMIT 3' do
|
||||
let(:batch_size) { 3 }
|
||||
|
||||
it_behaves_like 'correct ordering examples'
|
||||
end
|
||||
|
||||
context 'when loading records at once' do
|
||||
let(:batch_size) { issues.size + 1 }
|
||||
|
||||
it_behaves_like 'correct ordering examples'
|
||||
end
|
||||
end
|
||||
|
||||
context 'when ordering by issues.relative_position DESC NULLS LAST, id DESC' do
|
||||
let(:scope) { Issue.order(order) }
|
||||
let(:expected_order) { scope.to_a }
|
||||
|
||||
let(:order) do
|
||||
# NULLS LAST ordering requires custom Order object for keyset pagination:
|
||||
# https://docs.gitlab.com/ee/development/database/keyset_pagination.html#complex-order-configuration
|
||||
Gitlab::Pagination::Keyset::Order.build([
|
||||
Gitlab::Pagination::Keyset::ColumnOrderDefinition.new(
|
||||
attribute_name: :relative_position,
|
||||
column_expression: Issue.arel_table[:relative_position],
|
||||
order_expression: Gitlab::Database.nulls_last_order('relative_position', :desc),
|
||||
reversed_order_expression: Gitlab::Database.nulls_first_order('relative_position', :asc),
|
||||
order_direction: :desc,
|
||||
nullable: :nulls_last,
|
||||
distinct: false
|
||||
),
|
||||
Gitlab::Pagination::Keyset::ColumnOrderDefinition.new(
|
||||
attribute_name: :id,
|
||||
order_expression: Issue.arel_table[:id].desc,
|
||||
nullable: :not_nullable,
|
||||
distinct: true
|
||||
)
|
||||
])
|
||||
end
|
||||
|
||||
let(:in_operator_optimization_options) do
|
||||
{
|
||||
array_scope: Project.where(namespace_id: top_level_group.self_and_descendants.select(:id)).select(:id),
|
||||
array_mapping_scope: -> (id_expression) { Issue.where(Issue.arel_table[:project_id].eq(id_expression)) },
|
||||
finder_query: -> (_relative_position_expression, id_expression) { Issue.where(Issue.arel_table[:id].eq(id_expression)) }
|
||||
}
|
||||
end
|
||||
|
||||
context 'when iterating records one by one' do
|
||||
let(:batch_size) { 1 }
|
||||
|
||||
it_behaves_like 'correct ordering examples'
|
||||
end
|
||||
|
||||
context 'when iterating records with LIMIT 3' do
|
||||
let(:batch_size) { 3 }
|
||||
|
||||
it_behaves_like 'correct ordering examples'
|
||||
end
|
||||
end
|
||||
|
||||
context 'when ordering by issues.created_at DESC, issues.id ASC' do
|
||||
let(:scope) { Issue.order(created_at: :desc, id: :asc) }
|
||||
let(:expected_order) { issues.sort_by { |issue| [issue.created_at.to_f * -1, issue.id] } }
|
||||
|
||||
let(:in_operator_optimization_options) do
|
||||
{
|
||||
array_scope: Project.where(namespace_id: top_level_group.self_and_descendants.select(:id)).select(:id),
|
||||
array_mapping_scope: -> (id_expression) { Issue.where(Issue.arel_table[:project_id].eq(id_expression)) },
|
||||
finder_query: -> (_created_at_expression, id_expression) { Issue.where(Issue.arel_table[:id].eq(id_expression)) }
|
||||
}
|
||||
end
|
||||
|
||||
context 'when iterating records one by one' do
|
||||
let(:batch_size) { 1 }
|
||||
|
||||
it_behaves_like 'correct ordering examples'
|
||||
end
|
||||
|
||||
context 'when iterating records with LIMIT 3' do
|
||||
let(:batch_size) { 3 }
|
||||
|
||||
it_behaves_like 'correct ordering examples'
|
||||
end
|
||||
|
||||
context 'when loading records at once' do
|
||||
let(:batch_size) { issues.size + 1 }
|
||||
|
||||
it_behaves_like 'correct ordering examples'
|
||||
end
|
||||
end
|
||||
|
||||
context 'pagination support' do
|
||||
let(:scope) { Issue.order(id: :desc) }
|
||||
let(:expected_order) { issues.sort_by(&:id).reverse }
|
||||
|
||||
let(:options) do
|
||||
{
|
||||
scope: scope,
|
||||
array_scope: Project.where(namespace_id: top_level_group.self_and_descendants.select(:id)).select(:id),
|
||||
array_mapping_scope: -> (id_expression) { Issue.where(Issue.arel_table[:project_id].eq(id_expression)) },
|
||||
finder_query: -> (id_expression) { Issue.where(Issue.arel_table[:id].eq(id_expression)) }
|
||||
}
|
||||
end
|
||||
|
||||
context 'offset pagination' do
|
||||
subject(:optimized_scope) { described_class.new(**options).execute }
|
||||
|
||||
it 'paginates the scopes' do
|
||||
first_page = optimized_scope.page(1).per(2)
|
||||
expect(first_page).to eq(expected_order[0...2])
|
||||
|
||||
second_page = optimized_scope.page(2).per(2)
|
||||
expect(second_page).to eq(expected_order[2...4])
|
||||
|
||||
third_page = optimized_scope.page(3).per(2)
|
||||
expect(third_page).to eq(expected_order[4...6])
|
||||
end
|
||||
end
|
||||
|
||||
context 'keyset pagination' do
|
||||
def paginator(cursor = nil)
|
||||
scope.keyset_paginate(cursor: cursor, per_page: 2, keyset_order_options: options)
|
||||
end
|
||||
|
||||
it 'paginates correctly' do
|
||||
first_page = paginator.records
|
||||
expect(first_page).to eq(expected_order[0...2])
|
||||
|
||||
cursor_for_page_2 = paginator.cursor_for_next_page
|
||||
|
||||
second_page = paginator(cursor_for_page_2).records
|
||||
expect(second_page).to eq(expected_order[2...4])
|
||||
|
||||
cursor_for_page_3 = paginator(cursor_for_page_2).cursor_for_next_page
|
||||
|
||||
third_page = paginator(cursor_for_page_3).records
|
||||
expect(third_page).to eq(expected_order[4...6])
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
it 'raises error when unsupported scope is passed' do
|
||||
scope = Issue.order(Issue.arel_table[:id].lower.desc)
|
||||
|
||||
options = {
|
||||
scope: scope,
|
||||
array_scope: Project.where(namespace_id: top_level_group.self_and_descendants.select(:id)).select(:id),
|
||||
array_mapping_scope: -> (id_expression) { Issue.where(Issue.arel_table[:project_id].eq(id_expression)) },
|
||||
finder_query: -> (id_expression) { Issue.where(Issue.arel_table[:id].eq(id_expression)) }
|
||||
}
|
||||
|
||||
expect { described_class.new(**options).execute }.to raise_error(/The order on the scope does not support keyset pagination/)
|
||||
end
|
||||
end
|
|
@@ -2,6 +2,8 @@
|
|||
require 'spec_helper'
|
||||
|
||||
RSpec.describe Packages::PackageFile, type: :model do
|
||||
using RSpec::Parameterized::TableSyntax
|
||||
|
||||
let_it_be(:project) { create(:project) }
|
||||
let_it_be(:package_file1) { create(:package_file, :xml, file_name: 'FooBar') }
|
||||
let_it_be(:package_file2) { create(:package_file, :xml, file_name: 'ThisIsATest') }
|
||||
|
@@ -143,6 +145,67 @@ RSpec.describe Packages::PackageFile, type: :model do
|
|||
it { expect(described_class.most_recent!).to eq(debian_package.package_files.last) }
|
||||
end
|
||||
|
||||
describe '.most_recent_for' do
|
||||
let_it_be(:package1) { create(:npm_package) }
|
||||
let_it_be(:package2) { create(:npm_package) }
|
||||
let_it_be(:package3) { create(:npm_package) }
|
||||
let_it_be(:package4) { create(:npm_package) }
|
||||
|
||||
let_it_be(:package_file2_2) { create(:package_file, :npm, package: package2) }
|
||||
|
||||
let_it_be(:package_file3_2) { create(:package_file, :npm, package: package3) }
|
||||
let_it_be(:package_file3_3) { create(:package_file, :npm, package: package3) }
|
||||
|
||||
let_it_be(:package_file4_2) { create(:package_file, :npm, package: package4) }
|
||||
let_it_be(:package_file4_3) { create(:package_file, :npm, package: package4) }
|
||||
let_it_be(:package_file4_4) { create(:package_file, :npm, package: package4) }
|
||||
|
||||
let(:most_recent_package_file1) { package1.package_files.recent.first }
|
||||
let(:most_recent_package_file2) { package2.package_files.recent.first }
|
||||
let(:most_recent_package_file3) { package3.package_files.recent.first }
|
||||
let(:most_recent_package_file4) { package4.package_files.recent.first }
|
||||
|
||||
subject { described_class.most_recent_for(packages) }
|
||||
|
||||
where(
|
||||
package_input1: [1, nil],
|
||||
package_input2: [2, nil],
|
||||
package_input3: [3, nil],
|
||||
package_input4: [4, nil]
|
||||
)
|
||||
|
||||
with_them do
|
||||
let(:compact_inputs) { [package_input1, package_input2, package_input3, package_input4].compact }
|
||||
let(:packages) do
|
||||
::Packages::Package.id_in(
|
||||
compact_inputs.map { |pkg_number| public_send("package#{pkg_number}") }
|
||||
.map(&:id)
|
||||
)
|
||||
end
|
||||
|
||||
let(:expected_package_files) { compact_inputs.map { |pkg_number| public_send("most_recent_package_file#{pkg_number}") } }
|
||||
|
||||
it { is_expected.to contain_exactly(*expected_package_files) }
|
||||
end
|
||||
|
||||
context 'extra join and extra where' do
|
||||
let_it_be(:helm_package) { create(:helm_package, without_package_files: true) }
|
||||
let_it_be(:helm_package_file1) { create(:helm_package_file, channel: 'alpha') }
|
||||
let_it_be(:helm_package_file2) { create(:helm_package_file, channel: 'alpha', package: helm_package) }
|
||||
let_it_be(:helm_package_file3) { create(:helm_package_file, channel: 'beta', package: helm_package) }
|
||||
let_it_be(:helm_package_file4) { create(:helm_package_file, channel: 'beta', package: helm_package) }
|
||||
|
||||
let(:extra_join) { :helm_file_metadatum }
|
||||
let(:extra_where) { { packages_helm_file_metadata: { channel: 'alpha' } } }
|
||||
|
||||
subject { described_class.most_recent_for(Packages::Package.id_in(helm_package.id), extra_join: extra_join, extra_where: extra_where) }
|
||||
|
||||
it 'returns the most recent package file for the selected channel' do
|
||||
expect(subject).to contain_exactly(helm_package_file2)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe '#update_file_store callback' do
|
||||
let_it_be(:package_file) { build(:package_file, :nuget, size: nil) }
|
||||
|
||||
|
|
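The extra_join/extra_where pair exercised above is what lets a caller keep only the most recent file per package for a single Helm channel, roughly as follows (sketch; the package ids and channel are placeholders):

Packages::PackageFile.most_recent_for(
  Packages::Package.id_in(helm_package_ids),
  extra_join: :helm_file_metadatum,
  extra_where: { packages_helm_file_metadata: { channel: 'stable' } }
)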
80
spec/presenters/packages/helm/index_presenter_spec.rb
Normal file
|
@@ -0,0 +1,80 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
require 'spec_helper'
|
||||
|
||||
RSpec.describe Packages::Helm::IndexPresenter do
|
||||
include_context 'with expected presenters dependency groups'
|
||||
|
||||
let_it_be(:project) { create(:project) }
|
||||
let_it_be(:packages) { create_list(:helm_package, 5, project: project) }
|
||||
let_it_be(:package_files3_1) { create(:helm_package_file, package: packages[2], file_sha256: '3_1', file_name: 'file3_1') }
|
||||
let_it_be(:package_files3_2) { create(:helm_package_file, package: packages[2], file_sha256: '3_2', file_name: 'file3_2') }
|
||||
let_it_be(:package_files4_1) { create(:helm_package_file, package: packages[3], file_sha256: '4_1', file_name: 'file4_1') }
|
||||
let_it_be(:package_files4_2) { create(:helm_package_file, package: packages[3], file_sha256: '4_2', file_name: 'file4_2') }
|
||||
let_it_be(:package_files4_3) { create(:helm_package_file, package: packages[3], file_sha256: '4_3', file_name: 'file4_3') }
|
||||
|
||||
let(:project_id_param) { project.id }
|
||||
let(:channel) { 'stable' }
|
||||
let(:presenter) { described_class.new(project_id_param, channel, ::Packages::Package.id_in(packages.map(&:id))) }
|
||||
|
||||
describe('#entries') do
|
||||
subject { presenter.entries }
|
||||
|
||||
it 'returns the correct hash' do
|
||||
expect(subject.size).to eq(5)
|
||||
expect(subject.keys).to eq(packages.map(&:name))
|
||||
subject.values.zip(packages) do |raws, pkg|
|
||||
expect(raws.size).to eq(1)
|
||||
|
||||
file = pkg.package_files.recent.first
|
||||
raw = raws.first
|
||||
expect(raw['name']).to eq(pkg.name)
|
||||
expect(raw['version']).to eq(pkg.version)
|
||||
expect(raw['apiVersion']).to eq("v2")
|
||||
expect(raw['created']).to eq(file.created_at.utc.strftime('%Y-%m-%dT%H:%M:%S.%NZ'))
|
||||
expect(raw['digest']).to eq(file.file_sha256)
|
||||
expect(raw['urls']).to eq(["charts/#{file.file_name}"])
|
||||
end
|
||||
end
|
||||
|
||||
context 'with an unknown channel' do
|
||||
let(:channel) { 'unknown' }
|
||||
|
||||
it { is_expected.to be_empty }
|
||||
end
|
||||
|
||||
context 'with a nil channel' do
|
||||
let(:channel) { nil }
|
||||
|
||||
it { is_expected.to be_empty }
|
||||
end
|
||||
end
|
||||
|
||||
describe('#api_version') do
|
||||
subject { presenter.api_version }
|
||||
|
||||
it { is_expected.to eq(described_class::API_VERSION) }
|
||||
end
|
||||
|
||||
describe('#generated') do
|
||||
subject { presenter.generated }
|
||||
|
||||
it 'returns the expected format' do
|
||||
freeze_time do
|
||||
expect(subject).to eq(Time.zone.now.utc.strftime('%Y-%m-%dT%H:%M:%S.%NZ'))
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe('#server_info') do
|
||||
subject { presenter.server_info }
|
||||
|
||||
it { is_expected.to eq({ 'contextPath' => "/api/v4/projects/#{project.id}/packages/helm" }) }
|
||||
|
||||
context 'with url encoded project id param' do
|
||||
let_it_be(:project_id_param) { 'foo/bar' }
|
||||
|
||||
it { is_expected.to eq({ 'contextPath' => '/api/v4/projects/foo%2Fbar/packages/helm' }) }
|
||||
end
|
||||
end
|
||||
end
|
|
@@ -12,10 +12,24 @@ RSpec.describe API::HelmPackages do
|
|||
let_it_be(:package) { create(:helm_package, project: project, without_package_files: true) }
|
||||
let_it_be(:package_file1) { create(:helm_package_file, package: package) }
|
||||
let_it_be(:package_file2) { create(:helm_package_file, package: package) }
|
||||
let_it_be(:package2) { create(:helm_package, project: project, without_package_files: true) }
|
||||
let_it_be(:package_file2_1) { create(:helm_package_file, package: package2, file_sha256: 'file2', file_name: 'filename2.tgz', description: 'hello from stable channel') }
|
||||
let_it_be(:package_file2_2) { create(:helm_package_file, package: package2, file_sha256: 'file2', file_name: 'filename2.tgz', channel: 'test', description: 'hello from test channel') }
|
||||
let_it_be(:other_package) { create(:npm_package, project: project) }
|
||||
|
||||
describe 'GET /api/v4/projects/:id/packages/helm/:channel/index.yaml' do
|
||||
it_behaves_like 'handling helm chart index requests' do
|
||||
let(:url) { "/projects/#{project.id}/packages/helm/stable/index.yaml" }
|
||||
let(:url) { "/projects/#{project_id}/packages/helm/stable/index.yaml" }
|
||||
|
||||
context 'with a project id' do
|
||||
let(:project_id) { project.id }
|
||||
|
||||
it_behaves_like 'handling helm chart index requests'
|
||||
end
|
||||
|
||||
context 'with an url encoded project id' do
|
||||
let(:project_id) { ERB::Util.url_encode(project.full_path) }
|
||||
|
||||
it_behaves_like 'handling helm chart index requests'
|
||||
end
|
||||
end
|
||||
|
||||
|
|
|
@@ -300,8 +300,32 @@ RSpec.describe ServicePing::SubmitService do
|
|||
end
|
||||
end
|
||||
|
||||
def stub_response(body:, status: 201)
|
||||
stub_full_request(subject.send(:url), method: :post)
|
||||
describe '#url' do
|
||||
let(:url) { subject.url.to_s }
|
||||
|
||||
context 'when Rails.env is production' do
|
||||
before do
|
||||
stub_rails_env('production')
|
||||
end
|
||||
|
||||
it 'points to the production Version app' do
|
||||
expect(url).to eq("#{described_class::PRODUCTION_BASE_URL}/#{described_class::USAGE_DATA_PATH}")
|
||||
end
|
||||
end
|
||||
|
||||
context 'when Rails.env is not production' do
|
||||
before do
|
||||
stub_rails_env('development')
|
||||
end
|
||||
|
||||
it 'points to the staging Version app' do
|
||||
expect(url).to eq("#{described_class::STAGING_BASE_URL}/#{described_class::USAGE_DATA_PATH}")
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def stub_response(url: subject.url, body:, status: 201)
|
||||
stub_full_request(url, method: :post)
|
||||
.to_return(
|
||||
headers: { 'Content-Type' => 'application/json' },
|
||||
body: body.to_json,
|
||||
|
|
|
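A compact restatement of the URL selection these examples describe (constant names are taken from the spec; the service itself may derive the value differently, so treat this as a sketch):

base = Rails.env.production? ? ServicePing::SubmitService::PRODUCTION_BASE_URL : ServicePing::SubmitService::STAGING_BASE_URL
url  = "#{base}/#{ServicePing::SubmitService::USAGE_DATA_PATH}"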
@@ -71,12 +71,12 @@ module CycleAnalyticsHelpers
|
|||
save_value_stream(custom_value_stream_name)
|
||||
end
|
||||
|
||||
def wait_for_stages_to_load(selector = '.js-path-navigation')
|
||||
def wait_for_stages_to_load(selector = '[data-testid="vsa-path-navigation"]')
|
||||
expect(page).to have_selector selector
|
||||
wait_for_requests
|
||||
end
|
||||
|
||||
def select_group(target_group, ready_selector = '.js-path-navigation')
|
||||
def select_group(target_group, ready_selector = '[data-testid="vsa-path-navigation"]')
|
||||
visit group_analytics_cycle_analytics_path(target_group)
|
||||
|
||||
wait_for_stages_to_load(ready_selector)
|
||||
|
|
|
@@ -36,15 +36,23 @@ RSpec.shared_examples 'process helm service index request' do |user_type, status|
|
|||
|
||||
expect(yaml_response.keys).to contain_exactly('apiVersion', 'entries', 'generated', 'serverInfo')
|
||||
expect(yaml_response['entries']).to be_a(Hash)
|
||||
expect(yaml_response['entries'].keys).to contain_exactly(package.name)
|
||||
expect(yaml_response['serverInfo']).to eq({ 'contextPath' => "/api/v4/projects/#{project.id}/packages/helm" })
|
||||
expect(yaml_response['entries'].keys).to contain_exactly(package.name, package2.name)
|
||||
expect(yaml_response['serverInfo']).to eq({ 'contextPath' => "/api/v4/projects/#{project_id}/packages/helm" })
|
||||
|
||||
package_entry = yaml_response['entries'][package.name]
|
||||
|
||||
expect(package_entry.length).to eq(2)
|
||||
expect(package_entry.length).to eq(1)
|
||||
expect(package_entry.first.keys).to contain_exactly('name', 'version', 'apiVersion', 'created', 'digest', 'urls')
|
||||
expect(package_entry.first['digest']).to eq('fd2b2fa0329e80a2a602c2bb3b40608bcd6ee5cf96cf46fd0d2800a4c129c9db')
|
||||
expect(package_entry.first['urls']).to eq(["charts/#{package.name}-#{package.version}.tgz"])
|
||||
|
||||
package_entry = yaml_response['entries'][package2.name]
|
||||
|
||||
expect(package_entry.length).to eq(1)
|
||||
expect(package_entry.first.keys).to contain_exactly('name', 'version', 'apiVersion', 'created', 'digest', 'urls', 'description')
|
||||
expect(package_entry.first['digest']).to eq('file2')
|
||||
expect(package_entry.first['description']).to eq('hello from stable channel')
|
||||
expect(package_entry.first['urls']).to eq(['charts/filename2.tgz'])
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@@ -196,7 +204,7 @@ end
|
|||
|
||||
RSpec.shared_examples 'rejects helm access with unknown project id' do
|
||||
context 'with an unknown project' do
|
||||
let(:project) { OpenStruct.new(id: 1234567890) }
|
||||
let(:project_id) { 1234567890 }
|
||||
|
||||
context 'as anonymous' do
|
||||
it_behaves_like 'rejects helm packages access', :anonymous, :unauthorized
|
||||
|
|
BIN
vendor/project_templates/cluster_management.tar.gz
vendored
Binary file not shown.