Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2020-05-27 15:08:11 +00:00
parent 90c9981395
commit 4d5ee2b814
48 changed files with 460 additions and 368 deletions

View File

@@ -43,17 +43,17 @@ export default {
key: 'environment_scope',
label: __('Environment scope'),
},
// Wait for backend to send these fields
{
key: 'node_size',
label: __('Nodes'),
},
// Fields are missing calculation methods and not ready to display
// {
// key: 'size',
// label: __('Size'),
// },
// {
// key: 'cpu',
// key: 'node_cpu',
// label: __('Total cores (vCPUs)'),
// },
// {
// key: 'memory',
// key: 'node_memory',
// label: __('Total memory (GB)'),
// },
{
@@ -111,6 +111,14 @@ export default {
></div>
</div>
</template>
<template #cell(node_size)="{ item }">
<span v-if="item.nodes">{{ item.nodes.length }}</span>
<small v-else class="gl-font-sm gl-font-style-italic gl-text-gray-400">{{
__('Unknown')
}}</small>
</template>
<template #cell(cluster_type)="{value}">
<gl-badge variant="light">
{{ value }}

View File

@@ -6,6 +6,8 @@ export const CLUSTER_TYPES = {
instance_type: __('Instance'),
};
export const MAX_REQUESTS = 3;
export const STATUSES = {
default: { className: 'bg-white', title: __('Unknown') },
disabled: { className: 'disabled', title: __('Disabled') },

View File

@@ -2,10 +2,23 @@ import Poll from '~/lib/utils/poll';
import axios from '~/lib/utils/axios_utils';
import flash from '~/flash';
import { __ } from '~/locale';
import { MAX_REQUESTS } from '../constants';
import { parseIntPagination, normalizeHeaders } from '~/lib/utils/common_utils';
import * as Sentry from '@sentry/browser';
import * as types from './mutation_types';
const allNodesPresent = (clusters, retryCount) => {
/*
Nodes come from external Kubernetes clusters.
They may fail for reasons GitLab cannot control.
MAX_REQUESTS ensures this poll stops at some point.
*/
return retryCount > MAX_REQUESTS || clusters.every(cluster => cluster.nodes != null);
};
export const fetchClusters = ({ state, commit }) => {
let retryCount = 0;
const poll = new Poll({
resource: {
fetchClusters: paginatedEndPoint => axios.get(paginatedEndPoint),
@@ -13,16 +26,40 @@ export const fetchClusters = ({ state, commit }) => {
data: `${state.endpoint}?page=${state.page}`,
method: 'fetchClusters',
successCallback: ({ data, headers }) => {
if (data.clusters) {
const normalizedHeaders = normalizeHeaders(headers);
const paginationInformation = parseIntPagination(normalizedHeaders);
retryCount += 1;
commit(types.SET_CLUSTERS_DATA, { data, paginationInformation });
commit(types.SET_LOADING_STATE, false);
try {
if (data.clusters) {
const normalizedHeaders = normalizeHeaders(headers);
const paginationInformation = parseIntPagination(normalizedHeaders);
commit(types.SET_CLUSTERS_DATA, { data, paginationInformation });
commit(types.SET_LOADING_STATE, false);
if (allNodesPresent(data.clusters, retryCount)) {
poll.stop();
}
}
} catch (error) {
poll.stop();
Sentry.withScope(scope => {
scope.setTag('javascript_clusters_list', 'fetchClustersSuccessCallback');
Sentry.captureException(error);
});
}
},
errorCallback: () => flash(__('An error occurred while loading clusters')),
errorCallback: response => {
poll.stop();
commit(types.SET_LOADING_STATE, false);
flash(__('Clusters|An error occurred while loading clusters'));
Sentry.withScope(scope => {
scope.setTag('javascript_clusters_list', 'fetchClustersErrorCallback');
Sentry.captureException(response);
});
},
});
poll.makeRequest();

View File

@@ -412,7 +412,7 @@ js-gfm-input js-autosize markdown-area js-vue-textarea qa-comment-input"
</gl-alert>
<div class="note-form-actions">
<div
class="float-left btn-group
class="btn-group
append-right-10 comment-type-dropdown js-comment-type-dropdown droplab-dropdown"
>
<button

View File

@@ -47,7 +47,7 @@ export default {
};
</script>
<template>
<div class="d-flex flex-grow-1 flex-column">
<div class="d-flex flex-grow-1 flex-column h-100">
<edit-header class="py-2" :title="title" />
<rich-content-editor v-model="editableContent" class="mb-9" />
<publish-toolbar

View File

@@ -9,14 +9,8 @@
@include gl-rounded-bottom-right-base;
}
&:not(:first-child) {
@include gl-border-l-1;
@include gl-border-l-solid;
@include gl-border-white;
}
&:not(:last-child) {
@include gl-border-r-1;
@include gl-border-r-2;
@include gl-border-r-solid;
@include gl-border-white;
}

View File

@@ -23,6 +23,7 @@ class Clusters::ClustersController < Clusters::BaseController
respond_to do |format|
format.html
format.json do
Gitlab::PollingInterval.set_header(response, interval: STATUS_POLLING_INTERVAL)
serializer = ClusterSerializer.new(current_user: current_user)
render json: {
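
The `Poll-Interval` header set here is what drives the cluster-list polling added on the frontend above. A minimal sketch of what `Gitlab::PollingInterval.set_header` does, assuming (as a hedge, since that helper's body is not part of this commit) that the real implementation scales the interval by the instance-wide `polling_interval_multiplier` setting:

# Sketch only: approximates Gitlab::PollingInterval, assuming the
# polling_interval_multiplier application setting gates polling.
module Gitlab
  class PollingInterval
    HEADER_NAME = 'Poll-Interval'

    def self.set_header(response, interval:)
      value =
        if polling_enabled?
          (interval * Gitlab::CurrentSettings.polling_interval_multiplier).to_i
        else
          -1 # a negative interval tells the frontend Poll utility not to poll
        end

      response.set_header(HEADER_NAME, value.to_s)
    end

    def self.polling_enabled?
      Gitlab::CurrentSettings.polling_interval_multiplier.to_f > 0
    end
  end
end

With a `STATUS_POLLING_INTERVAL` of 10,000 ms and a multiplier of 1, this yields the `Poll-Interval: 10000` header asserted in the controller specs further down.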

View File

@@ -5,7 +5,6 @@ module IssuableCollections
include PaginatedCollection
include SortingHelper
include SortingPreference
include Gitlab::IssuableMetadata
include Gitlab::Utils::StrongMemoize
included do
@@ -44,7 +43,7 @@ module IssuableCollections
def set_pagination
@issuables = @issuables.page(params[:page])
@issuables = per_page_for_relative_position if params[:sort] == 'relative_position'
@issuable_meta_data = issuable_meta_data(@issuables, collection_type, current_user)
@issuable_meta_data = Gitlab::IssuableMetadata.new(current_user, @issuables).data
@total_pages = issuable_page_count(@issuables)
end
# rubocop:enable Gitlab/ModuleWithInstanceVariables

View File

@@ -11,7 +11,7 @@ module IssuableCollectionsAction
.non_archived
.page(params[:page])
@issuable_meta_data = issuable_meta_data(@issues, collection_type, current_user)
@issuable_meta_data = Gitlab::IssuableMetadata.new(current_user, @issues).data
respond_to do |format|
format.html
@@ -22,7 +22,7 @@ module IssuableCollectionsAction
def merge_requests
@merge_requests = issuables_collection.page(params[:page])
@issuable_meta_data = issuable_meta_data(@merge_requests, collection_type, current_user)
@issuable_meta_data = Gitlab::IssuableMetadata.new(current_user, @merge_requests).data
end
# rubocop:enable Gitlab/ModuleWithInstanceVariables

View File

@@ -314,8 +314,7 @@ class ProjectsController < Projects::ApplicationController
@wiki_home = @project_wiki.find_page('home', params[:version_id])
elsif @project.feature_available?(:issues, current_user)
@issues = issuables_collection.page(params[:page])
@collection_type = 'Issue'
@issuable_meta_data = issuable_meta_data(@issues, @collection_type, current_user)
@issuable_meta_data = Gitlab::IssuableMetadata.new(current_user, @issues).data
end
render :show

View File

@@ -23,10 +23,17 @@ module Ci
project_type: 3
}
ONLINE_CONTACT_TIMEOUT = 1.hour
# This `ONLINE_CONTACT_TIMEOUT` needs to be larger than
# `RUNNER_QUEUE_EXPIRY_TIME + UPDATE_CONTACT_COLUMN_EVERY`
#
ONLINE_CONTACT_TIMEOUT = 2.hours
# `RUNNER_QUEUE_EXPIRY_TIME` is the longest interval after which a
# Runner request needs to be refreshed by Rails instead of being handled
# by Workhorse
RUNNER_QUEUE_EXPIRY_TIME = 1.hour
# This needs to be less than `ONLINE_CONTACT_TIMEOUT`
# The `UPDATE_CONTACT_COLUMN_EVERY` defines how often the Runner DB entry can be updated
UPDATE_CONTACT_COLUMN_EVERY = (40.minutes..55.minutes).freeze
AVAILABLE_TYPES_LEGACY = %w[specific shared].freeze
@@ -282,7 +289,7 @@ module Ci
ensure_runner_queue_value == value if value.present?
end
def update_cached_info(values)
def heartbeat(values)
values = values&.slice(:version, :revision, :platform, :architecture, :ip_address) || {}
values[:contacted_at] = Time.current
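
The arithmetic behind the bump from 1 hour to 2 hours: a runner request can sit in Workhorse for up to `RUNNER_QUEUE_EXPIRY_TIME` (1 hour) before Rails sees it again, and Rails only refreshes `contacted_at` every 40 to 55 minutes, so a healthy Runner can legitimately go almost 2 hours between database writes. A hedged sketch of how the timeout is consumed (the `online`/`offline` scopes are approximated here; they are not part of this diff):

# Approximation of the scopes that read ONLINE_CONTACT_TIMEOUT.
scope :online,  -> { where('contacted_at > ?', ONLINE_CONTACT_TIMEOUT.ago) }
scope :offline, -> { where('contacted_at <= ?', ONLINE_CONTACT_TIMEOUT.ago) }

# Worst-case gap between two contacted_at writes:
#   RUNNER_QUEUE_EXPIRY_TIME    = 1.hour     (request parked in Workhorse)
# + UPDATE_CONTACT_COLUMN_EVERY = 55.minutes (upper bound of the range)
#   = 1 hour 55 minutes, which must stay below ONLINE_CONTACT_TIMEOUT (2.hours)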

View File

@@ -189,8 +189,10 @@ class CommitStatus < ApplicationRecord
end
def self.update_as_processed!
# Marks items as processed, and increases `lock_version` (Optimistic Locking)
update_all('processed=TRUE, lock_version=COALESCE(lock_version,0)+1')
# Marks items as processed.
# We do not increase `lock_version`, as we are the ones
# holding the given lock_version (Optimistic Locking)
update_all(processed: true)
end
def self.locking_enabled?
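
For readers unfamiliar with Rails optimistic locking, this is the bug being fixed: `update_all` goes straight to SQL and bypasses any loaded record, so bumping `lock_version` there while the processing code still holds that record makes the record's next state transition fail. A minimal illustration (variable names are illustrative only):

status = CommitStatus.find(build_id)   # loads lock_version, say 0

# Old behavior: the bulk update bumped lock_version to 1 behind the
# loaded record's back...
CommitStatus.where(id: build_id)
  .update_all('processed=TRUE, lock_version=COALESCE(lock_version,0)+1')

# ...so the next transition on the stale in-memory copy failed:
status.success!   # => raises ActiveRecord::StaleObjectError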

View File

@@ -39,15 +39,6 @@ module Issuable
locked: 4
}.with_indifferent_access.freeze
# This object is used to gather issuable meta data for displaying
# upvotes, downvotes, notes and closing merge requests count for issues and merge requests
# lists avoiding n+1 queries and improving performance.
IssuableMeta = Struct.new(:upvotes, :downvotes, :user_notes_count, :mrs_count) do
def merge_requests_count(user = nil)
mrs_count
end
end
included do
cache_markdown_field :title, pipeline: :single_line
cache_markdown_field :description, issuable_state_filter_enabled: true

View File

@@ -2,8 +2,6 @@
module Projects
class UpdatePagesService < BaseService
include Gitlab::OptimisticLocking
InvalidStateError = Class.new(StandardError)
FailedToExtractError = Class.new(StandardError)
@@ -25,8 +23,8 @@ module Projects
# Create status notifying the deployment of pages
@status = create_status
retry_optimistic_lock(@status, &:enqueue!)
retry_optimistic_lock(@status, &:run!)
@status.enqueue!
@status.run!
raise InvalidStateError, 'missing pages artifacts' unless build.artifacts?
raise InvalidStateError, 'build SHA is outdated for this ref' unless latest?
@@ -53,7 +51,7 @@
private
def success
retry_optimistic_lock(@status, &:success)
@status.success
@project.mark_pages_as_deployed
super
end
@@ -63,7 +61,7 @@
log_error("Projects::UpdatePagesService: #{message}")
@status.allow_failure = !latest?
@status.description = message
retry_optimistic_lock(@status) { |status| status.drop(:script_failure) }
@status.drop(:script_failure)
super
end
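
`retry_optimistic_lock` exists to absorb exactly the `StaleObjectError` described above; now that `update_as_processed!` no longer bumps `lock_version`, this service holds the current version and can run the transitions directly. A hedged sketch of the helper being dropped, approximating `Gitlab::OptimisticLocking.retry_lock` rather than quoting it:

# Sketch of the retry wrapper this service no longer needs.
def retry_optimistic_lock(subject, max_retries: 100)
  attempts = 0
  begin
    yield subject
  rescue ActiveRecord::StaleObjectError
    attempts += 1
    raise if attempts > max_retries

    subject.reload # pick up the lock_version that was bumped elsewhere
    retry
  end
end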

View File

@@ -0,0 +1,5 @@
---
title: Added node size to cluster index
merge_request: 32435
author:
type: changed

View File

@@ -0,0 +1,5 @@
---
title: Fix overflow issue in MR and Issue comments
merge_request: 33100
author:
type: fixed

View File

@@ -0,0 +1,5 @@
---
title: Fix atomic processing bumping a lock_version
merge_request: 32914
author:
type: fixed

View File

@@ -0,0 +1,5 @@
---
title: Fix Runner heartbeats that result in Runners being considered offline
merge_request: 32851
author:
type: fixed

View File

@@ -1,5 +0,0 @@
---
title: Add btree_gist PGSQL extension and add DB constraints for Iteration date ranges
merge_request: 32335
author:
type: added

View File

@@ -1,13 +0,0 @@
# frozen_string_literal: true
class EnableBtreeGistExtension < ActiveRecord::Migration[6.0]
DOWNTIME = false
def up
execute 'CREATE EXTENSION IF NOT EXISTS btree_gist'
end
def down
execute 'DROP EXTENSION IF EXISTS btree_gist'
end
end

View File

@@ -1,39 +0,0 @@
# frozen_string_literal: true
class IterationDateRangeConstraint < ActiveRecord::Migration[6.0]
DOWNTIME = false
def up
execute <<~SQL
ALTER TABLE sprints
ADD CONSTRAINT iteration_start_and_due_daterange_project_id_constraint
EXCLUDE USING gist
( project_id WITH =,
daterange(start_date, due_date, '[]') WITH &&
)
WHERE (project_id IS NOT NULL)
SQL
execute <<~SQL
ALTER TABLE sprints
ADD CONSTRAINT iteration_start_and_due_daterange_group_id_constraint
EXCLUDE USING gist
( group_id WITH =,
daterange(start_date, due_date, '[]') WITH &&
)
WHERE (group_id IS NOT NULL)
SQL
end
def down
execute <<~SQL
ALTER TABLE sprints
DROP CONSTRAINT IF EXISTS iteration_start_and_due_daterange_project_id_constraint
SQL
execute <<~SQL
ALTER TABLE sprints
DROP CONSTRAINT IF EXISTS iteration_start_and_due_daterange_group_id_constraint
SQL
end
end

View File

@@ -2,8 +2,6 @@ SET search_path=public;
CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
CREATE EXTENSION IF NOT EXISTS btree_gist WITH SCHEMA public;
CREATE EXTENSION IF NOT EXISTS pg_trgm WITH SCHEMA public;
CREATE TABLE public.abuse_reports (
@@ -8427,12 +8425,6 @@ ALTER TABLE ONLY public.issue_user_mentions
ALTER TABLE ONLY public.issues
ADD CONSTRAINT issues_pkey PRIMARY KEY (id);
ALTER TABLE ONLY public.sprints
ADD CONSTRAINT iteration_start_and_due_daterange_group_id_constraint EXCLUDE USING gist (group_id WITH =, daterange(start_date, due_date, '[]'::text) WITH &&) WHERE ((group_id IS NOT NULL));
ALTER TABLE ONLY public.sprints
ADD CONSTRAINT iteration_start_and_due_daterange_project_id_constraint EXCLUDE USING gist (project_id WITH =, daterange(start_date, due_date, '[]'::text) WITH &&) WHERE ((project_id IS NOT NULL));
ALTER TABLE ONLY public.jira_connect_installations
ADD CONSTRAINT jira_connect_installations_pkey PRIMARY KEY (id);
@@ -13953,8 +13945,6 @@ COPY "schema_migrations" (version) FROM STDIN;
20200514000009
20200514000132
20200514000340
20200515152649
20200515153633
20200515155620
20200519115908
20200519171058

View File

@@ -367,6 +367,8 @@ migration involves one of the high-traffic tables:
- `users`
- `projects`
- `namespaces`
- `issues`
- `merge_requests`
- `ci_pipelines`
- `ci_builds`
- `notes`

View File

@@ -59,8 +59,6 @@ Here's a list of the AWS services we will use, with links to pricing information:
Redis configuration. See the
[Amazon ElastiCache pricing](https://aws.amazon.com/elasticache/pricing/).
NOTE: **Note:** Please note that while we will be using EBS for storage, we do not recommend using EFS as it may negatively impact GitLab's performance. You can review the [relevant documentation](../../administration/high_availability/nfs.md#avoid-using-awss-elastic-file-system-efs) for more details.
## Create an IAM EC2 instance role and profile
As we'll be using [Amazon S3 object storage](#amazon-s3-object-storage), our EC2 instances need to have read, write, and list permissions for our S3 buckets. To avoid embedding AWS keys in our GitLab config, we'll make use of an [IAM Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) to allow our GitLab instance with this access. We'll need to create an IAM policy to attach to our IAM role:
@@ -563,7 +561,7 @@ Let's create an EC2 instance where we'll install Gitaly:
1. Click **Review and launch** followed by **Launch** if you're happy with your settings.
1. Finally, acknowledge that you have access to the selected private key file or create a new one. Click **Launch Instances**.
> **Optional:** Instead of storing configuration _and_ repository data on the root volume, you can also choose to add an additional EBS volume for repository storage. Follow the same guidance as above. See the [Amazon EBS pricing](https://aws.amazon.com/ebs/pricing/).
NOTE: **Optional:** Instead of storing configuration _and_ repository data on the root volume, you can also choose to add an additional EBS volume for repository storage. Follow the same guidance as above. See the [Amazon EBS pricing](https://aws.amazon.com/ebs/pricing/). We do not recommend using EFS as it may negatively impact GitLab's performance. You can review the [relevant documentation](../../administration/high_availability/nfs.md#avoid-using-awss-elastic-file-system-efs) for more details.
Now that we have our EC2 instance ready, follow the [documentation to install GitLab and set up Gitaly on its own server](../../administration/gitaly/index.md#running-gitaly-on-its-own-server). Perform the client setup steps from that document on the [GitLab instance we created](#install-gitlab) above.

View File

@@ -563,6 +563,10 @@ If more than the maximum number of allowed connections occur concurrently, they
dropped and users get
[an `ssh_exchange_identification` error](../../topics/git/troubleshooting_git.md#ssh_exchange_identification-error).
### Import/export
To help avoid abuse, project and group imports, exports, and export downloads are rate limited. See [Project import/export rate limits](../../user/project/settings/import_export.md#rate-limits) and [Group import/export rate limits](../../user/group/settings/import_export.md#rate-limits) for details.
## GitLab.com Logging
We use [Fluentd](https://gitlab.com/gitlab-com/runbooks/tree/master/logging/doc#fluentd) to parse our logs. Fluentd sends our logs to

View File

@@ -106,7 +106,7 @@ module API
status.enqueue!
when 'running'
status.enqueue
Gitlab::OptimisticLocking.retry_lock(status, &:run!)
status.run!
when 'success'
status.success!
when 'failed'

View File

@@ -3,6 +3,8 @@
module API
module Helpers
module Runner
include Gitlab::Utils::StrongMemoize
prepend_if_ee('EE::API::Helpers::Runner') # rubocop: disable Cop/InjectEnterpriseEditionModule
JOB_TOKEN_HEADER = 'HTTP_JOB_TOKEN'
@@ -16,7 +18,7 @@ module API
forbidden! unless current_runner
current_runner
.update_cached_info(get_runner_details_from_request)
.heartbeat(get_runner_details_from_request)
end
def get_runner_details_from_request
@@ -31,31 +33,35 @@ module API
end
def current_runner
@runner ||= ::Ci::Runner.find_by_token(params[:token].to_s)
strong_memoize(:current_runner) do
::Ci::Runner.find_by_token(params[:token].to_s)
end
end
def validate_job!(job)
not_found! unless job
yield if block_given?
project = job.project
forbidden!('Project has been deleted!') if project.nil? || project.pending_delete?
forbidden!('Job has been erased!') if job.erased?
end
def authenticate_job!
def authenticate_job!(require_running: true)
job = current_job
validate_job!(job) do
forbidden! unless job_token_valid?(job)
not_found! unless job
forbidden! unless job_token_valid?(job)
forbidden!('Project has been deleted!') if job.project.nil? || job.project.pending_delete?
forbidden!('Job has been erased!') if job.erased?
if require_running
job_forbidden!(job, 'Job is not running') unless job.running?
end
if Gitlab::Ci::Features.job_heartbeats_runner?(job.project)
job.runner&.heartbeat(get_runner_ip)
end
job
end
def current_job
@current_job ||= Ci::Build.find_by_id(params[:id])
strong_memoize(:current_job) do
Ci::Build.find_by_id(params[:id])
end
end
def job_token_valid?(job)
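
The switch from `@runner ||= ...` to `strong_memoize` is not just style: `||=` re-evaluates whenever the memoized value is `nil` or `false`, so an unknown token used to trigger a fresh database lookup on every call. `strong_memoize` caches falsy results too. A minimal sketch of the difference (method names here are illustrative):

def runner_with_or_equals
  # Unknown token => nil result => the query re-runs on every call
  @runner ||= ::Ci::Runner.find_by_token(params[:token].to_s)
end

def runner_with_strong_memoize
  # nil is cached as well: the lookup runs at most once per request
  strong_memoize(:current_runner) do
    ::Ci::Runner.find_by_token(params[:token].to_s)
  end
end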

View File

@@ -5,7 +5,6 @@ module API
include PaginationParams
helpers Helpers::IssuesHelpers
helpers Helpers::RateLimiter
helpers ::Gitlab::IssuableMetadata
before { authenticate_non_get! }
@@ -108,7 +107,7 @@
with: Entities::Issue,
with_labels_details: declared_params[:with_labels_details],
current_user: current_user,
issuable_metadata: issuable_meta_data(issues, 'Issue', current_user),
issuable_metadata: Gitlab::IssuableMetadata.new(current_user, issues).data,
include_subscribed: false
}
@@ -134,7 +133,7 @@
with: Entities::Issue,
with_labels_details: declared_params[:with_labels_details],
current_user: current_user,
issuable_metadata: issuable_meta_data(issues, 'Issue', current_user),
issuable_metadata: Gitlab::IssuableMetadata.new(current_user, issues).data,
include_subscribed: false,
group: user_group
}
@@ -171,7 +170,7 @@
with_labels_details: declared_params[:with_labels_details],
current_user: current_user,
project: user_project,
issuable_metadata: issuable_meta_data(issues, 'Issue', current_user),
issuable_metadata: Gitlab::IssuableMetadata.new(current_user, issues).data,
include_subscribed: false
}

View File

@@ -8,7 +8,6 @@ module API
before { authenticate_non_get! }
helpers ::Gitlab::IssuableMetadata
helpers Helpers::MergeRequestsHelpers
# EE::API::MergeRequests would override the following helpers
@@ -92,7 +91,7 @@
if params[:view] == 'simple'
options[:with] = Entities::MergeRequestSimple
else
options[:issuable_metadata] = issuable_meta_data(merge_requests, 'MergeRequest', current_user)
options[:issuable_metadata] = Gitlab::IssuableMetadata.new(current_user, merge_requests).data
if Feature.enabled?(:mr_list_api_skip_merge_status_recheck, default_enabled: true)
options[:skip_merge_status_recheck] = !declared_params[:with_merge_status_recheck]
end

View File

@@ -154,7 +154,6 @@ module API
end
put '/:id' do
job = authenticate_job!
job_forbidden!(job, 'Job is not running') unless job.running?
job.trace.set(params[:trace]) if params[:trace]
@@ -182,7 +181,6 @@
end
patch '/:id/trace' do
job = authenticate_job!
job_forbidden!(job, 'Job is not running') unless job.running?
error!('400 Missing header Content-Range', 400) unless request.headers.key?('Content-Range')
content_range = request.headers['Content-Range']
@@ -229,7 +227,6 @@
Gitlab::Workhorse.verify_api_request!(headers)
job = authenticate_job!
forbidden!('Job is not running') unless job.running?
service = Ci::AuthorizeJobArtifactService.new(job, params, max_size: max_artifacts_size(job))
@@ -265,7 +262,6 @@
require_gitlab_workhorse!
job = authenticate_job!
forbidden!('Job is not running!') unless job.running?
artifacts = params[:file]
metadata = params[:metadata]
@@ -292,7 +288,7 @@
optional :direct_download, default: false, type: Boolean, desc: %q(Perform direct download from remote storage instead of proxying artifacts)
end
get '/:id/artifacts' do
job = authenticate_job!
job = authenticate_job!(require_running: false)
present_carrierwave_file!(job.artifacts_file, supports_direct_download: params[:direct_download])
end

View File

@@ -6,8 +6,6 @@ module API
before { authenticate! }
helpers ::Gitlab::IssuableMetadata
ISSUABLE_TYPES = {
'merge_requests' => ->(iid) { find_merge_request_with_access(iid) },
'issues' => ->(iid) { find_project_issue(iid) }
@@ -65,7 +63,7 @@
next unless collection
targets = collection.map(&:target)
options[type] = { issuable_metadata: issuable_meta_data(targets, type, current_user) }
options[type] = { issuable_metadata: Gitlab::IssuableMetadata.new(current_user, targets).data }
end
end
end

View File

@@ -13,6 +13,10 @@ module Gitlab
def self.ensure_scheduling_type_enabled?
::Feature.enabled?(:ci_ensure_scheduling_type, default_enabled: true)
end
def self.job_heartbeats_runner?(project)
::Feature.enabled?(:ci_job_heartbeats_runner, project, default_enabled: true)
end
end
end
end

View File

@@ -1,8 +1,52 @@
# frozen_string_literal: true
module Gitlab
module IssuableMetadata
def issuable_meta_data(issuable_collection, collection_type, user = nil)
class IssuableMetadata
include Gitlab::Utils::StrongMemoize
# Data structure to store issuable meta data like
# upvotes, downvotes, notes and closing merge requests counts for issues and merge requests,
# avoiding N+1 queries when loading issuable collections on the frontend
IssuableMeta = Struct.new(:upvotes, :downvotes, :user_notes_count, :mrs_count) do
def merge_requests_count(user = nil)
mrs_count
end
end
attr_reader :current_user, :issuable_collection
def initialize(current_user, issuable_collection)
@current_user = current_user
@issuable_collection = issuable_collection
validate_collection!
end
def data
return {} if issuable_ids.empty?
issuable_ids.each_with_object({}) do |id, issuable_meta|
issuable_meta[id] = metadata_for_issuable(id)
end
end
private
def metadata_for_issuable(id)
downvotes = group_issuable_votes_count.find { |votes| votes.awardable_id == id && votes.downvote? }
upvotes = group_issuable_votes_count.find { |votes| votes.awardable_id == id && votes.upvote? }
notes = grouped_issuable_notes_count.find { |notes| notes.noteable_id == id }
merge_requests = grouped_issuable_merge_requests_count.find { |mr| mr.first == id }
IssuableMeta.new(
upvotes.try(:count).to_i,
downvotes.try(:count).to_i,
notes.try(:count).to_i,
merge_requests.try(:last).to_i
)
end
def validate_collection!
# ActiveRecord uses Object#extend for null relations.
if !(issuable_collection.singleton_class < ActiveRecord::NullRelation) &&
issuable_collection.respond_to?(:limit_value) &&
@@ -10,36 +54,43 @@ module Gitlab
raise 'Collection must have a limit applied for preloading meta-data'
end
end
# map has to be used here since using pluck or select will
# throw an error when ordering issuables by priority which inserts
# a new order into the collection.
# We cannot use reorder to not mess up the paginated collection.
issuable_ids = issuable_collection.map(&:id)
def issuable_ids
strong_memoize(:issuable_ids) do
# map has to be used here since using pluck or select will
# throw an error when ordering issuables by priority which inserts
# a new order into the collection.
# We cannot use reorder to not mess up the paginated collection.
issuable_collection.map(&:id)
end
end
return {} if issuable_ids.empty?
def collection_type
# Supports relations or paginated arrays
issuable_collection.try(:model)&.name ||
issuable_collection.first&.model_name.to_s
end
issuable_notes_count = ::Note.count_for_collection(issuable_ids, collection_type)
issuable_votes_count = ::AwardEmoji.votes_for_collection(issuable_ids, collection_type)
issuable_merge_requests_count =
def group_issuable_votes_count
strong_memoize(:group_issuable_votes_count) do
AwardEmoji.votes_for_collection(issuable_ids, collection_type)
end
end
def grouped_issuable_notes_count
strong_memoize(:grouped_issuable_notes_count) do
::Note.count_for_collection(issuable_ids, collection_type)
end
end
def grouped_issuable_merge_requests_count
strong_memoize(:grouped_issuable_merge_requests_count) do
if collection_type == 'Issue'
::MergeRequestsClosingIssues.count_for_collection(issuable_ids, user)
::MergeRequestsClosingIssues.count_for_collection(issuable_ids, current_user)
else
[]
end
issuable_ids.each_with_object({}) do |id, issuable_meta|
downvotes = issuable_votes_count.find { |votes| votes.awardable_id == id && votes.downvote? }
upvotes = issuable_votes_count.find { |votes| votes.awardable_id == id && votes.upvote? }
notes = issuable_notes_count.find { |notes| notes.noteable_id == id }
merge_requests = issuable_merge_requests_count.find { |mr| mr.first == id }
issuable_meta[id] = ::Issuable::IssuableMeta.new(
upvotes.try(:count).to_i,
downvotes.try(:count).to_i,
notes.try(:count).to_i,
merge_requests.try(:last).to_i
)
end
end
end
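
A usage sketch of the new class-based API, matching the call sites updated throughout this commit (the collection must carry a limit, otherwise the constructor raises):

issues = project.issues.page(params[:page])  # paginated, so a limit applies
meta   = Gitlab::IssuableMetadata.new(current_user, issues).data
# => { issue_id => IssuableMeta(upvotes, downvotes, user_notes_count, mrs_count), ... }

meta[issue.id].upvotes                # thumbs-up award count
meta[issue.id].merge_requests_count   # merge requests closing this issue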

View File

@@ -2231,9 +2231,6 @@ msgstr ""
msgid "An error occurred while loading chart data"
msgstr ""
msgid "An error occurred while loading clusters"
msgstr ""
msgid "An error occurred while loading commit signatures"
msgstr ""
@@ -5429,6 +5426,9 @@ msgstr ""
msgid "ClusterIntergation|Select service role"
msgstr ""
msgid "Clusters|An error occurred while loading clusters"
msgstr ""
msgid "Code"
msgstr ""

View File

@@ -41,7 +41,7 @@
"@babel/preset-env": "^7.8.4",
"@gitlab/at.js": "1.5.5",
"@gitlab/svgs": "1.130.0",
"@gitlab/ui": "16.0",
"@gitlab/ui": "16.0.0",
"@gitlab/visual-review-tools": "1.6.1",
"@rails/actioncable": "^6.0.3",
"@sentry/browser": "^5.10.2",

View File

@@ -42,6 +42,13 @@ describe Admin::ClustersController do
expect(response).to match_response_schema('cluster_list')
end
it 'sets the polling interval header for json requests' do
get_index(format: :json)
expect(response).to have_gitlab_http_status(:ok)
expect(response.headers['Poll-Interval']).to eq("10000")
end
context 'when page is specified' do
let(:last_page) { Clusters::Cluster.instance_type.page.total_pages }
let(:total_count) { Clusters::Cluster.instance_type.page.total_count }

View File

@@ -47,6 +47,13 @@ describe Groups::ClustersController do
expect(response).to match_response_schema('cluster_list')
end
it 'sets the polling interval header for json requests' do
go(format: :json)
expect(response).to have_gitlab_http_status(:ok)
expect(response.headers['Poll-Interval']).to eq("10000")
end
context 'when page is specified' do
let(:last_page) { group.clusters.page.total_pages }
let(:total_count) { group.clusters.page.total_count }

View File

@@ -41,6 +41,13 @@ describe Projects::ClustersController do
expect(response).to match_response_schema('cluster_list')
end
it 'sets the polling interval header for json requests' do
go(format: :json)
expect(response).to have_gitlab_http_status(:ok)
expect(response.headers['Poll-Interval']).to eq("10000")
end
context 'when page is specified' do
let(:last_page) { project.clusters.page.total_pages }
let(:total_count) { project.clusters.page.total_count }

View File

@@ -95,45 +95,6 @@ describe 'issue move to another project' do
expect(page).to have_no_selector('#move_to_project_id')
end
end
context 'service desk issue moved to a project with service desk disabled', :js do
let(:project_title) { 'service desk disabled project' }
let(:warning_selector) { '.js-alert-moved-from-service-desk-warning' }
let(:namespace) { create(:namespace) }
let(:regular_project) { create(:project, title: project_title, service_desk_enabled: false) }
let(:service_desk_project) { build(:project, :private, namespace: namespace, service_desk_enabled: true) }
let(:service_desk_issue) { create(:issue, project: service_desk_project, author: ::User.support_bot) }
before do
allow(::Gitlab).to receive(:com?).and_return(true)
allow(::Gitlab::IncomingEmail).to receive(:enabled?).and_return(true)
allow(::Gitlab::IncomingEmail).to receive(:supports_wildcard?).and_return(true)
regular_project.add_reporter(user)
service_desk_project.add_reporter(user)
visit issue_path(service_desk_issue)
find('.js-move-issue').click
wait_for_requests
find('.js-move-issue-dropdown-item', text: project_title).click
find('.js-move-issue-confirmation-button').click
end
it 'shows an alert after being moved' do
expect(page).to have_content('This project does not have Service Desk enabled')
end
it 'does not show an alert after being dismissed' do
find("#{warning_selector} .js-close").click
expect(page).to have_no_selector(warning_selector)
page.refresh
expect(page).to have_no_selector(warning_selector)
end
end
end
def issue_path(issue)

View File

@@ -28,13 +28,17 @@ describe('Clusters', () => {
return axios.waitForAll();
};
const paginationHeader = (total = apiData.clusters.length, perPage = 20, currentPage = 1) => {
return {
'x-total': total,
'x-per-page': perPage,
'x-page': currentPage,
};
};
beforeEach(() => {
mock = new MockAdapter(axios);
mockPollingApi(200, apiData, {
'x-total': apiData.clusters.length,
'x-per-page': 20,
'x-page': 1,
});
mockPollingApi(200, apiData, paginationHeader());
return mountWrapper();
});
@@ -99,17 +103,30 @@ describe('Clusters', () => {
});
});
describe('nodes present', () => {
it.each`
nodeSize | lineNumber
${'Unknown'} | ${0}
${'1'} | ${1}
${'2'} | ${2}
${'Unknown'} | ${3}
${'Unknown'} | ${4}
${'Unknown'} | ${5}
`('renders node size for each cluster', ({ nodeSize, lineNumber }) => {
const sizes = findTable().findAll('td:nth-child(3)');
const size = sizes.at(lineNumber);
expect(size.text()).toBe(nodeSize);
});
});
describe('pagination', () => {
const perPage = apiData.clusters.length;
const totalFirstPage = 100;
const totalSecondPage = 500;
beforeEach(() => {
mockPollingApi(200, apiData, {
'x-total': totalFirstPage,
'x-per-page': perPage,
'x-page': 1,
});
mockPollingApi(200, apiData, paginationHeader(totalFirstPage, perPage, 1));
return mountWrapper();
});
@@ -123,11 +140,7 @@
describe('when updating currentPage', () => {
beforeEach(() => {
mockPollingApi(200, apiData, {
'x-total': totalSecondPage,
'x-per-page': perPage,
'x-page': 2,
});
mockPollingApi(200, apiData, paginationHeader(totalSecondPage, perPage, 2));
wrapper.setData({ currentPage: 2 });
return axios.waitForAll();
});

View File

@@ -1,57 +1,45 @@
export const clusterList = [
{
name: 'My Cluster 1',
environmentScope: '*',
size: '3',
clusterType: 'group_type',
environment_scope: '*',
cluster_type: 'group_type',
status: 'disabled',
cpu: '6 (100% free)',
memory: '22.50 (30% free)',
nodes: null,
},
{
name: 'My Cluster 2',
environmentScope: 'development',
size: '12',
clusterType: 'project_type',
environment_scope: 'development',
cluster_type: 'project_type',
status: 'unreachable',
cpu: '3 (50% free)',
memory: '11 (60% free)',
nodes: [{ usage: { cpu: '246155922n', memory: '1255212Ki' } }],
},
{
name: 'My Cluster 3',
environmentScope: 'development',
size: '12',
clusterType: 'project_type',
environment_scope: 'development',
cluster_type: 'project_type',
status: 'authentication_failure',
cpu: '1 (0% free)',
memory: '22 (33% free)',
nodes: [
{ usage: { cpu: '246155922n', memory: '1255212Ki' } },
{ usage: { cpu: '307051934n', memory: '1379136Ki' } },
],
},
{
name: 'My Cluster 4',
environmentScope: 'production',
size: '12',
clusterType: 'project_type',
environment_scope: 'production',
cluster_type: 'project_type',
status: 'deleting',
cpu: '6 (100% free)',
memory: '45 (15% free)',
},
{
name: 'My Cluster 5',
environmentScope: 'development',
size: '12',
clusterType: 'project_type',
environment_scope: 'development',
cluster_type: 'project_type',
status: 'created',
cpu: '6 (100% free)',
memory: '20.12 (35% free)',
},
{
name: 'My Cluster 6',
environmentScope: '*',
size: '1',
clusterType: 'project_type',
environment_scope: '*',
cluster_type: 'project_type',
status: 'cleanup_ongoing',
cpu: '6 (100% free)',
memory: '20.12 (35% free)',
},
];

View File

@@ -1,10 +1,14 @@
import MockAdapter from 'axios-mock-adapter';
import Poll from '~/lib/utils/poll';
import flashError from '~/flash';
import testAction from 'helpers/vuex_action_helper';
import axios from '~/lib/utils/axios_utils';
import waitForPromises from 'helpers/wait_for_promises';
import { apiData } from '../mock_data';
import { MAX_REQUESTS } from '~/clusters_list/constants';
import * as types from '~/clusters_list/store/mutation_types';
import * as actions from '~/clusters_list/store/actions';
import * as Sentry from '@sentry/browser';
jest.mock('~/flash.js');
@@ -12,6 +16,24 @@ describe('Clusters store actions', () => {
describe('fetchClusters', () => {
let mock;
const headers = {
'x-next-page': 1,
'x-total': apiData.clusters.length,
'x-total-pages': 1,
'x-per-page': 20,
'x-page': 1,
'x-prev-page': 1,
};
const paginationInformation = {
nextPage: 1,
page: 1,
perPage: 20,
previousPage: 1,
total: apiData.clusters.length,
totalPages: 1,
};
beforeEach(() => {
mock = new MockAdapter(axios);
});
@@ -19,21 +41,6 @@
afterEach(() => mock.restore());
it('should commit SET_CLUSTERS_DATA with received response', done => {
const headers = {
'x-total': apiData.clusters.length,
'x-per-page': 20,
'x-page': 1,
};
const paginationInformation = {
nextPage: NaN,
page: 1,
perPage: 20,
previousPage: NaN,
total: apiData.clusters.length,
totalPages: NaN,
};
mock.onGet().reply(200, apiData, headers);
testAction(
@@ -52,9 +59,110 @@
it('should show flash on API error', done => {
mock.onGet().reply(400, 'Not Found');
testAction(actions.fetchClusters, { endpoint: apiData.endpoint }, {}, [], [], () => {
expect(flashError).toHaveBeenCalledWith(expect.stringMatching('error'));
done();
testAction(
actions.fetchClusters,
{ endpoint: apiData.endpoint },
{},
[{ type: types.SET_LOADING_STATE, payload: false }],
[],
() => {
expect(flashError).toHaveBeenCalledWith(expect.stringMatching('error'));
done();
},
);
});
describe('multiple api requests', () => {
let captureException;
let pollRequest;
let pollStop;
const pollInterval = 10;
const pollHeaders = { 'poll-interval': pollInterval, ...headers };
beforeEach(() => {
captureException = jest.spyOn(Sentry, 'captureException');
pollRequest = jest.spyOn(Poll.prototype, 'makeRequest');
pollStop = jest.spyOn(Poll.prototype, 'stop');
mock.onGet().reply(200, apiData, pollHeaders);
});
afterEach(() => {
captureException.mockRestore();
pollRequest.mockRestore();
pollStop.mockRestore();
});
it('should stop polling after MAX Requests', done => {
testAction(
actions.fetchClusters,
{ endpoint: apiData.endpoint },
{},
[
{ type: types.SET_CLUSTERS_DATA, payload: { data: apiData, paginationInformation } },
{ type: types.SET_LOADING_STATE, payload: false },
],
[],
() => {
expect(pollRequest).toHaveBeenCalledTimes(1);
expect(pollStop).toHaveBeenCalledTimes(0);
jest.advanceTimersByTime(pollInterval);
waitForPromises()
.then(() => {
expect(pollRequest).toHaveBeenCalledTimes(2);
expect(pollStop).toHaveBeenCalledTimes(0);
jest.advanceTimersByTime(pollInterval);
})
.then(() => waitForPromises())
.then(() => {
expect(pollRequest).toHaveBeenCalledTimes(MAX_REQUESTS);
expect(pollStop).toHaveBeenCalledTimes(0);
jest.advanceTimersByTime(pollInterval);
})
.then(() => waitForPromises())
.then(() => {
expect(pollRequest).toHaveBeenCalledTimes(MAX_REQUESTS + 1);
// Stops poll once it exceeds the MAX_REQUESTS limit
expect(pollStop).toHaveBeenCalledTimes(1);
jest.advanceTimersByTime(pollInterval);
})
.then(() => waitForPromises())
.then(() => {
// Additional poll requests are not made once pollStop is called
expect(pollRequest).toHaveBeenCalledTimes(MAX_REQUESTS + 1);
expect(pollStop).toHaveBeenCalledTimes(1);
})
.then(done)
.catch(done.fail);
},
);
});
it('should stop polling and report to Sentry when data is invalid', done => {
const badApiResponse = { clusters: {} };
mock.onGet().reply(200, badApiResponse, pollHeaders);
testAction(
actions.fetchClusters,
{ endpoint: apiData.endpoint },
{},
[
{
type: types.SET_CLUSTERS_DATA,
payload: { data: badApiResponse, paginationInformation },
},
{ type: types.SET_LOADING_STATE, payload: false },
],
[],
() => {
expect(pollRequest).toHaveBeenCalledTimes(1);
expect(pollStop).toHaveBeenCalledTimes(1);
expect(captureException).toHaveBeenCalledTimes(1);
done();
},
);
});
});
});

View File

@@ -6,14 +6,12 @@ describe Gitlab::IssuableMetadata do
let(:user) { create(:user) }
let!(:project) { create(:project, :public, :repository, creator: user, namespace: user.namespace) }
subject { Class.new { include Gitlab::IssuableMetadata }.new }
it 'returns an empty Hash if an empty collection is provided' do
expect(subject.issuable_meta_data(Issue.none, 'Issue', user)).to eq({})
expect(described_class.new(user, Issue.none).data).to eq({})
end
it 'raises an error when given a collection with no limit' do
expect { subject.issuable_meta_data(Issue.all, 'Issue', user) }.to raise_error(/must have a limit/)
expect { described_class.new(user, Issue.all) }.to raise_error(/must have a limit/)
end
context 'issues' do
@@ -25,7 +23,7 @@ describe Gitlab::IssuableMetadata do
let!(:closing_issues) { create(:merge_requests_closing_issues, issue: issue, merge_request: merge_request) }
it 'aggregates stats on issues' do
data = subject.issuable_meta_data(Issue.all.limit(10), 'Issue', user)
data = described_class.new(user, Issue.all.limit(10)).data
expect(data.count).to eq(2)
expect(data[issue.id].upvotes).to eq(1)
@@ -48,7 +46,7 @@
let!(:note) { create(:note_on_merge_request, author: user, project: project, noteable: merge_request, note: "a comment on a MR") }
it 'aggregates stats on merge requests' do
data = subject.issuable_meta_data(MergeRequest.all.limit(10), 'MergeRequest', user)
data = described_class.new(user, MergeRequest.all.limit(10)).data
expect(data.count).to eq(2)
expect(data[merge_request.id].upvotes).to eq(1)

View File

@@ -263,7 +263,7 @@ describe Ci::Runner do
subject { described_class.online }
before do
@runner1 = create(:ci_runner, :instance, contacted_at: 1.hour.ago)
@runner1 = create(:ci_runner, :instance, contacted_at: 2.hours.ago)
@runner2 = create(:ci_runner, :instance, contacted_at: 1.second.ago)
end
@@ -344,7 +344,7 @@ describe Ci::Runner do
subject { described_class.offline }
before do
@runner1 = create(:ci_runner, :instance, contacted_at: 1.hour.ago)
@runner1 = create(:ci_runner, :instance, contacted_at: 2.hours.ago)
@runner2 = create(:ci_runner, :instance, contacted_at: 1.second.ago)
end
@@ -598,10 +598,10 @@ describe Ci::Runner do
end
end
describe '#update_cached_info' do
describe '#heartbeat' do
let(:runner) { create(:ci_runner, :project) }
subject { runner.update_cached_info(architecture: '18-bit') }
subject { runner.heartbeat(architecture: '18-bit') }
context 'when database was updated recently' do
before do

View File

@@ -46,10 +46,7 @@ describe Iteration do
end
context 'when dates overlap' do
let(:start_date) { 5.days.from_now }
let(:due_date) { 6.days.from_now }
shared_examples_for 'overlapping dates' do
context 'same group' do
context 'when start_date is in range' do
let(:start_date) { 5.days.from_now }
let(:due_date) { 3.weeks.from_now }
@@ -58,11 +55,6 @@
expect(subject).not_to be_valid
expect(subject.errors[:base]).to include('Dates cannot overlap with other existing Iterations')
end
it 'is not valid even if forced' do
subject.validate # to generate iid/etc
expect { subject.save(validate: false) }.to raise_exception(ActiveRecord::StatementInvalid, /#{constraint_name}/)
end
end
context 'when end_date is in range' do
@@ -73,84 +65,25 @@
expect(subject).not_to be_valid
expect(subject.errors[:base]).to include('Dates cannot overlap with other existing Iterations')
end
it 'is not valid even if forced' do
subject.validate # to generate iid/etc
expect { subject.save(validate: false) }.to raise_exception(ActiveRecord::StatementInvalid, /#{constraint_name}/)
end
end
context 'when both overlap' do
let(:start_date) { 5.days.from_now }
let(:due_date) { 6.days.from_now }
it 'is not valid' do
expect(subject).not_to be_valid
expect(subject.errors[:base]).to include('Dates cannot overlap with other existing Iterations')
end
it 'is not valid even if forced' do
subject.validate # to generate iid/etc
expect { subject.save(validate: false) }.to raise_exception(ActiveRecord::StatementInvalid, /#{constraint_name}/)
end
end
end
context 'group' do
it_behaves_like 'overlapping dates' do
let(:constraint_name) { 'iteration_start_and_due_daterange_group_id_constraint' }
end
context 'different group' do
let(:start_date) { 5.days.from_now }
let(:due_date) { 6.days.from_now }
let(:group) { create(:group) }
context 'different group' do
let(:group) { create(:group) }
it { is_expected.to be_valid }
it 'does not trigger exclusion constraints' do
expect { subject.save }.not_to raise_exception
end
end
context 'in a project' do
let(:project) { create(:project) }
subject { build(:iteration, project: project, start_date: start_date, due_date: due_date) }
it { is_expected.to be_valid }
it 'does not trigger exclusion constraints' do
expect { subject.save }.not_to raise_exception
end
end
end
context 'project' do
let_it_be(:existing_iteration) { create(:iteration, project: project, start_date: 4.days.from_now, due_date: 1.week.from_now) }
subject { build(:iteration, project: project, start_date: start_date, due_date: due_date) }
it_behaves_like 'overlapping dates' do
let(:constraint_name) { 'iteration_start_and_due_daterange_project_id_constraint' }
end
context 'different project' do
let(:project) { create(:project) }
it { is_expected.to be_valid }
it 'does not trigger exclusion constraints' do
expect { subject.save }.not_to raise_exception
end
end
context 'in a group' do
let(:group) { create(:group) }
subject { build(:iteration, group: group, start_date: start_date, due_date: due_date) }
it { is_expected.to be_valid }
it 'does not trigger exclusion constraints' do
expect { subject.save }.not_to raise_exception
end
end
it { is_expected.to be_valid }
end
end
end

View File

@@ -1129,6 +1129,10 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
let(:send_request) { update_job(state: 'success') }
end
it 'updates runner info' do
expect { update_job(state: 'success') }.to change { runner.reload.contacted_at }
end
context 'when status is given' do
it 'mark job as succeeded' do
update_job(state: 'success')
@@ -1294,6 +1298,12 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
let(:send_request) { patch_the_trace }
end
it 'updates runner info' do
runner.update!(contacted_at: 1.year.ago)
expect { patch_the_trace }.to change { runner.reload.contacted_at }
end
context 'when request is valid' do
it 'gets correct response' do
expect(response).to have_gitlab_http_status(:accepted)
@@ -1555,6 +1565,10 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
let(:send_request) { subject }
end
it 'updates runner info' do
expect { subject }.to change { runner.reload.contacted_at }
end
shared_examples 'authorizes local file' do
it 'succeeds' do
subject
@@ -1743,6 +1757,10 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
end
end
it 'updates runner info' do
expect { upload_artifacts(file_upload, headers_with_token) }.to change { runner.reload.contacted_at }
end
context 'when artifacts are being stored inside of tmp path' do
before do
# by configuring this path we allow to pass temp file from any path
@@ -2228,6 +2246,10 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
let(:send_request) { download_artifact }
end
it 'updates runner info' do
expect { download_artifact }.to change { runner.reload.contacted_at }
end
context 'when job has artifacts' do
let(:job) { create(:ci_build) }
let(:store) { JobArtifactUploader::Store::LOCAL }

View File

@@ -34,7 +34,7 @@ RSpec.shared_examples 'issuables list meta-data' do |issuable_type, action = nil
aggregate_failures do
expect(meta_data.keys).to match_array(issuables.map(&:id))
expect(meta_data.values).to all(be_kind_of(Issuable::IssuableMeta))
expect(meta_data.values).to all(be_kind_of(Gitlab::IssuableMetadata::IssuableMeta))
end
end

View File

@@ -787,7 +787,7 @@
resolved "https://registry.yarnpkg.com/@gitlab/svgs/-/svgs-1.130.0.tgz#0c2f3cdc0a4b0f54c47b2861c8fa31b2a58c570a"
integrity sha512-azJ1E9PBk6fGOaP6816BSr8oYrQu3m3BbYZwWOCUp8AfbZuf0ZOZVYmlR9i/eAOhoqqqmwF8hYCK2VjAklbpPA==
"@gitlab/ui@16.0":
"@gitlab/ui@16.0.0":
version "16.0.0"
resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-16.0.0.tgz#0e2d19b85c47f45a052caf6cd0367613cbab8e8e"
integrity sha512-xSWXtFWWQzGtL35dGexc5LGqAJXYjLMEFQyPLzCBX3yY9tkI9s9rVMX053tnKYb9kgEmL+R/xGiW7D9nb58VmQ==