Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2022-02-09 12:12:04 +00:00
parent 7c2cf0604b
commit 399b67163d
30 changed files with 760 additions and 144 deletions

View File

@ -231,7 +231,6 @@ Naming/HeredocDelimiterNaming:
Naming/MethodParameterName:
Exclude:
- 'lib/gitlab/diff/inline_diff.rb'
- 'spec/support/helpers/key_generator_helper.rb'
# Offense count: 218
# Cop supports --auto-correct.

View File

@ -12,7 +12,7 @@ export const DEPLOYMENT_TARGET_SELECTIONS = [
s__('DeploymentTarget|Serverless backend (Lambda, Cloud functions)'),
s__('DeploymentTarget|GitLab Pages'),
s__('DeploymentTarget|Other hosting service'),
s__('DeploymentTarget|None'),
s__('DeploymentTarget|No deployment planned'),
];
export const NEW_PROJECT_FORM = 'new_project';

View File

@ -0,0 +1,41 @@
<script>
import { GlAvatar, GlLink } from '@gitlab/ui';

// Renders one item assigned to a runner: a linked rectangular avatar
// followed by the item's linked full name, both pointing at `href`.
export default {
  components: {
    GlAvatar,
    GlLink,
  },
  props: {
    // URL that both the avatar link and the full-name link navigate to.
    href: {
      type: String,
      required: true,
    },
    // Short name; used as the avatar's entity name and alt text.
    name: {
      type: String,
      required: true,
    },
    // Display name rendered as the text link next to the avatar.
    fullName: {
      type: String,
      required: true,
    },
    // Optional avatar image URL (may be null).
    avatarUrl: {
      type: String,
      required: false,
      default: null,
    },
  },
};
</script>

<template>
  <div class="gl-display-flex gl-align-items-center gl-py-5">
    <gl-link :href="href" data-testid="item-avatar" class="gl-text-decoration-none! gl-mr-3">
      <gl-avatar shape="rect" :entity-name="name" :alt="name" :src="avatarUrl" :size="48" />
    </gl-link>
    <gl-link :href="href" class="gl-font-lg gl-font-weight-bold gl-text-gray-900!">{{
      fullName
    }}</gl-link>
  </div>
</template>

View File

@ -1,10 +1,9 @@
<script>
import { GlAvatar, GlLink } from '@gitlab/ui';
import RunnerAssignedItem from './runner_assigned_item.vue';
export default {
components: {
GlAvatar,
GlLink,
RunnerAssignedItem,
},
props: {
runner: {
@ -22,27 +21,16 @@ export default {
<template>
<div class="gl-border-t-gray-100 gl-border-t-1 gl-border-t-solid">
<h3 class="gl-font-lg gl-my-5">{{ s__('Runners|Assigned Group') }}</h3>
<h3 class="gl-font-lg gl-mt-5 gl-mb-0">{{ s__('Runners|Assigned Group') }}</h3>
<template v-if="groups.length">
<div v-for="group in groups" :key="group.id" class="gl-display-flex gl-align-items-center">
<gl-link
:href="group.webUrl"
data-testid="group-avatar"
class="gl-text-decoration-none! gl-mr-3"
>
<gl-avatar
shape="rect"
:entity-name="group.name"
:src="group.avatarUrl"
:alt="group.name"
:size="48"
/>
</gl-link>
<gl-link :href="group.webUrl" class="gl-font-lg gl-font-weight-bold gl-text-gray-900!">{{
group.fullName
}}</gl-link>
</div>
<runner-assigned-item
v-for="group in groups"
:key="group.id"
:href="group.webUrl"
:name="group.name"
:full-name="group.fullName"
:avatar-url="group.avatarUrl"
/>
</template>
<span v-else class="gl-text-gray-500">{{ __('None') }}</span>
</div>

View File

@ -23,7 +23,7 @@ class Projects::RepositoriesController < Projects::ApplicationController
feature_category :source_code_management
def create
@project.create_repository
@project.create_repository unless @project.repository_exists?
redirect_to project_path(@project)
end

View File

@ -43,6 +43,7 @@ class Namespace < ApplicationRecord
has_many :projects, dependent: :destroy # rubocop:disable Cop/ActiveRecordDependent
has_many :project_statistics
has_one :namespace_settings, inverse_of: :namespace, class_name: 'NamespaceSetting', autosave: true
has_one :namespace_statistics
has_one :namespace_route, foreign_key: :namespace_id, autosave: false, inverse_of: :namespace, class_name: 'Route'
has_many :namespace_members, foreign_key: :member_namespace_id, inverse_of: :member_namespace, class_name: 'Member'

View File

@ -27,10 +27,17 @@ class Namespace::RootStorageStatistics < ApplicationRecord
update!(merged_attributes)
end
def self.namespace_statistics_attributes
%w(storage_size dependency_proxy_size)
end
private
def merged_attributes
attributes_from_project_statistics.merge!(attributes_from_personal_snippets) { |key, v1, v2| v1 + v2 }
attributes_from_project_statistics.merge!(
attributes_from_personal_snippets,
attributes_from_namespace_statistics
) { |key, v1, v2| v1 + v2 }
end
def attributes_from_project_statistics
@ -68,6 +75,27 @@ class Namespace::RootStorageStatistics < ApplicationRecord
.where(author: namespace.owner_id)
.select("COALESCE(SUM(s.repository_size), 0) AS #{SNIPPETS_SIZE_STAT_NAME}")
end
def from_namespace_statistics
namespace
.self_and_descendants
.joins("INNER JOIN namespace_statistics ns ON ns.namespace_id = namespaces.id")
.select(
'COALESCE(SUM(ns.storage_size), 0) AS storage_size',
'COALESCE(SUM(ns.dependency_proxy_size), 0) AS dependency_proxy_size'
)
end
def attributes_from_namespace_statistics
    # At the moment, only groups can have some storage data because of dependency proxy assets.
    # Therefore, if the namespace is not a group one, there is no need to perform
    # the query. If this changes in the future and we add some sort of resource
    # for users that is stored in NamespaceStatistics, we will need to remove this
    # guard clause.
return {} unless namespace.group_namespace?
from_namespace_statistics.take.slice(*self.class.namespace_statistics_attributes)
end
end
Namespace::RootStorageStatistics.prepend_mod_with('Namespace::RootStorageStatistics')

View File

@ -0,0 +1,56 @@
# frozen_string_literal: true

# Storage statistics attached to a single namespace. Only group namespaces
# accumulate data here: sizes are computed from the namespace's dependency
# proxy manifests and blobs.
class NamespaceStatistics < ApplicationRecord # rubocop:disable Gitlab/NamespacedClass
  include AfterCommitQueue

  belongs_to :namespace

  validates :namespace, presence: true

  scope :for_namespaces, -> (namespaces) { where(namespace: namespaces) }

  # Keep storage_size in sync on every save, and schedule a root-statistics
  # refresh whenever the persisted size actually changed (or the row goes away).
  before_save :update_storage_size
  after_save :update_root_storage_statistics, if: :saved_change_to_storage_size?
  after_destroy :update_root_storage_statistics

  delegate :group_namespace?, to: :namespace

  # Recomputes the statistics columns — all of them, or only those listed in
  # +only+ — and persists the record. No-op on a read-only database and for
  # non-group namespaces.
  def refresh!(only: [])
    return if Gitlab::Database.read_only?
    return unless group_namespace?

    self.class.columns_to_refresh.each do |column|
      if only.empty? || only.include?(column)
        public_send("update_#{column}") # rubocop:disable GitlabSecurity/PublicSend
      end
    end

    save!
  end

  # storage_size is currently just the dependency proxy footprint.
  def update_storage_size
    self.storage_size = dependency_proxy_size
  end

  # Sums the sizes of all dependency proxy manifests and blobs for the group.
  def update_dependency_proxy_size
    return unless group_namespace?

    self.dependency_proxy_size = namespace.dependency_proxy_manifests.sum(:size) + namespace.dependency_proxy_blobs.sum(:size)
  end

  # Columns that refresh! knows how to recompute (one update_* method each).
  def self.columns_to_refresh
    [:dependency_proxy_size]
  end

  private

  # Enqueues an async aggregation for this namespace once the surrounding
  # transaction commits.
  def update_root_storage_statistics
    return unless group_namespace?

    run_after_commit do
      Namespaces::ScheduleAggregationWorker.perform_async(namespace.id)
    end
  end
end

NamespaceStatistics.prepend_mod_with('NamespaceStatistics')

View File

@ -57,6 +57,12 @@ class ProjectImportState < ApplicationRecord
end
end
after_transition any => :failed do |state, _|
if Feature.enabled?(:remove_import_data_on_failure, state.project, default_enabled: :yaml)
state.project.remove_import_data
end
end
after_transition started: :finished do |state, _|
project = state.project

View File

@ -2,6 +2,8 @@
module Issues
class MoveService < Issuable::Clone::BaseService
extend ::Gitlab::Utils::Override
MoveError = Class.new(StandardError)
def execute(issue, target_project)
@ -47,6 +49,7 @@ module Issues
.sent_notifications.update_all(project_id: new_entity.project_id, noteable_id: new_entity.id)
end
override :update_old_entity
def update_old_entity
super
@ -54,6 +57,13 @@ module Issues
mark_as_moved
end
override :update_new_entity
def update_new_entity
super
copy_contacts
end
def create_new_entity
new_params = {
id: nil,
@ -99,6 +109,13 @@ module Issues
target_issue_links.update_all(target_id: new_entity.id)
end
def copy_contacts
return unless Feature.enabled?(:customer_relations, original_entity.project.root_ancestor)
return unless original_entity.project.root_ancestor == new_entity.project.root_ancestor
new_entity.customer_relations_contacts = original_entity.customer_relations_contacts
end
def notify_participants
notification_service.async.issue_moved(original_entity, new_entity, @current_user)
end

View File

@ -3,5 +3,5 @@
.label-actions-list
= link_to edit_admin_label_path(label), class: 'btn btn-default gl-button btn-default-tertiary label-action has-tooltip', title: _('Edit'), data: { placement: 'bottom' }, aria_label: _('Edit') do
= sprite_icon('pencil')
= link_to admin_label_path(label), class: 'btn btn-default gl-button btn-default-tertiary hover-red js-remove-label label-action has-tooltip', title: _('Delete'), data: { placement: 'bottom', confirm: "Delete this label? Are you sure?" }, aria_label: _('Delete'), method: :delete, remote: true do
= link_to admin_label_path(label), class: 'btn btn-default gl-button btn-default-tertiary hover-red js-remove-label label-action has-tooltip', title: _('Delete'), data: { placement: 'bottom', confirm: _('Are you sure you want to delete this label?'), confirm_btn_variant: 'danger' }, aria: { label: _('Delete label') }, method: :delete, remote: true do
= sprite_icon('remove')

View File

@ -0,0 +1,8 @@
---
name: remove_import_data_on_failure
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/80074
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/352156
milestone: '14.8'
type: development
group: group::source code
default_enabled: false

View File

@ -346,3 +346,83 @@ object using `UploadedFile#from_params`! This method can be unsafe to use depend
passed. Instead, use the [`UploadedFile`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/uploaded_file.rb)
object that [`multipart.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/middleware/multipart.rb)
builds automatically for you.
### Document Object Storage buckets and CarrierWave integration
When using Object Storage, GitLab expects each kind of upload to maintain its own bucket in the respective
Object Storage destination. Moreover, the integration with CarrierWave is not used all the time.
The [Object Storage Working Group](https://about.gitlab.com/company/team/structure/working-groups/object-storage/)
is investigating an approach that unifies Object Storage buckets into a single one and removes CarrierWave
so as to simplify implementation and administration of uploads.
Therefore, document new uploads here by slotting them into the following tables:
- [Feature bucket details](#feature-bucket-details)
- [CarrierWave integration](#carrierwave-integration)
#### Feature bucket details
| Feature | Upload technology | Uploader | Bucket structure |
|------------------------------------------|-------------------|-----------------------|-----------------------------------------------------------------------------------------------------------|
| Job artifacts | `direct upload` | `workhorse` | `/artifacts/<proj_id_hash>/<date>/<job_id>/<artifact_id>` |
| Pipeline artifacts | `carrierwave` | `sidekiq` | `/artifacts/<proj_id_hash>/pipelines/<pipeline_id>/artifacts/<artifact_id>` |
| Live job traces | `fog` | `sidekiq` | `/artifacts/tmp/builds/<job_id>/chunks/<chunk_index>.log` |
| Job traces archive | `carrierwave` | `sidekiq` | `/artifacts/<proj_id_hash>/<date>/<job_id>/<artifact_id>/job.log` |
| Autoscale runner caching | N/A | `gitlab-runner` | `/gitlab-com-[platform-]runners-cache/???` |
| Backups | N/A | `s3cmd`, `awscli`, or `gcs` | `/gitlab-backups/???` |
| Git LFS                                   | `direct upload`   | `workhorse`           | `/lfs-objects/<lfs_obj_oid[0:2]>/<lfs_obj_oid[2:2]>`                                                       |
| Design management files                   | `disk buffering`  | `rails controller`    | `/lfs-objects/<lfs_obj_oid[0:2]>/<lfs_obj_oid[2:2]>`                                                       |
| Design management thumbnails | `carrierwave` | `sidekiq` | `/uploads/design_management/action/image_v432x230/<model_id>` |
| Generic file uploads | `direct upload` | `workhorse` | `/uploads/@hashed/[0:2]/[2:4]/<hash1>/<hash2>/file` |
| Generic file uploads - personal snippets | `direct upload` | `workhorse` | `/uploads/personal_snippet/<snippet_id>/<filename>` |
| Global appearance settings | `disk buffering` | `rails controller` | `/uploads/appearance/...` |
| Topics | `disk buffering` | `rails controller` | `/uploads/projects/topic/...` |
| Avatar images | `direct upload` | `workhorse` | `/uploads/[user,group,project]/avatar/<model_id>` |
| Import/export | `direct upload` | `workhorse` | `/uploads/import_export_upload/???` |
| GitLab Migration | `carrierwave` | `sidekiq` | `/uploads/bulk_imports/???` |
| MR diffs | `carrierwave` | `sidekiq` | `/external-diffs/merge_request_diffs/mr-<mr_id>/diff-<diff_id>` |
| Package manager archives | `direct upload` | `sidekiq` | `/packages/<proj_id_hash>/packages/<pkg_segment>/files/<pkg_file_id>` |
| Package manager archives | `direct upload` | `sidekiq` | `/packages/<container_id_hash>/debian_*_component_file/<component_file_id>` |
| Package manager archives | `direct upload` | `sidekiq` | `/packages/<container_id_hash>/debian_*_distribution/<distribution_file_id>` |
| Container image cache (?) | `direct upload` | `workhorse` | `/dependency-proxy/<group_id_hash>/dependency_proxy/<group_id>/files/<proxy_id>/<blob_id or manifest_id>` |
| Terraform state files | `carrierwave` | `rails controller` | `/terraform/<proj_id_hash>/<terraform_state_id>` |
| Pages content archives | `carrierwave` | `sidekiq` | `/gitlab-gprd-pages/<proj_id_hash>/pages_deployments/<deployment_id>/` |
#### CarrierWave integration
| File | Carrierwave usage | Categorized |
|---------------------------------------------------------|----------------------------------------------------------------------------------|---------------------|
| `app/models/project.rb` | `include Avatarable` | :white_check_mark: |
| `app/models/projects/topic.rb` | `include Avatarable` | :white_check_mark: |
| `app/models/group.rb` | `include Avatarable` | :white_check_mark: |
| `app/models/user.rb` | `include Avatarable` | :white_check_mark: |
| `app/models/terraform/state_version.rb` | `include FileStoreMounter` | :white_check_mark: |
| `app/models/ci/job_artifact.rb` | `include FileStoreMounter` | :white_check_mark: |
| `app/models/ci/pipeline_artifact.rb` | `include FileStoreMounter` | :white_check_mark: |
| `app/models/pages_deployment.rb` | `include FileStoreMounter` | :white_check_mark: |
| `app/models/lfs_object.rb` | `include FileStoreMounter` | :white_check_mark: |
| `app/models/dependency_proxy/blob.rb` | `include FileStoreMounter` | :white_check_mark: |
| `app/models/dependency_proxy/manifest.rb` | `include FileStoreMounter` | :white_check_mark: |
| `app/models/packages/composer/cache_file.rb` | `include FileStoreMounter` | :white_check_mark: |
| `app/models/packages/package_file.rb` | `include FileStoreMounter` | :white_check_mark: |
| `app/models/concerns/packages/debian/component_file.rb` | `include FileStoreMounter` | :white_check_mark: |
| `ee/app/models/issuable_metric_image.rb` | `include FileStoreMounter` | |
| `ee/app/models/vulnerabilities/remediation.rb` | `include FileStoreMounter` | |
| `ee/app/models/vulnerabilities/export.rb` | `include FileStoreMounter` | |
| `app/models/packages/debian/project_distribution.rb` | `include Packages::Debian::Distribution` | :white_check_mark: |
| `app/models/packages/debian/group_distribution.rb` | `include Packages::Debian::Distribution` | :white_check_mark: |
| `app/models/packages/debian/project_component_file.rb` | `include Packages::Debian::ComponentFile` | :white_check_mark: |
| `app/models/packages/debian/group_component_file.rb` | `include Packages::Debian::ComponentFile` | :white_check_mark: |
| `app/models/merge_request_diff.rb` | `mount_uploader :external_diff, ExternalDiffUploader` | :white_check_mark: |
| `app/models/note.rb` | `mount_uploader :attachment, AttachmentUploader` | :white_check_mark: |
| `app/models/appearance.rb` | `mount_uploader :logo, AttachmentUploader` | :white_check_mark: |
| `app/models/appearance.rb` | `mount_uploader :header_logo, AttachmentUploader` | :white_check_mark: |
| `app/models/appearance.rb` | `mount_uploader :favicon, FaviconUploader` | :white_check_mark: |
| `app/models/project.rb` | `mount_uploader :bfg_object_map, AttachmentUploader` | |
| `app/models/import_export_upload.rb` | `mount_uploader :import_file, ImportExportUploader` | :white_check_mark: |
| `app/models/import_export_upload.rb` | `mount_uploader :export_file, ImportExportUploader` | :white_check_mark: |
| `app/models/ci/deleted_object.rb` | `mount_uploader :file, DeletedObjectUploader` | |
| `app/models/design_management/action.rb` | `mount_uploader :image_v432x230, DesignManagement::DesignV432x230Uploader` | :white_check_mark: |
| `app/models/concerns/packages/debian/distribution.rb` | `mount_uploader :signed_file, Packages::Debian::DistributionReleaseFileUploader` | :white_check_mark: |
| `app/models/bulk_imports/export_upload.rb` | `mount_uploader :export_file, ExportUploader` | :white_check_mark: |
| `ee/app/models/user_permission_export_upload.rb` | `mount_uploader :file, AttachmentUploader` | |

View File

@ -106,7 +106,7 @@ sudo gitlab-rails runner -e production 'puts Gitlab::Database::BackgroundMigrati
```shell
cd /home/git/gitlab
sudo -u git -H bundle exec rails runner -e production 'puts Gitlab::BackgroundMigration.remaining'
sudo -u git -H bundle exec rails runner -e production 'puts Gitlab::Database::BackgroundMigrationJob.pending'
sudo -u git -H bundle exec rails runner -e production 'puts Gitlab::Database::BackgroundMigrationJob.pending.count'
```
### Batched background migrations

View File

@ -4696,6 +4696,9 @@ msgstr ""
msgid "Are you sure you want to delete this device? This action cannot be undone."
msgstr ""
msgid "Are you sure you want to delete this label?"
msgstr ""
msgid "Are you sure you want to delete this pipeline schedule?"
msgstr ""
@ -12140,7 +12143,7 @@ msgstr ""
msgid "DeploymentTarget|Mobile app store"
msgstr ""
msgid "DeploymentTarget|None"
msgid "DeploymentTarget|No deployment planned"
msgstr ""
msgid "DeploymentTarget|Other hosting service"
@ -41183,6 +41186,9 @@ msgstr ""
msgid "You are attempting to update a file that has changed since you started editing it."
msgstr ""
msgid "You are billed if you exceed this number. %{qsrOverageLinkStart}How does billing work?%{qsrOverageLinkEnd}"
msgstr ""
msgid "You are connected to the Prometheus server, but there is currently no data to display."
msgstr ""

View File

@ -50,6 +50,7 @@ module QA
def stop_primary_node
stop_node(@primary_node)
wait_until_node_is_removed_from_healthy_storages(@primary_node)
end
def start_primary_node
@ -67,6 +68,7 @@ module QA
def stop_secondary_node
stop_node(@secondary_node)
wait_until_node_is_removed_from_healthy_storages(@stop_secondary_node)
end
def start_secondary_node
@ -75,6 +77,7 @@ module QA
def stop_tertiary_node
stop_node(@tertiary_node)
wait_until_node_is_removed_from_healthy_storages(@tertiary_node)
end
def start_tertiary_node
@ -82,20 +85,39 @@ module QA
end
def start_node(name)
shell "docker start #{name}"
end
state = node_state(name)
return if state == "running"
if state == "paused"
shell "docker unpause #{name}"
end
if state == "stopped"
shell "docker start #{name}"
end
def stop_node(name)
shell "docker stop #{name}"
wait_until_shell_command_matches(
"docker inspect -f {{.State.Running}} #{name}",
/false/,
/true/,
sleep_interval: 3,
max_duration: 180,
retry_on_exception: true
)
end
def stop_node(name)
shell "docker pause #{name}"
end
def node_state(name)
state = "stopped"
wait_until_shell_command("docker inspect -f {{.State.Status}} #{name}") do |line|
QA::Runtime::Logger.debug(line)
break state = "running" if line.include?("running")
break state = "paused" if line.include?("paused")
end
end
def clear_replication_queue
QA::Runtime::Logger.info("Clearing the replication queue")
shell sql_to_docker_exec_cmd(
@ -204,9 +226,8 @@ module QA
def wait_for_praefect
QA::Runtime::Logger.info("Waiting for health check on praefect")
Support::Waiter.wait_until(max_duration: 120, sleep_interval: 1, raise_on_failure: true) do
# praefect runs a grpc server on port 2305, which will return an error 'Connection refused' until such time it is ready
wait_until_shell_command("docker exec #{@gitaly_cluster} bash -c 'curl #{@praefect}:2305'") do |line|
break if line.include?('curl: (1) Received HTTP/0.9 when not allowed')
wait_until_shell_command("docker exec #{@praefect} gitlab-ctl status praefect") do |line|
break true if line.include?('run: praefect: ')
QA::Runtime::Logger.debug(line.chomp)
end
@ -269,9 +290,8 @@ module QA
def wait_for_gitaly_health_check(node)
QA::Runtime::Logger.info("Waiting for health check on #{node}")
Support::Waiter.wait_until(max_duration: 120, sleep_interval: 1, raise_on_failure: true) do
# gitaly runs a grpc server on port 8075, which will return an error 'Connection refused' until such time it is ready
wait_until_shell_command("docker exec #{@praefect} bash -c 'curl #{node}:8075'") do |line|
break if line.include?('curl: (1) Received HTTP/0.9 when not allowed')
wait_until_shell_command("docker exec #{node} gitlab-ctl status gitaly") do |line|
break true if line.include?('run: gitaly: ')
QA::Runtime::Logger.debug(line.chomp)
end

View File

@ -9,37 +9,30 @@ module QA
project = nil
let(:intial_commit_message) { 'Initial commit' }
let(:first_added_commit_message) { 'pushed to primary gitaly node' }
let(:second_added_commit_message) { 'commit to failover node' }
let(:first_added_commit_message) { 'first_added_commit_message to primary gitaly node' }
let(:second_added_commit_message) { 'second_added_commit_message to failover node' }
before(:context) do
# Reset the cluster in case previous tests left it in a bad state
praefect_manager.start_all_nodes
project = Resource::Project.fabricate! do |project|
project.name = "gitaly_cluster"
project.initialize_with_readme = true
end
end
after do
praefect_manager.start_all_nodes
      # We need to ensure that the project is replicated to all nodes before proceeding with this test
praefect_manager.wait_for_replication(project.id)
end
it 'automatically fails over', testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/347830' do
# Create a new project with a commit and wait for it to replicate
# make sure that our project is published to the 'primary' node
# stop other nodes, so we can control which node the commit is sent to
praefect_manager.stop_secondary_node
praefect_manager.stop_tertiary_node
praefect_manager.wait_for_secondary_node_health_check_failure
praefect_manager.wait_for_tertiary_node_health_check_failure
Resource::Repository::ProjectPush.fabricate! do |push|
push.project = project
push.commit_message = first_added_commit_message
push.new_branch = false
push.file_content = "This should exist on all nodes"
push.file_content = 'This file created on gitaly1 while gitaly2/gitaly3 not running'
end
praefect_manager.start_all_nodes
@ -56,7 +49,7 @@ module QA
commit.add_files([
{
file_path: "file-#{SecureRandom.hex(8)}",
content: 'This should exist on one node before reconciliation'
content: 'This is created on gitaly2/gitaly3 while gitaly1 is unavailable'
}
])
end

View File

@ -4,7 +4,7 @@ require 'parallel'
module QA
RSpec.describe 'Create' do
context 'Gitaly Cluster replication queue', :orchestrated, :gitaly_cluster, :skip_live_env, quarantine: { issue: 'https://gitlab.com/gitlab-org/gitlab/-/issues/346453', type: :flaky } do
context 'Gitaly Cluster replication queue', :orchestrated, :gitaly_cluster, :skip_live_env do
let(:praefect_manager) { Service::PraefectManager.new }
let(:project) do
Resource::Project.fabricate! do |project|
@ -15,12 +15,10 @@ module QA
before do
praefect_manager.start_all_nodes
praefect_manager.start_praefect
end
after do
praefect_manager.start_all_nodes
praefect_manager.start_praefect
praefect_manager.clear_replication_queue
end

View File

@ -68,9 +68,10 @@ module QA
mr.iid = merge_request[:iid]
end
expect(merge_request.state).to eq('opened')
expect(merge_request.merge_status).to eq('checking')
expect(merge_request.merge_when_pipeline_succeeds).to be true
aggregate_failures do
expect(merge_request.state).to eq('opened')
expect(merge_request.merge_when_pipeline_succeeds).to be true
end
end
it 'merges when pipeline succeeds', testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/347842' do

View File

@ -3,7 +3,37 @@
require "spec_helper"
RSpec.describe Projects::RepositoriesController do
let(:project) { create(:project, :repository) }
let_it_be(:project) { create(:project, :repository) }
describe 'POST create' do
let_it_be(:user) { create(:user) }
let(:request) { post :create, params: { namespace_id: project.namespace, project_id: project } }
before do
project.add_maintainer(user)
sign_in(user)
end
context 'when repository does not exist' do
let!(:project) { create(:project) }
it 'creates the repository' do
expect { request }.to change { project.repository.raw_repository.exists? }.from(false).to(true)
expect(response).to be_redirect
end
end
context 'when repository already exists' do
it 'does not raise an exception' do
expect(Gitlab::ErrorTracking).not_to receive(:track_exception)
request
expect(response).to be_redirect
end
end
end
describe "GET archive" do
before do

View File

@ -1,14 +1,12 @@
# frozen_string_literal: true
require_relative '../support/helpers/key_generator_helper'
FactoryBot.define do
factory :key do
title
key { Spec::Support::Helpers::KeyGeneratorHelper.new(1024).generate + ' dummy@gitlab.com' }
key { SSHData::PrivateKey::RSA.generate(1024, unsafe_allow_small_key: true).public_key.openssh(comment: 'dummy@gitlab.com') }
factory :key_without_comment do
key { Spec::Support::Helpers::KeyGeneratorHelper.new(1024).generate }
key { SSHData::PrivateKey::RSA.generate(1024, unsafe_allow_small_key: true).public_key.openssh }
end
factory :deploy_key, class: 'DeployKey'

View File

@ -0,0 +1,7 @@
# frozen_string_literal: true

FactoryBot.define do
  # A NamespaceStatistics record associated with a freshly-built namespace.
  factory :namespace_statistics do
    namespace factory: :namespace
  end
end

View File

@ -0,0 +1,53 @@
import { GlAvatar } from '@gitlab/ui';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import RunnerAssignedItem from '~/runner/components/runner_assigned_item.vue';

const mockHref = '/group/project';
const mockName = 'Project';
const mockFullName = 'Group / Project';
const mockAvatarUrl = '/avatar.png';

describe('RunnerAssignedItem', () => {
  let wrapper;

  // The avatar link carries data-testid="item-avatar" in the component.
  const findAvatar = () => wrapper.findByTestId('item-avatar');

  const createComponent = ({ props = {} } = {}) => {
    const defaultProps = {
      href: mockHref,
      name: mockName,
      fullName: mockFullName,
      avatarUrl: mockAvatarUrl,
    };

    wrapper = shallowMountExtended(RunnerAssignedItem, {
      propsData: { ...defaultProps, ...props },
    });
  };

  beforeEach(() => {
    createComponent();
  });

  afterEach(() => {
    wrapper.destroy();
  });

  it('Shows an avatar', () => {
    const avatar = findAvatar();

    expect(avatar.attributes('href')).toBe(mockHref);
    expect(avatar.findComponent(GlAvatar).props()).toMatchObject({
      alt: mockName,
      entityName: mockName,
      src: mockAvatarUrl,
      shape: 'rect',
      size: 48,
    });
  });

  it('Shows an item link', () => {
    const itemLink = wrapper.findByText(mockFullName);

    expect(itemLink.attributes('href')).toBe(mockHref);
  });
});

View File

@ -1,7 +1,7 @@
import { GlAvatar } from '@gitlab/ui';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import RunnerDetailGroups from '~/runner/components/runner_detail_groups.vue';
import RunnerAssignedItem from '~/runner/components/runner_assigned_item.vue';
import { runnerData, runnerWithGroupData } from '../mock_data';
@ -13,7 +13,7 @@ describe('RunnerDetailGroups', () => {
let wrapper;
const findHeading = () => wrapper.find('h3');
const findGroupAvatar = () => wrapper.findByTestId('group-avatar');
const findRunnerAssignedItems = () => wrapper.findAllComponents(RunnerAssignedItem);
const createComponent = ({ runner = mockGroupRunner, mountFn = shallowMountExtended } = {}) => {
wrapper = mountFn(RunnerDetailGroups, {
@ -33,29 +33,24 @@ describe('RunnerDetailGroups', () => {
expect(findHeading().text()).toBe('Assigned Group');
});
describe('When there is group runner', () => {
describe('When there is a group runner', () => {
beforeEach(() => {
createComponent();
});
it('Shows a group avatar', () => {
const avatar = findGroupAvatar();
it('Shows a project', () => {
createComponent();
expect(avatar.attributes('href')).toBe(mockGroup.webUrl);
expect(avatar.findComponent(GlAvatar).props()).toMatchObject({
alt: mockGroup.name,
entityName: mockGroup.name,
src: mockGroup.avatarUrl,
shape: 'rect',
size: 48,
const item = findRunnerAssignedItems().at(0);
const { webUrl, name, fullName, avatarUrl } = mockGroup;
expect(item.props()).toMatchObject({
href: webUrl,
name,
fullName,
avatarUrl,
});
});
it('Shows a group link', () => {
const groupFullName = wrapper.findByText(mockGroup.fullName);
expect(groupFullName.attributes('href')).toBe(mockGroup.webUrl);
});
});
describe('When there are no groups', () => {

View File

@ -28,24 +28,24 @@ RSpec.describe Namespace::RootStorageStatistics, type: :model do
let(:project1) { create(:project, namespace: namespace) }
let(:project2) { create(:project, namespace: namespace) }
let!(:stat1) { create(:project_statistics, project: project1, with_data: true, size_multiplier: 100) }
let!(:stat2) { create(:project_statistics, project: project2, with_data: true, size_multiplier: 200) }
let!(:project_stat1) { create(:project_statistics, project: project1, with_data: true, size_multiplier: 100) }
let!(:project_stat2) { create(:project_statistics, project: project2, with_data: true, size_multiplier: 200) }
shared_examples 'data refresh' do
shared_examples 'project data refresh' do
it 'aggregates project statistics' do
root_storage_statistics.recalculate!
root_storage_statistics.reload
total_repository_size = stat1.repository_size + stat2.repository_size
total_wiki_size = stat1.wiki_size + stat2.wiki_size
total_lfs_objects_size = stat1.lfs_objects_size + stat2.lfs_objects_size
total_build_artifacts_size = stat1.build_artifacts_size + stat2.build_artifacts_size
total_packages_size = stat1.packages_size + stat2.packages_size
total_storage_size = stat1.storage_size + stat2.storage_size
total_snippets_size = stat1.snippets_size + stat2.snippets_size
total_pipeline_artifacts_size = stat1.pipeline_artifacts_size + stat2.pipeline_artifacts_size
total_uploads_size = stat1.uploads_size + stat2.uploads_size
total_repository_size = project_stat1.repository_size + project_stat2.repository_size
total_wiki_size = project_stat1.wiki_size + project_stat2.wiki_size
total_lfs_objects_size = project_stat1.lfs_objects_size + project_stat2.lfs_objects_size
total_build_artifacts_size = project_stat1.build_artifacts_size + project_stat2.build_artifacts_size
total_packages_size = project_stat1.packages_size + project_stat2.packages_size
total_storage_size = project_stat1.storage_size + project_stat2.storage_size
total_snippets_size = project_stat1.snippets_size + project_stat2.snippets_size
total_pipeline_artifacts_size = project_stat1.pipeline_artifacts_size + project_stat2.pipeline_artifacts_size
total_uploads_size = project_stat1.uploads_size + project_stat2.uploads_size
expect(root_storage_statistics.repository_size).to eq(total_repository_size)
expect(root_storage_statistics.wiki_size).to eq(total_wiki_size)
@ -83,7 +83,7 @@ RSpec.describe Namespace::RootStorageStatistics, type: :model do
end
end
it_behaves_like 'data refresh'
it_behaves_like 'project data refresh'
it_behaves_like 'does not include personal snippets'
context 'with subgroups' do
@ -93,19 +93,81 @@ RSpec.describe Namespace::RootStorageStatistics, type: :model do
let(:project1) { create(:project, namespace: subgroup1) }
let(:project2) { create(:project, namespace: subgroup2) }
it_behaves_like 'data refresh'
it_behaves_like 'project data refresh'
it_behaves_like 'does not include personal snippets'
end
# Group hierarchy under test:
#   root_group ─┬─ group1 ── subgroup1
#               └─ group2
# Every level has its own NamespaceStatistics row, so recalculate! must
# aggregate over the root and all of its descendants.
context 'with a group namespace' do
let_it_be(:root_group) { create(:group) }
let_it_be(:group1) { create(:group, parent: root_group) }
let_it_be(:subgroup1) { create(:group, parent: group1) }
let_it_be(:group2) { create(:group, parent: root_group) }
let_it_be(:root_namespace_stat) { create(:namespace_statistics, namespace: root_group, storage_size: 100, dependency_proxy_size: 100) }
let_it_be(:group1_namespace_stat) { create(:namespace_statistics, namespace: group1, storage_size: 200, dependency_proxy_size: 200) }
let_it_be(:group2_namespace_stat) { create(:namespace_statistics, namespace: group2, storage_size: 300, dependency_proxy_size: 300) }
let_it_be(:subgroup1_namespace_stat) { create(:namespace_statistics, namespace: subgroup1, storage_size: 300, dependency_proxy_size: 100) }
let(:namespace) { root_group }
it 'aggregates namespace statistics' do
# This group is not a descendant of the root_group so it shouldn't be included in the final stats.
other_group = create(:group)
create(:namespace_statistics, namespace: other_group, storage_size: 500, dependency_proxy_size: 500)
root_storage_statistics.recalculate!
# Project-level totals come from the two projects' ProjectStatistics rows.
total_repository_size = project_stat1.repository_size + project_stat2.repository_size
total_lfs_objects_size = project_stat1.lfs_objects_size + project_stat2.lfs_objects_size
total_build_artifacts_size = project_stat1.build_artifacts_size + project_stat2.build_artifacts_size
total_packages_size = project_stat1.packages_size + project_stat2.packages_size
total_snippets_size = project_stat1.snippets_size + project_stat2.snippets_size
total_pipeline_artifacts_size = project_stat1.pipeline_artifacts_size + project_stat2.pipeline_artifacts_size
total_uploads_size = project_stat1.uploads_size + project_stat2.uploads_size
total_wiki_size = project_stat1.wiki_size + project_stat2.wiki_size
# Namespace-level totals come from the NamespaceStatistics rows of the
# root group and all descendant groups (not from other_group).
total_dependency_proxy_size = root_namespace_stat.dependency_proxy_size + group1_namespace_stat.dependency_proxy_size + group2_namespace_stat.dependency_proxy_size + subgroup1_namespace_stat.dependency_proxy_size
# storage_size is the only column that mixes both sources.
total_storage_size = project_stat1.storage_size + project_stat2.storage_size + root_namespace_stat.storage_size + group1_namespace_stat.storage_size + group2_namespace_stat.storage_size + subgroup1_namespace_stat.storage_size
expect(root_storage_statistics.repository_size).to eq(total_repository_size)
expect(root_storage_statistics.lfs_objects_size).to eq(total_lfs_objects_size)
expect(root_storage_statistics.build_artifacts_size).to eq(total_build_artifacts_size)
expect(root_storage_statistics.packages_size).to eq(total_packages_size)
expect(root_storage_statistics.snippets_size).to eq(total_snippets_size)
expect(root_storage_statistics.pipeline_artifacts_size).to eq(total_pipeline_artifacts_size)
expect(root_storage_statistics.uploads_size).to eq(total_uploads_size)
expect(root_storage_statistics.dependency_proxy_size).to eq(total_dependency_proxy_size)
expect(root_storage_statistics.wiki_size).to eq(total_wiki_size)
expect(root_storage_statistics.storage_size).to eq(total_storage_size)
end
# Regression-style check: aggregation must not blow up when only project
# statistics exist.
it 'works when there are no namespace statistics' do
NamespaceStatistics.delete_all
root_storage_statistics.recalculate!
total_storage_size = project_stat1.storage_size + project_stat2.storage_size
expect(root_storage_statistics.storage_size).to eq(total_storage_size)
end
end
context 'with a personal namespace' do
let_it_be(:user) { create(:user) }
let(:namespace) { user.namespace }
it_behaves_like 'data refresh'
it_behaves_like 'project data refresh'
it 'does not aggregate namespace statistics' do
create(:namespace_statistics, namespace: user.namespace, storage_size: 200, dependency_proxy_size: 200)
root_storage_statistics.recalculate!
expect(root_storage_statistics.storage_size).to eq(project_stat1.storage_size + project_stat2.storage_size)
expect(root_storage_statistics.dependency_proxy_size).to eq(0)
end
context 'when user has personal snippets' do
let(:total_project_snippets_size) { stat1.snippets_size + stat2.snippets_size }
let(:total_project_snippets_size) { project_stat1.snippets_size + project_stat2.snippets_size }
it 'aggregates personal and project snippets size' do
# This is just a a snippet authored by other user

View File

@ -23,6 +23,7 @@ RSpec.describe Namespace do
# Association matchers (shoulda-matchers): Namespace must expose these
# has_one / has_many relations.
it { is_expected.to have_one :root_storage_statistics }
it { is_expected.to have_one :aggregation_schedule }
it { is_expected.to have_one :namespace_settings }
it { is_expected.to have_one(:namespace_statistics) }
it { is_expected.to have_many :custom_emoji }
it { is_expected.to have_one :package_setting_relation }
it { is_expected.to have_one :onboarding_progress }

View File

@ -0,0 +1,207 @@
# frozen_string_literal: true
require 'spec_helper'
# Model spec for NamespaceStatistics: refresh of the dependency proxy size,
# derivation of storage_size, and the save/destroy callbacks that schedule
# root storage statistics aggregation for group namespaces.
RSpec.describe NamespaceStatistics do
  let_it_be(:user) { create(:user) }
  let_it_be(:group) { create(:group) }

  it { is_expected.to belong_to(:namespace) }
  it { is_expected.to validate_presence_of(:namespace) }

  describe '#refresh!' do
    let(:namespace) { group }
    let(:statistics) { create(:namespace_statistics, namespace: namespace) }
    let(:columns) { [] }

    subject(:refresh!) { statistics.refresh!(only: columns) }

    context 'when database is read_only' do
      it 'does not save the object' do
        allow(Gitlab::Database).to receive(:read_only?).and_return(true)

        expect(statistics).not_to receive(:save!)

        refresh!
      end
    end

    # NOTE(review): grammar fix only — was 'when namespace belong to a user'.
    context 'when namespace belongs to a user' do
      let(:namespace) { user.namespace }

      it 'does not save the object' do
        expect(statistics).not_to receive(:save!)

        refresh!
      end
    end

    shared_examples 'creates the namespace statistics' do
      specify do
        expect(statistics).to receive(:save!)

        refresh!
      end
    end

    context 'when invalid option is passed' do
      let(:columns) { [:foo] }

      it 'does not update any column' do
        create(:dependency_proxy_manifest, group: namespace, size: 50)

        expect(statistics).not_to receive(:update_dependency_proxy_size)
        expect { refresh! }.not_to change { statistics.reload.storage_size }
      end

      it_behaves_like 'creates the namespace statistics'
    end

    context 'when no option is passed' do
      it 'updates the dependency proxy size' do
        expect(statistics).to receive(:update_dependency_proxy_size)

        refresh!
      end

      it_behaves_like 'creates the namespace statistics'
    end

    context 'when dependency_proxy_size option is passed' do
      let(:columns) { [:dependency_proxy_size] }

      it 'updates the dependency proxy size' do
        expect(statistics).to receive(:update_dependency_proxy_size)

        refresh!
      end

      it_behaves_like 'creates the namespace statistics'
    end
  end

  describe '#update_storage_size' do
    let_it_be(:statistics, reload: true) { create(:namespace_statistics, namespace: group) }

    it 'sets storage_size to the dependency_proxy_size' do
      statistics.dependency_proxy_size = 3

      statistics.update_storage_size

      expect(statistics.storage_size).to eq 3
    end
  end

  describe '#update_dependency_proxy_size' do
    let_it_be(:statistics, reload: true) { create(:namespace_statistics, namespace: group) }
    let_it_be(:dependency_proxy_manifest) { create(:dependency_proxy_manifest, group: group, size: 50) }
    let_it_be(:dependency_proxy_blob) { create(:dependency_proxy_blob, group: group, size: 50) }

    subject(:update_dependency_proxy_size) { statistics.update_dependency_proxy_size }

    it 'updates the dependency proxy size' do
      update_dependency_proxy_size

      # 50 (manifest) + 50 (blob)
      expect(statistics.dependency_proxy_size).to eq 100
    end

    context 'when namespace does not belong to a group' do
      let(:statistics) { create(:namespace_statistics, namespace: user.namespace) }

      it 'does not update the dependency proxy size' do
        update_dependency_proxy_size

        expect(statistics.dependency_proxy_size).to be_zero
      end
    end
  end

  context 'before saving statistics' do
    let(:statistics) { create(:namespace_statistics, namespace: group, dependency_proxy_size: 10) }

    it 'updates storage size' do
      expect(statistics).to receive(:update_storage_size).and_call_original

      statistics.save!

      expect(statistics.storage_size).to eq 10
    end
  end

  context 'after saving statistics', :aggregate_failures do
    let(:statistics) { create(:namespace_statistics, namespace: namespace) }
    let(:namespace) { group }

    context 'when storage_size is not updated' do
      it 'does not enqueue the job to update root storage statistics' do
        expect(statistics).not_to receive(:update_root_storage_statistics)
        expect(Namespaces::ScheduleAggregationWorker).not_to receive(:perform_async)

        statistics.save!
      end
    end

    context 'when storage_size is updated' do
      before do
        # we have to update this value instead of `storage_size` because the before_save
        # hook we have. If we don't do it, storage_size will be set to the dependency_proxy_size value
        # which is 0.
        statistics.dependency_proxy_size = 10
      end

      it 'enqueues the job to update root storage statistics' do
        expect(statistics).to receive(:update_root_storage_statistics).and_call_original
        expect(Namespaces::ScheduleAggregationWorker).to receive(:perform_async).with(group.id)

        statistics.save!
      end

      context 'when namespace does not belong to a group' do
        let(:namespace) { user.namespace }

        it 'does not enqueue the job to update root storage statistics' do
          expect(statistics).to receive(:update_root_storage_statistics).and_call_original
          expect(Namespaces::ScheduleAggregationWorker).not_to receive(:perform_async)

          statistics.save!
        end
      end
    end

    context 'when other columns are updated' do
      it 'does not enqueue the job to update root storage statistics' do
        # Touch every column except the ids and the *_size ones, so the
        # storage_size-change callback must not fire.
        columns_to_update = NamespaceStatistics.columns_hash.reject { |k, _| %w(id namespace_id).include?(k) || k.include?('_size') }.keys
        columns_to_update.each { |c| statistics[c] = 10 }

        expect(statistics).not_to receive(:update_root_storage_statistics)
        expect(Namespaces::ScheduleAggregationWorker).not_to receive(:perform_async)

        statistics.save!
      end
    end
  end

  context 'after destroy statistics', :aggregate_failures do
    let(:statistics) { create(:namespace_statistics, namespace: namespace) }
    let(:namespace) { group }

    it 'enqueues the job to update root storage statistics' do
      expect(statistics).to receive(:update_root_storage_statistics).and_call_original
      expect(Namespaces::ScheduleAggregationWorker).to receive(:perform_async).with(group.id)

      statistics.destroy!
    end

    # Fix: this context exercises a *user* namespace (no aggregation worker
    # expected), but was previously described with the inverted label
    # 'when namespace belongs to a group'. Mirrors the parallel context in
    # 'after saving statistics'.
    context 'when namespace does not belong to a group' do
      let(:namespace) { user.namespace }

      it 'does not enqueue the job to update root storage statistics' do
        expect(statistics).to receive(:update_root_storage_statistics).and_call_original
        expect(Namespaces::ScheduleAggregationWorker).not_to receive(:perform_async)

        statistics.destroy!
      end
    end
  end
end

View File

@ -79,6 +79,29 @@ RSpec.describe ProjectImportState, type: :model do
expect(import_state.last_error).to eq(error_message)
end
# mark_as_failed should discard the stored import payload (which can hold
# credentials) once the import has failed.
it 'removes project import data' do
import_data = ProjectImportData.new(data: { 'test' => 'some data' })
project = create(:project, import_data: import_data)
import_state = create(:import_state, :started, project: project)
expect do
import_state.mark_as_failed(error_message)
end.to change { project.reload.import_data }.from(import_data).to(nil)
end
context 'when remove_import_data_on_failure feature flag is disabled' do
  # With the flag off, mark_as_failed must leave the import payload intact.
  # Fix: the example was previously described as 'removes project import
  # data', contradicting its own `not_to change` expectation.
  it 'does not remove project import data' do
    stub_feature_flags(remove_import_data_on_failure: false)

    project = create(:project, import_data: ProjectImportData.new(data: { 'test' => 'some data' }))
    import_state = create(:import_state, :started, project: project)

    expect do
      import_state.mark_as_failed(error_message)
    end.not_to change { project.reload.import_data }
  end
end
end
describe '#human_status_name' do

View File

@ -168,6 +168,48 @@ RSpec.describe Issues::MoveService do
end
end
# Moving an issue should carry its CRM contacts over only when the target
# project shares the source's root group (contacts are group-scoped) and
# the customer_relations feature is enabled.
context 'issue with contacts' do
  let_it_be(:contacts) { create_list(:contact, 2, group: group) }

  before do
    old_issue.customer_relations_contacts = contacts
  end

  it 'preserves contacts' do
    new_issue = move_service.execute(old_issue, new_project)

    expect(new_issue.customer_relations_contacts).to eq(contacts)
  end

  context 'when moving to another root group' do
    let(:another_project) { create(:project, namespace: create(:group)) }

    before do
      another_project.add_reporter(user)
    end

    it 'does not preserve contacts' do
      new_issue = move_service.execute(old_issue, another_project)

      expect(new_issue.customer_relations_contacts).to be_empty
    end
  end

  context 'when customer_relations feature is disabled' do
    # Fix: removed an unused lazy `let(:another_project)` — this example
    # moves the issue to `new_project`, so the let was dead code.
    before do
      stub_feature_flags(customer_relations: false)
    end

    it 'does not preserve contacts' do
      new_issue = move_service.execute(old_issue, new_project)

      expect(new_issue.customer_relations_contacts).to be_empty
    end
  end
end
context 'moving to same project' do
let(:new_project) { old_project }

View File

@ -1,44 +0,0 @@
# frozen_string_literal: true

module Spec
  module Support
    module Helpers
      # Generates throwaway RSA public keys in the OpenSSH `.pub` /
      # `known_hosts` wire format ("ssh-rsa AAAA..."), for specs that need a
      # syntactically valid key without shelling out to ssh-keygen.
      #
      # Fix: renamed the one-letter `n` parameter of encode_mpi and dropped
      # the Naming/UncommunicativeMethodParamName rubocop:disable it needed.
      class KeyGeneratorHelper
        # The components in a openssh .pub / known_host RSA public key:
        # the literal key-type string, then the public exponent and modulus.
        RSA_COMPONENTS = ['ssh-rsa', :e, :n].freeze

        attr_reader :size

        # size - RSA modulus size in bits (defaults to 2048).
        def initialize(size = 2048)
          @size = size
        end

        # Returns the public key as a single-line openssh-formatted String.
        def generate
          key = OpenSSL::PKey::RSA.generate(size)
          components = RSA_COMPONENTS.map do |component|
            key.respond_to?(component) ? encode_mpi(key.public_send(component)) : component
          end

          # Ruby tries to be helpful and adds new lines every 60 bytes :(
          'ssh-rsa ' + [pack_pubkey_components(components)].pack('m').delete("\n")
        end

        private

        # Encodes an openssh-mpi-encoded integer (RFC 4251 "mpint"):
        # big-endian bytes, prefixed with a zero byte when the most
        # significant bit is set so the value is not read as negative.
        def encode_mpi(value)
          bytes = []
          value = value.to_i
          while value != 0
            bytes << (value & 0xff)
            value >>= 8
          end
          bytes << 0 if bytes.empty? || bytes.last >= 0x80
          bytes.reverse.pack('C*')
        end

        # Packs string components into an openssh-encoded pubkey: each
        # component is prefixed with its length as a 32-bit big-endian int.
        def pack_pubkey_components(strings)
          strings.map { |string| [string.length].pack('N') + string }.join
        end
      end
    end
  end
end