Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2022-09-08 15:12:29 +00:00
parent 8fea353b90
commit f8c7f38d02
51 changed files with 862 additions and 334 deletions

View File

@ -9,23 +9,38 @@ workhorse:verify:
.workhorse:test:
extends: .workhorse:rules:workhorse
image: ${REGISTRY_HOST}/${REGISTRY_GROUP}/gitlab-build-images/debian-${DEBIAN_VERSION}-ruby-${RUBY_VERSION}-golang-${GO_VERSION}:git-2.36
variables:
GITALY_ADDRESS: "tcp://127.0.0.1:8075"
GO_VERSION: "1.17"
stage: test
needs:
- setup-test-env
script:
before_script:
- go version
- apt-get update && apt-get -y install libimage-exiftool-perl
- scripts/gitaly-test-build
script:
- make -C workhorse test
workhorse:test using go 1.17:
workhorse:test go:
extends: .workhorse:test
image: ${REGISTRY_HOST}/${REGISTRY_GROUP}/gitlab-build-images:debian-${DEBIAN_VERSION}-ruby-${RUBY_VERSION}-golang-1.17-git-2.31
parallel:
matrix:
- GO_VERSION: ["1.17", "1.18"]
script:
- make -C workhorse test-coverage
coverage: '/\d+.\d+%/'
artifacts:
paths:
- workhorse/coverage.html
workhorse:test using go 1.17 with FIPS:
workhorse:test fips:
extends: .workhorse:test
variables:
WORKHORSE_TEST_FIPS_ENABLED: 1
image: ${REGISTRY_HOST}/${REGISTRY_GROUP}/gitlab-build-images:debian-${DEBIAN_VERSION}-ruby-${RUBY_VERSION}-golang-1.17-git-2.31
workhorse:test race:
extends: .workhorse:test
script:
- make -C workhorse test-race

View File

@ -27,7 +27,7 @@ Broken metrics issues are marked with the ~"broken metric" label.
1. Note which bastion host machine was assigned. For example: `<username>@bastion-01-inf-gprd.c.gitlab-production.internal:~$` shows that you are connected to `bastion-01-inf-gprd.c.gitlab-production.internal`.
1. Create a named screen: `screen -S $USER-service-ping-$(date +%F)`.
1. Connect to the console host: `ssh $USER-rails@console-01-sv-gprd.c.gitlab-production.internal`.
1. Run: `ServicePing::SubmitService.new.execute`.
1. Run: `GitlabServicePingWorker.new.perform('triggered_from_cron' => false)`.
1. Press <kbd>Control</kbd>+<kbd>a</kbd> followed by <kbd>Control</kbd>+<kbd>d</kbd> to detach from the screen session.
1. Exit from the bastion: `exit`.
@ -58,12 +58,12 @@ OR
## Service Ping process triggering (through a long-running SSH session)
1. Connect to the `gprd` Rails console.
1. Run `SubmitUsagePingService.new.execute`. This process requires more than 30 hours to complete.
1. Run `GitlabServicePingWorker.new.perform('triggered_from_cron' => false)`. This process requires more than 30 hours to complete.
1. Find the last payload in the `raw_usage_data` table: `RawUsageData.last.payload`.
1. Check when the payload was sent: `RawUsageData.last.sent_at`.
```plaintext
ServicePing::SubmitService.new.execute
GitlabServicePingWorker.new.perform('triggered_from_cron' => false)
# Get the payload
RawUsageData.last.payload

View File

@ -11,6 +11,7 @@ query getJobs($fullPath: ID!, $after: String, $first: Int = 30, $statuses: [CiJo
}
nodes {
artifacts {
# eslint-disable-next-line @graphql-eslint/require-id-when-available
nodes {
downloadPath
fileType

View File

@ -11,6 +11,7 @@ query getPipelineJobs($fullPath: ID!, $iid: ID!, $after: String) {
}
nodes {
artifacts {
# eslint-disable-next-line @graphql-eslint/require-id-when-available
nodes {
downloadPath
fileType

View File

@ -0,0 +1,61 @@
<script>
import { GlBadge } from '@gitlab/ui';
import { s__ } from '~/locale';
export const i18n = {
defaultLabel: s__('BranchRules|default'),
protectedLabel: s__('BranchRules|protected'),
};
export default {
name: 'BranchRule',
i18n,
components: {
GlBadge,
},
props: {
name: {
type: String,
required: true,
},
isDefault: {
type: Boolean,
required: false,
default: false,
},
isProtected: {
type: Boolean,
required: false,
default: false,
},
approvalDetails: {
type: Array,
required: false,
default: () => [],
},
},
computed: {
hasApprovalDetails() {
return this.approvalDetails && this.approvalDetails.length;
},
},
};
</script>
<template>
<div class="gl-border-b gl-pt-5 gl-pb-5">
<strong class="gl-font-monospace">{{ name }}</strong>
<gl-badge v-if="isDefault" variant="info" size="sm" class="gl-ml-2">{{
$options.i18n.defaultLabel
}}</gl-badge>
<gl-badge v-if="isProtected" variant="success" size="sm" class="gl-ml-2">{{
$options.i18n.protectedLabel
}}</gl-badge>
<ul v-if="hasApprovalDetails" class="gl-pl-6 gl-mt-2 gl-mb-0 gl-text-gray-500">
<li v-for="(detail, index) in approvalDetails" :key="index">{{ detail }}</li>
</ul>
</div>
</template>

View File

@ -14,6 +14,7 @@ query securityReportDownloadPaths(
id
name
artifacts {
# eslint-disable-next-line @graphql-eslint/require-id-when-available
nodes {
downloadPath
fileType

View File

@ -4,6 +4,7 @@ query getPipelineCorpuses($projectPath: ID!, $iid: ID, $reportTypes: [SecurityRe
project(fullPath: $projectPath) {
id
pipeline(iid: $iid) {
# eslint-disable-next-line @graphql-eslint/require-id-when-available
...JobArtifacts
}
}

View File

@ -6,6 +6,9 @@ module Types
class JobArtifactType < BaseObject
graphql_name 'CiJobArtifact'
field :id, Types::GlobalIDType[::Ci::JobArtifact], null: false,
description: 'ID of the artifact.'
field :download_path, GraphQL::Types::String, null: true,
description: "URL for downloading the artifact's file."
@ -16,6 +19,12 @@ module Types
description: 'File name of the artifact.',
method: :filename
field :size, GraphQL::Types::Int, null: false,
description: 'Size of the artifact in bytes.'
field :expire_at, Types::TimeType, null: true,
description: 'Expiry date of the artifact.'
def download_path
::Gitlab::Routing.url_helpers.download_project_job_artifacts_path(
object.project,

View File

@ -566,7 +566,7 @@ module Types
project.container_repositories.size
end
def ci_config_variables(sha)
def ci_config_variables(sha:)
result = ::Ci::ListConfigVariablesService.new(object, context[:current_user]).execute(sha)
return if result.nil?

View File

@ -9,6 +9,7 @@ module Ci
include UsageStatistics
include Sortable
include Artifactable
include Lockable
include FileStoreMounter
include EachBatch
include Gitlab::Utils::StrongMemoize
@ -222,17 +223,6 @@ module Ci
hashed_path: 2
}
# `locked` will be populated from the source of truth on Ci::Pipeline
# in order to clean up expired job artifacts in a performant way.
# The values should be the same as `Ci::Pipeline.lockeds` with the
# additional value of `unknown` to indicate rows that have not
# yet been populated from the parent Ci::Pipeline
enum locked: {
unlocked: 0,
artifacts_locked: 1,
unknown: 2
}, _prefix: :artifact
def validate_file_format!
unless TYPE_AND_FORMAT_PAIRS[self.file_type&.to_sym] == self.file_format&.to_sym
errors.add(:base, _('Invalid file format with specified file type'))

View File

@ -7,6 +7,7 @@ module Ci
include UpdateProjectStatistics
include Artifactable
include FileStoreMounter
include Lockable
include Presentable
FILE_SIZE_LIMIT = 10.megabytes.freeze

View File

@ -0,0 +1,20 @@
# frozen_string_literal: true
module Ci
module Lockable
extend ActiveSupport::Concern
included do
# `locked` will be populated from the source of truth on Ci::Pipeline
# in order to clean up expired job artifacts in a performant way.
# The values should be the same as `Ci::Pipeline.lockeds` with the
# additional value of `unknown` to indicate rows that have not
# yet been populated from the parent Ci::Pipeline
enum locked: {
unlocked: 0,
artifacts_locked: 1,
unknown: 2
}, _prefix: :artifact
end
end
end
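
Because the enum now lives in a shared concern with `_prefix: :artifact`, Rails generates the same prefixed helpers on every including model (both `Ci::JobArtifact` and `Ci::PipelineArtifact`). A minimal console sketch of what the concern provides, with assumed values:

```ruby
# Helpers generated by `enum locked: { ... }, _prefix: :artifact`
# for any model that includes Ci::Lockable (illustrative values).
artifact = Ci::PipelineArtifact.new(locked: :unknown)

artifact.artifact_unknown?          # => true
artifact.locked = :unlocked         # enums accept symbols as well as integers

Ci::JobArtifact.artifact_unlocked   # scope equivalent to WHERE locked = 0
Ci::JobArtifact.lockeds             # => { "unlocked" => 0, "artifacts_locked" => 1, "unknown" => 2 }
```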

View File

@ -18,41 +18,20 @@ module ServicePing
def execute
return unless ServicePing::ServicePingSettings.product_intelligence_enabled?
start = Time.current
start_time = Time.current
begin
usage_data = payload || ServicePing::BuildPayload.new.execute
response = submit_usage_data_payload(usage_data)
response = submit_usage_data_payload
raise SubmissionError, "Unsuccessful response code: #{response.code}" unless response.success?
handle_response(response)
submit_metadata_payload
rescue StandardError => e
return unless Gitlab::CurrentSettings.usage_ping_enabled?
submit_error_payload(e, start_time)
error_payload = {
time: Time.current,
uuid: Gitlab::CurrentSettings.uuid,
hostname: Gitlab.config.gitlab.host,
version: Gitlab.version_info.to_s,
message: "#{e.message.presence || e.class} at #{e.backtrace[0]}",
elapsed: (Time.current - start).round(1)
}
submit_payload({ error: error_payload }, path: ERROR_PATH)
usage_data = payload || Gitlab::Usage::ServicePingReport.for(output: :all_metrics_values)
response = submit_usage_data_payload(usage_data)
raise
end
version_usage_data_id =
response.dig('conv_index', 'usage_data_id') || response.dig('dev_ops_score', 'usage_data_id')
unless version_usage_data_id.is_a?(Integer) && version_usage_data_id > 0
raise SubmissionError, "Invalid usage_data_id in response: #{version_usage_data_id}"
end
unless skip_db_write
raw_usage_data = save_raw_usage_data(usage_data)
raw_usage_data.update_version_metadata!(usage_data_id: version_usage_data_id)
ServicePing::DevopsReport.new(response).execute
end
submit_payload(metadata(usage_data), path: METADATA_PATH)
end
private
@ -90,14 +69,43 @@ module ServicePing
)
end
def submit_usage_data_payload(usage_data)
raise SubmissionError, 'Usage data is blank' if usage_data.blank?
def submit_usage_data_payload
raise SubmissionError, 'Usage data payload is blank' if payload.blank?
response = submit_payload(usage_data)
submit_payload(payload)
end
raise SubmissionError, "Unsuccessful response code: #{response.code}" unless response.success?
def handle_response(response)
version_usage_data_id =
response.dig('conv_index', 'usage_data_id') || response.dig('dev_ops_score', 'usage_data_id')
response
unless version_usage_data_id.is_a?(Integer) && version_usage_data_id > 0
raise SubmissionError, "Invalid usage_data_id in response: #{version_usage_data_id}"
end
return if skip_db_write
raw_usage_data = save_raw_usage_data(payload)
raw_usage_data.update_version_metadata!(usage_data_id: version_usage_data_id)
ServicePing::DevopsReport.new(response).execute
end
def submit_error_payload(error, start_time)
current_time = Time.current
error_payload = {
time: current_time,
uuid: Gitlab::CurrentSettings.uuid,
hostname: Gitlab.config.gitlab.host,
version: Gitlab.version_info.to_s,
message: "#{error.message.presence || error.class} at #{error.backtrace[0]}",
elapsed: (current_time - start_time).round(1)
}
submit_payload({ error: error_payload }, path: ERROR_PATH)
end
def submit_metadata_payload
submit_payload(metadata(payload), path: METADATA_PATH)
end
def save_raw_usage_data(usage_data)
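
Pieced together from the hunks above, the refactored `execute` delegates to the extracted helpers; a condensed sketch of the resulting control flow (not the verbatim file):

```ruby
def execute
  return unless ServicePing::ServicePingSettings.product_intelligence_enabled?

  start_time = Time.current

  begin
    response = submit_usage_data_payload
    raise SubmissionError, "Unsuccessful response code: #{response.code}" unless response.success?

    handle_response(response)  # validates usage_data_id, saves RawUsageData, builds DevOps report
    submit_metadata_payload
  rescue StandardError => e
    return unless Gitlab::CurrentSettings.usage_ping_enabled?

    submit_error_payload(e, start_time)
    raise
  end
end
```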

View File

@ -78,10 +78,10 @@
= s_('Preferences|Choose between fixed (max. 1280px) and fluid (%{percentage}) application layout.').html_safe % { percentage: '100%' }
.form-group
= f.label :dashboard, class: 'label-bold' do
= s_('Preferences|Homepage content')
= s_('Preferences|Dashboard')
= f.select :dashboard, dashboard_choices, {}, class: 'select2'
.form-text.text-muted
= s_('Preferences|Choose what content you want to see on your homepage.')
= s_('Preferences|Choose what content you want to see by default on your dashboard.')
= render_if_exists 'profiles/preferences/group_overview_selector', f: f # EE-specific

View File

@ -15,17 +15,24 @@ class GitlabServicePingWorker # rubocop:disable Scalability/IdempotentWorker
sidekiq_options retry: 3, dead: false
sidekiq_retry_in { |count| (count + 1) * 8.hours.to_i }
def perform
# Disable service ping for GitLab.com
def perform(options = {})
# Sidekiq does not support keyword arguments, so the args need to be
# passed the old pre-Ruby 2.0 way.
#
# See https://github.com/mperham/sidekiq/issues/2372
triggered_from_cron = options.fetch('triggered_from_cron', true)
skip_db_write = options.fetch('skip_db_write', false)
# Disable service ping for GitLab.com unless called manually
# See https://gitlab.com/gitlab-org/gitlab/-/issues/292929 for details
return if Gitlab.com?
return if Gitlab.com? && triggered_from_cron
# Multiple Sidekiq workers could run this. We should only do this at most once a day.
in_lock(LEASE_KEY, ttl: LEASE_TIMEOUT) do
# Splay the request over a minute to avoid thundering herd problems.
sleep(rand(0.0..60.0).round(3))
ServicePing::SubmitService.new(payload: usage_data).execute
ServicePing::SubmitService.new(payload: usage_data, skip_db_write: skip_db_write).execute
end
end
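
Because Sidekiq serializes job arguments as JSON, the options hash must use string keys, which is why the worker reads them with `options.fetch('...')` rather than keyword arguments. A hedged sketch of both invocation styles:

```ruby
# Enqueued: string keys survive Sidekiq's JSON round-trip (symbol keys would not).
GitlabServicePingWorker.perform_async('triggered_from_cron' => false, 'skip_db_write' => true)

# Inline from a Rails console, as the runbook docs above show:
GitlabServicePingWorker.new.perform('triggered_from_cron' => false)
```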

View File

@ -0,0 +1,8 @@
---
name: workhorse_long_polling_publish_many
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/96751
rollout_issue_url: https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1901
milestone: '15.4'
type: development
group: group::scalability
default_enabled: false
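
The flag ships disabled. While testing the per-key publishing path, it can presumably be toggled from a Rails console with GitLab's `Feature` API:

```ruby
Feature.enable(:workhorse_long_polling_publish_many)    # publish on per-key channels too
Feature.enabled?(:workhorse_long_polling_publish_many)  # => true
Feature.disable(:workhorse_long_polling_publish_many)   # roll back
```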

View File

@ -0,0 +1,20 @@
# frozen_string_literal: true
class AddLockedToCiPipelineArtifacts < Gitlab::Database::Migration[2.0]
disable_ddl_transaction!
TABLE_NAME = 'ci_pipeline_artifacts'
COLUMN_NAME = 'locked'
def up
with_lock_retries do
add_column TABLE_NAME, COLUMN_NAME, :smallint, default: 2
end
end
def down
with_lock_retries do
remove_column TABLE_NAME, COLUMN_NAME
end
end
end

View File

@ -0,0 +1,17 @@
# frozen_string_literal: true
class AddPartialIndexForCiPipelineArtifactsUnlockedWithExpireAt < Gitlab::Database::Migration[2.0]
disable_ddl_transaction!
TABLE_NAME = 'ci_pipeline_artifacts'
INDEX_NAME = 'ci_pipeline_artifacts_on_expire_at_for_removal'
CONDITIONS = 'locked = 0 AND expire_at IS NOT NULL'
def up
add_concurrent_index TABLE_NAME, [:expire_at], where: CONDITIONS, name: INDEX_NAME
end
def down
remove_concurrent_index_by_name TABLE_NAME, INDEX_NAME
end
end
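
The partial index only covers rows that are already `unlocked` (0) and have an `expire_at` set, which keeps it small and matched to the cleanup lookup (note the new column's default of `2` maps to the enum's `unknown`). A hedged sketch of the kind of query it serves:

```ruby
# Expired, unlocked pipeline artifacts: the rows the partial index covers.
Ci::PipelineArtifact
  .where(locked: :unlocked)             # locked = 0
  .where('expire_at < ?', Time.current) # expire_at IS NOT NULL is implied
  .limit(100)
```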

View File

@ -0,0 +1 @@
bda120b4684900c0763af116557930a77b2dfa3c3884ae7f8d4183db546fa019

View File

@ -0,0 +1 @@
0a6bd5578f5180fac269ffd8a78fc87b7bd95be4b0246890d5c57d79f2a856f8

View File

@ -12993,6 +12993,7 @@ CREATE TABLE ci_pipeline_artifacts (
verification_retry_count smallint,
verification_checksum bytea,
verification_failure text,
locked smallint DEFAULT 2,
CONSTRAINT check_191b5850ec CHECK ((char_length(file) <= 255)),
CONSTRAINT check_abeeb71caf CHECK ((file IS NOT NULL)),
CONSTRAINT ci_pipeline_artifacts_verification_failure_text_limit CHECK ((char_length(verification_failure) <= 255))
@ -27431,6 +27432,8 @@ CREATE INDEX cadence_create_iterations_automation ON iterations_cadences USING b
CREATE INDEX ci_builds_gitlab_monitor_metrics ON ci_builds USING btree (status, created_at, project_id) WHERE ((type)::text = 'Ci::Build'::text);
CREATE INDEX ci_pipeline_artifacts_on_expire_at_for_removal ON ci_pipeline_artifacts USING btree (expire_at) WHERE ((locked = 0) AND (expire_at IS NOT NULL));
CREATE INDEX code_owner_approval_required ON protected_branches USING btree (project_id, code_owner_approval_required) WHERE (code_owner_approval_required = true);
CREATE UNIQUE INDEX commit_user_mentions_on_commit_id_and_note_id_unique_index ON commit_user_mentions USING btree (commit_id, note_id);

View File

@ -10288,8 +10288,11 @@ CI/CD variables for a GitLab instance.
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="cijobartifactdownloadpath"></a>`downloadPath` | [`String`](#string) | URL for downloading the artifact's file. |
| <a id="cijobartifactexpireat"></a>`expireAt` | [`Time`](#time) | Expiry date of the artifact. |
| <a id="cijobartifactfiletype"></a>`fileType` | [`JobArtifactFileType`](#jobartifactfiletype) | File type of the artifact. |
| <a id="cijobartifactid"></a>`id` | [`CiJobArtifactID!`](#cijobartifactid) | ID of the artifact. |
| <a id="cijobartifactname"></a>`name` | [`String`](#string) | File name of the artifact. |
| <a id="cijobartifactsize"></a>`size` | [`Int!`](#int) | Size of the artifact in bytes. |
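
With the type extended, clients can now select `id`, `size`, and `expireAt` alongside the existing artifact fields; a hypothetical console sketch (project path and user are assumptions) executing such a query through the Ruby schema:

```ruby
query = <<~GRAPHQL
  query {
    project(fullPath: "gitlab-org/gitlab") {
      jobs(first: 1) {
        nodes {
          artifacts {
            nodes { id size expireAt downloadPath fileType name }
          }
        }
      }
    }
  }
GRAPHQL

GitlabSchema.execute(query, context: { current_user: User.first }).to_h
```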
### `CiJobTokenScopeType`

View File

@ -35,12 +35,23 @@ and see the [Development section](../../index.md) for the required guidelines.
## Merge request guidelines for contributors
If you find an issue, please submit a merge request with a fix or improvement, if
you can, and include tests. If you don't know how to fix the issue but can write a test
that exposes the issue, we will accept that as well. In general, bug fixes that
include a regression test are merged quickly, while new features without proper
tests might be slower to receive feedback. The workflow to make a merge
request is as follows:
If you find an issue, please submit a merge request with a fix or improvement,
if you can, and include tests.
If the change is non-trivial, we encourage you to
start a discussion with [a product manager or a member of the team](https://about.gitlab.com/handbook/product/categories/).
You can do
this by tagging them in an MR before submitting the code for review. Talking
to team members can be helpful when making design decisions. Communicating the
intent behind your changes can also help expedite merge request reviews.
If
you don't know how to fix the issue but can write a test that exposes the
issue, we will accept that as well. In general, bug fixes that include a
regression test are merged quickly. New features without proper tests
might be slower to receive feedback.
To create a merge request:
1. [Fork](../../user/project/repository/forking_workflow.md) the project into
your personal namespace (or group) on GitLab.com.

View File

@ -770,7 +770,7 @@ To set up Service Ping locally, you must:
1. Using the `gitlab` Rails console, manually trigger Service Ping:
```ruby
ServicePing::SubmitService.new.execute
GitlabServicePingWorker.new.perform('triggered_from_cron' => false)
```
1. Use the `versions` Rails console to check the Service Ping was successfully received,

View File

@ -408,7 +408,7 @@ To generate Service Ping, use [Teleport](https://goteleport.com/docs/) or a deta
1. Request temporary [access](https://gitlab.com/gitlab-com/runbooks/-/blob/master/docs/Teleport/Connect_to_Rails_Console_via_Teleport.md#how-to-use-teleport-to-connect-to-rails-console) to the required environment.
1. After your approval is issued, [access the Rails console](https://gitlab.com/gitlab-com/runbooks/-/blob/master/docs/Teleport/Connect_to_Rails_Console_via_Teleport.md#access-approval).
1. Run `ServicePing::SubmitService.new.execute`.
1. Run `GitlabServicePingWorker.new.perform('triggered_from_cron' => false)`.
#### Trigger Service Ping with a detached screen session
@ -433,7 +433,7 @@ To generate Service Ping, use [Teleport](https://goteleport.com/docs/) or a deta
1. Run:
```shell
ServicePing::SubmitService.new.execute
GitlabServicePingWorker.new.perform('triggered_from_cron' => false)
```
1. To detach from screen, press `ctrl + A`, `ctrl + D`.
@ -493,7 +493,7 @@ To skip database write operations, DevOps report creation, and storage of usage
```shell
skip_db_write:
ServicePing::SubmitService.new(skip_db_write: true).execute
GitlabServicePingWorker.new.perform('triggered_from_cron' => false, 'skip_db_write' => true)
```
## Monitoring

View File

@ -116,12 +116,11 @@ between the fixed (max. `1280px`) and the fluid (`100%`) application layout.
NOTE:
While `1280px` is the standard max width when using fixed layout, some pages still use 100% width, depending on the content.
### Default dashboard
### Dashboard
For users who have access to a large number of projects but only keep up with a
select few, the amount of activity on the default dashboard page can be
overwhelming. Changing this setting allows you to redefine your default
dashboard.
select few, the amount of activity on your dashboard can be
overwhelming. Changing this setting allows you to redefine what is displayed by default.
You can include the following options for your default dashboard view:

View File

@ -52,9 +52,6 @@ Prerequisites:
- You must have a role in the project that allows you to edit merge requests, and add
code to the repository.
- Your project must use the [merge method](methods/index.md#fast-forward-merge) **Merge Commit**,
which is set in the project's **Settings > General > Merge request**. You can't revert
fast-forwarded commits from the GitLab UI.
To do this:

View File

@ -39,21 +39,18 @@ To prevent exceeding the namespace storage quota, you can:
### Namespace storage limit enforcement schedule
Storage limits for GitLab SaaS Free tier namespaces will not be enforced prior to 2022-10-19. Storage limits for GitLab SaaS Paid tier namespaces will not be enforced prior to 2023-02-15.
Storage limits for GitLab SaaS Free tier namespaces will not be enforced prior to 2022-10-19. Storage limits for GitLab SaaS Paid tier namespaces will not be enforced prior to 2023-02-15. Enforcement will not occur until all storage types are accurately measured, including deduplication of forks for [Git](https://gitlab.com/gitlab-org/gitlab/-/issues/371671) and [LFS](https://gitlab.com/gitlab-org/gitlab/-/issues/370242).
Impacted users are notified via email and in-app notifications at least 60 days prior to enforcement.
### Project storage limit
Namespaces on a GitLab SaaS **paid** tier (Premium and Ultimate) have a storage limit on their project repositories.
A project's repository has a storage quota of 10 GB. A namespace has either a namespace-level storage limit or a project-level storage limit, but not both.
Projects on GitLab SaaS have a 10 GB storage limit on their Git repository and LFS storage.
Once namespace-level storage limits are enforced, the project limit will be removed. A namespace has either a namespace-level storage limit or a project-level storage limit, but not both.
- Paid tier namespaces have project-level storage limits enforced.
- Free tier namespaces have namespace-level storage limits.
When a project's repository reaches the quota, the project is locked. You cannot push changes to a locked project. To monitor the size of each
When a project's repository and LFS reach the quota, the project is locked. You cannot push changes to a locked project. To monitor the size of each
repository in a namespace, including a breakdown for each project, you can
[view storage usage](#view-storage-usage). To allow a project's repository to exceed the free quota
[view storage usage](#view-storage-usage). To allow a project's repository and LFS to exceed the free quota
you must purchase additional storage. For more details, see [Excess storage usage](#excess-storage-usage).
## View storage usage
@ -100,7 +97,7 @@ For more information, see the following pages:
## Excess storage usage
Excess storage usage is the amount that a project's repository exceeds the free storage quota. If no
Excess storage usage is the amount by which a project's repository and LFS exceed the free storage quota. If no
purchased storage is available the project is locked. You cannot push changes to a locked project.
To unlock a project you must [purchase more storage](../subscriptions/gitlab_com/index.md#purchase-more-storage-and-transfer)
for the namespace. When the purchase is completed, locked projects are automatically unlocked. The
@ -125,12 +122,12 @@ The following example describes an excess storage scenario for a namespace:
| Yellow | 2 GB | 0 GB | 10 GB | Not locked |
| **Totals** | **30 GB** | **0 GB** | - | - |
The Red and Green projects are locked because their repositories have reached the quota. In this
The Red and Green projects are locked because their repositories and LFS have reached the quota. In this
example, no additional storage has yet been purchased.
To unlock the Red and Green projects, 50 GB additional storage is purchased.
Assuming the Green and Red projects' repositories grow past the 10 GB quota, the purchased storage
Assuming the Green and Red projects' repositories and LFS grow past the 10 GB quota, the purchased storage
available decreases. All projects remain unlocked because 40 GB purchased storage is available:
50 GB (purchased storage) - 10 GB (total excess storage used).

View File

@ -218,6 +218,11 @@ module Gitlab
result = redis.set(key, value, ex: expire, nx: !overwrite)
if result
redis.publish(NOTIFICATION_CHANNEL, "#{key}=#{value}")
if Feature.enabled?(:workhorse_long_polling_publish_many)
redis.publish("#{NOTIFICATION_CHANNEL}:#{key}", value)
end
value
else
redis.get(key)
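
With the flag enabled, each notification is published twice: the legacy firehose message on `workhorse:notifications` (the value of `NOTIFICATION_CHANNEL`) and a value-only message on a per-key channel that Workhorse can subscribe to individually, matching `channelPrefix` in the Go code further down. A sketch of the resulting Redis traffic for an assumed key/value pair:

```ruby
# Firehose: one shared channel, "key=value" payloads.
redis.publish('workhorse:notifications', 'runner:build_queue:abc123=token')

# Per-key: the channel name carries the key, the payload is the bare value.
redis.publish('workhorse:notifications:runner:build_queue:abc123', 'token')
```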

View File

@ -24,9 +24,9 @@ namespace :gitlab do
desc 'GitLab | UsageData | Generate usage ping and send it to Versions Application'
task generate_and_send: :environment do
result = ServicePing::SubmitService.new.execute
response = GitlabServicePingWorker.new.perform('triggered_from_cron' => false)
puts Gitlab::Json.pretty_generate(result.attributes)
puts response.body, response.code, response.message, response.headers.inspect
end
desc 'GitLab | UsageDataMetrics | Generate usage ping from metrics definition YAML files in JSON'

View File

@ -6731,6 +6731,12 @@ msgstr ""
msgid "BranchRules|Require approval from code owners."
msgstr ""
msgid "BranchRules|default"
msgstr ""
msgid "BranchRules|protected"
msgstr ""
msgid "Branches"
msgstr ""
@ -29755,10 +29761,10 @@ msgstr ""
msgid "Preferences|Choose between fixed (max. 1280px) and fluid (%{percentage}) application layout."
msgstr ""
msgid "Preferences|Choose what content you want to see on a projects overview page."
msgid "Preferences|Choose what content you want to see by default on your dashboard."
msgstr ""
msgid "Preferences|Choose what content you want to see on your homepage."
msgid "Preferences|Choose what content you want to see on a projects overview page."
msgstr ""
msgid "Preferences|Color for added lines"
@ -29782,6 +29788,9 @@ msgstr ""
msgid "Preferences|Customize the colors of removed and added lines in diffs."
msgstr ""
msgid "Preferences|Dashboard"
msgstr ""
msgid "Preferences|Diff colors"
msgstr ""
@ -29803,9 +29812,6 @@ msgstr ""
msgid "Preferences|Gitpod"
msgstr ""
msgid "Preferences|Homepage content"
msgstr ""
msgid "Preferences|Instead of all the files changed, show only one file at a time. To switch between files, use the file browser."
msgstr ""

View File

@ -4,23 +4,18 @@ require_relative "qa"
Dir['tasks/*.rake'].each { |file| load file }
desc "Revokes all personal access tokens"
task :revoke_personal_access_tokens do
QA::Tools::RevokeAllPersonalAccessTokens.new.run
end
desc "Deletes subgroups within a provided group"
task :delete_subgroups do
QA::Tools::DeleteSubgroups.new.run
end
desc "Initialize GitLab with an access token"
task :initialize_gitlab_auth, [:address] do |t, args|
task :initialize_gitlab_auth, [:address] do |_, args|
QA::Tools::InitializeGitLabAuth.new(args).run
end
desc "Generate Performance Testdata"
task :generate_perf_testdata, :type do |t, args|
task :generate_perf_testdata, :type do |_, args|
args.with_defaults(type: :all)
QA::Tools::GeneratePerfTestdata.new.method(args[:type]).call
end
@ -50,7 +45,7 @@ desc "Generate data and run load tests"
task generate_data_and_run_load_test: [:generate_perf_testdata, :run_artillery_load_tests]
desc "Deletes test ssh keys a user"
task :delete_test_ssh_keys, [:title_portion, :delete_before, :dry_run] do |t, args|
task :delete_test_ssh_keys, [:title_portion, :delete_before, :dry_run] do |_, args|
QA::Tools::DeleteTestSSHKeys.new(args).run
end
@ -60,33 +55,38 @@ task :delete_projects do
end
desc "Deletes test users"
task :delete_test_users, [:delete_before, :dry_run, :exclude_users] do |t, args|
task :delete_test_users, [:delete_before, :dry_run, :exclude_users] do |_, args|
QA::Tools::DeleteTestUsers.new(args).run
end
desc "Deletes snippets"
task :delete_test_snippets, [:delete_before, :dry_run] do |t, args|
task :delete_test_snippets, [:delete_before, :dry_run] do |_, args|
QA::Tools::DeleteTestSnippets.new(args).run
end
namespace :test_resources do
desc "Deletes resources created during E2E test runs"
task :delete, [:file_pattern] do |t, args|
task :delete, [:file_pattern] do |_, args|
QA::Tools::TestResourcesHandler.new(args[:file_pattern]).run_delete
end
desc "Upload test resources JSON files to GCS"
task :upload, [:file_pattern, :ci_project_name] do |t, args|
task :upload, [:file_pattern, :ci_project_name] do |_, args|
QA::Tools::TestResourcesHandler.new(args[:file_pattern]).upload(args[:ci_project_name])
end
desc "Download test resources JSON files from GCS"
task :download, [:ci_project_name] do |t, args|
task :download, [:ci_project_name] do |_, args|
QA::Tools::TestResourcesHandler.new.download(args[:ci_project_name])
end
end
desc "Deletes user's projects"
task :delete_user_projects, [:delete_before, :dry_run] do |t, args|
task :delete_user_projects, [:delete_before, :dry_run] do |_, args|
QA::Tools::DeleteUserProjects.new(args).run
end
desc "Revokes user's personal access tokens"
task :revoke_user_pats, [:revoke_before, :dry_run] do |_, args|
QA::Tools::RevokeUserPersonalAccessTokens.new(args).run
end

View File

@ -1,44 +0,0 @@
# frozen_string_literal: true
require 'net/protocol'
# This script revokes all personal access tokens with the name of 'api-test-token' on the host specified by GITLAB_ADDRESS
# Required environment variables: GITLAB_USERNAME, GITLAB_PASSWORD and GITLAB_ADDRESS
# Run `rake revoke_personal_access_tokens`
module QA
module Tools
class RevokeAllPersonalAccessTokens
def run
do_run
rescue Net::ReadTimeout
$stdout.puts 'Net::ReadTimeout during run. Trying again'
run
end
private
def do_run
raise ArgumentError, "Please provide GITLAB_USERNAME" unless ENV['GITLAB_USERNAME']
raise ArgumentError, "Please provide GITLAB_PASSWORD" unless ENV['GITLAB_PASSWORD']
raise ArgumentError, "Please provide GITLAB_ADDRESS" unless ENV['GITLAB_ADDRESS']
$stdout.puts 'Running...'
Runtime::Browser.visit(ENV['GITLAB_ADDRESS'], Page::Main::Login)
Page::Main::Login.perform(&:sign_in_using_credentials)
Page::Main::Menu.perform(&:click_edit_profile_link)
Page::Profile::Menu.perform(&:click_access_tokens)
token_name = 'api-test-token'
Page::Profile::PersonalAccessTokens.perform do |tokens_page|
while tokens_page.has_token_row_for_name?(token_name)
tokens_page.revoke_first_token_with_name(token_name)
print "\e[32m.\e[0m"
end
end
end
end
end
end

View File

@ -0,0 +1,94 @@
# frozen_string_literal: true
# This script revokes all active personal access tokens owned by a given USER_ID
# up to a given date (Date.today - 1 by default)
# Required environment variables: USER_ID, GITLAB_QA_ACCESS_TOKEN and GITLAB_ADDRESS
# Run `rake revoke_user_pats`
module QA
module Tools
class RevokeUserPersonalAccessTokens
include Support::API
def initialize(revoke_before: (Date.today - 1).to_s, dry_run: false)
raise ArgumentError, "Please provide GITLAB_ADDRESS environment variable" unless ENV['GITLAB_ADDRESS']
unless ENV['GITLAB_QA_ACCESS_TOKEN']
raise ArgumentError, "Please provide GITLAB_QA_ACCESS_TOKEN environment variable"
end
raise ArgumentError, "Please provide USER_ID environment variable" unless ENV['USER_ID']
@revoke_before = Date.parse(revoke_before)
@dry_run = dry_run
@api_client = Runtime::API::Client.new(ENV['GITLAB_ADDRESS'],
personal_access_token: ENV['GITLAB_QA_ACCESS_TOKEN'])
end
def run
$stdout.puts 'Running...'
tokens_head_response = head Runtime::API::Request.new(@api_client,
"/personal_access_tokens?user_id=#{ENV['USER_ID']}",
per_page: "100").url
total_token_pages = tokens_head_response.headers[:x_total_pages]
total_tokens = tokens_head_response.headers[:x_total]
$stdout.puts "Total tokens: #{total_tokens}. Total pages: #{total_token_pages}"
tokens = fetch_tokens
revoke_tokens(tokens, @api_client, @dry_run) unless tokens.empty?
$stdout.puts "\nDone"
end
private
def fetch_tokens
fetched_tokens = []
page_no = 1
while page_no > 0
tokens_response = get Runtime::API::Request.new(@api_client,
"/personal_access_tokens?user_id=#{ENV['USER_ID']}",
page: page_no.to_s, per_page: "100").url
fetched_tokens
.concat(JSON.parse(tokens_response.body)
.select { |token| Date.parse(token["created_at"]) < @revoke_before && token['active'] }
.map { |token| { id: token["id"], name: token["name"], created_at: token["created_at"] } }
)
page_no = tokens_response.headers[:x_next_page].to_i
end
fetched_tokens
end
def revoke_tokens(tokens, api_client, dry_run = false)
if dry_run
$stdout.puts "Following #{tokens.count} tokens would be revoked:"
else
$stdout.puts "Revoking #{tokens.count} tokens..."
end
tokens.each do |token|
if dry_run
$stdout.puts "Token name: #{token[:name]}, id: #{token[:id]}, created at: #{token[:created_at]}"
else
request_url = Runtime::API::Request.new(api_client, "/personal_access_tokens/#{token[:id]}").url
$stdout.puts "\nRevoking token with name: #{token[:name]}, " \
"id: #{token[:id]}, created at: #{token[:created_at]}"
delete_response = delete(request_url)
dot_or_f = delete_response.code == 204 ? "\e[32m.\e[0m" : "\e[31mF - #{delete_response}\e[0m"
print dot_or_f
end
end
end
end
end
end
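
A hedged usage sketch: per the header comment, the tool reads `GITLAB_ADDRESS`, `GITLAB_QA_ACCESS_TOKEN`, and `USER_ID` from the environment, and the keyword arguments default to yesterday's date and a real (non-dry) run. The values below are placeholders:

```ruby
ENV['GITLAB_ADDRESS'] = 'https://gitlab.example.com'
ENV['GITLAB_QA_ACCESS_TOKEN'] = '<admin-scoped token>'
ENV['USER_ID'] = '42'

# Dry run first: prints the tokens that would be revoked without deleting anything.
QA::Tools::RevokeUserPersonalAccessTokens.new(revoke_before: '2022-09-01', dry_run: true).run
```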

View File

@ -50,7 +50,7 @@ RSpec.describe "Admin Runners" do
it 'shows an instance badge' do
within_runner_row(instance_runner.id) do
expect(page).to have_selector '.badge', text: 'Instance'
expect(page).to have_selector '.badge', text: s_('Runners|Instance')
end
end
end

View File

@ -0,0 +1,58 @@
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import BranchRule, {
i18n,
} from '~/projects/settings/repository/branch_rules/components/branch_rule.vue';
const defaultProps = {
name: 'main',
isDefault: true,
isProtected: true,
approvalDetails: ['requires approval from TEST', '2 status checks'],
};
describe('Branch rule', () => {
let wrapper;
const createComponent = (props = {}) => {
wrapper = shallowMountExtended(BranchRule, { propsData: { ...defaultProps, ...props } });
};
const findDefaultBadge = () => wrapper.findByText(i18n.defaultLabel);
const findProtectedBadge = () => wrapper.findByText(i18n.protectedLabel);
const findBranchName = () => wrapper.findByText(defaultProps.name);
const findProtectionDetailsList = () => wrapper.findByRole('list');
const findProtectionDetailsListItems = () => wrapper.findAllByRole('listitem');
beforeEach(() => createComponent());
it('renders the branch name', () => {
expect(findBranchName().exists()).toBe(true);
});
describe('badges', () => {
it('renders both default and protected badges', () => {
expect(findDefaultBadge().exists()).toBe(true);
expect(findProtectedBadge().exists()).toBe(true);
});
it('does not render default badge if isDefault is set to false', () => {
createComponent({ isDefault: false });
expect(findDefaultBadge().exists()).toBe(false);
});
it('does not render protected badge if isProtected is set to false', () => {
createComponent({ isProtected: false });
expect(findProtectedBadge().exists()).toBe(false);
});
});
it('does not render the protection details list if no details are present', () => {
createComponent({ approvalDetails: null });
expect(findProtectionDetailsList().exists()).toBe(false);
});
it('renders the protection details list items', () => {
expect(findProtectionDetailsListItems().at(0).text()).toBe(defaultProps.approvalDetails[0]);
expect(findProtectionDetailsListItems().at(1).text()).toBe(defaultProps.approvalDetails[1]);
});
});

View File

@ -4,7 +4,7 @@ require 'spec_helper'
RSpec.describe GitlabSchema.types['CiJobArtifact'] do
it 'has the correct fields' do
expected_fields = [:download_path, :file_type, :name]
expected_fields = [:id, :download_path, :file_type, :name, :size, :expire_at]
expect(described_class).to have_graphql_fields(*expected_fields)
end

View File

@ -366,9 +366,25 @@ RSpec.describe Gitlab::Workhorse do
expect(Gitlab::Redis::SharedState).to receive(:with).and_call_original
expect_any_instance_of(::Redis).to receive(:publish)
.with(described_class::NOTIFICATION_CHANNEL, "test-key=test-value")
expect_any_instance_of(::Redis).to receive(:publish)
.with(described_class::NOTIFICATION_CHANNEL + ':test-key', "test-value")
subject
end
context 'when workhorse_long_polling_publish_many is disabled' do
before do
stub_feature_flags(workhorse_long_polling_publish_many: false)
end
it 'set and notify' do
expect(Gitlab::Redis::SharedState).to receive(:with).and_call_original
expect_any_instance_of(::Redis).to receive(:publish)
.with(described_class::NOTIFICATION_CHANNEL, "test-key=test-value")
subject
end
end
end
context 'when we set a new key' do

View File

@ -6,15 +6,15 @@ RSpec.describe 'Query.project(fullPath).ciConfigVariables(sha)' do
include GraphqlHelpers
include ReactiveCachingHelpers
let_it_be(:project) { create(:project, :repository, :public) }
let_it_be(:user) { create(:user) }
let_it_be(:content) do
File.read(Rails.root.join('spec/support/gitlab_stubs/gitlab_ci.yml'))
end
let(:sha) { project.commit.sha }
let_it_be(:project) { create(:project, :custom_repo, :public, files: { '.gitlab-ci.yml' => content }) }
let_it_be(:user) { create(:user) }
let(:service) { Ci::ListConfigVariablesService.new(project, user) }
let(:sha) { project.repository.commit.sha }
let(:query) do
%(
@ -33,7 +33,6 @@ RSpec.describe 'Query.project(fullPath).ciConfigVariables(sha)' do
context 'when the user has the correct permissions' do
before do
project.add_maintainer(user)
stub_ci_pipeline_yaml_file(content)
allow(Ci::ListConfigVariablesService)
.to receive(:new)
.and_return(service)
@ -45,6 +44,11 @@ RSpec.describe 'Query.project(fullPath).ciConfigVariables(sha)' do
end
it 'returns the CI variables for the config' do
expect(service)
.to receive(:execute)
.with(sha)
.and_call_original
post_graphql(query, current_user: user)
expect(graphql_data.dig('project', 'ciConfigVariables')).to contain_exactly(
@ -63,8 +67,6 @@ RSpec.describe 'Query.project(fullPath).ciConfigVariables(sha)' do
end
context 'when the cache is empty' do
let(:sha) { 'main' }
it 'returns nothing' do
post_graphql(query, current_user: user)
@ -76,7 +78,6 @@ RSpec.describe 'Query.project(fullPath).ciConfigVariables(sha)' do
context 'when the user is not authorized' do
before do
project.add_guest(user)
stub_ci_pipeline_yaml_file(content)
allow(Ci::ListConfigVariablesService)
.to receive(:new)
.and_return(service)

View File

@ -54,11 +54,13 @@ RSpec.describe ServicePing::SubmitService do
let(:service_ping_payload_url) { File.join(described_class::STAGING_BASE_URL, described_class::USAGE_DATA_PATH) }
let(:service_ping_errors_url) { File.join(described_class::STAGING_BASE_URL, described_class::ERROR_PATH) }
let(:service_ping_metadata_url) { File.join(described_class::STAGING_BASE_URL, described_class::METADATA_PATH) }
let!(:usage_data) { { uuid: 'uuid', recorded_at: Time.current } }
let(:subject) { described_class.new(payload: usage_data) }
shared_examples 'does not run' do
it do
expect(Gitlab::HTTP).not_to receive(:post)
expect(Gitlab::Usage::ServicePingReport).not_to receive(:for)
subject.execute
end
@ -69,7 +71,7 @@ RSpec.describe ServicePing::SubmitService do
expect(Gitlab::HTTP).not_to receive(:post).with(service_ping_payload_url, any_args)
expect { subject.execute }.to raise_error(described_class::SubmissionError) do |error|
expect(error.message).to include('Usage data is blank')
expect(error.message).to include('Usage data payload is blank')
end
end
end
@ -118,13 +120,18 @@ RSpec.describe ServicePing::SubmitService do
allow(ServicePing::ServicePingSettings).to receive(:product_intelligence_enabled?).and_return(true)
end
it 'generates service ping' do
stub_response(body: with_dev_ops_score_params)
stub_response(body: nil, url: service_ping_metadata_url, status: 201)
it 'submits a service ping payload without errors', :aggregate_failures do
response = stub_response(body: with_dev_ops_score_params)
error_response = stub_response(body: nil, url: service_ping_errors_url, status: 201)
metadata_response = stub_response(body: nil, url: service_ping_metadata_url, status: 201)
expect(Gitlab::Usage::ServicePingReport).to receive(:for).with(output: :all_metrics_values).and_call_original
expect(Gitlab::HTTP).to receive(:post).twice.and_call_original
subject.execute
expect(response).to have_been_requested
expect(error_response).not_to have_been_requested
expect(metadata_response).to have_been_requested
end
end
@ -155,15 +162,9 @@ RSpec.describe ServicePing::SubmitService do
expect(response).to have_been_requested
end
it 'forces a refresh of usage data statistics before submitting' do
stub_response(body: with_dev_ops_score_params)
expect(Gitlab::Usage::ServicePingReport).to receive(:for).with(output: :all_metrics_values).and_call_original
subject.execute
end
context 'when conv_index data is passed' do
let(:usage_data) { { uuid: 'uuid', recorded_at: Time.current } }
before do
stub_response(body: with_conv_index_params)
end
@ -171,21 +172,17 @@ RSpec.describe ServicePing::SubmitService do
it_behaves_like 'saves DevOps report data from the response'
it 'saves usage_data_id to version_usage_data_id_value' do
recorded_at = Time.current
usage_data = { uuid: 'uuid', recorded_at: recorded_at }
expect(Gitlab::Usage::ServicePingReport).to receive(:for).with(output: :all_metrics_values)
.and_return(usage_data)
subject.execute
raw_usage_data = RawUsageData.find_by(recorded_at: recorded_at)
raw_usage_data = RawUsageData.find_by(recorded_at: usage_data[:recorded_at])
expect(raw_usage_data.version_usage_data_id_value).to eq(31643)
end
end
context 'when only usage_data_id is passed in response' do
let(:usage_data) { { uuid: 'uuid', recorded_at: Time.current } }
before do
stub_response(body: with_usage_data_id_params)
end
@ -195,15 +192,9 @@ RSpec.describe ServicePing::SubmitService do
end
it 'saves usage_data_id to version_usage_data_id_value' do
recorded_at = Time.current
usage_data = { uuid: 'uuid', recorded_at: recorded_at }
expect(Gitlab::Usage::ServicePingReport).to receive(:for).with(output: :all_metrics_values)
.and_return(usage_data)
subject.execute
raw_usage_data = RawUsageData.find_by(recorded_at: recorded_at)
raw_usage_data = RawUsageData.find_by(recorded_at: usage_data[:recorded_at])
expect(raw_usage_data.version_usage_data_id_value).to eq(31643)
end
@ -232,6 +223,8 @@ RSpec.describe ServicePing::SubmitService do
end
context 'with saving raw_usage_data' do
let(:usage_data) { { uuid: 'uuid', recorded_at: Time.current } }
before do
stub_response(body: with_dev_ops_score_params)
end
@ -241,17 +234,10 @@ RSpec.describe ServicePing::SubmitService do
end
it 'saves the correct payload' do
recorded_at = Time.current
usage_data = { uuid: 'uuid', recorded_at: recorded_at }
expect(Gitlab::Usage::ServicePingReport).to receive(:for).with(output: :all_metrics_values)
.and_return(usage_data)
subject.execute
raw_usage_data = RawUsageData.find_by(recorded_at: recorded_at)
raw_usage_data = RawUsageData.find_by(recorded_at: usage_data[:recorded_at])
expect(raw_usage_data.recorded_at).to be_like_time(recorded_at)
expect(raw_usage_data.payload.to_json).to eq(usage_data.to_json)
end
end
@ -269,90 +255,30 @@ RSpec.describe ServicePing::SubmitService do
end
context 'and usage data is empty string' do
before do
allow(Gitlab::Usage::ServicePingReport).to receive(:for).with(output: :all_metrics_values).and_return({})
end
let(:usage_data) { {} }
it_behaves_like 'does not send a blank usage ping payload'
end
context 'and usage data is nil' do
before do
allow(ServicePing::BuildPayload).to receive(:execute).and_return(nil)
allow(Gitlab::Usage::ServicePingReport).to receive(:for).with(output: :all_metrics_values).and_return(nil)
end
let(:usage_data) { nil }
it_behaves_like 'does not send a blank usage ping payload'
end
context 'if payload service fails' do
before do
stub_response(body: with_dev_ops_score_params)
allow(ServicePing::BuildPayload).to receive_message_chain(:new, :execute)
.and_raise(described_class::SubmissionError, 'SubmissionError')
end
it 'calls Gitlab::Usage::ServicePingReport .for method' do
usage_data = build_usage_data
expect(Gitlab::Usage::ServicePingReport).to receive(:for).with(output: :all_metrics_values)
.and_return(usage_data)
subject.execute
end
it 'submits error' do
expect(Gitlab::HTTP).to receive(:post).with(URI.join(service_ping_payload_url), any_args)
.and_call_original
expect(Gitlab::HTTP).to receive(:post).with(URI.join(service_ping_errors_url), any_args)
.and_call_original
expect(Gitlab::HTTP).to receive(:post).with(URI.join(service_ping_metadata_url), any_args)
.and_call_original
subject.execute
end
end
context 'calls BuildPayload first' do
before do
stub_response(body: with_dev_ops_score_params)
end
it 'returns usage data' do
usage_data = build_usage_data
expect_next_instance_of(ServicePing::BuildPayload) do |service|
expect(service).to receive(:execute).and_return(usage_data)
end
subject.execute
end
end
context 'if version app response fails' do
before do
stub_response(body: with_dev_ops_score_params, status: 404)
usage_data = build_usage_data
allow_next_instance_of(ServicePing::BuildPayload) do |service|
allow(service).to receive(:execute).and_return(usage_data)
end
end
it 'calls Gitlab::Usage::ServicePingReport .for method' do
usage_data = build_usage_data
expect(Gitlab::Usage::ServicePingReport).to receive(:for).with(output: :all_metrics_values)
.and_return(usage_data)
it 'raises SubmissionError' do
# SubmissionError is raised as a result of 404 in response from HTTP Request
expect { subject.execute }.to raise_error(described_class::SubmissionError)
end
end
context 'when skip_db_write passed to service' do
let(:subject) { ServicePing::SubmitService.new(skip_db_write: true) }
let(:subject) { described_class.new(payload: usage_data, skip_db_write: true) }
before do
stub_response(body: with_dev_ops_score_params)
@ -377,13 +303,10 @@ RSpec.describe ServicePing::SubmitService do
stub_database_flavor_check
stub_application_setting(usage_ping_enabled: true)
stub_response(body: with_conv_index_params)
allow_next_instance_of(ServicePing::BuildPayload) do |service|
allow(service).to receive(:execute).and_return(payload)
end
end
let(:metric_double) { instance_double(Gitlab::Usage::ServicePing::LegacyMetricTimingDecorator, duration: 123) }
let(:payload) do
let(:usage_data) do
{
uuid: 'uuid',
metric_a: metric_double,
@ -425,8 +348,4 @@ RSpec.describe ServicePing::SubmitService do
status: status
)
end
def build_usage_data
{ uuid: 'uuid', recorded_at: Time.current }
end
end

View File

@ -173,6 +173,7 @@ RSpec.configure do |config|
config.include TestEnv
config.include FileReadHelpers
config.include Database::MultipleDatabases
config.include Database::WithoutCheckConstraint
config.include Devise::Test::ControllerHelpers, type: :controller
config.include Devise::Test::ControllerHelpers, type: :view
config.include Devise::Test::IntegrationHelpers, type: :feature

View File

@ -0,0 +1,52 @@
# frozen_string_literal: true
# Temporarily disable the named constraint on the table within the block.
#
# without_check_constraint('members', 'check_1234', connection: connection) do
# create_invalid_data
# end
module Database
module WithoutCheckConstraint
def without_check_constraint(table, name, connection:)
saved_constraint = constraint(table, name, connection)
constraint_error!(table, name, connection) if saved_constraint.nil?
begin
connection.remove_check_constraint(table, name: name)
connection.transaction do
yield
raise ActiveRecord::Rollback
end
ensure
restore_constraint(saved_constraint, connection)
end
end
private
def constraint_error!(table, name, connection)
msg = if connection.table_exists?(table)
"'#{table}' table does not contain constraint called '#{name}'"
else
"'#{table}' does not exist"
end
raise msg
end
def constraint(table, name, connection)
connection
.check_constraints(table)
.find { |constraint| constraint.options[:name] == name }
end
def restore_constraint(constraint, connection)
connection.add_check_constraint(
constraint.table_name,
constraint.expression,
**constraint.options
)
end
end
end

View File

@ -0,0 +1,85 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'Database::WithoutCheckConstraint' do
include MigrationsHelpers
describe '.without_check_constraint' do
let(:connection) { ApplicationRecord.connection }
let(:table_name) { '_test_table' }
let(:constraint_name) { 'check_1234' }
let(:model) { table(table_name) }
before do
# Drop test table in case it's left from a previous execution.
connection.exec_query("DROP TABLE IF EXISTS #{table_name}")
# Model has an attribute called 'name' that can't be NULL.
connection.exec_query(<<-SQL)
CREATE TABLE #{table_name} (
name text
CONSTRAINT #{constraint_name} CHECK (name IS NOT NULL)
);
SQL
end
context 'with invalid table' do
subject do
without_check_constraint('no_such_table', constraint_name, connection: connection) {}
end
it 'raises exception' do
msg = "'no_such_table' does not exist"
expect { subject }.to raise_error(msg)
end
end
context 'with invalid constraint name' do
subject do
without_check_constraint(table_name, 'no_such_constraint', connection: connection) {}
end
it 'raises exception' do
msg = "'#{table_name}' table does not contain constraint called 'no_such_constraint'"
expect { subject }.to raise_error(msg)
end
end
context 'with constraint' do
subject { connection.check_constraints(table_name) }
it 'removes inside block' do
without_check_constraint(table_name, constraint_name, connection: connection) do
expect(subject).to be_empty
end
end
it 'restores outside block' do
saved_constraints = subject
without_check_constraint(table_name, constraint_name, connection: connection) do
end
expect(subject).to eq(saved_constraints)
end
end
context 'when creating an invalid record' do
subject(:invalid_record) { model.create!(name: nil) }
it 'enables invalid record creation inside block' do
without_check_constraint(table_name, constraint_name, connection: connection) do
expect(invalid_record).to be_persisted
expect(invalid_record.name).to be_nil
end
end
it 'rolls back changes made within the block' do
without_check_constraint(table_name, constraint_name, connection: connection) do
invalid_record
end
expect(model.all).to be_empty
end
end
end
end

View File

@ -3,6 +3,7 @@
require 'rake_helper'
RSpec.describe 'gitlab:usage data rake tasks', :silence_stdout do
include StubRequests
include UsageDataHelpers
let(:metrics_file) { Rails.root.join('tmp', 'test', 'sql_metrics_queries.json') }
@ -44,4 +45,39 @@ RSpec.describe 'gitlab:usage data take tasks', :silence_stdout do
expect(Pathname.new(metrics_file)).to exist
end
end
describe 'generate_and_send' do
let(:service_ping_payload_url) do
File.join(ServicePing::SubmitService::STAGING_BASE_URL, ServicePing::SubmitService::USAGE_DATA_PATH)
end
let(:service_ping_metadata_url) do
File.join(ServicePing::SubmitService::STAGING_BASE_URL, ServicePing::SubmitService::METADATA_PATH)
end
let(:payload) { { recorded_at: Time.current } }
before do
allow_next_instance_of(ServicePing::BuildPayload) do |service|
allow(service).to receive(:execute).and_return(payload)
end
stub_response(body: payload.merge(conv_index: { usage_data_id: 123 }))
stub_response(body: nil, url: service_ping_metadata_url, status: 201)
end
it 'generates and sends Service Ping payload' do
expect { run_rake_task('gitlab:usage_data:generate_and_send') }.to output(/.*201.*/).to_stdout
end
private
def stub_response(url: service_ping_payload_url, body:, status: 201)
stub_full_request(url, method: :post)
.to_return(
headers: { 'Content-Type' => 'application/json' },
body: body.to_json,
status: status
)
end
end
end

View File

@ -54,8 +54,8 @@ RSpec.describe 'profiles/preferences/show' do
end
it 'has helpful homepage setup guidance' do
expect(rendered).to have_field('Homepage content')
expect(rendered).to have_content('Choose what content you want to see on your homepage.')
expect(rendered).to have_field('Dashboard')
expect(rendered).to have_content('Choose what content you want to see by default on your dashboard.')
end
end

View File

@ -14,21 +14,36 @@ RSpec.describe GitlabServicePingWorker, :clean_gitlab_redis_shared_state do
allow(subject).to receive(:sleep)
end
it 'does not run for GitLab.com' do
it 'does not run for GitLab.com when triggered from cron' do
allow(Gitlab).to receive(:com?).and_return(true)
expect(ServicePing::SubmitService).not_to receive(:new)
subject.perform
end
it 'runs for GitLab.com when triggered manually' do
allow(Gitlab).to receive(:com?).and_return(true)
expect(ServicePing::SubmitService).to receive(:new)
subject.perform('triggered_from_cron' => false)
end
it 'delegates to ServicePing::SubmitService' do
expect_next_instance_of(ServicePing::SubmitService, payload: payload) do |service|
expect_next_instance_of(ServicePing::SubmitService, payload: payload, skip_db_write: false) do |service|
expect(service).to receive(:execute)
end
subject.perform
end
it 'passes Hash arguments to ServicePing::SubmitService' do
expect_next_instance_of(ServicePing::SubmitService, payload: payload, skip_db_write: true) do |service|
expect(service).to receive(:execute)
end
subject.perform('skip_db_write' => true)
end
context 'payload computation' do
it 'creates RawUsageData entry when there is NO entry with the same recorded_at timestamp' do
expect { subject.perform }.to change { RawUsageData.count }.by(1)
@ -46,7 +61,7 @@ RSpec.describe GitlabServicePingWorker, :clean_gitlab_redis_shared_state do
allow(::ServicePing::BuildPayload).to receive(:new).and_raise(error)
expect(::Gitlab::ErrorTracking).to receive(:track_and_raise_for_dev_exception).with(error)
expect_next_instance_of(::ServicePing::SubmitService, payload: nil) do |service|
expect_next_instance_of(::ServicePing::SubmitService, payload: nil, skip_db_write: false) do |service|
expect(service).to receive(:execute)
end

View File

@ -8,5 +8,6 @@ testdata/alt-public
/gitlab-zip-metadata
/_build
coverage.html
cover.out
/*.toml
/gitaly.pid

View File

@ -84,8 +84,8 @@ test: prepare-tests
else \
$(MAKE) run-gitaly ; \
fi
@go test -tags "$(BUILD_TAGS)" ./... ;\
status="$$?" ;\
go test ${TEST_OPTIONS} -tags "$(BUILD_TAGS)" ./...
@status="$$?" ;\
if [ -f "$(GITALY_PID_FILE)" ] ; then \
echo "Clean up Gitaly server for workhorse integration test" ;\
kill -9 $$(cat $(GITALY_PID_FILE)) ;\
@ -96,10 +96,21 @@ test: prepare-tests
exit "$$status"
@echo SUCCESS
.PHONY: test-race
test-race: TEST_OPTIONS = -race
test-race: test
.PHONY: test-coverage
test-coverage: TEST_OPTIONS = -cover -coverprofile=cover.out
test-coverage: test
$(call message, "Calculating the coverage")
[ -f cover.out ] && go tool cover -html cover.out -o coverage.html
[ -f cover.out ] && go tool cover -func cover.out
.PHONY: clean
clean: clean-workhorse clean-build
$(call message,$@)
rm -rf testdata/data testdata/scratch
rm -rf testdata/data testdata/scratch cover.out coverage.html
.PHONY: clean-workhorse
clean-workhorse:

View File

@ -20,18 +20,20 @@ type KeyWatcher struct {
subscribers map[string][]chan string
shutdown chan struct{}
reconnectBackoff backoff.Backoff
channelPerKey bool // TODO remove this field https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1902
conn *redis.PubSubConn
}
func NewKeyWatcher() *KeyWatcher {
func NewKeyWatcher(channelPerKey bool) *KeyWatcher {
return &KeyWatcher{
subscribers: make(map[string][]chan string),
shutdown: make(chan struct{}),
shutdown: make(chan struct{}),
reconnectBackoff: backoff.Backoff{
Min: 100 * time.Millisecond,
Max: 60 * time.Second,
Factor: 2,
Jitter: true,
},
channelPerKey: channelPerKey,
}
}
@ -42,6 +44,12 @@ var (
Help: "The number of keys that is being watched by gitlab-workhorse",
},
)
redisSubscriptions = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "gitlab_workhorse_keywatcher_redis_subscriptions",
Help: "Current number of keywatcher Redis pubsub subscriptions",
},
)
totalMessages = promauto.NewCounter(
prometheus.CounterOpts{
Name: "gitlab_workhorse_keywatcher_total_messages",
@ -65,30 +73,66 @@ var (
const (
keySubChannel = "workhorse:notifications"
channelPrefix = keySubChannel + ":"
)
func countAction(action string) { totalActions.WithLabelValues(action).Add(1) }
func (kw *KeyWatcher) receivePubSubStream(conn redis.Conn) error {
defer conn.Close()
psc := redis.PubSubConn{Conn: conn}
if err := psc.Subscribe(keySubChannel); err != nil {
return err
kw.mu.Lock()
// We must share kw.conn with the goroutines that call SUBSCRIBE and
// UNSUBSCRIBE because Redis pubsub subscriptions are tied to the
// connection.
kw.conn = &redis.PubSubConn{Conn: conn}
kw.mu.Unlock()
defer func() {
kw.mu.Lock()
defer kw.mu.Unlock()
kw.conn.Close()
kw.conn = nil
// Reset kw.subscribers because it is tied to Redis server side state of
// kw.conn and we just closed that connection.
for _, chans := range kw.subscribers {
for _, ch := range chans {
close(ch)
keyWatchers.Dec()
}
}
kw.subscribers = nil
}()
if kw.channelPerKey {
// Do not drink from firehose
} else {
// Do drink from firehose
if err := kw.conn.Subscribe(keySubChannel); err != nil {
return err
}
defer kw.conn.Unsubscribe(keySubChannel)
}
defer psc.Unsubscribe(keySubChannel)
for {
switch v := psc.Receive().(type) {
switch v := kw.conn.Receive().(type) {
case redis.Message:
totalMessages.Inc()
dataStr := string(v.Data)
receivedBytes.Add(float64(len(dataStr)))
msg := strings.SplitN(dataStr, "=", 2)
if len(msg) != 2 {
log.WithError(fmt.Errorf("keywatcher: invalid notification: %q", dataStr)).Error()
continue
if strings.HasPrefix(v.Channel, channelPrefix) {
// v is a message on a per-key channel
kw.notifySubscribers(v.Channel[len(channelPrefix):], dataStr)
} else if v.Channel == keySubChannel {
// v is a message on the firehose channel
msg := strings.SplitN(dataStr, "=", 2)
if len(msg) != 2 {
log.WithError(fmt.Errorf("keywatcher: invalid notification: %q", dataStr)).Error()
continue
}
kw.notifySubscribers(msg[0], msg[1])
}
kw.notifySubscribers(msg[0], msg[1])
case redis.Subscription:
redisSubscriptions.Set(float64(v.Count))
case error:
log.WithError(fmt.Errorf("keywatcher: pubsub receive: %v", v)).Error()
// Intermittent error, return nil so that it doesn't wait before reconnect
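The receive loop above implies two publishing conventions: in firehose mode a single channel carries `key=value` payloads, while in per-key mode the key is encoded in the channel name and the payload is the bare value. A hypothetical publisher (connection setup and function name assumed, not taken from this commit) would mirror that:

package example

import (
	"fmt"

	"github.com/gomodule/redigo/redis"
)

const (
	keySubChannel = "workhorse:notifications"
	channelPrefix = keySubChannel + ":"
)

// notifyKeyChange publishes a key change in whichever convention the
// watcher is configured for. In per-key mode the key lives in the
// channel name; in firehose mode it is encoded into the payload.
func notifyKeyChange(conn redis.Conn, channelPerKey bool, key, value string) error {
	if channelPerKey {
		_, err := conn.Do("PUBLISH", channelPrefix+key, value)
		return err
	}
	_, err := conn.Do("PUBLISH", keySubChannel, fmt.Sprintf("%s=%s", key, value))
	return err
}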
@ -156,21 +200,40 @@ func (kw *KeyWatcher) notifySubscribers(key, value string) {
countAction("deliver-message")
for _, c := range chanList {
c <- value
keyWatchers.Dec()
select {
case c <- value:
default:
}
}
delete(kw.subscribers, key)
}
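The `select` with an empty `default` above makes delivery non-blocking: each watcher channel is buffered (`WatchKey` allocates `make(chan string, 1)`), so a full buffer means the watcher already holds a pending value and the shared pubsub loop should not stall on it. A standalone sketch of the pattern:

package main

import "fmt"

// deliver attempts a non-blocking send on a buffered channel, mirroring
// the notifySubscribers loop: drop the value rather than block the
// shared pubsub receive loop behind one slow or abandoned watcher.
func deliver(ch chan string, value string) bool {
	select {
	case ch <- value:
		return true
	default:
		return false // buffer full; the watcher already has a value pending
	}
}

func main() {
	ch := make(chan string, 1)
	fmt.Println(deliver(ch, "a")) // true: buffer empty
	fmt.Println(deliver(ch, "b")) // false: "a" still buffered
}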
func (kw *KeyWatcher) addSubscription(key string, notify chan string) {
func (kw *KeyWatcher) addSubscription(key string, notify chan string) error {
kw.mu.Lock()
defer kw.mu.Unlock()
if kw.conn == nil {
// This can happen because CI long polling is disabled in this Workhorse
// process. It can also happen while we are still waiting for the pubsub
// connection to be established. Either way it is OK to fail fast.
return errors.New("no redis connection")
}
if len(kw.subscribers[key]) == 0 {
countAction("create-subscription")
if kw.channelPerKey {
if err := kw.conn.Subscribe(channelPrefix + key); err != nil {
return err
}
}
}
if kw.subscribers == nil {
kw.subscribers = make(map[string][]chan string)
}
kw.subscribers[key] = append(kw.subscribers[key], notify)
keyWatchers.Inc()
if len(kw.subscribers[key]) == 1 {
countAction("create-subscription")
}
return nil
}
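`addSubscription` now fails fast when no pubsub connection exists and, in per-key mode, issues the Redis SUBSCRIBE only for a key's first watcher; `delSubscription` mirrors this by unsubscribing when the last watcher leaves. A minimal sketch of that reference-counting invariant (not the real implementation):

package main

import (
	"fmt"
	"sync"
)

// refcount tracks watchers per key: acquire reports when a key gains
// its first watcher (time to SUBSCRIBE) and release reports when it
// loses its last one (time to UNSUBSCRIBE).
type refcount struct {
	mu   sync.Mutex
	refs map[string]int
}

func (r *refcount) acquire(key string) (first bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.refs[key]++
	return r.refs[key] == 1
}

func (r *refcount) release(key string) (last bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.refs[key]--
	if r.refs[key] == 0 {
		delete(r.refs, key)
		return true
	}
	return false
}

func main() {
	rc := &refcount{refs: map[string]int{}}
	fmt.Println(rc.acquire("runner:build/123")) // true: first watcher, SUBSCRIBE
	fmt.Println(rc.acquire("runner:build/123")) // false: channel already open
	fmt.Println(rc.release("runner:build/123")) // false: one watcher remains
	fmt.Println(rc.release("runner:build/123")) // true: last watcher, UNSUBSCRIBE
}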
func (kw *KeyWatcher) delSubscription(key string, notify chan string) {
@ -179,6 +242,8 @@ func (kw *KeyWatcher) delSubscription(key string, notify chan string) {
chans, ok := kw.subscribers[key]
if !ok {
// This can happen if the pubsub connection dropped while we were
// waiting.
return
}
@ -192,6 +257,9 @@ func (kw *KeyWatcher) delSubscription(key string, notify chan string) {
if len(kw.subscribers[key]) == 0 {
delete(kw.subscribers, key)
countAction("delete-subscription")
if kw.channelPerKey && kw.conn != nil {
kw.conn.Unsubscribe(channelPrefix + key)
}
}
}
@ -212,7 +280,9 @@ const (
func (kw *KeyWatcher) WatchKey(key, value string, timeout time.Duration) (WatchKeyStatus, error) {
notify := make(chan string, 1)
kw.addSubscription(key, notify)
if err := kw.addSubscription(key, notify); err != nil {
return WatchKeyStatusNoChange, err
}
defer kw.delSubscription(key, notify)
currentValue, err := GetString(key)
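`WatchKey` registers the subscription before reading the current value, so a write that lands between the GET and the pubsub delivery is never lost, and the new error return lets callers bail out when the subscription cannot be created at all. A sketch of that subscribe-then-read ordering, with the dependencies stubbed as parameters (not the actual Workhorse API):

package example

import "time"

// watchOnce subscribes first and only then reads the current value:
// any change racing with the GET is still delivered on the channel.
func watchOnce(
	subscribe func() (updates <-chan string, cancel func(), err error),
	get func() (string, error),
	oldValue string,
	timeout time.Duration,
) (string, error) {
	updates, cancel, err := subscribe()
	if err != nil {
		return oldValue, err // e.g. no Redis connection: fail fast
	}
	defer cancel()

	current, err := get()
	if err != nil {
		return oldValue, err
	}
	if current != oldValue {
		return current, nil // already changed before we started waiting
	}
	select {
	case v := <-updates:
		return v, nil
	case <-time.After(timeout):
		return oldValue, nil // no change within the timeout
	}
}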

View File

@ -1,7 +1,7 @@
package redis
import (
"errors"
"fmt"
"sync"
"testing"
"time"
@ -45,20 +45,37 @@ func (kw *KeyWatcher) countSubscribers(key string) int {
}
// Forces a run of the `Process` loop against a mock PubSubConn.
func (kw *KeyWatcher) processMessages(numWatchers int, value string) {
func (kw *KeyWatcher) processMessages(t *testing.T, numWatchers int, value string, ready chan<- struct{}) {
psc := redigomock.NewConn()
psc.ReceiveWait = true
// Setup the initial subscription message
psc.Command("SUBSCRIBE", keySubChannel).Expect(createSubscribeMessage(keySubChannel))
psc.Command("UNSUBSCRIBE", keySubChannel).Expect(createUnsubscribeMessage(keySubChannel))
psc.AddSubscriptionMessage(createSubscriptionMessage(keySubChannel, runnerKey+"="+value))
// Wait for all the `WatchKey` calls to be registered
for kw.countSubscribers(runnerKey) != numWatchers {
time.Sleep(time.Millisecond)
if kw.channelPerKey {
channel := channelPrefix + runnerKey
psc.Command("SUBSCRIBE", channel).Expect(createSubscribeMessage(channel))
psc.Command("UNSUBSCRIBE", channel).Expect(createUnsubscribeMessage(channel))
psc.AddSubscriptionMessage(createSubscriptionMessage(channel, value))
} else {
psc.Command("SUBSCRIBE", keySubChannel).Expect(createSubscribeMessage(keySubChannel))
psc.Command("UNSUBSCRIBE", keySubChannel).Expect(createUnsubscribeMessage(keySubChannel))
psc.AddSubscriptionMessage(createSubscriptionMessage(keySubChannel, runnerKey+"="+value))
}
kw.receivePubSubStream(psc)
errC := make(chan error)
go func() { errC <- kw.receivePubSubStream(psc) }()
require.Eventually(t, func() bool {
kw.mu.Lock()
defer kw.mu.Unlock()
return kw.conn != nil
}, time.Second, time.Millisecond)
close(ready)
require.Eventually(t, func() bool {
return kw.countSubscribers(runnerKey) == numWatchers
}, time.Second, time.Millisecond)
close(psc.ReceiveNow)
require.NoError(t, <-errC)
}
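The reworked helper replaces the old sleep-poll loop with two explicit synchronization points: a `ready` channel closed once the mock connection is installed, and `require.Eventually` for conditions that become true asynchronously. The same pattern in isolation (names assumed, testify required):

package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// TestReadyThenEventually shows the two synchronization tools used in
// processMessages: close(ready) unblocks waiters exactly once, and
// require.Eventually polls a condition instead of sleeping for a
// fixed, flaky interval.
func TestReadyThenEventually(t *testing.T) {
	ready := make(chan struct{})
	var connected int32

	go func() {
		atomic.StoreInt32(&connected, 1) // stand-in for "kw.conn != nil"
		close(ready)                     // all watchers may proceed
	}()

	<-ready
	require.Eventually(t, func() bool {
		return atomic.LoadInt32(&connected) == 1
	}, time.Second, time.Millisecond)
}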
type keyChangeTestCase struct {
@ -71,20 +88,13 @@ type keyChangeTestCase struct {
timeout time.Duration
}
func TestKeyChangesBubblesUpError(t *testing.T) {
conn, td := setupMockPool()
defer td()
kw := NewKeyWatcher()
defer kw.Shutdown()
conn.Command("GET", runnerKey).ExpectError(errors.New("test error"))
_, err := kw.WatchKey(runnerKey, "something", time.Second)
require.Error(t, err, "Expected error")
func TestKeyChangesInstantReturn(t *testing.T) {
for _, v := range []bool{false, true} {
t.Run(fmt.Sprintf("channelPerKey:%v", v), func(t *testing.T) { testKeyChangesInstantReturn(t, v) })
}
}
func TestKeyChangesInstantReturn(t *testing.T) {
func testKeyChangesInstantReturn(t *testing.T, channelPerKey bool) {
testCases := []keyChangeTestCase{
// WatchKeyStatusAlreadyChanged
{
@ -130,8 +140,9 @@ func TestKeyChangesInstantReturn(t *testing.T) {
conn.Command("GET", runnerKey).Expect(tc.returnValue)
}
kw := NewKeyWatcher()
kw := NewKeyWatcher(channelPerKey)
defer kw.Shutdown()
kw.conn = &redis.PubSubConn{Conn: redigomock.NewConn()}
val, err := kw.WatchKey(runnerKey, tc.watchValue, tc.timeout)
@ -142,6 +153,12 @@ func TestKeyChangesInstantReturn(t *testing.T) {
}
func TestKeyChangesWhenWatching(t *testing.T) {
for _, v := range []bool{false, true} {
t.Run(fmt.Sprintf("channelPerKey:%v", v), func(t *testing.T) { testKeyChangesWhenWatching(t, v) })
}
}
func testKeyChangesWhenWatching(t *testing.T, channelPerKey bool) {
testCases := []keyChangeTestCase{
// WatchKeyStatusSeenChange
{
@ -179,27 +196,35 @@ func TestKeyChangesWhenWatching(t *testing.T) {
conn.Command("GET", runnerKey).Expect(tc.returnValue)
}
kw := NewKeyWatcher()
kw := NewKeyWatcher(channelPerKey)
defer kw.Shutdown()
wg := &sync.WaitGroup{}
wg.Add(1)
ready := make(chan struct{})
go func() {
defer wg.Done()
<-ready
val, err := kw.WatchKey(runnerKey, tc.watchValue, time.Second)
require.NoError(t, err, "Expected no error")
require.Equal(t, tc.expectedStatus, val, "Expected value")
}()
kw.processMessages(1, tc.processedValue)
kw.processMessages(t, 1, tc.processedValue, ready)
wg.Wait()
})
}
}
func TestKeyChangesParallel(t *testing.T) {
for _, v := range []bool{false, true} {
t.Run(fmt.Sprintf("channelPerKey:%v", v), func(t *testing.T) { testKeyChangesParallel(t, v) })
}
}
func testKeyChangesParallel(t *testing.T, channelPerKey bool) {
testCases := []keyChangeTestCase{
{
desc: "massively parallel, sees change with key existing",
@ -236,13 +261,15 @@ func TestKeyChangesParallel(t *testing.T) {
wg := &sync.WaitGroup{}
wg.Add(runTimes)
ready := make(chan struct{})
kw := NewKeyWatcher()
kw := NewKeyWatcher(channelPerKey)
defer kw.Shutdown()
for i := 0; i < runTimes; i++ {
go func() {
defer wg.Done()
<-ready
val, err := kw.WatchKey(runnerKey, tc.watchValue, time.Second)
require.NoError(t, err, "Expected no error")
@ -250,7 +277,7 @@ func TestKeyChangesParallel(t *testing.T) {
}()
}
kw.processMessages(runTimes, tc.processedValue)
kw.processMessages(t, runTimes, tc.processedValue, ready)
wg.Wait()
})
}
@ -260,7 +287,8 @@ func TestShutdown(t *testing.T) {
conn, td := setupMockPool()
defer td()
kw := NewKeyWatcher()
kw := NewKeyWatcher(false)
kw.conn = &redis.PubSubConn{Conn: redigomock.NewConn()}
defer kw.Shutdown()
conn.Command("GET", runnerKey).Expect("something")
@ -269,18 +297,18 @@ func TestShutdown(t *testing.T) {
wg.Add(2)
go func() {
defer wg.Done()
val, err := kw.WatchKey(runnerKey, "something", 10*time.Second)
require.NoError(t, err, "Expected no error")
require.Equal(t, WatchKeyStatusNoChange, val, "Expected value not to change")
wg.Done()
}()
go func() {
defer wg.Done()
require.Eventually(t, func() bool { return kw.countSubscribers(runnerKey) == 1 }, 10*time.Second, time.Millisecond)
kw.Shutdown()
wg.Done()
}()
wg.Wait()
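Each scenario now runs twice via a small wrapper, so failures are attributed to a named subtest per mode. The shape of that wrapper, reduced to its essentials:

package example

import (
	"fmt"
	"testing"
)

// TestBothModes runs the same scenario once per channelPerKey value,
// mirroring the wrappers above; the subtest name makes it obvious
// whether firehose or per-key behaviour failed.
func TestBothModes(t *testing.T) {
	for _, channelPerKey := range []bool{false, true} {
		channelPerKey := channelPerKey // capture loop variable (pre-Go 1.22)
		t.Run(fmt.Sprintf("channelPerKey:%v", channelPerKey), func(t *testing.T) {
			// A real test would construct NewKeyWatcher(channelPerKey)
			// here and drive the scenario under test.
			_ = channelPerKey
		})
	}
}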

View File

@ -220,7 +220,9 @@ func run(boot bootConfig, cfg config.Config) error {
secret.SetPath(boot.secretPath)
keyWatcher := redis.NewKeyWatcher()
keyWatcher := redis.NewKeyWatcher(
os.Getenv("GITLAB_WORKHORSE_REDIS_SUBSCRIBE_MANY") == "1",
)
if cfg.Redis != nil {
redis.Configure(cfg.Redis, redis.DefaultDialFunc)
go keyWatcher.Process()
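The per-key mode ships dark: it is enabled only when `GITLAB_WORKHORSE_REDIS_SUBSCRIBE_MANY` is exactly "1", giving operators a switch while the TODO on `KeyWatcher` tracks removing the flag. The gating idiom in isolation:

package main

import (
	"fmt"
	"os"
)

// envEnabled treats only the literal "1" as on, so unset, empty and
// any other value all fall back to the existing firehose behaviour.
func envEnabled(name string) bool {
	return os.Getenv(name) == "1"
}

func main() {
	fmt.Println(envEnabled("GITLAB_WORKHORSE_REDIS_SUBSCRIBE_MANY"))
}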