Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2022-02-23 03:17:13 +00:00
parent 6adb784bf2
commit a9fa13e4ba
27 changed files with 552 additions and 242 deletions

View File

@@ -1 +1 @@
51cf676fc530f242eb893c7b630ef309de116e35
838d4c8bc4d82ef2ad79437b37f0d8b43394c7f7

View File

@@ -1 +1 @@
1.54.0
1.55.0

View File

@@ -10,22 +10,30 @@ export function createResizeObserver() {
});
}
// watches for change in size of a container element (e.g. for lazy-loaded images)
// and scroll the target element to the top of the content area
// stop watching after any user input. So if user opens sidebar or manually
// scrolls the page we don't hijack their scroll position
/**
* Watches for change in size of a container element (e.g. for lazy-loaded images)
* and scrolls the target note to the top of the content area.
* Stops watching after any user input, so if the user opens the sidebar or manually
* scrolls the page, we don't hijack their scroll position.
*
* @param {Object} options
* @param {string} options.targetId - id of element to scroll to
* @param {string} options.container - Selector of element containing target
*
* @return {ResizeObserver|null} - ResizeObserver instance if target looks like a note DOM ID
*/
export function scrollToTargetOnResize({
target = window.location.hash,
targetId = window.location.hash.slice(1),
container = '#content-body',
} = {}) {
if (!target) return null;
if (!targetId) return null;
const ro = createResizeObserver();
const containerEl = document.querySelector(container);
let interactionListenersAdded = false;
function keepTargetAtTop() {
const anchorEl = document.querySelector(target);
const anchorEl = document.getElementById(targetId);
if (!anchorEl) return;

View File

@@ -1,5 +1,5 @@
import $ from 'jquery';
import createFlash from '~/flash';
import createFlash, { FLASH_TYPES } from '~/flash';
import axios from '~/lib/utils/axios_utils';
import { parseBoolean } from '~/lib/utils/common_utils';
import { Rails } from '~/lib/utils/rails_ujs';
@@ -86,7 +86,7 @@ export default class Profile {
createFlash({
message: data.message,
type: 'notice',
type: data.status === 'error' ? FLASH_TYPES.ALERT : FLASH_TYPES.NOTICE,
});
})
.then(() => {

View File

@@ -464,35 +464,52 @@ module Issuable
false
end
def hook_association_changes(old_associations)
changes = {}
old_labels = old_associations.fetch(:labels, labels)
old_assignees = old_associations.fetch(:assignees, assignees)
old_severity = old_associations.fetch(:severity, severity)
if old_labels != labels
changes[:labels] = [old_labels.map(&:hook_attrs), labels.map(&:hook_attrs)]
end
if old_assignees != assignees
changes[:assignees] = [old_assignees.map(&:hook_attrs), assignees.map(&:hook_attrs)]
end
if supports_severity? && old_severity != severity
changes[:severity] = [old_severity, severity]
end
if supports_escalation? && escalation_status
current_escalation_status = escalation_status.status_name
old_escalation_status = old_associations.fetch(:escalation_status, current_escalation_status)
if old_escalation_status != current_escalation_status
changes[:escalation_status] = [old_escalation_status, current_escalation_status]
end
end
if self.respond_to?(:total_time_spent)
old_total_time_spent = old_associations.fetch(:total_time_spent, total_time_spent)
old_time_change = old_associations.fetch(:time_change, time_change)
if old_total_time_spent != total_time_spent
changes[:total_time_spent] = [old_total_time_spent, total_time_spent]
changes[:time_change] = [old_time_change, time_change]
end
end
changes
end
def to_hook_data(user, old_associations: {})
changes = previous_changes
if old_associations
old_labels = old_associations.fetch(:labels, labels)
old_assignees = old_associations.fetch(:assignees, assignees)
old_severity = old_associations.fetch(:severity, severity)
if old_labels != labels
changes[:labels] = [old_labels.map(&:hook_attrs), labels.map(&:hook_attrs)]
end
if old_assignees != assignees
changes[:assignees] = [old_assignees.map(&:hook_attrs), assignees.map(&:hook_attrs)]
end
if supports_severity? && old_severity != severity
changes[:severity] = [old_severity, severity]
end
if self.respond_to?(:total_time_spent)
old_total_time_spent = old_associations.fetch(:total_time_spent, total_time_spent)
old_time_change = old_associations.fetch(:time_change, time_change)
if old_total_time_spent != total_time_spent
changes[:total_time_spent] = [old_total_time_spent, total_time_spent]
changes[:time_change] = [old_time_change, time_change]
end
end
if old_associations.present?
changes.merge!(hook_association_changes(old_associations))
end
Gitlab::HookData::IssuableBuilder.new(self).build(user: user, changes: changes)
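
The refactor above moves all association diffing out of `to_hook_data` and into `hook_association_changes`: each key present in `old_associations` is compared against the current value, and only genuine differences are merged into the webhook `changes` payload. A minimal sketch of that fetch-and-compare contract, using plain Ruby hashes instead of the real ActiveRecord associations (the method name and hash shapes below are illustrative, not part of this commit):

```ruby
# Illustrative sketch only: mirrors the fetch-and-compare pattern of
# hook_association_changes without ActiveRecord models.
def association_changes(old_associations, current)
  changes = {}

  # Keys missing from old_associations fall back to the current value,
  # so an untouched association never produces a change entry.
  old_labels = old_associations.fetch(:labels, current[:labels])
  changes[:labels] = [old_labels, current[:labels]] if old_labels != current[:labels]

  old_status = old_associations.fetch(:escalation_status, current[:escalation_status])
  if old_status != current[:escalation_status]
    changes[:escalation_status] = [old_status, current[:escalation_status]]
  end

  changes
end

association_changes(
  { escalation_status: :triggered },
  { labels: ['bug'], escalation_status: :acknowledged }
)
# => { escalation_status: [:triggered, :acknowledged] }
```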

View File

@@ -160,7 +160,7 @@ class IssuableBaseService < ::BaseProjectService
params.delete(:escalation_status)
).execute
return unless result.success? && result.payload.present?
return unless result.success? && result[:escalation_status].present?
@escalation_status_change_reason = result[:escalation_status].delete(:status_change_reason)
@@ -486,7 +486,10 @@ class IssuableBaseService < ::BaseProjectService
associations[:description] = issuable.description
associations[:reviewers] = issuable.reviewers.to_a if issuable.allows_reviewers?
associations[:severity] = issuable.severity if issuable.supports_severity?
associations[:escalation_status] = issuable.escalation_status&.slice(:status, :policy_id) if issuable.supports_escalation?
if issuable.supports_escalation? && issuable.escalation_status
associations[:escalation_status] = issuable.escalation_status.status_name
end
associations
end

View File

@@ -51,7 +51,6 @@ module Issues
old_mentioned_users = old_associations.fetch(:mentioned_users, [])
old_assignees = old_associations.fetch(:assignees, [])
old_severity = old_associations[:severity]
old_escalation_status = old_associations[:escalation_status]
if has_changes?(issue, old_labels: old_labels, old_assignees: old_assignees)
todo_service.resolve_todos_for_target(issue, current_user)
@@ -68,7 +67,7 @@ module Issues
handle_milestone_change(issue)
handle_added_mentions(issue, old_mentioned_users)
handle_severity_change(issue, old_severity)
handle_escalation_status_change(issue, old_escalation_status)
handle_escalation_status_change(issue)
handle_issue_type_change(issue)
end
@@ -196,9 +195,8 @@ module Issues
::IncidentManagement::AddSeveritySystemNoteWorker.perform_async(issue.id, current_user.id)
end
def handle_escalation_status_change(issue, old_escalation_status)
return unless old_escalation_status.present?
return if issue.escalation_status&.slice(:status, :policy_id) == old_escalation_status
def handle_escalation_status_change(issue)
return unless issue.supports_escalation? && issue.escalation_status
::IncidentManagement::IssuableEscalationStatuses::AfterUpdateService.new(
issue,

View File

@@ -79,7 +79,7 @@ module QuickActions
# using a user-style reference, which is not in scope here.
args = params.split(/\s|,/).select(&:present?).uniq - ['and']
usernames = (args - ['me']).map { _1.delete_prefix('@') }
found = User.by_username(usernames).to_a.select { can?(:read_user_profile, _1) }
found = User.by_username(usernames).to_a.select { can?(:read_user, _1) }
found_names = found.map(&:username).to_set
missing = args.reject { |arg| arg == 'me' || found_names.include?(arg.delete_prefix('@')) }.map { "'#{_1}'" }

View File

@@ -2,6 +2,12 @@
module WebHooks
class LogExecutionService
include ::Gitlab::ExclusiveLeaseHelpers
LOCK_TTL = 15.seconds.freeze
LOCK_SLEEP = 0.25.seconds.freeze
LOCK_RETRY = 65
attr_reader :hook, :log_data, :response_category
def initialize(hook:, log_data:, response_category:)
@@ -11,7 +17,7 @@ module WebHooks
end
def execute
update_hook_executability
update_hook_failure_state
log_execution
end
@@ -21,15 +27,25 @@ module WebHooks
WebHookLog.create!(web_hook: hook, **log_data.transform_keys(&:to_sym))
end
def update_hook_executability
case response_category
when :ok
hook.enable!
when :error
hook.backoff!
when :failed
hook.failed!
# Perform this operation within a `Gitlab::ExclusiveLease` lock to make it
# safe to be called concurrently from different workers.
def update_hook_failure_state
in_lock(lock_name, ttl: LOCK_TTL, sleep_sec: LOCK_SLEEP, retries: LOCK_RETRY) do |retried|
hook.reset # Reload within the lock so properties are guaranteed to be current.
case response_category
when :ok
hook.enable!
when :error
hook.backoff!
when :failed
hook.failed!
end
end
end
def lock_name
"web_hooks:update_hook_failure_state:#{hook.id}"
end
end
end
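
For readers unfamiliar with `Gitlab::ExclusiveLeaseHelpers#in_lock` as used above: it takes a lock name plus `ttl:`, `sleep_sec:`, and `retries:` options, blocks until the Redis-backed lease is acquired (or raises after too many attempts), and yields whether acquisition had to be retried. Below is a rough, self-contained approximation using an in-process mutex; everything in it is illustrative, and unlike the real helper it is not distributed and does not enforce the TTL:

```ruby
# Rough, in-process stand-in for the in_lock helper used above.
# The real implementation is backed by Gitlab::ExclusiveLease (Redis);
# this sketch only models acquire / retry / yield / release.
LOCKS = Hash.new { |hash, key| hash[key] = Mutex.new }

def in_lock(name, ttl:, sleep_sec:, retries:)
  mutex = LOCKS[name] # ttl is accepted for signature parity but not enforced here
  attempts = 0

  until mutex.try_lock
    attempts += 1
    raise "Failed to obtain a lock: #{name}" if attempts > retries

    sleep(sleep_sec)
  end

  begin
    yield(attempts > 0) # the block receives whether acquisition was retried
  ensure
    mutex.unlock
  end
end
```

The `hook.reset` call in the real block follows the same reasoning as its inline comment: the lock only serializes writers, so reloading inside it ensures each worker applies its state transition to the freshest record.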

View File

@@ -15,7 +15,7 @@ There are two tokens to take into account when connecting a runner with GitLab.
| Token | Description |
| ----- | ----------- |
| Registration token | Token used to [register the runner](https://docs.gitlab.com/runner/register/). It can be [obtained through GitLab](../ci/runners/index.md). |
| Authentication token | Token used to authenticate the runner with the GitLab instance. It is obtained automatically when you [register a runner](https://docs.gitlab.com/runner/register/) or by the Runner API when you manually [register a runner](#register-a-new-runner) or [reset the authentication token](#reset-runners-authentication-token). |
| Authentication token | Token used to authenticate the runner with the GitLab instance. It is obtained automatically when you [register a runner](https://docs.gitlab.com/runner/register/) or by the Runner API when you manually [register a runner](#register-a-new-runner) or [reset the authentication token](#reset-runners-authentication-token-by-using-the-runner-id). |
Here's an example of how the two tokens are used in runner registration:
@@ -799,9 +799,9 @@ curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" \
"https://gitlab.example.com/api/v4/groups/9/runners/reset_registration_token"
```
## Reset runner's authentication token
## Reset runner's authentication token by using the runner ID
Resets the runner's authentication token.
Resets the runner's authentication token by using its runner ID.
```plaintext
POST /runners/:id/reset_authentication_token
@@ -824,3 +824,29 @@ Example response:
"token_expires_at": "2021-09-27T21:05:03.203Z"
}
```
## Reset runner's authentication token by using the current token
Resets the runner's authentication token by using the current token's value as an input.
```plaintext
POST /runners/reset_authentication_token
```
| Attribute | Type | Required | Description |
|-----------|---------|----------|---------------------------------|
| `token` | string | yes | The current token of the runner |
```shell
curl --request POST --form "token=<current token>" \
"https://gitlab.example.com/api/v4/runners/reset_authentication_token"
```
Example response:
```json
{
"token": "6337ff461c94fd3fa32ba3b1ff4125",
"token_expires_at": "2021-09-27T21:05:03.203Z"
}
```

View File

@@ -71,6 +71,19 @@ module API
status 200
body "200"
end
desc 'Reset runner authentication token with current token' do
success Entities::Ci::ResetTokenResult
end
params do
requires :token, type: String, desc: 'The current authentication token of the runner'
end
post '/reset_authentication_token', feature_category: :runner do
authenticate_runner!
current_runner.reset_token!
present current_runner.token_with_expiration, with: Entities::Ci::ResetTokenResult
end
end
resource :jobs do

View File

@@ -26,7 +26,7 @@ module Gitlab
end
def safe_keys
issuable_builder.safe_hook_attributes + issuable_builder::SAFE_HOOK_RELATIONS
issuable_builder.safe_hook_attributes + issuable_builder.safe_hook_relations
end
private

View File

@@ -3,13 +3,16 @@
module Gitlab
module HookData
class IssueBuilder < BaseBuilder
SAFE_HOOK_RELATIONS = %i[
assignees
labels
total_time_spent
time_change
severity
].freeze
def self.safe_hook_relations
%i[
assignees
labels
total_time_spent
time_change
severity
escalation_status
].freeze
end
def self.safe_hook_attributes
%i[
@@ -56,6 +59,10 @@ module Gitlab
severity: issue.severity
}
if issue.supports_escalation? && issue.escalation_status
attrs[:escalation_status] = issue.escalation_status.status_name
end
issue.attributes.with_indifferent_access.slice(*self.class.safe_hook_attributes)
.merge!(attrs)
end

View File

@@ -34,12 +34,14 @@ module Gitlab
].freeze
end
SAFE_HOOK_RELATIONS = %i[
assignees
labels
total_time_spent
time_change
].freeze
def self.safe_hook_relations
%i[
assignees
labels
total_time_spent
time_change
].freeze
end
alias_method :merge_request, :object

View File

@@ -152,6 +152,12 @@ namespace :gitlab do
Rake::Task['gitlab:db:create_dynamic_partitions'].invoke
end
ActiveRecord::Tasks::DatabaseTasks.for_each(databases) do |name|
Rake::Task["db:migrate:#{name}"].enhance do
Rake::Task['gitlab:db:create_dynamic_partitions'].invoke
end
end
# When we load the database schema from db/structure.sql
# we don't have any dynamic partitions created. We don't really need to
# because application initializers/sidekiq take care of that, too.
@@ -160,14 +166,14 @@ namespace :gitlab do
#
# Other than that it's helpful to create partitions early when bootstrapping
# a new installation.
#
# Rails 6.1 deprecates db:structure:load in favor of db:schema:load
Rake::Task['db:structure:load'].enhance do
Rake::Task['db:schema:load'].enhance do
Rake::Task['gitlab:db:create_dynamic_partitions'].invoke
end
Rake::Task['db:schema:load'].enhance do
Rake::Task['gitlab:db:create_dynamic_partitions'].invoke
ActiveRecord::Tasks::DatabaseTasks.for_each(databases) do |name|
Rake::Task["db:schema:load:#{name}"].enhance do
Rake::Task['gitlab:db:create_dynamic_partitions'].invoke
end
end
desc "Clear all connections"
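
Mechanically, the changes above rely on `Rake::Task#enhance`: passing a block appends an action that runs after the named task finishes, which is how `gitlab:db:create_dynamic_partitions` gets chained onto each `db:migrate:<name>` and `db:schema:load:<name>` task. A tiny standalone illustration with made-up task names (not GitLab's):

```ruby
# Standalone illustration of Rake::Task#enhance with hypothetical task names.
require 'rake'
include Rake::DSL

task :load_schema do
  puts 'schema loaded'
end

task :create_partitions do
  puts 'partitions created'
end

# After enhancing, invoking :load_schema also runs the block afterwards.
Rake::Task[:load_schema].enhance do
  Rake::Task[:create_partitions].invoke
end

Rake::Task[:load_schema].invoke
# => schema loaded
# => partitions created
```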

View File

@@ -39399,6 +39399,9 @@ msgstr ""
msgid "UsageQuota|Storage type"
msgstr ""
msgid "UsageQuota|Storage used"
msgstr ""
msgid "UsageQuota|This is the total amount of storage used across your projects within this namespace."
msgstr ""

View File

@@ -19,6 +19,10 @@ FactoryBot.define do
public_email { email }
end
trait :private_profile do
private_profile { true }
end
trait :blocked do
after(:build) { |user, _| user.block! }
end

View File

@@ -19,16 +19,11 @@ describe('ResizeObserver Utility', () => {
jest.spyOn(document.documentElement, 'scrollTo');
setFixtures(`<div id="content-body"><div class="target">element to scroll to</div></div>`);
setFixtures(`<div id="content-body"><div id="note_1234">note to scroll to</div></div>`);
const target = document.querySelector('.target');
const target = document.querySelector('#note_1234');
jest.spyOn(target, 'getBoundingClientRect').mockReturnValue({ top: 200 });
observer = scrollToTargetOnResize({
target: '.target',
container: '#content-body',
});
});
afterEach(() => {
@@ -38,21 +33,22 @@ describe('ResizeObserver Utility', () => {
describe('Observer behavior', () => {
it('returns null for empty target', () => {
observer = scrollToTargetOnResize({
target: '',
targetId: '',
container: '#content-body',
});
expect(observer).toBe(null);
});
it('returns ResizeObserver instance', () => {
expect(observer).toBeInstanceOf(ResizeObserver);
});
it('does not scroll if target does not exist', () => {
observer = scrollToTargetOnResize({
targetId: 'some_imaginary_id',
container: '#content-body',
});
it('scrolls body so anchor is just below sticky header (contentTop)', () => {
triggerResize();
expect(document.documentElement.scrollTo).toHaveBeenCalledWith({ top: 110 });
expect(document.documentElement.scrollTo).not.toHaveBeenCalled();
});
const interactionEvents = ['mousedown', 'touchstart', 'keydown', 'wheel'];
@@ -64,5 +60,24 @@ describe('ResizeObserver Utility', () => {
expect(document.documentElement.scrollTo).not.toHaveBeenCalledWith();
});
describe('with existing target', () => {
beforeEach(() => {
observer = scrollToTargetOnResize({
targetId: 'note_1234',
container: '#content-body',
});
});
it('returns ResizeObserver instance', () => {
expect(observer).toBeInstanceOf(ResizeObserver);
});
it('scrolls body so anchor is just below sticky header (contentTop)', () => {
triggerResize();
expect(document.documentElement.scrollTo).toHaveBeenCalledWith({ top: 110 });
});
});
});
});

View File

@@ -63,5 +63,13 @@ RSpec.describe Gitlab::HookData::IssueBuilder do
.to eq("test![Issue_Image](#{Settings.gitlab.url}/#{expected_path})")
end
end
context 'for incident' do
let_it_be(:issue) { create(:incident, :with_escalation_status) }
it 'includes additional attr' do
expect(data).to include(:escalation_status)
end
end
end
end

View File

@@ -572,6 +572,27 @@ RSpec.describe Issuable do
issue.to_hook_data(user, old_associations: { severity: 'unknown' })
end
end
context 'escalation status is updated' do
let(:issue) { create(:incident, :with_escalation_status) }
let(:acknowledged) { IncidentManagement::IssuableEscalationStatus::STATUSES[:acknowledged] }
before do
issue.escalation_status.update!(status: acknowledged)
expect(Gitlab::HookData::IssuableBuilder).to receive(:new).with(issue).and_return(builder)
end
it 'delegates to Gitlab::HookData::IssuableBuilder#build' do
expect(builder).to receive(:build).with(
user: user,
changes: hash_including(
'escalation_status' => %i(triggered acknowledged)
))
issue.to_hook_data(user, old_associations: { escalation_status: :triggered })
end
end
end
describe '#labels_array' do

View File

@@ -3,7 +3,7 @@
require 'spec_helper'
RSpec.describe IncidentManagement::IssuableEscalationStatus do
let_it_be(:issue) { create(:issue) }
let_it_be(:issue) { create(:incident) }
subject(:escalation_status) { build(:incident_management_issuable_escalation_status, issue: issue) }

View File

@@ -0,0 +1,65 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
include StubGitlabCalls
include RedisHelpers
include WorkhorseHelpers
before do
stub_feature_flags(ci_enable_live_trace: true)
stub_feature_flags(runner_registration_control: false)
stub_gitlab_calls
stub_application_setting(valid_runner_registrars: ApplicationSetting::VALID_RUNNER_REGISTRAR_TYPES)
end
let_it_be(:group_settings) { create(:namespace_settings, runner_token_expiration_interval: 5.days.to_i) }
let_it_be(:group) { create(:group, namespace_settings: group_settings) }
let_it_be(:instance_runner, reload: true) { create(:ci_runner, :instance) }
let_it_be(:group_runner) { create(:ci_runner, :group, groups: [group], token_expires_at: 1.day.from_now) }
describe 'POST /runners/reset_authentication_token', :freeze_time do
context 'current token provided' do
it "resets authentication token when token doesn't have an expiration" do
expect do
post api("/runners/reset_authentication_token"), params: { token: instance_runner.reload.token }
expect(response).to have_gitlab_http_status(:success)
expect(json_response).to eq({ 'token' => instance_runner.reload.token, 'token_expires_at' => nil })
expect(instance_runner.reload.token_expires_at).to be_nil
end.to change { instance_runner.reload.token }
end
it 'resets authentication token when token is not expired' do
expect do
post api("/runners/reset_authentication_token"), params: { token: group_runner.reload.token }
expect(response).to have_gitlab_http_status(:success)
expect(json_response).to eq({ 'token' => group_runner.reload.token, 'token_expires_at' => group_runner.reload.token_expires_at.iso8601(3) })
expect(group_runner.reload.token_expires_at).to eq(5.days.from_now)
end.to change { group_runner.reload.token }
end
it 'does not reset authentication token when token is expired' do
expect do
instance_runner.update!(token_expires_at: 1.day.ago)
post api("/runners/reset_authentication_token"), params: { token: instance_runner.reload.token }
expect(response).to have_gitlab_http_status(:forbidden)
instance_runner.update!(token_expires_at: nil)
end.not_to change { instance_runner.reload.token }
end
end
context 'wrong current token provided' do
it 'does not reset authentication token' do
expect do
post api("/runners/reset_authentication_token"), params: { token: 'garbage' }
expect(response).to have_gitlab_http_status(:forbidden)
end.not_to change { instance_runner.reload.token }
end
end
end
end

View File

@@ -1157,6 +1157,13 @@ RSpec.describe Issues::UpdateService, :mailer do
expect(issue.escalation_status.status_name).to eq(expected_status)
end
it 'triggers webhooks' do
expect(project).to receive(:execute_hooks).with(an_instance_of(Hash), :issue_hooks)
expect(project).to receive(:execute_integrations).with(an_instance_of(Hash), :issue_hooks)
update_issue(opts)
end
end
shared_examples 'does not change the status record' do
@@ -1169,7 +1176,8 @@ RSpec.describe Issues::UpdateService, :mailer do
end
it 'does not trigger side-effects' do
expect(escalation_update_class).not_to receive(:new)
expect(project).not_to receive(:execute_hooks)
expect(project).not_to receive(:execute_integrations)
update_issue(opts)
end

View File

@@ -682,6 +682,20 @@ RSpec.describe QuickActions::InterpretService do
expect(message).to eq("Assigned #{developer.to_reference}.")
end
context 'when the user has a private profile' do
let(:user) { create(:user, :private_profile) }
let(:content) { "/assign #{user.to_reference}" }
it 'assigns to the user' do
issuable.project.add_developer(user)
_, updates, message = service.execute(content, issuable)
expect(updates).to eq(assignee_ids: [user.id])
expect(message).to eq("Assigned #{user.to_reference}.")
end
end
end
shared_examples 'assign_reviewer command' do
@@ -971,24 +985,6 @@ RSpec.describe QuickActions::InterpretService do
it_behaves_like 'assign_reviewer command'
end
context 'with a private user' do
let(:ref) { create(:user, :unconfirmed).to_reference }
let(:content) { "/assign_reviewer #{ref}" }
it_behaves_like 'failed command', 'a parse error' do
let(:match_msg) { eq "Could not apply assign_reviewer command. Failed to find users for '#{ref}'." }
end
end
context 'with a private user, bare username' do
let(:ref) { create(:user, :unconfirmed).username }
let(:content) { "/assign_reviewer #{ref}" }
it_behaves_like 'failed command', 'a parse error' do
let(:match_msg) { eq "Could not apply assign_reviewer command. Failed to find users for '#{ref}'." }
end
end
context 'with @all' do
let(:content) { "/assign_reviewer @all" }

View File

@@ -14,10 +14,6 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state
let(:service_instance) { described_class.new(project_hook, data, :push_hooks) }
around do |example|
travel_to(Time.current) { example.run }
end
describe '#initialize' do
before do
stub_application_setting(setting_name => setting)
@@ -257,14 +253,6 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state
end
context 'execution logging' do
let(:hook_log) { project_hook.web_hook_logs.last }
def run_service
service_instance.execute
::WebHooks::LogExecutionWorker.drain
project_hook.reload
end
context 'with success' do
before do
stub_full_request(project_hook.url, method: :post).to_return(status: 200, body: 'Success')
@@ -280,43 +268,39 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state
.with(hook: project_hook, log_data: Hash, response_category: :ok)
.and_return(double(execute: nil))
run_service
service_instance.execute
end
end
it 'log successful execution' do
run_service
it 'queues LogExecutionWorker correctly' do
expect(WebHooks::LogExecutionWorker).to receive(:perform_async)
.with(
project_hook.id,
hash_including(
trigger: 'push_hooks',
url: project_hook.url,
request_headers: headers,
request_data: data,
response_body: 'Success',
response_headers: {},
response_status: 200,
execution_duration: be > 0,
internal_error_message: nil
),
:ok,
nil
)
expect(hook_log.trigger).to eq('push_hooks')
expect(hook_log.url).to eq(project_hook.url)
expect(hook_log.request_headers).to eq(headers)
expect(hook_log.response_body).to eq('Success')
expect(hook_log.response_status).to eq('200')
expect(hook_log.execution_duration).to be > 0
expect(hook_log.internal_error_message).to be_nil
service_instance.execute
end
it 'queues LogExecutionWorker correctly, resulting in a log record (integration-style test)', :sidekiq_inline do
expect { service_instance.execute }.to change(::WebHookLog, :count).by(1)
end
it 'does not log in the service itself' do
expect { service_instance.execute }.not_to change(::WebHookLog, :count)
end
it 'does not increment the failure count' do
expect { run_service }.not_to change(project_hook, :recent_failures)
end
it 'does not change the disabled_until attribute' do
expect { run_service }.not_to change(project_hook, :disabled_until)
end
context 'when the hook had previously failed' do
before do
project_hook.update!(recent_failures: 2)
end
it 'resets the failure count' do
expect { run_service }.to change(project_hook, :recent_failures).to(0)
end
end
end
context 'with bad request' do
@@ -324,45 +308,26 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state
stub_full_request(project_hook.url, method: :post).to_return(status: 400, body: 'Bad request')
end
it 'logs failed execution' do
run_service
it 'queues LogExecutionWorker correctly' do
expect(WebHooks::LogExecutionWorker).to receive(:perform_async)
.with(
project_hook.id,
hash_including(
trigger: 'push_hooks',
url: project_hook.url,
request_headers: headers,
request_data: data,
response_body: 'Bad request',
response_headers: {},
response_status: 400,
execution_duration: be > 0,
internal_error_message: nil
),
:failed,
nil
)
expect(hook_log).to have_attributes(
trigger: eq('push_hooks'),
url: eq(project_hook.url),
request_headers: eq(headers),
response_body: eq('Bad request'),
response_status: eq('400'),
execution_duration: be > 0,
internal_error_message: be_nil
)
end
it 'increments the failure count' do
expect { run_service }.to change(project_hook, :recent_failures).by(1)
end
it 'does not change the disabled_until attribute' do
expect { run_service }.not_to change(project_hook, :disabled_until)
end
it 'does not allow the failure count to overflow' do
project_hook.update!(recent_failures: 32767)
expect { run_service }.not_to change(project_hook, :recent_failures)
end
context 'when the web_hooks_disable_failed FF is disabled' do
before do
# Hook will only be executed if the flag is disabled.
stub_feature_flags(web_hooks_disable_failed: false)
end
it 'does not allow the failure count to overflow' do
project_hook.update!(recent_failures: 32767)
expect { run_service }.not_to change(project_hook, :recent_failures)
end
service_instance.execute
end
end
@@ -371,65 +336,54 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state
stub_full_request(project_hook.url, method: :post).to_raise(SocketError.new('Some HTTP Post error'))
end
it 'log failed execution' do
run_service
it 'queues LogExecutionWorker correctly' do
expect(WebHooks::LogExecutionWorker).to receive(:perform_async)
.with(
project_hook.id,
hash_including(
trigger: 'push_hooks',
url: project_hook.url,
request_headers: headers,
request_data: data,
response_body: '',
response_headers: {},
response_status: 'internal error',
execution_duration: be > 0,
internal_error_message: 'Some HTTP Post error'
),
:error,
nil
)
expect(hook_log.trigger).to eq('push_hooks')
expect(hook_log.url).to eq(project_hook.url)
expect(hook_log.request_headers).to eq(headers)
expect(hook_log.response_body).to eq('')
expect(hook_log.response_status).to eq('internal error')
expect(hook_log.execution_duration).to be > 0
expect(hook_log.internal_error_message).to eq('Some HTTP Post error')
end
it 'does not increment the failure count' do
expect { run_service }.not_to change(project_hook, :recent_failures)
end
it 'backs off' do
expect { run_service }.to change(project_hook, :disabled_until)
end
it 'increases the backoff count' do
expect { run_service }.to change(project_hook, :backoff_count).by(1)
end
context 'when the previous cool-off was near the maximum' do
before do
project_hook.update!(disabled_until: 5.minutes.ago, backoff_count: 8)
end
it 'sets the disabled_until attribute' do
expect { run_service }.to change(project_hook, :disabled_until).to(1.day.from_now)
end
end
context 'when we have backed-off many many times' do
before do
project_hook.update!(disabled_until: 5.minutes.ago, backoff_count: 365)
end
it 'sets the disabled_until attribute' do
expect { run_service }.to change(project_hook, :disabled_until).to(1.day.from_now)
end
service_instance.execute
end
end
context 'with unsafe response body' do
before do
stub_full_request(project_hook.url, method: :post).to_return(status: 200, body: "\xBB")
run_service
end
it 'log successful execution' do
expect(hook_log.trigger).to eq('push_hooks')
expect(hook_log.url).to eq(project_hook.url)
expect(hook_log.request_headers).to eq(headers)
expect(hook_log.response_body).to eq('')
expect(hook_log.response_status).to eq('200')
expect(hook_log.execution_duration).to be > 0
expect(hook_log.internal_error_message).to be_nil
it 'queues LogExecutionWorker with sanitized response_body' do
expect(WebHooks::LogExecutionWorker).to receive(:perform_async)
.with(
project_hook.id,
hash_including(
trigger: 'push_hooks',
url: project_hook.url,
request_headers: headers,
request_data: data,
response_body: '',
response_headers: {},
response_status: 200,
execution_duration: be > 0,
internal_error_message: nil
),
:ok,
nil
)
service_instance.execute
end
end
end

View File

@@ -0,0 +1,136 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe WebHooks::LogExecutionService do
include ExclusiveLeaseHelpers
describe '#execute' do
around do |example|
travel_to(Time.current) { example.run }
end
let_it_be_with_reload(:project_hook) { create(:project_hook) }
let(:response_category) { :ok }
let(:data) do
{
trigger: 'trigger_name',
url: 'https://example.com',
request_headers: { 'Header' => 'header value' },
request_data: { 'Request Data' => 'request data value' },
response_body: 'Response body',
response_status: '200',
execution_duration: 1.2,
internal_error_message: 'error message'
}
end
subject(:service) { described_class.new(hook: project_hook, log_data: data, response_category: response_category) }
it 'logs the data' do
expect { service.execute }.to change(::WebHookLog, :count).by(1)
expect(WebHookLog.recent.first).to have_attributes(data)
end
it 'updates failure state using a lease that ensures fresh state is written' do
service = described_class.new(hook: project_hook, log_data: data, response_category: :error)
WebHook.find(project_hook.id).update!(backoff_count: 1)
lease_key = "web_hooks:update_hook_failure_state:#{project_hook.id}"
lease = stub_exclusive_lease(lease_key, timeout: described_class::LOCK_TTL)
expect(lease).to receive(:try_obtain)
expect(lease).to receive(:cancel)
expect { service.execute }.to change { WebHook.find(project_hook.id).backoff_count }.to(2)
end
context 'when response_category is :ok' do
it 'does not increment the failure count' do
expect { service.execute }.not_to change(project_hook, :recent_failures)
end
it 'does not change the disabled_until attribute' do
expect { service.execute }.not_to change(project_hook, :disabled_until)
end
context 'when the hook had previously failed' do
before do
project_hook.update!(recent_failures: 2)
end
it 'resets the failure count' do
expect { service.execute }.to change(project_hook, :recent_failures).to(0)
end
end
end
context 'when response_category is :failed' do
let(:response_category) { :failed }
it 'increments the failure count' do
expect { service.execute }.to change(project_hook, :recent_failures).by(1)
end
it 'does not change the disabled_until attribute' do
expect { service.execute }.not_to change(project_hook, :disabled_until)
end
it 'does not allow the failure count to overflow' do
project_hook.update!(recent_failures: 32767)
expect { service.execute }.not_to change(project_hook, :recent_failures)
end
context 'when the web_hooks_disable_failed FF is disabled' do
before do
# Hook will only be executed if the flag is disabled.
stub_feature_flags(web_hooks_disable_failed: false)
end
it 'does not allow the failure count to overflow' do
project_hook.update!(recent_failures: 32767)
expect { service.execute }.not_to change(project_hook, :recent_failures)
end
end
end
context 'when response_category is :error' do
let(:response_category) { :error }
it 'does not increment the failure count' do
expect { service.execute }.not_to change(project_hook, :recent_failures)
end
it 'backs off' do
expect { service.execute }.to change(project_hook, :disabled_until)
end
it 'increases the backoff count' do
expect { service.execute }.to change(project_hook, :backoff_count).by(1)
end
context 'when the previous cool-off was near the maximum' do
before do
project_hook.update!(disabled_until: 5.minutes.ago, backoff_count: 8)
end
it 'sets the disabled_until attribute' do
expect { service.execute }.to change(project_hook, :disabled_until).to(1.day.from_now)
end
end
context 'when we have backed-off many many times' do
before do
project_hook.update!(disabled_until: 5.minutes.ago, backoff_count: 365)
end
it 'sets the disabled_until attribute' do
expect { service.execute }.to change(project_hook, :disabled_until).to(1.day.from_now)
end
end
end
end
end

View File

@@ -430,13 +430,11 @@ RSpec.describe 'gitlab:db namespace rake task', :silence_stdout do
context 'with multiple databases', :reestablished_active_record_base do
before do
allow(ActiveRecord::Tasks::DatabaseTasks).to receive(:setup_initial_database_yaml).and_return([:main, :geo])
skip_if_multiple_databases_not_setup
end
describe 'db:structure:dump' do
it 'invokes gitlab:db:clean_structure_sql' do
skip unless Gitlab.ee?
expect(Rake::Task['gitlab:db:clean_structure_sql']).to receive(:invoke).twice.and_return(true)
expect { run_rake_task('db:structure:dump:main') }.not_to raise_error
@@ -445,13 +443,19 @@ RSpec.describe 'gitlab:db namespace rake task', :silence_stdout do
describe 'db:schema:dump' do
it 'invokes gitlab:db:clean_structure_sql' do
skip unless Gitlab.ee?
expect(Rake::Task['gitlab:db:clean_structure_sql']).to receive(:invoke).once.and_return(true)
expect { run_rake_task('db:schema:dump:main') }.not_to raise_error
end
end
describe 'db:migrate' do
it 'invokes gitlab:db:create_dynamic_partitions' do
expect(Rake::Task['gitlab:db:create_dynamic_partitions']).to receive(:invoke).once.and_return(true)
expect { run_rake_task('db:migrate:main') }.not_to raise_error
end
end
end
describe 'gitlab:db:reset_as_non_superuser' do