Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2021-11-18 06:10:36 +00:00
parent edd6fda56d
commit e9ae9c5f2e
16 changed files with 272 additions and 219 deletions

View File

@@ -3,11 +3,14 @@
module ShaAttribute
extend ActiveSupport::Concern
# Needed for the database method
include DatabaseReflection
class_methods do
def sha_attribute(name)
return if ENV['STATIC_VERIFICATION']
-validate_binary_column_exists!(name) if Rails.env.development?
+validate_binary_column_exists!(name) if Rails.env.development? || Rails.env.test?
attribute(name, Gitlab::Database::ShaAttribute.new)
end
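
For context, a model opts in to this concern roughly as in the following sketch (the model and column names here are illustrative, not part of this commit):

```ruby
# Hypothetical model backed by a binary SHA column.
class MergeRequestDiff < ApplicationRecord
  include ShaAttribute

  # Serializes the value through Gitlab::Database::ShaAttribute; with this
  # change, the binary-column check runs in test as well as development.
  sha_attribute :base_commit_sha
end
```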

View File

@@ -38,9 +38,9 @@ being removed before the [experiment cleanup process](https://about.gitlab.com/h
If, as a reviewer or maintainer, you find code that would usually fail review
but is acceptable for now, mention your concerns with a note that there's no
need to change the code. The author can then add a comment to this piece of code
-and link to the issue that resolves the experiment. If the experiment is
-successful and becomes part of the product, any follow up issues should be
-addressed.
+and link to the issue that resolves the experiment. The author or reviewer can add a link to this concern in the
+experiment rollout issue under the `Experiment Successful Cleanup Concerns` section of the description.
+If the experiment is successful and becomes part of the product, any items that appear under this section will be addressed.
## Implementing an experiment

View File

@@ -65,5 +65,7 @@ More examples of how to write a cron schedule can be found at
## How GitLab parses cron syntax strings
GitLab uses [`fugit`](https://github.com/floraison/fugit) to parse cron syntax
-strings on the server and [cron-validate](https://github.com/Airfooox/cron-validate)
-to validate cron syntax in the browser.
+strings on the server and [cron-validator](https://github.com/TheCloudConnectors/cron-validator)
+to validate cron syntax in the browser. GitLab uses
+[`cRonstrue`](https://github.com/bradymholt/cRonstrue) to convert cron to human-readable strings
+in the browser.
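
For a feel of the server-side half, fugit parses a cron string into an object you can query (a minimal sketch; the expression is only an example):

```ruby
require 'fugit'

# Returns a Fugit::Cron instance, or nil if the string is not valid cron syntax.
cron = Fugit.parse_cron('0 2 * * 1-5')

# When the schedule would next fire.
puts cron.next_time
```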

View File

@@ -0,0 +1,84 @@
# frozen_string_literal: true

module Gitlab
  module ProcessManagement
    # The signals that should terminate both the master and workers.
    TERMINATE_SIGNALS = %i(INT TERM).freeze

    # The signals that should simply be forwarded to the workers.
    FORWARD_SIGNALS = %i(TTIN USR1 USR2 HUP).freeze

    # Traps the given signals and yields the block whenever these signals are
    # received.
    #
    # The block is passed the name of the signal.
    #
    # Example:
    #
    #     trap_signals(%i(HUP TERM)) do |signal|
    #       ...
    #     end
    def self.trap_signals(signals)
      signals.each do |signal|
        trap(signal) do
          yield signal
        end
      end
    end

    def self.trap_terminate(&block)
      trap_signals(TERMINATE_SIGNALS, &block)
    end

    def self.trap_forward(&block)
      trap_signals(FORWARD_SIGNALS, &block)
    end

    def self.signal(pid, signal)
      Process.kill(signal, pid)
      true
    rescue Errno::ESRCH
      false
    end

    def self.signal_processes(pids, signal)
      pids.each { |pid| signal(pid, signal) }
    end

    # Waits for the given process to complete using a separate thread.
    def self.wait_async(pid)
      Thread.new do
        Process.wait(pid) rescue Errno::ECHILD
      end
    end

    # Returns true if all the processes are alive.
    def self.all_alive?(pids)
      pids.each do |pid|
        return false unless process_alive?(pid)
      end

      true
    end

    def self.any_alive?(pids)
      pids_alive(pids).any?
    end

    def self.pids_alive(pids)
      pids.select { |pid| process_alive?(pid) }
    end

    def self.process_alive?(pid)
      # Signal 0 tests whether the process exists and we have access to send signals
      # but is otherwise a noop (doesn't actually send a signal to the process)
      signal(pid, 0)
    end

    def self.write_pid(path)
      File.open(path, 'w') do |handle|
        handle.write(Process.pid.to_s)
      end
    end
  end
end
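
Taken together, a supervisor uses this module roughly as the sidekiq-cluster CLI below does. This is a condensed sketch with dummy workers, not the actual CLI code; the require path assumes the script runs from the repository root:

```ruby
require_relative 'lib/gitlab/process_management'

# Dummy workers standing in for Sidekiq processes.
pids = Array.new(2) { Process.spawn('sleep 60') }
pids.each { |pid| Gitlab::ProcessManagement.wait_async(pid) }

# INT/TERM terminate the whole cluster...
Gitlab::ProcessManagement.trap_terminate do |signal|
  Gitlab::ProcessManagement.signal_processes(pids, signal)
  exit
end

# ...while TTIN/USR1/USR2/HUP are forwarded to the workers untouched.
Gitlab::ProcessManagement.trap_forward do |signal|
  Gitlab::ProcessManagement.signal_processes(pids, signal)
end

# If any worker dies, take the rest down; a supervisor such as runit
# is then expected to restart the whole cluster.
sleep(1) while Gitlab::ProcessManagement.all_alive?(pids)
Gitlab::ProcessManagement.signal_processes(pids, :TERM)
```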

View File

@@ -30807,7 +30807,7 @@ msgstr ""
msgid "SecurityOrchestration|Scan execution policies can only be created by project owners."
msgstr ""
msgid "SecurityOrchestration|Scan to be performed every %{cadence} on the %{branches}"
msgid "SecurityOrchestration|Scan to be performed %{cadence} on the %{branches}"
msgstr ""
msgid "SecurityOrchestration|Scan to be performed on every pipeline on the %{branches}"

View File

@@ -117,6 +117,7 @@
"copy-webpack-plugin": "^6.4.1",
"core-js": "^3.18.2",
"cron-validator": "^1.1.1",
"cronstrue": "^1.122.0",
"cropper": "^2.3.0",
"css-loader": "^2.1.1",
"d3": "^5.16.0",

View File

@@ -73,6 +73,16 @@ bundle exec bin/qa Test::Instance::All http://localhost:3000
Note: If you want to run tests requiring SSH against GDK, you
will need to [modify your GDK setup](https://gitlab.com/gitlab-org/gitlab-qa/blob/master/docs/run_qa_against_gdk.md).
Note: When you log into your GDK instance of GitLab for the first time, the root password requires a change.
GitLab QA expects the default initial password to be used in tests; see all default values listed in
[Supported GitLab environment variables](https://gitlab.com/gitlab-org/gitlab-qa/-/blob/master/docs/what_tests_can_be_run.md#supported-gitlab-environment-variables).
If you have changed your root password, you must set the `GITLAB_INITIAL_ROOT_PASSWORD` environment
variable.
```shell
export GITLAB_INITIAL_ROOT_PASSWORD="<GDK root password>"
```
#### Running EE tests
When running EE tests you'll need to have a license available. GitLab engineers can [request a license](https://about.gitlab.com/handbook/developer-onboarding/#working-on-gitlab-ee).

View File

@@ -11,6 +11,7 @@ require_relative '../lib/gitlab/utils'
require_relative '../lib/gitlab/sidekiq_config/cli_methods'
require_relative '../lib/gitlab/sidekiq_config/worker_matcher'
require_relative '../lib/gitlab/sidekiq_logging/json_formatter'
+require_relative '../lib/gitlab/process_management'
require_relative 'sidekiq_cluster'
module Gitlab
@@ -106,7 +107,7 @@ module Gitlab
end
def write_pid
-SidekiqCluster.write_pid(@pid) if @pid
+ProcessManagement.write_pid(@pid) if @pid
end
def soft_timeout_seconds
@@ -123,11 +124,11 @@ module Gitlab
end
def continue_waiting?(deadline)
-SidekiqCluster.any_alive?(@processes) && monotonic_time < deadline
+ProcessManagement.any_alive?(@processes) && monotonic_time < deadline
end
def hard_stop_stuck_pids
-SidekiqCluster.signal_processes(SidekiqCluster.pids_alive(@processes), "-KILL")
+ProcessManagement.signal_processes(ProcessManagement.pids_alive(@processes), "-KILL")
end
def wait_for_termination
@@ -138,14 +139,14 @@ module Gitlab
end
def trap_signals
-SidekiqCluster.trap_terminate do |signal|
+ProcessManagement.trap_terminate do |signal|
@alive = false
-SidekiqCluster.signal_processes(@processes, signal)
+ProcessManagement.signal_processes(@processes, signal)
wait_for_termination
end
-SidekiqCluster.trap_forward do |signal|
-SidekiqCluster.signal_processes(@processes, signal)
+ProcessManagement.trap_forward do |signal|
+ProcessManagement.signal_processes(@processes, signal)
end
end
@@ -153,12 +154,12 @@ module Gitlab
while @alive
sleep(@interval)
-unless SidekiqCluster.all_alive?(@processes)
+unless ProcessManagement.all_alive?(@processes)
# If a child process died we'll just terminate the whole cluster. It's up to
# runit and such to then restart the cluster.
@logger.info('A worker terminated, shutting down the cluster')
-SidekiqCluster.signal_processes(@processes, :TERM)
+ProcessManagement.signal_processes(@processes, :TERM)
break
end
end

View File

@@ -1,6 +1,7 @@
# frozen_string_literal: true
require_relative 'dependencies'
+require_relative '../lib/gitlab/process_management'
module Gitlab
module SidekiqCluster
@@ -17,49 +18,6 @@ module Gitlab
# After surpassing the soft timeout.
DEFAULT_HARD_TIMEOUT_SECONDS = 5
-    # The signals that should terminate both the master and workers.
-    TERMINATE_SIGNALS = %i(INT TERM).freeze
-
-    # The signals that should simply be forwarded to the workers.
-    FORWARD_SIGNALS = %i(TTIN USR1 USR2 HUP).freeze
-
-    # Traps the given signals and yields the block whenever these signals are
-    # received.
-    #
-    # The block is passed the name of the signal.
-    #
-    # Example:
-    #
-    #     trap_signals(%i(HUP TERM)) do |signal|
-    #       ...
-    #     end
-    def self.trap_signals(signals)
-      signals.each do |signal|
-        trap(signal) do
-          yield signal
-        end
-      end
-    end
-
-    def self.trap_terminate(&block)
-      trap_signals(TERMINATE_SIGNALS, &block)
-    end
-
-    def self.trap_forward(&block)
-      trap_signals(FORWARD_SIGNALS, &block)
-    end
-
-    def self.signal(pid, signal)
-      Process.kill(signal, pid)
-      true
-    rescue Errno::ESRCH
-      false
-    end
-
-    def self.signal_processes(pids, signal)
-      pids.each { |pid| signal(pid, signal) }
-    end
# Starts Sidekiq workers for the pairs of processes.
#
# Example:
@@ -118,7 +76,7 @@
out: $stdout
)
-wait_async(pid)
+ProcessManagement.wait_async(pid)
pid
end
@@ -144,41 +102,5 @@ module Gitlab
concurrency_from_queues.clamp(min, max)
end
-    # Waits for the given process to complete using a separate thread.
-    def self.wait_async(pid)
-      Thread.new do
-        Process.wait(pid) rescue Errno::ECHILD
-      end
-    end
-
-    # Returns true if all the processes are alive.
-    def self.all_alive?(pids)
-      pids.each do |pid|
-        return false unless process_alive?(pid)
-      end
-
-      true
-    end
-
-    def self.any_alive?(pids)
-      pids_alive(pids).any?
-    end
-
-    def self.pids_alive(pids)
-      pids.select { |pid| process_alive?(pid) }
-    end
-
-    def self.process_alive?(pid)
-      # Signal 0 tests whether the process exists and we have access to send signals
-      # but is otherwise a noop (doesn't actually send a signal to the process)
-      signal(pid, 0)
-    end
-
-    def self.write_pid(path)
-      File.open(path, 'w') do |handle|
-        handle.write(Process.pid.to_s)
-      end
-    end
end
end

View File

@@ -246,7 +246,7 @@ RSpec.describe Gitlab::SidekiqCluster::CLI do # rubocop:disable RSpec/FilePath
describe '#write_pid' do
context 'when a PID is specified' do
it 'writes the PID to a file' do
-expect(Gitlab::SidekiqCluster).to receive(:write_pid).with('/dev/null')
+expect(Gitlab::ProcessManagement).to receive(:write_pid).with('/dev/null')
cli.option_parser.parse!(%w(-P /dev/null))
cli.write_pid
@@ -255,7 +255,7 @@ RSpec.describe Gitlab::SidekiqCluster::CLI do # rubocop:disable RSpec/FilePath
context 'when no PID is specified' do
it 'does not write a PID' do
-expect(Gitlab::SidekiqCluster).not_to receive(:write_pid)
+expect(Gitlab::ProcessManagement).not_to receive(:write_pid)
cli.write_pid
end
@@ -264,13 +264,13 @@ RSpec.describe Gitlab::SidekiqCluster::CLI do # rubocop:disable RSpec/FilePath
describe '#wait_for_termination' do
it 'waits for termination of all sub-processes and succeeds after 3 checks' do
-expect(Gitlab::SidekiqCluster).to receive(:any_alive?)
+expect(Gitlab::ProcessManagement).to receive(:any_alive?)
.with(an_instance_of(Array)).and_return(true, true, true, false)
-expect(Gitlab::SidekiqCluster).to receive(:pids_alive)
+expect(Gitlab::ProcessManagement).to receive(:pids_alive)
.with([]).and_return([])
-expect(Gitlab::SidekiqCluster).to receive(:signal_processes)
+expect(Gitlab::ProcessManagement).to receive(:signal_processes)
.with([], "-KILL")
stub_const("Gitlab::SidekiqCluster::CHECK_TERMINATE_INTERVAL_SECONDS", 0.1)
@@ -292,13 +292,13 @@ RSpec.describe Gitlab::SidekiqCluster::CLI do # rubocop:disable RSpec/FilePath
.with([['foo']], default_options)
.and_return(worker_pids)
-expect(Gitlab::SidekiqCluster).to receive(:any_alive?)
+expect(Gitlab::ProcessManagement).to receive(:any_alive?)
.with(worker_pids).and_return(true).at_least(10).times
-expect(Gitlab::SidekiqCluster).to receive(:pids_alive)
+expect(Gitlab::ProcessManagement).to receive(:pids_alive)
.with(worker_pids).and_return([102])
-expect(Gitlab::SidekiqCluster).to receive(:signal_processes)
+expect(Gitlab::ProcessManagement).to receive(:signal_processes)
.with([102], "-KILL")
cli.run(%w(foo))
@@ -313,8 +313,8 @@ RSpec.describe Gitlab::SidekiqCluster::CLI do # rubocop:disable RSpec/FilePath
describe '#trap_signals' do
it 'traps the termination and forwarding signals' do
-expect(Gitlab::SidekiqCluster).to receive(:trap_terminate)
-expect(Gitlab::SidekiqCluster).to receive(:trap_forward)
+expect(Gitlab::ProcessManagement).to receive(:trap_terminate)
+expect(Gitlab::ProcessManagement).to receive(:trap_forward)
cli.trap_signals
end
@@ -324,10 +324,10 @@ RSpec.describe Gitlab::SidekiqCluster::CLI do # rubocop:disable RSpec/FilePath
it 'runs until one of the processes has been terminated' do
allow(cli).to receive(:sleep).with(a_kind_of(Numeric))
-expect(Gitlab::SidekiqCluster).to receive(:all_alive?)
+expect(Gitlab::ProcessManagement).to receive(:all_alive?)
.with(an_instance_of(Array)).and_return(false)
-expect(Gitlab::SidekiqCluster).to receive(:signal_processes)
+expect(Gitlab::ProcessManagement).to receive(:signal_processes)
.with(an_instance_of(Array), :TERM)
cli.start_loop

View File

@@ -26,7 +26,5 @@ export function createMockClient(handlers = [], resolvers = {}, cacheOptions = {
export default function createMockApollo(handlers, resolvers, cacheOptions) {
const mockClient = createMockClient(handlers, resolvers, cacheOptions);
-const apolloProvider = new VueApollo({ defaultClient: mockClient });
-return apolloProvider;
+return new VueApollo({ defaultClient: mockClient });
}

View File

@@ -0,0 +1,127 @@
# frozen_string_literal: true

require_relative '../../../lib/gitlab/process_management'

RSpec.describe Gitlab::ProcessManagement do
  describe '.trap_signals' do
    it 'traps the given signals' do
      expect(described_class).to receive(:trap).ordered.with(:INT)
      expect(described_class).to receive(:trap).ordered.with(:HUP)

      described_class.trap_signals(%i(INT HUP))
    end
  end

  describe '.trap_terminate' do
    it 'traps the termination signals' do
      expect(described_class).to receive(:trap_signals)
        .with(described_class::TERMINATE_SIGNALS)

      described_class.trap_terminate { }
    end
  end

  describe '.trap_forward' do
    it 'traps the signals to forward' do
      expect(described_class).to receive(:trap_signals)
        .with(described_class::FORWARD_SIGNALS)

      described_class.trap_forward { }
    end
  end

  describe '.signal_processes' do
    it 'sends a signal to every given process' do
      expect(described_class).to receive(:signal).with(1, :INT)

      described_class.signal_processes([1], :INT)
    end
  end

  describe '.signal' do
    it 'sends a signal to the given process' do
      allow(Process).to receive(:kill).with(:INT, 4)

      expect(described_class.signal(4, :INT)).to eq(true)
    end

    it 'returns false when the process does not exist' do
      allow(Process).to receive(:kill).with(:INT, 4).and_raise(Errno::ESRCH)

      expect(described_class.signal(4, :INT)).to eq(false)
    end
  end

  describe '.wait_async' do
    it 'waits for a process in a separate thread' do
      thread = described_class.wait_async(Process.spawn('true'))

      # Upon success Process.wait just returns the PID.
      expect(thread.value).to be_a_kind_of(Numeric)
    end
  end

  # In the X_alive? checks, we check negative PIDs sometimes as a simple way
  # to be sure the pids are definitely for non-existent processes.
  # Note that -1 is special, and sends the signal to every process we have permission
  # for, so we use -2, -3 etc.
  describe '.all_alive?' do
    it 'returns true if all processes are alive' do
      processes = [Process.pid]

      expect(described_class.all_alive?(processes)).to eq(true)
    end

    it 'returns false when a thread was not alive' do
      processes = [-2]

      expect(described_class.all_alive?(processes)).to eq(false)
    end
  end

  describe '.process_alive?' do
    it 'returns true if the process is alive' do
      process = Process.pid

      expect(described_class.process_alive?(process)).to eq(true)
    end

    it 'returns false when a thread was not alive' do
      process = -2

      expect(described_class.process_alive?(process)).to eq(false)
    end
  end

  describe '.pids_alive' do
    it 'returns the pids that are alive, from a given array' do
      pids = [Process.pid, -2]

      expect(described_class.pids_alive(pids)).to match_array([Process.pid])
    end
  end

  describe '.any_alive?' do
    it 'returns true if at least one process is alive' do
      processes = [Process.pid, -2]

      expect(described_class.any_alive?(processes)).to eq(true)
    end

    it 'returns false when all threads are dead' do
      processes = [-2, -3]

      expect(described_class.any_alive?(processes)).to eq(false)
    end
  end

  describe '.write_pid' do
    it 'writes the PID of the current process to the given file' do
      handle = double(:handle)
      allow(File).to receive(:open).with('/dev/null', 'w').and_yield(handle)

      expect(handle).to receive(:write).with(Process.pid.to_s)

      described_class.write_pid('/dev/null')
    end
  end
end

View File

@@ -3,7 +3,7 @@
require 'spec_helper'
RSpec.describe ShaAttribute do
-let(:model) { Class.new(ApplicationRecord) { include ShaAttribute } }
+let(:model) { Class.new(ActiveRecord::Base) { include ShaAttribute } }
before do
columns = [

View File

@@ -5,53 +5,6 @@ require 'rspec-parameterized'
require_relative '../../sidekiq_cluster/sidekiq_cluster'
RSpec.describe Gitlab::SidekiqCluster do # rubocop:disable RSpec/FilePath
-  describe '.trap_signals' do
-    it 'traps the given signals' do
-      expect(described_class).to receive(:trap).ordered.with(:INT)
-      expect(described_class).to receive(:trap).ordered.with(:HUP)
-
-      described_class.trap_signals(%i(INT HUP))
-    end
-  end
-
-  describe '.trap_terminate' do
-    it 'traps the termination signals' do
-      expect(described_class).to receive(:trap_signals)
-        .with(described_class::TERMINATE_SIGNALS)
-
-      described_class.trap_terminate { }
-    end
-  end
-
-  describe '.trap_forward' do
-    it 'traps the signals to forward' do
-      expect(described_class).to receive(:trap_signals)
-        .with(described_class::FORWARD_SIGNALS)
-
-      described_class.trap_forward { }
-    end
-  end
-
-  describe '.signal' do
-    it 'sends a signal to the given process' do
-      allow(Process).to receive(:kill).with(:INT, 4)
-
-      expect(described_class.signal(4, :INT)).to eq(true)
-    end
-
-    it 'returns false when the process does not exist' do
-      allow(Process).to receive(:kill).with(:INT, 4).and_raise(Errno::ESRCH)
-
-      expect(described_class.signal(4, :INT)).to eq(false)
-    end
-  end
-
-  describe '.signal_processes' do
-    it 'sends a signal to every given process' do
-      expect(described_class).to receive(:signal).with(1, :INT)
-
-      described_class.signal_processes([1], :INT)
-    end
-  end
describe '.start' do
it 'starts Sidekiq with the given queues, environment and options' do
expected_options = {
@@ -99,7 +52,7 @@ RSpec.describe Gitlab::SidekiqCluster do # rubocop:disable RSpec/FilePath
it 'starts a Sidekiq process' do
allow(Process).to receive(:spawn).and_return(1)
-expect(described_class).to receive(:wait_async).with(1)
+expect(Gitlab::ProcessManagement).to receive(:wait_async).with(1)
expect(described_class.start_sidekiq(%w(foo), **options)).to eq(1)
end
@@ -109,7 +62,7 @@ RSpec.describe Gitlab::SidekiqCluster do # rubocop:disable RSpec/FilePath
.with(env, *args, anything)
.and_return(1)
-expect(described_class).to receive(:wait_async).with(1)
+expect(Gitlab::ProcessManagement).to receive(:wait_async).with(1)
expect(described_class.start_sidekiq(%w(foo foo bar baz), **options)).to eq(1)
end
@@ -119,7 +72,7 @@ RSpec.describe Gitlab::SidekiqCluster do # rubocop:disable RSpec/FilePath
.with(anything, *args, a_hash_including(pgroup: true))
.and_return(1)
-allow(described_class).to receive(:wait_async)
+allow(Gitlab::ProcessManagement).to receive(:wait_async)
expect(described_class.start_sidekiq(%w(foo bar baz), **options)).to eq(1)
end
end
@@ -152,57 +105,4 @@ RSpec.describe Gitlab::SidekiqCluster do # rubocop:disable RSpec/FilePath
it { expect(described_class.concurrency(queues, min, max)).to eq(expected) }
end
end
-  describe '.wait_async' do
-    it 'waits for a process in a separate thread' do
-      thread = described_class.wait_async(Process.spawn('true'))
-
-      # Upon success Process.wait just returns the PID.
-      expect(thread.value).to be_a_kind_of(Numeric)
-    end
-  end
-
-  # In the X_alive? checks, we check negative PIDs sometimes as a simple way
-  # to be sure the pids are definitely for non-existent processes.
-  # Note that -1 is special, and sends the signal to every process we have permission
-  # for, so we use -2, -3 etc.
-  describe '.all_alive?' do
-    it 'returns true if all processes are alive' do
-      processes = [Process.pid]
-
-      expect(described_class.all_alive?(processes)).to eq(true)
-    end
-
-    it 'returns false when a thread was not alive' do
-      processes = [-2]
-
-      expect(described_class.all_alive?(processes)).to eq(false)
-    end
-  end
-
-  describe '.any_alive?' do
-    it 'returns true if at least one process is alive' do
-      processes = [Process.pid, -2]
-
-      expect(described_class.any_alive?(processes)).to eq(true)
-    end
-
-    it 'returns false when all threads are dead' do
-      processes = [-2, -3]
-
-      expect(described_class.any_alive?(processes)).to eq(false)
-    end
-  end
-
-  describe '.write_pid' do
-    it 'writes the PID of the current process to the given file' do
-      handle = double(:handle)
-      allow(File).to receive(:open).with('/dev/null', 'w').and_yield(handle)
-
-      expect(handle).to receive(:write).with(Process.pid.to_s)
-
-      described_class.write_pid('/dev/null')
-    end
-  end
end

View File

@@ -49,7 +49,7 @@ module SimpleCovEnv
track_files '{app,config/initializers,config/initializers_before_autoloader,db/post_migrate,haml_lint,lib,rubocop,tooling}/**/*.rb'
add_filter '/vendor/ruby/'
-add_filter '/app/controllers/sherlock/'
+add_filter '/app/controllers/sherlock/' # Profiling tool used only in development
add_filter '/bin/'
add_filter 'db/fixtures/' # Matches EE files as well
add_filter '/lib/gitlab/sidekiq_middleware/'

View File

@@ -3863,9 +3863,14 @@ create-require@^1.1.0:
integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==
cron-validator@^1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/cron-validator/-/cron-validator-1.1.1.tgz#0a27bb75508c7bc03c8b840d2d9f170eeacb5615"
integrity sha512-vfZb05w/wezuwPZBDvdIBmJp2BvuJExHeyKRa5oBqD2ZDXR61hb3QgPc/3ZhBEQJlAy8Jlnn5XC/JCT3IDqxwg==
version "1.2.1"
resolved "https://registry.yarnpkg.com/cron-validator/-/cron-validator-1.2.1.tgz#0f0de2de36d231a6ace0e43ffc6c0564fe6edf1a"
integrity sha512-RqdpGSokGFICPc8qAkT38aXqZLLanXghQTK2q7a2x2FabSwDd2ARrazd5ElEWAXzToUcMG4cZIwDH+5RM0q1mA==
cronstrue@^1.122.0:
version "1.122.0"
resolved "https://registry.yarnpkg.com/cronstrue/-/cronstrue-1.122.0.tgz#bd6838077b476d28f61d381398b47b8c3912a126"
integrity sha512-PFuhZd+iPQQ0AWTXIEYX+t3nFGzBrWxmTWUKJOrsGRewaBSLKZ4I1f8s2kryU75nNxgyugZgiGh2OJsCTA/XlA==
cropper@^2.3.0:
version "2.3.0"