Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2022-09-16 09:11:45 +00:00
parent 2800e6ea59
commit 6f9c158ef1
16 changed files with 145 additions and 172 deletions

View File

@ -58,7 +58,7 @@ include:
download-knapsack-report:
extends:
- .bundle-base
- .rules:qa-framework-changes-or-review-scenarios
- .review:rules:review-build-cng
stage: prepare
script:
- bundle exec rake "knapsack:download[qa]"

View File

@ -27,7 +27,7 @@ export default {
numberOfLessParticipants: {
type: Number,
required: false,
default: 7,
default: 8,
},
showParticipantLabel: {
type: Boolean,
@ -123,7 +123,7 @@ export default {
:size="24"
:tooltip-text="participant.name"
:img-alt="participant.name"
css-classes="avatar-inline"
css-classes="gl-mr-0!"
tooltip-placement="bottom"
/>
</a>

View File

@ -71,7 +71,7 @@ export default {
<participants
:loading="isLoading"
:participants="participants"
:number-of-less-participants="7"
:number-of-less-participants="8"
:lazy="false"
class="block participants"
@toggleSidebar="toggleSidebar"

View File

@ -613,7 +613,7 @@
}
.participants-author {
&:nth-of-type(7n) {
&:nth-of-type(8n) {
padding-right: 0;
}

View File

@ -6,6 +6,24 @@ module Integrations
class Discord < BaseChatNotification
ATTACHMENT_REGEX = /: (?<entry>.*?)\n - (?<name>.*)\n*/.freeze
undef :notify_only_broken_pipelines
field :webhook,
section: SECTION_TYPE_CONNECTION,
placeholder: 'https://discordapp.com/api/webhooks/…',
help: 'URL to the webhook for the Discord channel.',
required: true
field :notify_only_broken_pipelines,
type: 'checkbox',
section: SECTION_TYPE_CONFIGURATION
field :branches_to_be_notified,
type: 'select',
section: SECTION_TYPE_CONFIGURATION,
title: -> { s_('Integrations|Branches for which notifications are to be sent') },
choices: -> { branch_choices }
def title
s_("DiscordService|Discord Notifications")
end
@ -18,6 +36,10 @@ module Integrations
"discord"
end
def fields
self.class.fields + build_event_channels
end
def help
docs_link = ActionController::Base.helpers.link_to _('How do I set up this service?'), Rails.application.routes.url_helpers.help_page_url('user/project/integrations/discord_notifications'), target: '_blank', rel: 'noopener noreferrer'
s_('Send notifications about project events to a Discord channel. %{docs_link}').html_safe % { docs_link: docs_link.html_safe }
@ -31,30 +53,6 @@ module Integrations
%w[push issue confidential_issue merge_request note confidential_note tag_push pipeline wiki_page]
end
def default_fields
[
{
type: 'text',
section: SECTION_TYPE_CONNECTION,
name: 'webhook',
placeholder: 'https://discordapp.com/api/webhooks/…',
help: 'URL to the webhook for the Discord channel.'
},
{
type: 'checkbox',
section: SECTION_TYPE_CONFIGURATION,
name: 'notify_only_broken_pipelines'
},
{
type: 'select',
section: SECTION_TYPE_CONFIGURATION,
name: 'branches_to_be_notified',
title: s_('Integrations|Branches for which notifications are to be sent'),
choices: self.class.branch_choices
}
]
end
def sections
[
{

View File

@ -1,8 +0,0 @@
---
name: workhorse_long_polling_publish_many
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/96751
rollout_issue_url: https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1901
milestone: '15.4'
type: development
group: group::scalability
default_enabled: false

View File

@ -212,10 +212,40 @@ GitLab Runners, Gitaly, Workhorse, KAS could use the API to receive a set of
application limits those are supposed to enforce. This will still allow us to
define all of them in a single place.
We should, however, avoid the possible negative-feedback-loop, that will put
additional strain on the Rails application when there is a sudden increase in
usage happening. This might be a big customer starting a new automation that
traverses our API or a Denial of Service attack. In such cases, the additional
traffic will reach GitLab Rails and subsequently also other satellite services.
Then the satellite services may need to consult Rails again to obtain new
instructions / policies around rate limiting the increased traffic. This can
put additional strain on Rails application and eventually degrade performance
even more. In order to avoid this problem, we should extract the API endpoints
to a separate service (see the section below) if the request rate to those
endpoints depends on the volume of incoming traffic. Alternatively we can keep
those endpoints in Rails if the increased traffic will not translate into
increase of requests rate or increase in resources consumption on these API
endpoints on the Rails side.
#### Decoupled Limits Service
At some point we may decide that it is time to extract a stateful backend
responsible for storing metadata around limits, all the counters and state
required, and exposing API, out of Rails.
It is impossible to make a decision about extracting such a decoupled limits
service yet, because we will need to ship more proof-of-concept work, and
concrete iterations to inform us better about when and how we should do that. We
will depend on the Evolution Architecture practice to guide us towards either
extracting Decoupled Limits Service or not doing that at all.
As we evolve this blueprint, we will document our findings and insights about
what this service should look like, in this section of the document.
### GitLab Policy Service
_Disclaimer_: Extracting a GitLab Policy Service might be out of scope of the
current workstream organized around implementing this blueprint.
_Disclaimer_: Extracting a GitLab Policy Service might be out of scope
of the current workstream organized around implementing this blueprint.
Not all limits can be easily described in YAML. There are some more complex
policies that require a bit more sophisticated approach and a declarative
@ -257,6 +287,33 @@ require to have a globally defined rules / configuration, but this state is not
volatile in a same way a rate limiting counter may be, or a megabytes consumed
to evaluate quota limit.
#### Policies used internally and externally
The GitLab Policy Service might be used in two different ways:
1. Rails limits framework will use it as a source of policies enforced internally.
1. The policy service feature will be used as a backend to store policies defined by users.
These are two slightly different use-cases: the first one is about using
internally-defined policies to ensure the stability / availability of a GitLab
instance (GitLab.com or self-managed instance). The second use-case is about
making GitLab Policy Service a feature that users will be able to build on top
of.
Both use-cases are valid but we will need to make technical decision about how
to separate them. Even if we decide to implement them both in a single service,
we will need to draw a strong boundary between the two.
The same principle might apply to the Decoupled Limits Service described in one of
the sections of this document above.
#### The two limits / policy services
It is possible that GitLab Policy Service and Decoupled Limits Service can
actually be the same thing. It, however, depends on the implementation details
that we can't predict yet, and the decision about merging these services
together will need to be informed by subsequent iterations' feedback.
## Hierarchical limits
GitLab application aggregates users, projects, groups and namespaces in a

View File

@ -349,6 +349,22 @@ You can also filter runners by status, type, and tag. To filter:
![Attributes of a runner, with the **Search or filter results...** field active](img/index_runners_search_or_filter_v14_5.png)
#### Bulk delete runners
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/370241) in GitLab 15.4 [with a flag](../../administration/feature_flags.md) named `admin_runners_bulk_delete`. Disabled by default.
FLAG:
On self-managed GitLab, by default this feature is not available. To make it available, ask an administrator to [enable the feature flag](../../administration/feature_flags.md) named `admin_runners_bulk_delete`. On GitLab.com, this feature is not available but can be enabled by GitLab.com administrators.
You can delete multiple runners at the same time.
1. On the top bar, select **Main menu > Admin**.
1. On the left sidebar, select **Overview > Runners**.
1. To the left of the runners you want to delete, select the checkbox.
To select all of the runners on the page, select the checkbox above
the list.
1. Select **Delete selected**.
#### Runner attributes
For each runner, the following attributes are listed:

View File

@ -5,60 +5,44 @@ info: "To determine the technical writer assigned to the Stage/Group associated
type: reference
---
# GitLab Advanced Search **(PREMIUM)**
# Advanced Search **(PREMIUM)**
> Moved to GitLab Premium in 13.9.
Advanced Search uses Elasticsearch for faster, more advanced search across the entire
GitLab instance.
You can use Advanced Search for faster, more efficient search across the entire GitLab
instance. Advanced Search is based on Elasticsearch, a purpose-built full-text search
engine you can horizontally scale to get results in up to a second in most cases.
Use Advanced Search when searching in:
You can find code you want to update in all projects at once to save
maintenance time and promote innersourcing.
You can use Advanced Search in:
- Projects
- Issues
- Merge requests
- Milestones
- Users
- Epics (when searching in a group only)
- Epics (in groups only)
- Code
- Comments
- Commits
- Wiki (except [group wikis](../project/wiki/group.md))
- Project wikis (not [group wikis](../project/wiki/group.md))
Advanced Search can be useful in various scenarios:
## Configure Advanced Search
- **Faster searches:**
Advanced Search is based on Elasticsearch, which is a purpose-built full
text search engine that can be horizontally scaled so that it can provide
search results in 1-2 seconds in most cases.
- **Code Maintenance:**
Finding all the code that needs to be updated at once across an entire
instance can save time spent maintaining code.
This is especially helpful for organizations with more than 10 active projects.
This can also help build confidence in code refactoring by identifying unknown impacts.
- **Promote innersourcing:**
Your company may consist of many different developer teams each of which has
their own group where the various projects are hosted. Some of your applications
may be connected to each other, so your developers need to instantly search
throughout the GitLab instance and find the code they search for.
- On GitLab.com, Advanced Search is enabled for groups with paid subscriptions.
- For self-managed GitLab instances, an administrator must
[configure Advanced Search](../../integration/advanced_search/elasticsearch.md).
## Configuring Advanced Search
## Syntax
For self-managed GitLab instances, an administrator must
[configure Advanced Search](../../integration/advanced_search/elasticsearch.md).
See [Advanced Search syntax](global_search/advanced_search_syntax.md) for more information.
On GitLab.com, Advanced Search is enabled.
## Search by ID
## Advanced Search syntax
See the documentation on [Advanced Search syntax](global_search/advanced_search_syntax.md).
## Search by issue or merge request ID
You can search a specific issue or merge request by its ID with a special prefix.
- To search by issue ID, use prefix `#` followed by issue ID. For example, [#23456](https://gitlab.com/search?snippets=&scope=issues&repository_ref=&search=%2323456&group_id=9970&project_id=278964)
- To search by merge request ID, use prefix `!` followed by merge request ID. For example [!23456](https://gitlab.com/search?snippets=&scope=merge_requests&repository_ref=&search=%2123456&group_id=9970&project_id=278964)
- To search by issue ID, use the `#` prefix followed by the issue ID (for example, [`#23456`](https://gitlab.com/search?snippets=&scope=issues&repository_ref=&search=%2323456&group_id=9970&project_id=278964)).
- To search by merge request ID, use the `!` prefix followed by the merge request ID (for example, [`!23456`](https://gitlab.com/search?snippets=&scope=merge_requests&repository_ref=&search=%2123456&group_id=9970&project_id=278964)).
## Global search scopes **(FREE SELF)**

View File

@ -12,7 +12,7 @@ module Gitlab
VERSION_FILE = 'GITLAB_WORKHORSE_VERSION'
INTERNAL_API_CONTENT_TYPE = 'application/vnd.gitlab-workhorse+json'
INTERNAL_API_REQUEST_HEADER = 'Gitlab-Workhorse-Api-Request'
NOTIFICATION_CHANNEL = 'workhorse:notifications'
NOTIFICATION_PREFIX = 'workhorse:notifications:'
ALLOWED_GIT_HTTP_ACTIONS = %w[git_receive_pack git_upload_pack info_refs].freeze
DETECT_HEADER = 'Gitlab-Workhorse-Detect-Content-Type'
ARCHIVE_FORMATS = %w(zip tar.gz tar.bz2 tar).freeze
@ -217,11 +217,7 @@ module Gitlab
Gitlab::Redis::SharedState.with do |redis|
result = redis.set(key, value, ex: expire, nx: !overwrite)
if result
redis.publish(NOTIFICATION_CHANNEL, "#{key}=#{value}")
if Feature.enabled?(:workhorse_long_polling_publish_many)
redis.publish("#{NOTIFICATION_CHANNEL}:#{key}", value)
end
redis.publish(NOTIFICATION_PREFIX + key, value)
value
else

View File

@ -64,7 +64,7 @@ exports[`Design management design index page renders design index 1`] = `
<participants-stub
class="gl-mb-4"
lazy="true"
numberoflessparticipants="7"
numberoflessparticipants="8"
participants="[object Object]"
/>
@ -195,7 +195,7 @@ exports[`Design management design index page with error GlAlert is rendered in c
<participants-stub
class="gl-mb-4"
lazy="true"
numberoflessparticipants="7"
numberoflessparticipants="8"
participants="[object Object]"
/>

View File

@ -365,26 +365,10 @@ RSpec.describe Gitlab::Workhorse do
it 'set and notify' do
expect(Gitlab::Redis::SharedState).to receive(:with).and_call_original
expect_any_instance_of(::Redis).to receive(:publish)
.with(described_class::NOTIFICATION_CHANNEL, "test-key=test-value")
expect_any_instance_of(::Redis).to receive(:publish)
.with(described_class::NOTIFICATION_CHANNEL + ':test-key', "test-value")
.with(described_class::NOTIFICATION_PREFIX + 'test-key', "test-value")
subject
end
context 'when workhorse_long_polling_publish_many is disabled' do
before do
stub_feature_flags(workhorse_long_polling_publish_many: false)
end
it 'set and notify' do
expect(Gitlab::Redis::SharedState).to receive(:with).and_call_original
expect_any_instance_of(::Redis).to receive(:publish)
.with(described_class::NOTIFICATION_CHANNEL, "test-key=test-value")
subject
end
end
end
context 'when we set a new key' do

View File

@ -23,10 +23,10 @@ RSpec.describe Integrations::Discord do
describe '#execute' do
include StubRequests
let(:user) { create(:user) }
let(:project) { create(:project, :repository) }
let(:webhook_url) { "https://example.gitlab.com/" }
let_it_be(:project) { create(:project, :repository) }
let(:user) { create(:user) }
let(:webhook_url) { "https://example.gitlab.com/" }
let(:sample_data) do
Gitlab::DataBuilder::Push.build_sample(project, user)
end

View File

@ -20,11 +20,10 @@ type KeyWatcher struct {
subscribers map[string][]chan string
shutdown chan struct{}
reconnectBackoff backoff.Backoff
channelPerKey bool // TODO remove this field https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1902
conn *redis.PubSubConn
}
func NewKeyWatcher(channelPerKey bool) *KeyWatcher {
func NewKeyWatcher() *KeyWatcher {
return &KeyWatcher{
shutdown: make(chan struct{}),
reconnectBackoff: backoff.Backoff{
@ -33,7 +32,6 @@ func NewKeyWatcher(channelPerKey bool) *KeyWatcher {
Factor: 2,
Jitter: true,
},
channelPerKey: channelPerKey,
}
}
@ -71,10 +69,7 @@ var (
)
)
const (
keySubChannel = "workhorse:notifications"
channelPrefix = keySubChannel + ":"
)
const channelPrefix = "workhorse:notifications:"
func countAction(action string) { totalActions.WithLabelValues(action).Add(1) }
@ -103,33 +98,13 @@ func (kw *KeyWatcher) receivePubSubStream(conn redis.Conn) error {
kw.subscribers = nil
}()
if kw.channelPerKey {
// Do not drink from firehose
} else {
// Do drink from firehose
if err := kw.conn.Subscribe(keySubChannel); err != nil {
return err
}
defer kw.conn.Unsubscribe(keySubChannel)
}
for {
switch v := kw.conn.Receive().(type) {
case redis.Message:
totalMessages.Inc()
dataStr := string(v.Data)
receivedBytes.Add(float64(len(dataStr)))
receivedBytes.Add(float64(len(v.Data)))
if strings.HasPrefix(v.Channel, channelPrefix) {
// v is a message on a per-key channel
kw.notifySubscribers(v.Channel[len(channelPrefix):], dataStr)
} else if v.Channel == keySubChannel {
// v is a message on the firehose channel
msg := strings.SplitN(dataStr, "=", 2)
if len(msg) != 2 {
log.WithError(fmt.Errorf("keywatcher: invalid notification: %q", dataStr)).Error()
continue
}
kw.notifySubscribers(msg[0], msg[1])
kw.notifySubscribers(v.Channel[len(channelPrefix):], string(v.Data))
}
case redis.Subscription:
redisSubscriptions.Set(float64(v.Count))
@ -220,10 +195,8 @@ func (kw *KeyWatcher) addSubscription(key string, notify chan string) error {
if len(kw.subscribers[key]) == 0 {
countAction("create-subscription")
if kw.channelPerKey {
if err := kw.conn.Subscribe(channelPrefix + key); err != nil {
return err
}
if err := kw.conn.Subscribe(channelPrefix + key); err != nil {
return err
}
}
@ -257,7 +230,7 @@ func (kw *KeyWatcher) delSubscription(key string, notify chan string) {
if len(kw.subscribers[key]) == 0 {
delete(kw.subscribers, key)
countAction("delete-subscription")
if kw.channelPerKey && kw.conn != nil {
if kw.conn != nil {
kw.conn.Unsubscribe(channelPrefix + key)
}
}

View File

@ -1,7 +1,6 @@
package redis
import (
"fmt"
"sync"
"testing"
"time"
@ -49,16 +48,10 @@ func (kw *KeyWatcher) processMessages(t *testing.T, numWatchers int, value strin
psc := redigomock.NewConn()
psc.ReceiveWait = true
if kw.channelPerKey {
channel := channelPrefix + runnerKey
psc.Command("SUBSCRIBE", channel).Expect(createSubscribeMessage(channel))
psc.Command("UNSUBSCRIBE", channel).Expect(createUnsubscribeMessage(channel))
psc.AddSubscriptionMessage(createSubscriptionMessage(channel, value))
} else {
psc.Command("SUBSCRIBE", keySubChannel).Expect(createSubscribeMessage(keySubChannel))
psc.Command("UNSUBSCRIBE", keySubChannel).Expect(createUnsubscribeMessage(keySubChannel))
psc.AddSubscriptionMessage(createSubscriptionMessage(keySubChannel, runnerKey+"="+value))
}
channel := channelPrefix + runnerKey
psc.Command("SUBSCRIBE", channel).Expect(createSubscribeMessage(channel))
psc.Command("UNSUBSCRIBE", channel).Expect(createUnsubscribeMessage(channel))
psc.AddSubscriptionMessage(createSubscriptionMessage(channel, value))
errC := make(chan error)
go func() { errC <- kw.receivePubSubStream(psc) }()
@ -89,12 +82,6 @@ type keyChangeTestCase struct {
}
func TestKeyChangesInstantReturn(t *testing.T) {
for _, v := range []bool{false, true} {
t.Run(fmt.Sprintf("channelPerKey:%v", v), func(t *testing.T) { testKeyChangesInstantReturn(t, v) })
}
}
func testKeyChangesInstantReturn(t *testing.T, channelPerKey bool) {
testCases := []keyChangeTestCase{
// WatchKeyStatusAlreadyChanged
{
@ -140,7 +127,7 @@ func testKeyChangesInstantReturn(t *testing.T, channelPerKey bool) {
conn.Command("GET", runnerKey).Expect(tc.returnValue)
}
kw := NewKeyWatcher(channelPerKey)
kw := NewKeyWatcher()
defer kw.Shutdown()
kw.conn = &redis.PubSubConn{Conn: redigomock.NewConn()}
@ -153,12 +140,6 @@ func testKeyChangesInstantReturn(t *testing.T, channelPerKey bool) {
}
func TestKeyChangesWhenWatching(t *testing.T) {
for _, v := range []bool{false, true} {
t.Run(fmt.Sprintf("channelPerKey:%v", v), func(t *testing.T) { testKeyChangesWhenWatching(t, v) })
}
}
func testKeyChangesWhenWatching(t *testing.T, channelPerKey bool) {
testCases := []keyChangeTestCase{
// WatchKeyStatusSeenChange
{
@ -196,7 +177,7 @@ func testKeyChangesWhenWatching(t *testing.T, channelPerKey bool) {
conn.Command("GET", runnerKey).Expect(tc.returnValue)
}
kw := NewKeyWatcher(channelPerKey)
kw := NewKeyWatcher()
defer kw.Shutdown()
wg := &sync.WaitGroup{}
@ -219,12 +200,6 @@ func testKeyChangesWhenWatching(t *testing.T, channelPerKey bool) {
}
func TestKeyChangesParallel(t *testing.T) {
for _, v := range []bool{false, true} {
t.Run(fmt.Sprintf("channelPerKey:%v", v), func(t *testing.T) { testKeyChangesParallel(t, v) })
}
}
func testKeyChangesParallel(t *testing.T, channelPerKey bool) {
testCases := []keyChangeTestCase{
{
desc: "massively parallel, sees change with key existing",
@ -263,7 +238,7 @@ func testKeyChangesParallel(t *testing.T, channelPerKey bool) {
wg.Add(runTimes)
ready := make(chan struct{})
kw := NewKeyWatcher(channelPerKey)
kw := NewKeyWatcher()
defer kw.Shutdown()
for i := 0; i < runTimes; i++ {
@ -287,7 +262,7 @@ func TestShutdown(t *testing.T) {
conn, td := setupMockPool()
defer td()
kw := NewKeyWatcher(false)
kw := NewKeyWatcher()
kw.conn = &redis.PubSubConn{Conn: redigomock.NewConn()}
defer kw.Shutdown()

View File

@ -220,9 +220,7 @@ func run(boot bootConfig, cfg config.Config) error {
secret.SetPath(boot.secretPath)
keyWatcher := redis.NewKeyWatcher(
os.Getenv("GITLAB_WORKHORSE_REDIS_SUBSCRIBE_MANY") == "1",
)
keyWatcher := redis.NewKeyWatcher()
if cfg.Redis != nil {
redis.Configure(cfg.Redis, redis.DefaultDialFunc)
go keyWatcher.Process()