Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2022-02-22 06:16:10 +00:00
parent e4011eb845
commit c123291db9
21 changed files with 173 additions and 86 deletions

View file

@ -245,7 +245,7 @@
- name: postgres:11.6
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
- name: redis:5.0-alpine
- name: elasticsearch:7.14.2
- name: elasticsearch:7.17.0
command: ["elasticsearch", "-E", "discovery.type=single-node"]
variables:
POSTGRES_HOST_AUTH_METHOD: trust
@ -256,7 +256,7 @@
- name: postgres:12
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
- name: redis:5.0-alpine
- name: elasticsearch:7.14.2
- name: elasticsearch:7.17.0
command: ["elasticsearch", "-E", "discovery.type=single-node"]
variables:
POSTGRES_HOST_AUTH_METHOD: trust
@ -267,7 +267,7 @@
- name: postgres:13
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
- name: redis:5.0-alpine
- name: elasticsearch:7.14.2
- name: elasticsearch:7.17.0
command: ["elasticsearch", "-E", "discovery.type=single-node"]
variables:
POSTGRES_HOST_AUTH_METHOD: trust

View file

@ -1 +1 @@
ad76c4bd99833148140d5c08b7258363e5163b9c
c1cabced28d02d667a37122860adde10068d28e2

View file

@ -1,5 +1,5 @@
<script>
import { GlTable, GlTooltipDirective, GlSkeletonLoader } from '@gitlab/ui';
import { GlTableLite, GlTooltipDirective, GlSkeletonLoader } from '@gitlab/ui';
import TooltipOnTruncate from '~/vue_shared/components/tooltip_on_truncate/tooltip_on_truncate.vue';
import { getIdFromGraphQLId } from '~/graphql_shared/utils';
import { __, s__ } from '~/locale';
@ -12,7 +12,7 @@ import RunnerTags from './runner_tags.vue';
export default {
components: {
GlTable,
GlTableLite,
GlSkeletonLoader,
TooltipOnTruncate,
TimeAgo,
@ -35,6 +35,16 @@ export default {
required: true,
},
},
computed: {
tableClass() {
// <gl-table-lite> does not provide a busy state, so add
// simple support for it.
// See http://bootstrap-vue.org/docs/components/table#table-busy-state
return {
'gl-opacity-6': this.loading,
};
},
},
methods: {
formatJobCount(jobCount) {
return formatJobCount(jobCount);
@ -62,8 +72,9 @@ export default {
</script>
<template>
<div>
<gl-table
:busy="loading"
<gl-table-lite
:aria-busy="loading"
:class="tableClass"
:items="runners"
:fields="$options.fields"
:tbody-tr-attr="runnerTrAttr"
@ -72,10 +83,6 @@ export default {
primary-key="id"
fixed
>
<template v-if="!runners.length" #table-busy>
<gl-skeleton-loader v-for="i in 4" :key="i" />
</template>
<template #cell(status)="{ item }">
<runner-status-cell :runner="item" />
</template>
@ -116,6 +123,10 @@ export default {
<template #cell(actions)="{ item }">
<runner-actions-cell :runner="item" />
</template>
</gl-table>
</gl-table-lite>
<template v-if="!runners.length && loading">
<gl-skeleton-loader v-for="i in 4" :key="i" />
</template>
</div>
</template>

View file

@ -12,7 +12,10 @@ class Oauth::AuthorizationsController < Doorkeeper::AuthorizationsController
# Overridden from Doorkeeper::AuthorizationsController to
# include the call to session.delete
def new
logger.info("#{self.class.name}#new: pre_auth_params['scope'] = #{pre_auth_params['scope'].inspect}")
if pre_auth.authorizable?
logger.info("#{self.class.name}#new: pre_auth.scopes = #{pre_auth.scopes.to_a.inspect}")
if skip_authorization? || matching_token?
auth = authorization.authorize
parsed_redirect_uri = URI.parse(auth.redirect_uri)
@ -43,9 +46,15 @@ class Oauth::AuthorizationsController < Doorkeeper::AuthorizationsController
auth_type = params.delete('gl_auth_type')
return unless auth_type == 'login'
logger.info("#{self.class.name}: BEFORE application has read_user: #{application_has_read_user_scope?}")
logger.info("#{self.class.name}: BEFORE scope = #{params['scope'].inspect}")
ensure_read_user_scope!
params['scope'] = Gitlab::Auth::READ_USER_SCOPE.to_s if application_has_read_user_scope?
logger.info("#{self.class.name}: AFTER application has read_user: #{application_has_read_user_scope?}")
logger.info("#{self.class.name}: AFTER scope = #{params['scope'].inspect}")
end
# Configure the application to support read_user scope, if it already
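The debug logging added above records the requested OAuth scope before and after `ensure_read_user_scope!` runs. The pre-existing behaviour it instruments: when the request comes from the login flow (`gl_auth_type=login`) and the application supports `read_user`, the requested scope is replaced with `read_user`. A standalone toy sketch of that downgrade (illustrative names, not the controller's actual implementation):

```ruby
# Toy version of the scope downgrade logged above; the constant and method
# names here are illustrative stand-ins, not GitLab internals.
READ_USER_SCOPE = 'read_user'

def downgrade_scope!(params, application_scopes)
  return unless params.delete('gl_auth_type') == 'login'

  params['scope'] = READ_USER_SCOPE if application_scopes.include?(READ_USER_SCOPE)
end

params = { 'gl_auth_type' => 'login', 'scope' => 'api' }
downgrade_scope!(params, %w[read_user api])
params # => { "scope" => "read_user" }
```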

View file

@ -40,8 +40,7 @@ class UploadsController < ApplicationController
upload_model_class.find(params[:id])
end
def authorize_access!
authorized =
def authorized?
case model
when Note
can?(current_user, :read_project, model.project)
@ -58,12 +57,12 @@ class UploadsController < ApplicationController
when Projects::Topic
true
else
permission = "read_#{model.class.underscore}".to_sym
can?(current_user, permission, model)
can?(current_user, "read_#{model.class.underscore}".to_sym, model)
end
end
render_unauthorized unless authorized
def authorize_access!
render_unauthorized unless authorized?
end
def authorize_create_access!

View file

@ -12,7 +12,7 @@ module Projects
end
def execute
topics = Projects::Topic.order_by_total_projects_count
topics = Projects::Topic.order_by_non_private_projects_count
by_search(topics)
end

View file

@ -10,9 +10,9 @@ module Resolvers
def resolve(**args)
if args[:search].present?
::Projects::Topic.search(args[:search]).order_by_total_projects_count
::Projects::Topic.search(args[:search]).order_by_non_private_projects_count
else
::Projects::Topic.order_by_total_projects_count
::Projects::Topic.order_by_non_private_projects_count
end
end
end

View file

@ -14,12 +14,12 @@ module Projects
has_many :project_topics, class_name: 'Projects::ProjectTopic'
has_many :projects, through: :project_topics
scope :order_by_total_projects_count, -> { order(total_projects_count: :desc).order(id: :asc) }
scope :order_by_non_private_projects_count, -> { order(non_private_projects_count: :desc).order(id: :asc) }
scope :reorder_by_similarity, -> (search) do
order_expression = Gitlab::Database::SimilarityScore.build_expression(search: search, rules: [
{ column: arel_table['name'] }
])
reorder(order_expression.desc, arel_table['total_projects_count'].desc, arel_table['id'])
reorder(order_expression.desc, arel_table['non_private_projects_count'].desc, arel_table['id'])
end
class << self
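A quick console sketch of the ordering the renamed scope produces; the `topics` table name is an assumption inferred from the model, since it isn't shown in this diff:

```ruby
# Rails console sketch; assumes Projects::Topic is backed by the `topics` table.
Projects::Topic.order_by_non_private_projects_count.to_sql
# => roughly: SELECT "topics".* FROM "topics"
#             ORDER BY "topics"."non_private_projects_count" DESC, "topics"."id" ASC
```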

View file

@ -5,3 +5,5 @@ module AlertManagement
delegate { @subject.project }
end
end
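# prepend_mod is GitLab's standard hook for EE extensions: it prepends the
# matching EE module (assumed to be EE::AlertManagement::AlertPolicy) when one is defined.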
AlertManagement::AlertPolicy.prepend_mod

View file

@ -0,0 +1,15 @@
# frozen_string_literal: true
class CleanupPopulateTopicsNonPrivateProjectsCount < Gitlab::Database::Migration[1.0]
MIGRATION = 'PopulateTopicsNonPrivateProjectsCount'
disable_ddl_transaction!
def up
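# Run any outstanding 'PopulateTopicsNonPrivateProjectsCount' background
# migration jobs to completion before the cleanup is considered done.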
finalize_background_migration(MIGRATION)
end
def down
# no-op
end
end

View file

@ -0,0 +1 @@
644d38e401ac8179777cb9d3c5fefca2fb55e0c409197bb2d222f7e96e5dd42f

View file

@ -13,14 +13,14 @@ To enable the Atlassian OmniAuth provider for passwordless authentication you mu
1. Go to <https://developer.atlassian.com/console/myapps/> and sign-in with the Atlassian
account to administer the application.
1. Click **Create a new app**.
1. Choose an App Name, such as 'GitLab', and click **Create**.
1. Select **Create a new app**.
1. Choose an App Name, such as 'GitLab', and select **Create**.
1. Note the `Client ID` and `Secret` for the [GitLab configuration](#gitlab-configuration) steps.
1. On the left sidebar under **APIS AND FEATURES**, click **OAuth 2.0 (3LO)**.
1. Enter the GitLab callback URL using the format `https://gitlab.example.com/users/auth/atlassian_oauth2/callback` and click **Save changes**.
1. Click **+ Add** in the left sidebar under **APIS AND FEATURES**.
1. Click **Add** for **Jira platform REST API** and then **Configure**.
1. Click **Add** next to the following scopes:
1. On the left sidebar under **APIS AND FEATURES**, select **OAuth 2.0 (3LO)**.
1. Enter the GitLab callback URL using the format `https://gitlab.example.com/users/auth/atlassian_oauth2/callback` and select **Save changes**.
1. Select **+ Add** in the left sidebar under **APIS AND FEATURES**.
1. Select **Add** for **Jira platform REST API** and then **Configure**.
1. Select **Add** next to the following scopes:
- **View Jira issue data**
- **View user profiles**
- **Create and manage issues**
@ -73,6 +73,6 @@ To enable the Atlassian OmniAuth provider for passwordless authentication you mu
1. Save the configuration file.
1. [Reconfigure](../restart_gitlab.md#omnibus-gitlab-reconfigure) (Omnibus installations) or [restart GitLab](../restart_gitlab.md#installations-from-source) (source installations) for the changes to take effect.
On the sign-in page there should now be an Atlassian icon below the regular sign in form. Click the icon to begin the authentication process.
On the sign-in page there should now be an Atlassian icon below the regular sign in form. Select the icon to begin the authentication process.
If everything goes right, the user is signed in to GitLab using their Atlassian credentials.
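For orientation, the Omnibus provider entry that the configuration steps above refer to typically looks like the sketch below; the placeholder values are illustrative, and any additional `args` belong in the full [GitLab configuration](#gitlab-configuration) block.

```ruby
# /etc/gitlab/gitlab.rb (sketch; substitute the Client ID and Secret noted earlier)
gitlab_rails['omniauth_providers'] = [
  {
    name: "atlassian_oauth2",
    app_id: "<client_id_from_the_atlassian_developer_console>",
    app_secret: "<secret_from_the_atlassian_developer_console>"
  }
]
```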

View file

@ -89,14 +89,14 @@ established but GitLab doesn't show you LDAP users in the output, one of the
following is most likely true:
- The `bind_dn` user doesn't have enough permissions to traverse the user tree.
- The user(s) don't fall under the [configured `base`](index.md#configure-ldap).
- The [configured `user_filter`](index.md#set-up-ldap-user-filter) blocks access to the user(s).
- The users don't fall under the [configured `base`](index.md#configure-ldap).
- The [configured `user_filter`](index.md#set-up-ldap-user-filter) blocks access to the users.
In this case, you can confirm which of the above is true using
[ldapsearch](#ldapsearch) with the existing LDAP configuration in your
`/etc/gitlab/gitlab.rb`.
#### User(s) cannot sign-in
#### Users cannot sign in
A user can have trouble signing in for any number of reasons. To get started,
here are some questions to ask yourself:
@ -284,7 +284,7 @@ If you don't find a particular user's GitLab email in the output, then that
user hasn't signed in with LDAP yet.
Next, GitLab searches its `identities` table for the existing
link between this user and the configured LDAP provider(s):
link between this user and the configured LDAP providers:
```sql
Identity Load (0.9ms) SELECT "identities".* FROM "identities" WHERE "identities"."user_id" = 20 AND (provider LIKE 'ldap%') LIMIT 1
@ -334,7 +334,7 @@ Gitlab::Auth::Ldap::Person.find_by_uid('<uid>', adapter)
### Group memberships **(PREMIUM SELF)**
#### Membership(s) not granted
#### Memberships not granted
Sometimes you may think a particular user should be added to a GitLab group via
LDAP group sync, but for some reason it's not happening. You can check several
@ -348,7 +348,7 @@ things to debug the situation.
1. On the top bar, select **Menu > Admin**.
1. On the left sidebar, select **Overview > Users**.
1. Search for the user.
1. Open the user by clicking their name. Do not click **Edit**.
1. Open the user by selecting their name. Do not select **Edit**.
1. Select the **Identities** tab. There should be an LDAP identity with
an LDAP DN as the 'Identifier'. If not, this user hasn't signed in with
LDAP yet and must do so first.
@ -558,7 +558,7 @@ aren't blocked or unable to access their accounts.
NOTE:
The following script requires that any new accounts with the new
email address are removed first. This is because emails have to be unique in GitLab.
email address are removed first. Email addresses must be unique in GitLab.
Go to the [rails console](#rails-console) and then run:

View file

@ -13,7 +13,7 @@ It is used by GitLab to read and write Git data.
Gitaly is present in every GitLab installation and coordinates Git repository
storage and retrieval. Gitaly can be:
- A simple background service operating on a single instance Omnibus GitLab (all of
- A background service operating on a single instance Omnibus GitLab (all of
GitLab on one machine).
- Separated onto its own instance and configured in a full cluster configuration,
depending on scaling and availability requirements.
@ -71,7 +71,7 @@ the current status of these issues, please refer to the referenced issues and ep
| Gitaly Cluster + Geo - Issues retrying failed syncs | If Gitaly Cluster is used on a Geo secondary site, repositories that have failed to sync could continue to fail when Geo tries to resync them. Recovering from this state requires assistance from support to run manual steps. Work is in-progress to update Gitaly Cluster to [identify repositories with a unique and persistent identifier](https://gitlab.com/gitlab-org/gitaly/-/issues/3485), which is expected to resolve the issue. | No known solution at this time. |
| Database inconsistencies due to repository access outside of Gitaly Cluster's control | Operations that write to the repository storage that do not go through normal Gitaly Cluster methods can cause database inconsistencies. These can include (but are not limited to) snapshot restoration for cluster node disks, node upgrades which modify files under Git control, or any other disk operation that may touch repository storage external to GitLab. The Gitaly team is actively working to provide manual commands to [reconcile the Praefect database with the repository storage](https://gitlab.com/groups/gitlab-org/-/epics/6723). | Don't directly change repositories on any Gitaly Cluster node at this time. |
| Praefect unable to insert data into the database due to migrations not being applied after an upgrade | If the database is not kept up to date with completed migrations, then the Praefect node is unable to perform normal operation. | Make sure the Praefect database is up and running with all migrations completed (For example: `/opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml sql-migrate-status` should show a list of all applied migrations). Consider [requesting live upgrade assistance](https://about.gitlab.com/support/scheduling-live-upgrade-assistance.html) so your upgrade plan can be reviewed by support. |
| Restoring a Gitaly Cluster node from a snapshot in a running cluster | Because the Gitaly Cluster runs with consistent state, introducing a single node that is behind will result in the cluster not being able to reconcile the node's data with the other nodes' data. | Don't restore a single Gitaly Cluster node from a backup snapshot. If you need to restore from backup, it's best to snapshot all Gitaly Cluster nodes at the same time and take a database dump of the Praefect database. |
| Restoring a Gitaly Cluster node from a snapshot in a running cluster | Because the Gitaly Cluster runs with consistent state, introducing a single node that is behind will result in the cluster not being able to reconcile the node's data with the other nodes' data. | Don't restore a single Gitaly Cluster node from a backup snapshot. If you must restore from backup, it's best to snapshot all Gitaly Cluster nodes at the same time and take a database dump of the Praefect database. |
### Snapshot backup and recovery limitations
@ -179,7 +179,7 @@ In this example:
- Repositories are stored on a virtual storage called `storage-1`.
- Three Gitaly nodes provide `storage-1` access: `gitaly-1`, `gitaly-2`, and `gitaly-3`.
- The three Gitaly nodes share data in three separate hashed storage locations.
- The [replication factor](#replication-factor) is `3`. There are three copies maintained
- The [replication factor](#replication-factor) is `3`. Three copies are maintained
of each repository.
The availability objectives for Gitaly clusters assuming a single node failure are:
@ -237,7 +237,7 @@ Engineering support for NFS for Git repositories is deprecated. Technical suppor
is not well suited to Git workloads which are CPU and IOPS sensitive.
Specifically:
- Git is sensitive to file system latency. Even simple operations require many
- Git is sensitive to file system latency. Some operations require many
read operations. Operations that are fast on block storage can become an order of
magnitude slower. This significantly impacts GitLab application performance.
- NFS performance optimizations that prevent the performance gap between
@ -386,8 +386,7 @@ them back to direct Gitaly storage:
1. Create and configure a new
[Gitaly server](configure_gitaly.md#run-gitaly-on-its-own-server).
1. [Move the repositories](../operations/moving_repositories.md#move-repositories)
to the newly created storage. There are different possibilities to move them
by shard or by group, this gives you the opportunity to spread them over
to the newly created storage. You can move them by shard or by group, which gives you the opportunity to spread them over
multiple Gitaly servers.
## Monitor Gitaly and Gitaly Cluster
@ -509,7 +508,7 @@ The following metrics are available from the `/metrics` endpoint:
They reflect configuration defined for this instance of Praefect.
- `gitaly_praefect_replication_latency_bucket`, a histogram measuring the amount of time it takes
for replication to complete once the replication job starts. Available in GitLab 12.10 and later.
for replication to complete after the replication job starts. Available in GitLab 12.10 and later.
- `gitaly_praefect_replication_delay_bucket`, a histogram measuring how much time passes between
when the replication job is created and when it starts. Available in GitLab 12.10 and later.
- `gitaly_praefect_node_latency_bucket`, a histogram measuring the latency in Gitaly returning
@ -542,7 +541,7 @@ You can also monitor the [Praefect logs](../logs.md#praefect-logs).
The following metrics are available from the `/db_metrics` endpoint:
- `gitaly_praefect_unavailable_repositories`, the number of repositories that have no healthy, up-to-date replicas.
- `gitaly_praefect_read_only_repositories`, the number of repositories in read-only mode within a virtual storage.
- `gitaly_praefect_read_only_repositories`, the number of repositories in read-only mode in a virtual storage.
This metric is available for backwards compatibility reasons. `gitaly_praefect_unavailable_repositories` is more
accurate.
- `gitaly_praefect_replication_queue_depth`, the number of jobs in the replication queue.

View file

@ -0,0 +1,50 @@
---
stage: Monitor
group: Respond
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Alert Management Alerts API **(FREE)**
This is the documentation for the Alert Management Alerts API.
NOTE:
This API is limited to metric images. For more API endpoints, see the [GraphQL API](graphql/reference/index.md#alertmanagementalert).
## List metric images
```plaintext
GET /projects/:id/alert_management_alerts/:alert_iid/metric_images
```
| Attribute | Type | Required | Description |
|-------------|---------|----------|--------------------------------------|
| `id` | integer/string | yes | The ID or [URL-encoded path of the project](index.md#namespaced-path-encoding) owned by the authenticated user |
| `alert_iid` | integer | yes | The internal ID of a project's alert |
```shell
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/alert_management_alerts/93/metric_images"
```
Example response:
```json
[
{
"id": 17,
"created_at": "2020-11-12T20:07:58.156Z",
"filename": "sample_2054",
"file_path": "/uploads/-/system/alert_metric_image/file/17/sample_2054.png",
"url": "example.com/metric",
"url_text": "An example metric"
},
{
"id": 18,
"created_at": "2020-11-12T20:14:26.441Z",
"filename": "sample_2054",
"file_path": "/uploads/-/system/alert_metric_image/file/18/sample_2054.png",
"url": "example.com/metric",
"url_text": "An example metric"
}
]
```
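The same request as the curl example above, as a small Ruby sketch using only the standard library (the host, project ID, alert IID, and token are placeholders):

```ruby
require 'net/http'
require 'json'

uri = URI('https://gitlab.example.com/api/v4/projects/5/alert_management_alerts/93/metric_images')
request = Net::HTTP::Get.new(uri)
request['PRIVATE-TOKEN'] = '<your_access_token>'

response = Net::HTTP.start(uri.host, uri.port, use_ssl: uri.scheme == 'https') do |http|
  http.request(request)
end

# Print a short summary of each metric image returned by the endpoint.
JSON.parse(response.body).each do |image|
  puts "#{image['id']}: #{image['url_text']} (#{image['file_path']})"
end
```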

View file

@ -62,7 +62,7 @@ To purge files from a GitLab repository:
git clone --bare --mirror /path/to/project.bundle
```
1. Navigate to the `project.git` directory:
1. Go to the `project.git` directory:
```shell
cd project.git
@ -159,7 +159,7 @@ operations before continuing.
To clean up a repository:
1. Go to the project for the repository.
1. Navigate to **Settings > Repository**.
1. Go to **Settings > Repository**.
1. Upload a list of objects. For example, a `commit-map` file created by `git filter-repo` which is located in the
`filter-repo` directory.
@ -169,7 +169,7 @@ To clean up a repository:
split -l 3000 filter-repo/commit-map filter-repo/commit-map-
```
1. Click **Start cleanup**.
1. Select **Start cleanup**.
This:
@ -240,7 +240,7 @@ Until `git gc` runs on the GitLab side, the "removed" commits and blobs still ex
must be able to push the rewritten history to GitLab, which may be impossible if you've already
exceeded the maximum size limit.
In order to lift these restrictions, the administrator of the self-managed GitLab instance must
To lift these restrictions, the administrator of the self-managed GitLab instance must
increase the limit on the particular project that exceeded it. Therefore, it's always better to
stay under the limit proactively. If you hit the limit and can't have it temporarily
increased, your only option is to:

View file

@ -9,9 +9,9 @@ RSpec.describe Projects::TopicsFinder do
let!(:topic2) { create(:topic, name: 'topicC') }
let!(:topic3) { create(:topic, name: 'topicA') }
let!(:project1) { create(:project, namespace: user.namespace, topic_list: 'topicC, topicA, topicB') }
let!(:project2) { create(:project, namespace: user.namespace, topic_list: 'topicC, topicA') }
let!(:project3) { create(:project, namespace: user.namespace, topic_list: 'topicC') }
let!(:project1) { create(:project, :public, namespace: user.namespace, topic_list: 'topicC, topicA, topicB') }
let!(:project2) { create(:project, :public, namespace: user.namespace, topic_list: 'topicC, topicA') }
let!(:project3) { create(:project, :public, namespace: user.namespace, topic_list: 'topicC') }
describe '#execute' do
it 'returns topics' do

View file

@ -1,4 +1,4 @@
import { GlTable, GlSkeletonLoader } from '@gitlab/ui';
import { GlTableLite, GlSkeletonLoader } from '@gitlab/ui';
import {
extendedWrapper,
shallowMountExtended,
@ -18,7 +18,7 @@ describe('RunnerList', () => {
let wrapper;
const findSkeletonLoader = () => wrapper.findComponent(GlSkeletonLoader);
const findTable = () => wrapper.findComponent(GlTable);
const findTable = () => wrapper.findComponent(GlTableLite);
const findHeaders = () => wrapper.findAll('th');
const findRows = () => wrapper.findAll('[data-testid^="runner-row-"]');
const findCell = ({ row = 0, fieldKey }) =>
@ -144,7 +144,8 @@ describe('RunnerList', () => {
describe('When data is loading', () => {
it('shows a busy state', () => {
createComponent({ props: { runners: [], loading: true } });
expect(findTable().attributes('busy')).toBeTruthy();
expect(findTable().classes('gl-opacity-6')).toBe(true);
});
it('when there are no runners, shows a skeleton loader', () => {

View file

@ -6,9 +6,9 @@ RSpec.describe Resolvers::TopicsResolver do
include GraphqlHelpers
describe '#resolve' do
let!(:topic1) { create(:topic, name: 'GitLab', total_projects_count: 1) }
let!(:topic2) { create(:topic, name: 'git', total_projects_count: 2) }
let!(:topic3) { create(:topic, name: 'topic3', total_projects_count: 3) }
let!(:topic1) { create(:topic, name: 'GitLab', non_private_projects_count: 1) }
let!(:topic2) { create(:topic, name: 'git', non_private_projects_count: 2) }
let!(:topic3) { create(:topic, name: 'topic3', non_private_projects_count: 3) }
it 'finds all topics' do
expect(resolve_topics).to eq([topic3, topic2, topic1])

View file

@ -28,16 +28,16 @@ RSpec.describe Projects::Topic do
end
describe 'scopes' do
describe 'order_by_total_projects_count' do
describe 'order_by_non_private_projects_count' do
let!(:topic1) { create(:topic, name: 'topicB') }
let!(:topic2) { create(:topic, name: 'topicC') }
let!(:topic3) { create(:topic, name: 'topicA') }
let!(:project1) { create(:project, topic_list: 'topicC, topicA, topicB') }
let!(:project2) { create(:project, topic_list: 'topicC, topicA') }
let!(:project3) { create(:project, topic_list: 'topicC') }
let!(:project1) { create(:project, :public, topic_list: 'topicC, topicA, topicB') }
let!(:project2) { create(:project, :public, topic_list: 'topicC, topicA') }
let!(:project3) { create(:project, :public, topic_list: 'topicC') }
it 'sorts topics by total_projects_count' do
topics = described_class.order_by_total_projects_count
it 'sorts topics by non_private_projects_count' do
topics = described_class.order_by_non_private_projects_count
expect(topics.map(&:name)).to eq(%w[topicC topicA topicB topic])
end

View file

@ -7,9 +7,9 @@ RSpec.describe API::Topics do
let_it_be(:file) { fixture_file_upload('spec/fixtures/dk.png') }
let_it_be(:topic_1) { create(:topic, name: 'Git', total_projects_count: 1, avatar: file) }
let_it_be(:topic_2) { create(:topic, name: 'GitLab', total_projects_count: 2) }
let_it_be(:topic_3) { create(:topic, name: 'other-topic', total_projects_count: 3) }
let_it_be(:topic_1) { create(:topic, name: 'Git', total_projects_count: 1, non_private_projects_count: 1, avatar: file) }
let_it_be(:topic_2) { create(:topic, name: 'GitLab', total_projects_count: 2, non_private_projects_count: 2) }
let_it_be(:topic_3) { create(:topic, name: 'other-topic', total_projects_count: 3, non_private_projects_count: 3) }
let_it_be(:admin) { create(:user, :admin) }
let_it_be(:user) { create(:user) }