Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2022-06-09 09:09:12 +00:00
parent 3607cd931c
commit 5f2a8d5813
26 changed files with 241 additions and 82 deletions

View File

@ -29,8 +29,6 @@ start-review-app-pipeline:
needs:
- job: build-assets-image
artifacts: false
- job: build-qa-image
artifacts: false
# These variables are set in the pipeline schedules.
# They need to be explicitly passed on to the child pipeline.
# https://docs.gitlab.com/ee/ci/pipelines/multi_project_pipelines.html#pass-cicd-variables-to-a-downstream-pipeline-by-using-the-variables-keyword

View File

@ -13,7 +13,6 @@ class Route < ApplicationRecord
presence: true,
uniqueness: { case_sensitive: false }
before_validation :delete_conflicting_orphaned_routes
after_create :delete_conflicting_redirects
after_update :delete_conflicting_redirects, if: :saved_change_to_path?
after_update :create_redirect_for_old_path
@ -71,13 +70,4 @@ class Route < ApplicationRecord
# Creates a redirect route from the previous path to this route, but
# only when the last save actually changed the path column.
def create_redirect_for_old_path
return unless saved_change_to_path?

create_redirect(path_before_last_save)
end
# Destroys any routes with a case-insensitive path collision that no
# longer have a source record attached (orphaned routes), so that this
# route can pass its uniqueness validation.
def delete_conflicting_orphaned_routes
self.class.iwhere(path: path).select { |conflict| conflict.source.nil? }.each(&:destroy)
end
end

View File

@ -6,6 +6,19 @@ module Repositories
DEFAULT_TRAILER = 'Changelog'
DEFAULT_FILE = 'CHANGELOG.md'
# The maximum number of commits allowed to fetch in `from` and `to` range.
#
# This value is arbitrarily chosen. Increasing it means more Gitaly calls
# and more pressure on Gitaly services.
#
# This number is 3x of the average number of commits per GitLab releases.
# Some examples for GitLab's own releases:
#
# * 13.6.0: 4636 commits
# * 13.5.0: 5912 commits
# * 13.4.0: 5541 commits
COMMITS_LIMIT = 15_000
# The `project` specifies the `Project` to generate the changelog section
# for.
#
@ -75,6 +88,8 @@ module Repositories
commits =
ChangelogCommitsFinder.new(project: @project, from: from, to: @to)
verify_commit_range!(from, @to)
commits.each_page(@trailer) do |page|
mrs = mrs_finder.execute(page)
@ -120,5 +135,15 @@ module Repositories
'could be found to use instead'
)
end
# Raises Gitlab::Changelog::Error when the number of commits between
# `from` and `to` exceeds COMMITS_LIMIT. The check is skipped unless
# the `changelog_commits_limitation` feature flag is enabled for the
# project.
def verify_commit_range!(from, to)
return unless Feature.enabled?(:changelog_commits_limitation, @project)

_, commits_count = @project.repository.diverging_commit_count(from, to)

raise Gitlab::Changelog::Error, "The commits range exceeds #{COMMITS_LIMIT} elements." if commits_count > COMMITS_LIMIT
end
end
end

View File

@ -74,7 +74,7 @@
- if phabricator_import_enabled?
%div
= link_to new_import_phabricator_path, class: 'gl-button btn-default btn import_phabricator js-import-project-btn', data: { platform: 'phabricator', track_label: "#{track_label}", track_action: "click_button", track_property: "phabricator" } do
= link_to new_import_phabricator_path(namespace_id: namespace_id), class: 'gl-button btn-default btn import_phabricator js-import-project-btn', data: { platform: 'phabricator', track_label: "#{track_label}", track_action: "click_button", track_property: "phabricator" } do
.gl-button-icon
= custom_icon('issues')
= _("Phabricator Tasks")

View File

@ -0,0 +1,8 @@
---
name: changelog_commits_limitation
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/89032
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/364101
milestone: '15.1'
type: development
group: group::source code
default_enabled: false

View File

@ -0,0 +1,13 @@
# frozen_string_literal: true
# Queues asynchronous preparation of a partial index on notes.id,
# covering only rows where confidential = true.
class PrepareConfidentialNoteIndexOnId < Gitlab::Database::Migration[2.0]
INDEX_NAME = 'index_notes_on_id_where_confidential'
# Registers the partial index for asynchronous creation; presumably a
# follow-up migration creates it synchronously once built — TODO confirm.
def up
prepare_async_index :notes, :id, where: 'confidential = true', name: INDEX_NAME
end
# Removes the queued async index preparation request.
def down
unprepare_async_index :notes, :id, name: INDEX_NAME
end
end

View File

@ -0,0 +1,41 @@
# frozen_string_literal: true
# Swaps the routes.namespace_id foreign key to namespaces from
# ON DELETE SET NULL to ON DELETE CASCADE, using the add-new-then-drop-old
# pattern so a valid FK exists at every point during the migration.
class SetOnDeleteCascadeOnNamespaceIdOnRoutesTable < Gitlab::Database::Migration[2.0]
disable_ddl_transaction!
TARGET_COLUMN = :namespace_id
def up
# add the new FK before removing the old one
add_concurrent_foreign_key(
:routes,
:namespaces,
column: TARGET_COLUMN,
name: fk_name("#{TARGET_COLUMN}_new"),
on_delete: :cascade
)
# Dropping an FK takes a lock, so wrap it in lock retries.
with_lock_retries do
remove_foreign_key_if_exists(:routes, column: TARGET_COLUMN, name: fk_name(TARGET_COLUMN))
end
end
# Restores the original ON DELETE SET NULL FK and drops the new one,
# mirroring #up in reverse order.
def down
add_concurrent_foreign_key(
:routes,
:namespaces,
column: TARGET_COLUMN,
name: fk_name(TARGET_COLUMN),
on_delete: :nullify
)
with_lock_retries do
remove_foreign_key_if_exists(:routes, column: TARGET_COLUMN, name: fk_name("#{TARGET_COLUMN}_new"))
end
end
# generate a FK name deterministically from the table and column
def fk_name(column_name)
concurrent_foreign_key_name(:routes, column_name)
end
end

View File

@ -0,0 +1,13 @@
# frozen_string_literal: true
# Adds a NOT NULL check constraint on routes.namespace_id without
# validating existing rows (validate: false), so the DDL is cheap;
# presumably validation of existing data happens in a later migration —
# TODO confirm.
class AddNotNullConstraintOnRoutesNamespaceId < Gitlab::Database::Migration[2.0]
disable_ddl_transaction!
def up
add_not_null_constraint :routes, :namespace_id, validate: false
end
def down
remove_not_null_constraint :routes, :namespace_id
end
end

View File

@ -0,0 +1 @@
3c0e1b1bdf658a1335ecd61e5409d428d1ff1827a9f13a9dbb8df7757a899b59

View File

@ -0,0 +1 @@
86abe66430f55f57cd528af90bbc364d75466ea44f8016c54b9734d123ae69a4

View File

@ -0,0 +1 @@
282fb55b257baa432f9e7aa97901ef61c58fd8e5dee2e687b21af54db9d37d03

View File

@ -24281,6 +24281,9 @@ ALTER TABLE ONLY chat_teams
ALTER TABLE vulnerability_scanners
ADD CONSTRAINT check_37608c9db5 CHECK ((char_length(vendor) <= 255)) NOT VALID;
ALTER TABLE routes
ADD CONSTRAINT check_af84c6c93f CHECK ((namespace_id IS NOT NULL)) NOT VALID;
ALTER TABLE sprints
ADD CONSTRAINT check_ccd8a1eae0 CHECK ((start_date IS NOT NULL)) NOT VALID;
@ -31634,9 +31637,6 @@ ALTER TABLE ONLY merge_requests
ALTER TABLE ONLY ci_builds
ADD CONSTRAINT fk_6661f4f0e8 FOREIGN KEY (resource_group_id) REFERENCES ci_resource_groups(id) ON DELETE SET NULL;
ALTER TABLE ONLY routes
ADD CONSTRAINT fk_679ff8213d FOREIGN KEY (namespace_id) REFERENCES namespaces(id) ON DELETE SET NULL;
ALTER TABLE ONLY application_settings
ADD CONSTRAINT fk_693b8795e4 FOREIGN KEY (push_rule_id) REFERENCES push_rules(id) ON DELETE SET NULL;
@ -31940,6 +31940,9 @@ ALTER TABLE ONLY customer_relations_contacts
ALTER TABLE ONLY deployments
ADD CONSTRAINT fk_b9a3851b82 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY routes
ADD CONSTRAINT fk_bb2e5b8968 FOREIGN KEY (namespace_id) REFERENCES namespaces(id) ON DELETE CASCADE;
ALTER TABLE ONLY gitlab_subscriptions
ADD CONSTRAINT fk_bd0c4019c3 FOREIGN KEY (hosted_plan_id) REFERENCES plans(id) ON DELETE CASCADE;

View File

@ -161,7 +161,7 @@ sudo gitlab-ctl restart consul
### Consul nodes unable to communicate
By default, Consul attempts to
[bind](https://www.consul.io/docs/agent/options#_bind) to `0.0.0.0`, but
[bind](https://www.consul.io/docs/agent/config/config-files#bind_addr) to `0.0.0.0`, but
it advertises the first private IP address on the node for other Consul nodes
to communicate with it. If the other nodes cannot communicate with a node on
this address, then the cluster has a failed status.

View File

@ -342,7 +342,7 @@ with the **secondary** site:
1. Promote the replica database associated with the **secondary** site. This
sets the database to read-write. The instructions vary depending on where your database is hosted:
- [Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html#USER_ReadRepl.Promote)
- [Azure PostgreSQL](https://docs.microsoft.com/en-us/azure/postgresql/howto-read-replicas-portal#stop-replication)
- [Azure PostgreSQL](https://docs.microsoft.com/en-us/azure/postgresql/single-server/how-to-read-replicas-portal#stop-replication)
- [Google Cloud SQL](https://cloud.google.com/sql/docs/mysql/replication/manage-replicas#promote-replica)
- For other external PostgreSQL databases, save the following script in your
secondary site, for example `/tmp/geo_promote.sh`, and modify the connection

View File

@ -71,7 +71,7 @@ The following instructions detail how to create a read-only replica for common
cloud providers:
- Amazon RDS - [Creating a Read Replica](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html#USER_ReadRepl.Create)
- Azure Database for PostgreSQL - [Create and manage read replicas in Azure Database for PostgreSQL](https://docs.microsoft.com/en-us/azure/postgresql/howto-read-replicas-portal)
- Azure Database for PostgreSQL - [Create and manage read replicas in Azure Database for PostgreSQL](https://docs.microsoft.com/en-us/azure/postgresql/single-server/how-to-read-replicas-portal)
- Google Cloud SQL - [Creating read replicas](https://cloud.google.com/sql/docs/postgres/replication/create-replica)
Once your read-only replica is set up, you can skip to [configure your secondary application node](#configure-secondary-application-nodes-to-use-the-external-read-replica).
@ -190,7 +190,7 @@ to grant additional roles to your tracking database user (by default, this is
`gitlab_geo`):
- Amazon RDS requires the [`rds_superuser`](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.PostgreSQL.CommonDBATasks.html#Appendix.PostgreSQL.CommonDBATasks.Roles) role.
- Azure Database for PostgreSQL requires the [`azure_pg_admin`](https://docs.microsoft.com/en-us/azure/postgresql/howto-create-users#how-to-create-additional-admin-users-in-azure-database-for-postgresql) role.
- Azure Database for PostgreSQL requires the [`azure_pg_admin`](https://docs.microsoft.com/en-us/azure/postgresql/single-server/how-to-create-users#how-to-create-additional-admin-users-in-azure-database-for-postgresql) role.
- Google Cloud SQL requires the [`cloudsqlsuperuser`](https://cloud.google.com/sql/docs/postgres/users#default-users) role.
This is for the installation of extensions during installation and upgrades. As an alternative,

View File

@ -71,7 +71,7 @@ the current status of these issues, please refer to the referenced issues and ep
| Issue | Summary | How to avoid |
|:--------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------|
| Gitaly Cluster + Geo - Issues retrying failed syncs | If Gitaly Cluster is used on a Geo secondary site, repositories that have failed to sync could continue to fail when Geo tries to resync them. Recovering from this state requires assistance from support to run manual steps. Work is in-progress to update Gitaly Cluster to [identify repositories with a unique and persistent identifier](https://gitlab.com/gitlab-org/gitaly/-/issues/3485), which is expected to resolve the issue. | No known solution at this time. |
| Praefect unable to insert data into the database due to migrations not being applied after an upgrade | If the database is not kept up to date with completed migrations, then the Praefect node is unable to perform normal operation. | Make sure the Praefect database is up and running with all migrations completed (For example: `/opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml sql-migrate-status` should show a list of all applied migrations). Consider [requesting live upgrade assistance](https://about.gitlab.com/support/scheduling-upgrade-assistance.html) so your upgrade plan can be reviewed by support. |
| Praefect unable to insert data into the database due to migrations not being applied after an upgrade | If the database is not kept up to date with completed migrations, then the Praefect node is unable to perform normal operation. | Make sure the Praefect database is up and running with all migrations completed (For example: `/opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml sql-migrate-status` should show a list of all applied migrations). Consider [requesting live upgrade assistance](https://about.gitlab.com/support/scheduling-upgrade-assistance/) so your upgrade plan can be reviewed by support. |
| Restoring a Gitaly Cluster node from a snapshot in a running cluster | Because the Gitaly Cluster runs with consistent state, introducing a single node that is behind will result in the cluster not being able to reconcile the nodes data and other nodes data | Don't restore a single Gitaly Cluster node from a backup snapshot. If you must restore from backup, it's best to snapshot all Gitaly Cluster nodes at the same time and take a database dump of the Praefect database. |
### Snapshot backup and recovery limitations
@ -389,7 +389,7 @@ relative path of the repository in the metadata store.
### Moving beyond NFS
Engineering support for NFS for Git repositories is deprecated. Technical support is planned to be unavailable starting
November 22, 2022. Please see our [statement of support](https://about.gitlab.com/support/statement-of-support.html#gitaly-and-nfs)
November 22, 2022. Please see our [statement of support](https://about.gitlab.com/support/statement-of-support/#gitaly-and-nfs)
for more details.
[Network File System (NFS)](https://en.wikipedia.org/wiki/Network_File_System)

View File

@ -239,7 +239,7 @@ who are aware of the risks.
- Related links:
- [GitLab Developer Documentation](../development/index.md)
- [Repairing and recovering broken Git repositories](https://git.seveas.net/repairing-and-recovering-broken-git-repositories.html)
- [Testing with OpenSSL](https://www.feistyduck.com/library/openssl-cookbook/online/ch-testing-with-openssl.html)
- [Testing with OpenSSL](https://www.feistyduck.com/library/openssl-cookbook/online/testing-with-openssl/index.html)
- [`strace` zine](https://wizardzines.com/zines/strace/)
- GitLab.com-specific resources:
- [Group SAML/SCIM setup](troubleshooting/group_saml_scim.md)

View File

@ -1059,3 +1059,11 @@ The [secure files API](../api/secure_files.md) enforces the following limits:
- Files must be smaller than 5 MB.
- Projects cannot have more than 100 secure files.
## Changelog API limits
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/89032) in GitLab 15.1 [with a flag](../administration/feature_flags.md) named `changelog_commits_limitation`. Disabled by default.
The [changelog API](../api/repositories.md#add-changelog-data-to-a-changelog-file) enforces the following limits:
- The commit range between `from` and `to` cannot exceed 15000 commits.

View File

@ -327,6 +327,11 @@ GitLab treats trailers case-sensitively. If you set the `trailer` field to
`Example`, GitLab _won't_ include commits that use the trailer `example`,
`eXaMpLE`, or anything else that isn't _exactly_ `Example`.
WARNING:
The allowed commits range between `from` and `to` is limited to 15000 commits. To disable
this restriction, [turn off the feature flag](../administration/feature_flags.md)
`changelog_commits_limitation`.
If the `from` attribute is unspecified, GitLab uses the Git tag of the last
stable version that came before the version specified in the `version`
attribute. This requires that Git tag names follow a specific format, allowing

View File

@ -375,13 +375,14 @@ job artifacts are deleted.
> - [Feature flag removed](https://gitlab.com/gitlab-org/gitlab/-/issues/229936) in GitLab 13.4.
> - [Made optional with a CI/CD setting](https://gitlab.com/gitlab-org/gitlab/-/issues/241026) in GitLab 13.8.
By default artifacts are always kept for the most recent successful pipeline for
By default artifacts are always kept for successful pipelines for the most recent commit on
each ref. This means that the latest artifacts do not immediately expire according
to the `expire_in` specification.
If a new pipeline for the same ref completes successfully, the previous pipeline's
If a pipeline for a new commit on the same ref completes successfully, the previous pipeline's
artifacts are deleted according to the `expire_in` configuration. The artifacts
of the new pipeline are kept automatically.
of the new pipeline are kept automatically. If multiple pipelines run for the most
recent commit on the ref, all artifacts are kept.
Keeping the latest artifacts can use a large amount of storage space in projects
with a lot of jobs or large artifacts. If the latest artifacts are not needed in

View File

@ -1848,35 +1848,59 @@ field :created_at, Types::TimeType, null: true, description: 'Timestamp of when
## Testing
### Writing unit tests
For testing mutations and resolvers, consider the unit of
test a full GraphQL request, not a call to a resolver. The reasons for this are
that we want to avoid lots of coupling to the framework, since this makes
upgrades to dependencies much more difficult.
You should:
- Prefer request specs (either using the full API endpoint or going through
`GitlabSchema.execute`) to unit specs for resolvers and mutations.
- Prefer `GraphqlHelpers#execute_query` and `GraphqlHelpers#run_with_clean_state` to
`GraphqlHelpers#resolve` and `GraphqlHelpers#resolve_field`.
For example:
```ruby
# Good:
gql_query = %q(some query text...)
post_graphql(gql_query, current_user: current_user)
# or:
GitlabSchema.execute(gql_query, context: { current_user: current_user })
# Deprecated: avoid
resolve(described_class, obj: project, ctx: { current_user: current_user })
```
### Writing unit tests (deprecated)
WARNING:
Avoid writing unit tests if the same thing can be tested with
a full GraphQL request.
Before creating unit tests, review the following examples:
- [`spec/graphql/resolvers/users_resolver_spec.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/spec/graphql/resolvers/users_resolver_spec.rb)
- [`spec/graphql/mutations/issues/create_spec.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/spec/graphql/mutations/issues/create_spec.rb)
It's faster to test as much of the logic from your GraphQL queries and mutations
with unit tests, which are stored in `spec/graphql`.
Use unit tests to verify that:
- Types have the expected fields.
- Resolvers and mutations apply authorizations and return expected data.
- Edge cases are handled correctly.
### Writing integration tests
Integration tests check the full stack for a GraphQL query or mutation and are stored in
`spec/requests/api/graphql`.
For speed, you should test most logic in unit tests instead of integration tests.
However, integration tests that check if data is returned verify the following
For speed, consider calling `GitlabSchema.execute` directly, or making use
of smaller test schemas that only contain the types under test.
However, full request integration tests that check if data is returned verify the following
additional items:
- The mutation is actually queryable in the schema (was mounted in `MutationType`).
- The data returned by a resolver or mutation correctly matches the
[return types](https://graphql-ruby.org/fields/introduction.html#field-return-type) of
the fields and resolves without errors.
- The arguments coerce correctly on input, and the fields serialize correctly
on output.
Integration tests can also verify the following items, because they invoke the
full stack:
@ -1929,6 +1953,33 @@ end
### Testing tips and tricks
- Become familiar with the methods in the `GraphqlHelpers` support module.
Many of these methods make writing GraphQL tests easier.
- Use traversal helpers like `GraphqlHelpers#graphql_data_at` and
`GraphqlHelpers#graphql_dig_at` to access result fields. For example:
```ruby
result = GitlabSchema.execute(query)
mr_iid = graphql_dig_at(result.to_h, :data, :project, :merge_request, :iid)
```
- Use `GraphqlHelpers#a_graphql_entity_for` to match against results.
For example:
```ruby
post_graphql(some_query)
# checks that it is a hash containing { id => global_id_of(issue) }
expect(graphql_data_at(:project, :issues, :nodes))
.to contain_exactly(a_graphql_entity_for(issue))
# Additional fields can be passed, either as names of methods, or with values
expect(graphql_data_at(:project, :issues, :nodes))
.to contain_exactly(a_graphql_entity_for(issue, :iid, :title, created_at: some_time))
```
- Avoid false positives:
Authenticating a user with the `current_user:` argument for `post_graphql`

View File

@ -243,7 +243,7 @@ due to this [known issue on the Kubernetes executor for GitLab Runner](https://g
### Helm
The Helm version used is defined in the
[`registry.gitlab.com/gitlab-org/gitlab-build-images:gitlab-helm3-kubectl1.14` image](https://gitlab.com/gitlab-org/gitlab-build-images/-/blob/master/Dockerfile.gitlab-helm3-kubectl1.14#L7)
[`registry.gitlab.com/gitlab-org/gitlab-build-images:gitlab-helm3.5-kubectl1.17` image](https://gitlab.com/gitlab-org/gitlab-build-images/-/blob/master/Dockerfile.gitlab-helm3.5-kubectl1.17#L6)
used by the `review-deploy` and `review-stop` jobs.
## Diagnosing unhealthy Review App releases

View File

@ -1,8 +1,8 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::BackgroundMigration::BackfillNamespaceIdForProjectRoute do
# this spec needs the schema from before the NOT NULL constraint on routes#namespace_id was introduced
RSpec.describe Gitlab::BackgroundMigration::BackfillNamespaceIdForProjectRoute, schema: 20220606060825 do
let(:migration) { described_class.new }
let(:namespaces) { table(:namespaces) }
let(:projects) { table(:projects) }

View File

@ -2,10 +2,11 @@
require 'spec_helper'
RSpec.describe Gitlab::BackgroundMigration::CleanupOrphanedRoutes do
let!(:namespaces) { table(:namespaces) }
let!(:projects) { table(:projects) }
let!(:routes) { table(:routes) }
# this spec needs the schema from before the NOT NULL constraint on routes#namespace_id was introduced
RSpec.describe Gitlab::BackgroundMigration::CleanupOrphanedRoutes, schema: 20220606060825 do
let(:namespaces) { table(:namespaces) }
let(:projects) { table(:projects) }
let(:routes) { table(:routes) }
let!(:namespace1) { namespaces.create!(name: 'batchtest1', type: 'Group', path: 'space1') }
let!(:namespace2) { namespaces.create!(name: 'batchtest2', type: 'Group', parent_id: namespace1.id, path: 'space2') }

View File

@ -22,13 +22,6 @@ RSpec.describe Route do
end
describe 'callbacks' do
context 'before validation' do
it 'calls #delete_conflicting_orphaned_routes' do
expect(route).to receive(:delete_conflicting_orphaned_routes)
route.valid?
end
end
context 'after update' do
it 'calls #create_redirect_for_old_path' do
expect(route).to receive(:create_redirect_for_old_path)
@ -44,7 +37,7 @@ RSpec.describe Route do
context 'after create' do
it 'calls #delete_conflicting_redirects' do
route.destroy!
new_route = described_class.new(source: group, path: group.path)
new_route = described_class.new(source: group, path: group.path, namespace: group)
expect(new_route).to receive(:delete_conflicting_redirects)
new_route.save!
end
@ -275,7 +268,7 @@ RSpec.describe Route do
end
end
describe '#delete_conflicting_orphaned_routes' do
describe 'conflicting routes validation' do
context 'when there is a conflicting route' do
let!(:conflicting_group) { create(:group, path: 'foo') }
@ -283,47 +276,31 @@ RSpec.describe Route do
route.path = conflicting_group.route.path
end
context 'when the route is orphaned' do
context 'when deleting the conflicting route' do
let!(:offending_route) { conflicting_group.route }
before do
Group.delete(conflicting_group) # Orphan the route
end
it 'does not delete the original route' do
# before deleting the route, check it's there
expect(Route.where(path: offending_route.path).count).to eq(1)
it 'deletes the orphaned route' do
expect do
route.valid?
end.to change { described_class.count }.from(2).to(1)
end
Group.delete(conflicting_group) # delete group with conflicting route
end.to change { described_class.count }.by(-1)
it 'passes validation, as usual' do
# check the conflicting route is gone
expect(Route.where(path: offending_route.path).count).to eq(0)
expect(route.path).to eq(offending_route.path)
expect(route.valid?).to be_truthy
end
end
context 'when the route is not orphaned' do
it 'does not delete the conflicting route' do
expect do
route.valid?
end.not_to change { described_class.count }
end
it 'fails validation, as usual' do
expect(route.valid?).to be_falsey
end
it 'fails validation' do
expect(route.valid?).to be_falsey
end
end
context 'when there are no conflicting routes' do
it 'does not delete any routes' do
route
expect do
route.valid?
end.not_to change { described_class.count }
end
it 'passes validation, as usual' do
it 'passes validation' do
expect(route.valid?).to be_truthy
end
end

View File

@ -164,6 +164,28 @@ RSpec.describe Repositories::ChangelogService do
expect { request.call(sha3) }.not_to exceed_query_limit(control.count)
end
context 'when commit range exceeds the limit' do
let(:service) { described_class.new(project, creator, version: '1.0.0', from: sha1) }
before do
stub_const("#{described_class.name}::COMMITS_LIMIT", 2)
end
it 'raises an exception' do
expect { service.execute(commit_to_changelog: false) }.to raise_error(Gitlab::Changelog::Error)
end
context 'when feature flag is off' do
before do
stub_feature_flags(changelog_commits_limitation: false)
end
it 'returns the changelog' do
expect(service.execute(commit_to_changelog: false)).to include('Title 1', 'Title 2', 'Title 3')
end
end
end
end
describe '#start_of_commit_range' do