# frozen_string_literal: true

module Banzai
  # Extract references to issuables (issues and merge requests) from
  # multiple rendered documents.
  #
  # This populates the RequestStore cache used in
  # Banzai::ReferenceParser::IssueParser and
  # Banzai::ReferenceParser::MergeRequestParser.
  #
  # Populating the cache should happen before processing documents
  # one-by-one so we can avoid the N+1 queries problem.
  class IssuableExtractor
    # XPath matching GFM reference links that point at an issue or a
    # merge request (anchor tags carrying the "gfm" class and the
    # relevant data-reference-type attribute).
    QUERY = %q(
      descendant-or-self::a[contains(concat(" ", @class, " "), " gfm ")]
      [@data-reference-type="issue" or @data-reference-type="merge_request"]
    ).freeze

    attr_reader :project, :user

    # @param project [Project] project used to resolve references
    # @param user [User] user whose permissions scope the lookup
    def initialize(project, user)
      @project = project
      @user = user
    end

    # Extracts issuables referenced by the given documents.
    #
    # @param documents [Array<Nokogiri::HTML::Document>] parsed documents
    # @return [Hash] in the form { node => issuable_instance }
    def extract(documents)
      nodes = documents.flat_map do |document|
        document.xpath(QUERY)
      end

      issue_parser = Banzai::ReferenceParser::IssueParser.new(project, user)
      merge_request_parser = Banzai::ReferenceParser::MergeRequestParser.new(project, user)

      issuables_for_nodes = issue_parser.issues_for_nodes(nodes).merge(
        merge_request_parser.merge_requests_for_nodes(nodes)
      )

      # The project for the issue/MR might be pending deletion, in which
      # case the issuable's project is nil. Filter those out because we
      # don't care about them.
      issuables_for_nodes.select { |_node, issuable| issuable.project }
    end
  end
end