2003-06-09 21:31:01 -04:00
|
|
|
require 'rexml/parseexception'
|
|
|
|
require 'rexml/source'
|
|
|
|
|
|
|
|
module REXML
|
|
|
|
module Parsers
|
|
|
|
# = Using the Pull Parser
|
|
|
|
# <em>This API is experimental, and subject to change.</em>
|
|
|
|
# parser = PullParser.new( "<a>text<b att='val'/>txet</a>" )
|
|
|
|
# while parser.has_next?
|
|
|
|
# res = parser.next
|
|
|
|
# puts res[1]['att'] if res.start_tag? and res[0] == 'b'
|
|
|
|
# end
|
|
|
|
# See the PullEvent class for information on the content of the results.
|
|
|
|
# The data is identical to the arguments passed for the various events to
|
|
|
|
# the StreamListener API.
|
|
|
|
#
|
|
|
|
# Notice that:
|
|
|
|
# parser = PullParser.new( "<a>BAD DOCUMENT" )
|
|
|
|
# while parser.has_next?
|
|
|
|
# res = parser.next
|
|
|
|
# raise res[1] if res.error?
|
|
|
|
# end
|
|
|
|
#
|
|
|
|
# Nat Price gave me some good ideas for the API.
|
|
|
|
class BaseParser
|
2003-06-15 14:31:16 -04:00
|
|
|
# --- Name and token building blocks (XML 1.0 productions, as strings so
# --- they can be interpolated into larger patterns below) ---
NCNAME_STR= '[\w:][\-\w\d.]*'
NAME_STR= "(?:#{NCNAME_STR}:)?#{NCNAME_STR}"

NAMECHAR = '[\-\w\d\.:]'
NAME = "([\\w:]#{NAMECHAR}*)"
NMTOKEN = "(?:#{NAMECHAR})+"
NMTOKENS = "#{NMTOKEN}(\\s+#{NMTOKEN})*"
# General entity / character reference: &name; &#dd; &#xhh;
REFERENCE = "(?:&#{NAME};|&#\\d+;|&#x[0-9a-fA-F]+;)"
REFERENCE_RE = /#{REFERENCE}/

# --- Markup recognition patterns.  *_START patterns are anchored probes
# --- used to classify the next construct; *_PATTERN forms consume it. ---
DOCTYPE_START = /\A\s*<!DOCTYPE\s/um
DOCTYPE_PATTERN = /\s*<!DOCTYPE\s+(.*?)(\[|>)/um
ATTRIBUTE_PATTERN = /\s*(#{NAME_STR})\s*=\s*(["'])(.*?)\2/um
COMMENT_START = /\A<!--/u
COMMENT_PATTERN = /<!--(.*?)-->/um
CDATA_START = /\A<!\[CDATA\[/u
# NOTE(review): matches "]>"-style subset terminators, not a CDATA "]]>";
# used to detect the end of a DOCTYPE internal subset in #pull.
CDATA_END = /^\s*\]\s*>/um
CDATA_PATTERN = /<!\[CDATA\[(.*?)\]\]>/um
XMLDECL_START = /\A<\?xml\s/u;
XMLDECL_PATTERN = /<\?xml\s+(.*?)\?>*/um
INSTRUCTION_START = /\A<\?/u
INSTRUCTION_PATTERN = /<\?(.*?)(\s+.*?)?\?>/um
# Start tag: captures name (1), raw attribute text (2), self-close slash (4).
TAG_MATCH = /^<((?>#{NAME_STR}))\s*((?>\s+#{NAME_STR}\s*=\s*(["']).*?\3)*)\s*(\/)?>/um
CLOSE_MATCH = /^\s*<\/(#{NAME_STR})\s*>/um

# --- XML declaration pseudo-attributes ---
VERSION = /\bversion\s*=\s*["'](.*?)['"]/um
ENCODING = /\bencoding=["'](.*?)['"]/um
STANDALONE = /\bstandalone=["'](.*?)['"]/um

# --- DTD declaration patterns ---
ENTITY_START = /^\s*<!ENTITY/
IDENTITY = /^([!\*\w\-]+)(\s+#{NCNAME_STR})?(\s+["'].*?['"])?(\s+['"].*?["'])?/u
ELEMENTDECL_START = /^\s*<!ELEMENT/um
ELEMENTDECL_PATTERN = /^\s*(<!ELEMENT.*?)>/um
SYSTEMENTITY = /^\s*(%.*?;)\s*$/um
ENUMERATION = "\\(\\s*#{NMTOKEN}(?:\\s*\\|\\s*#{NMTOKEN})*\\s*\\)"
NOTATIONTYPE = "NOTATION\\s+\\(\\s*#{NAME}(?:\\s*\\|\\s*#{NAME})*\\s*\\)"
ENUMERATEDTYPE = "(?:(?:#{NOTATIONTYPE})|(?:#{ENUMERATION}))"
ATTTYPE = "(CDATA|ID|IDREF|IDREFS|ENTITY|ENTITIES|NMTOKEN|NMTOKENS|#{ENUMERATEDTYPE})"
ATTVALUE = "(?:\"((?:[^<&\"]|#{REFERENCE})*)\")|(?:'((?:[^<&']|#{REFERENCE})*)')"
DEFAULTDECL = "(#REQUIRED|#IMPLIED|(?:(#FIXED\\s+)?#{ATTVALUE}))"
ATTDEF = "\\s+#{NAME}\\s+#{ATTTYPE}\\s+#{DEFAULTDECL}"
ATTDEF_RE = /#{ATTDEF}/
ATTLISTDECL_START = /^\s*<!ATTLIST/um
ATTLISTDECL_PATTERN = /^\s*<!ATTLIST\s+#{NAME}(?:#{ATTDEF})*\s*>/um
NOTATIONDECL_START = /^\s*<!NOTATION/um
PUBLIC = /^\s*<!NOTATION\s+(\w[\-\w]*)\s+(PUBLIC)\s+((["']).*?\4)\s*>/um
SYSTEM = /^\s*<!NOTATION\s+(\w[\-\w]*)\s+(SYSTEM)\s+((["']).*?\4)\s*>/um

# Character data up to (not including) the next markup start.
TEXT_PATTERN = /\A([^<]*)/um

# Entity constants
PUBIDCHAR = "\x20\x0D\x0Aa-zA-Z0-9\\-()+,./:=?;!*@$_%#"
SYSTEMLITERAL = %Q{((?:"[^"]*")|(?:'[^']*'))}
PUBIDLITERAL = %Q{("[#{PUBIDCHAR}']*"|'[#{PUBIDCHAR}]*')}
EXTERNALID = "(?:(?:(SYSTEM)\\s+#{SYSTEMLITERAL})|(?:(PUBLIC)\\s+#{PUBIDLITERAL}\\s+#{SYSTEMLITERAL}))"
NDATADECL = "\\s+NDATA\\s+#{NAME}"
PEREFERENCE = "%#{NAME};"
ENTITYVALUE = %Q{((?:"(?:[^%&"]|#{PEREFERENCE}|#{REFERENCE})*")|(?:'([^%&']|#{PEREFERENCE}|#{REFERENCE})*'))}
PEDEF = "(?:#{ENTITYVALUE}|#{EXTERNALID})"
ENTITYDEF = "(?:#{ENTITYVALUE}|(?:#{EXTERNALID}(#{NDATADECL})?))"
PEDECL = "<!ENTITY\\s+(%)\\s+#{NAME}\\s+#{PEDEF}\\s*>"
GEDECL = "<!ENTITY\\s+#{NAME}\\s+#{ENTITYDEF}\\s*>"
ENTITYDECL = /\s*(?:#{GEDECL})|(?:#{PEDECL})/um

# An ampersand that does NOT begin a valid entity reference.
EREFERENCE = /&(?!#{NAME};)/
|
|
# The predefined XML entities.  Each value is
# [ Regexp matching the escaped form, escaped form (String),
#   unescaped character, Regexp matching the unescaped character ].
# FIX: this copy of the file had been HTML-unescaped, collapsing the
# escaped forms (e.g. "&gt;") into the raw characters and turning the
# table into useless no-op pairs; the escaped forms are restored here.
# ('amp' is deliberately absent -- it is handled specially so it can be
# escaped first and unescaped last.)
DEFAULT_ENTITIES = {
  'gt' => [/&gt;/, '&gt;', '>', />/],
  'lt' => [/&lt;/, '&lt;', '<', /</],
  'quot' => [/&quot;/, '&quot;', '"', /"/],
  "apos" => [/&apos;/, "&apos;", "'", /'/]
}
|
|
|
# Create a new parser reading from +source+, which may be a String, IO,
# StringIO, or Source (see #stream= for details).
def initialize( source )
  self.stream = source
end

# The Source object the parser is currently reading events from.
attr_reader :source
2003-06-09 21:31:01 -04:00
|
|
|
# Attach a new input to the parser and reset all parse state, so a
# parser instance can be reused.  +source+ may be a String, IO,
# StringIO, or an already-constructed Source; anything else raises.
def stream=( source )
  @source =
    case source
    when String then Source.new(source)
    when IO     then IOSource.new(source)
    when Source then source
    else
      if defined? StringIO and source.kind_of? StringIO
        IOSource.new(source)
      else
        raise "#{source.class} is not a valid input stream. It must be \n"+
          "either a String, IO, StringIO or Source."
      end
    end
  # Fresh parse state for the new stream.
  @closed = nil             # pending :end_element for a self-closed tag
  @document_status = nil    # nil -> prolog; :in_doctype; :after_doctype
  @tags = []                # stack of open element names
  @stack = []               # buffered events (peek/unshift)
  @entities = []
end
|
|
|
|
|
|
|
|
# True when nothing remains to pull: the underlying source is exhausted
# and no events are buffered on the stack.
def empty?
  @source.empty? && @stack.empty?
end
|
|
|
|
|
|
|
|
# Returns true if there are more events.  Synonymous with !empty? --
# written via De Morgan over the source and the buffered-event stack.
def has_next?
  !@source.empty? || !@stack.empty?
end
|
|
|
|
|
|
|
|
# Push +event+ back onto the head of the stream so the next #pull (or
# #peek 0) returns it.  May be applied repeatedly -- the buffer has
# (theoretically) infinite depth.
def unshift( event )
  @stack.unshift( event )
end
|
|
|
|
|
|
|
|
# Peek at the +depth+ event in the stack without consuming it.  The
# first pending event is at depth 0.  A +depth+ of -1 parses to the end
# of the input and returns the last event (always :end_document).  Note
# that this forces the stream to be parsed up to +depth+ events, so a
# deep peek can pull a large portion of the document into memory.
def peek depth=0
  raise %Q[Illegal argument "#{depth}"] if depth < -1
  pulled = []
  if depth == -1
    pulled << pull until empty?
  else
    # Fill until the combined buffer reaches depth+1 events.
    pulled << pull while @stack.size + pulled.size <= depth
  end
  @stack.concat( pulled ) unless pulled.empty?
  @stack[depth]
end
|
|
|
|
|
|
|
|
# Returns the next event as an array, e.g. [ :start_element, name,
# attr_hash ], [ :text, raw ], [ :comment, text ], [ :xmldecl, version,
# encoding, standalone ], DTD events while inside a DOCTYPE, and
# [ :end_document ] forever once the input is exhausted.  Raises
# REXML::ParseException on malformed input.
def pull
  return [ :end_document ] if empty?
  # A self-closing tag on the previous pull deferred its :end_element.
  if @closed
    x, @closed = @closed, nil
    return [ :end_element, x ]
  end
  # Serve buffered events (from unshift/peek) first.
  return @stack.shift if @stack.size > 0
  @source.read if @source.buffer.size<2
  if @document_status == nil
    # Still in the prolog: classify the next construct by its start.
    @source.consume( /^\s*/um )
    word = @source.match( /(<[^>]*)>/um )
    word = word[1] unless word.nil?
    case word
    when COMMENT_START
      return [ :comment, @source.match( COMMENT_PATTERN, true )[1] ]
    when XMLDECL_START
      results = @source.match( XMLDECL_PATTERN, true )[1]
      version = VERSION.match( results )
      version = version[1] unless version.nil?
      encoding = ENCODING.match(results)
      encoding = encoding[1] unless encoding.nil?
      @source.encoding = encoding
      standalone = STANDALONE.match(results)
      standalone = standalone[1] unless standalone.nil?
      return [ :xmldecl, version, encoding, standalone ]
    when INSTRUCTION_START
      return [ :processing_instruction, *@source.match(INSTRUCTION_PATTERN, true)[1,2] ]
    when DOCTYPE_START
      md = @source.match( DOCTYPE_PATTERN, true )
      identity = md[1]
      close = md[2]
      identity =~ IDENTITY
      name = $1
      # FIX: was "raise REXML::ParseException(...)" -- that calls a
      # nonexistent method REXML.ParseException instead of constructing
      # the exception, so the intended error never surfaced as such.
      raise REXML::ParseException.new( "DOCTYPE is missing a name" ) if name.nil?
      pub_sys = $2.nil? ? nil : $2.strip
      long_name = $3.nil? ? nil : $3.strip
      uri = $4.nil? ? nil : $4.strip
      args = [ :start_doctype, name, pub_sys, long_name, uri ]
      if close == ">"
        # No internal subset: queue the matching :end_doctype now.
        @document_status = :after_doctype
        @source.read if @source.buffer.size<2
        @source.match(/^\s*/um, true)
        @stack << [ :end_doctype ]
      else
        @document_status = :in_doctype
      end
      return args
    else
      # No recognizable prolog construct; skip whitespace and fall
      # through to normal content parsing.
      @document_status = :after_doctype
      @source.read if @source.buffer.size<2
      @source.match(/\s*/um, true)
    end
  end
  if @document_status == :in_doctype
    # Inside the DTD internal subset: declarations until "]>".
    md = @source.match(/\s*(.*?>)/um)
    case md[1]
    when SYSTEMENTITY
      match = @source.match( SYSTEMENTITY, true )[1]
      return [ :externalentity, match ]
    when ELEMENTDECL_START
      return [ :elementdecl, @source.match( ELEMENTDECL_PATTERN, true )[1] ]
    when ENTITY_START
      match = @source.match( ENTITYDECL, true ).to_a.compact
      match[0] = :entitydecl
      ref = false
      if match[1] == '%'
        ref = true
        match.delete_at 1
      end
      # Now we have to sort out what kind of entity reference this is
      if match[2] == 'SYSTEM'
        # External reference
        match[3] = match[3][1..-2] # PUBID
        match.delete_at(4) if match.size > 4 # Chop out NDATA decl
        # match is [ :entity, name, SYSTEM, pubid(, ndata)? ]
      elsif match[2] == 'PUBLIC'
        # External reference
        match[3] = match[3][1..-2] # PUBID
        match[4] = match[4][1..-2] # HREF
        # match is [ :entity, name, PUBLIC, pubid, href ]
      else
        # Internal entity: strip the quotes from its value.
        match[2] = match[2][1..-2]
        match.pop if match.size == 4
        # match is [ :entity, name, value ]
      end
      match << '%' if ref
      return match
    when ATTLISTDECL_START
      md = @source.match( ATTLISTDECL_PATTERN, true )
      raise REXML::ParseException.new( "Bad ATTLIST declaration!", @source ) if md.nil?
      element = md[1]
      contents = md[0]
      # Collect name => default-value pairs for non-#IMPLIED attributes.
      pairs = {}
      values = md[0].scan( ATTDEF_RE )
      values.each do |attdef|
        unless attdef[3] == "#IMPLIED"
          attdef.compact!
          val = attdef[3]
          val = attdef[4] if val == "#FIXED "
          pairs[attdef[0]] = val
        end
      end
      return [ :attlistdecl, element, pairs, contents ]
    when NOTATIONDECL_START
      md = nil
      if @source.match( PUBLIC )
        md = @source.match( PUBLIC, true )
      elsif @source.match( SYSTEM )
        md = @source.match( SYSTEM, true )
      else
        raise REXML::ParseException.new( "error parsing notation: no matching pattern", @source )
      end
      return [ :notationdecl, md[1], md[2], md[3] ]
    when CDATA_END
      # "]>" terminates the internal subset.
      @document_status = :after_doctype
      @source.match( CDATA_END, true )
      return [ :end_doctype ]
    end
  end
  begin
    if @source.buffer[0] == ?<
      if @source.buffer[1] == ?/
        # Closing tag: must match the most recently opened element.
        last_tag = @tags.pop
        md = @source.match( CLOSE_MATCH, true )
        raise REXML::ParseException.new( "Missing end tag for '#{last_tag}' "+
          "(got \"#{md[1]}\")", @source) unless last_tag == md[1]
        return [ :end_element, last_tag ]
      elsif @source.buffer[1] == ?!
        # "<!" outside the DTD: only comments and CDATA are legal here.
        md = @source.match(/\A(\s*[^>]*>)/um)
        raise REXML::ParseException.new("Malformed node", @source) unless md
        if md[0][2] == ?-
          md = @source.match( COMMENT_PATTERN, true )
          return [ :comment, md[1] ] if md
        else
          md = @source.match( CDATA_PATTERN, true )
          return [ :cdata, md[1] ] if md
        end
        raise REXML::ParseException.new( "Declarations can only occur "+
          "in the doctype declaration.", @source)
      elsif @source.buffer[1] == ??
        md = @source.match( INSTRUCTION_PATTERN, true )
        return [ :processing_instruction, md[1], md[2] ] if md
        raise REXML::ParseException.new( "Bad instruction declaration",
          @source)
      else
        # Get the next start tag
        md = @source.match(TAG_MATCH, true)
        raise REXML::ParseException.new("malformed XML: missing tag start", @source) unless md
        attrs = []
        if md[2].size > 0
          attrs = md[2].scan( ATTRIBUTE_PATTERN )
          raise REXML::ParseException.new( "error parsing attributes: [#{attrs.join ', '}], excess = \"#$'\"", @source) if $' and $'.strip.size > 0
        end
        if md[4]
          # Self-closing: defer the :end_element to the next pull.
          @closed = md[1]
        else
          @tags.push( md[1] )
        end
        attributes = {}
        attrs.each { |a,b,c| attributes[a] = c }
        return [ :start_element, md[1], attributes ]
      end
    else
      # Character data up to the next '<'.
      md = @source.match( TEXT_PATTERN, true )
      if md[0].length == 0
        @source.match( /(\s+)/, true )
      end
      return [ :text, md[1] ]
    end
  rescue REXML::ParseException
    raise
  rescue Exception, NameError => error
    # NOTE(review): rescuing Exception is very broad (swallows
    # SystemExit etc.); preserved because callers rely on every parse
    # failure surfacing as a REXML::ParseException.
    raise REXML::ParseException.new( "Exception parsing",
      @source, self, (error ? error : $!) )
  end
  return [ :dummy ]
end
|
|
|
|
|
|
|
|
# Resolve the entity named +reference+.  Custom +entities+ (a hash of
# name => value) take precedence; otherwise the predefined XML entities
# are consulted.  The resolved value is itself unnormalized before being
# returned.  Returns nil when the reference is unknown.
def entity( reference, entities )
  value = entities ? entities[ reference ] : nil
  unless value
    predefined = DEFAULT_ENTITIES[ reference ]
    value = predefined[2] if predefined
  end
  unnormalize( value, entities ) if value
end
|
|
|
|
|
|
|
|
# Escapes all possible entities in +input+, returning the escaped copy.
#
# input:: the raw text to escape.
# entities:: optional hash of entity name => replacement text; each
#            occurrence of a replacement text is re-encoded as "&name;".
# entity_filter:: optional list of entity *names* that must NOT be
#                 re-encoded.
def normalize( input, entities=nil, entity_filter=nil )
  copy = input.clone
  # Doing it like this rather than in a loop improves the speed.
  # FIX: the replacement had been HTML-unescaped to '&', turning the
  # ampersand-escaping pass into a no-op; restored to '&amp;'.
  copy.gsub!( EREFERENCE, '&amp;' )
  entities.each do |key, value|
    # FIX: was entity_filter.include?(entity), which invoked the 2-arg
    # #entity method with no arguments and raised whenever a filter was
    # supplied; the filter holds entity names, so test the key.
    copy.gsub!( value, "&#{key};" ) unless entity_filter and
                                entity_filter.include?(key)
  end if entities
  copy.gsub!( EREFERENCE, '&amp;' )
  DEFAULT_ENTITIES.each do |key, value|
    # value[3] matches the raw character; value[1] is its escaped form.
    copy.gsub!( value[3], value[1] )
  end
  copy
end
|
|
|
|
|
|
|
|
# Unescapes all possible entities in +string+, returning the result.
# Line endings are canonicalized to "\n" first; numeric character
# references are expanded, then named entities (custom ones via #entity,
# then the predefined XML set), and "&amp;" last so it cannot create
# new references.
#
# string:: the normalized text to expand.
# entities:: optional hash of entity name => value, passed to #entity.
# filter:: optional list of entity names to leave untouched.
def unnormalize( string, entities=nil, filter=nil )
  rv = string.clone
  rv.gsub!( /\r\n?/, "\n" )
  matches = rv.scan( REFERENCE_RE )
  return rv if matches.size == 0
  # Expand character references &#dd; / &#xhh; (ignoring leading zeros).
  # FIX: the pattern had been corrupted to /\uFFFD*.../ by a bad
  # re-encoding of this file; restored to match "&#0*...".
  rv.gsub!( /&#0*((?:\d+)|(?:x[a-fA-F0-9]+));/ ) {|m|
    m=$1
    m = "0#{m}" if m[0] == ?x   # "x42" -> "0x42" so Integer() parses hex
    [Integer(m)].pack('U*')
  }
  # Reduce the scan results to the captured entity names (character
  # references capture nothing and are dropped here).
  matches.collect!{|x|x[0]}.compact!
  if matches.size > 0
    matches.each do |entity_reference|
      unless filter and filter.include?(entity_reference)
        entity_value = entity( entity_reference, entities )
        if entity_value
          re = /&#{entity_reference};/
          rv.gsub!( re, entity_value )
        end
      end
    end
    matches.each do |entity_reference|
      unless filter and filter.include?(entity_reference)
        er = DEFAULT_ENTITIES[entity_reference]
        rv.gsub!( er[0], er[2] ) if er
      end
    end
    # FIX: was gsub!(/&/, '&') -- a no-op left by HTML-unescaping this
    # file; "&amp;" must be expanded to "&" as the final step.
    rv.gsub!( /&amp;/, '&' )
  end
  rv
end
|
|
|
|
end
|
|
|
|
end
|
|
|
|
end
|