cordova-commits mailing list archives

From: an-s...@apache.org
Subject: [24/51] [abbrv] [partial] cordova-windows git commit: CB-11117: Bundle updated node modules
Date: Fri, 27 May 2016 07:09:17 GMT
http://git-wip-us.apache.org/repos/asf/cordova-windows/blob/5a6cb728/node_modules/elementtree/node_modules/sax/lib/sax.js
----------------------------------------------------------------------
diff --git a/node_modules/elementtree/node_modules/sax/lib/sax.js b/node_modules/elementtree/node_modules/sax/lib/sax.js
deleted file mode 100644
index 17fb08e..0000000
--- a/node_modules/elementtree/node_modules/sax/lib/sax.js
+++ /dev/null
@@ -1,1006 +0,0 @@
-// wrapper for non-node envs
-;(function (sax) {
-
-sax.parser = function (strict, opt) { return new SAXParser(strict, opt) }
-sax.SAXParser = SAXParser
-sax.SAXStream = SAXStream
-sax.createStream = createStream
-
-// When we pass the MAX_BUFFER_LENGTH position, start checking for buffer overruns.
-// When we check, schedule the next check for MAX_BUFFER_LENGTH - (max(buffer lengths)),
-// since that's the earliest that a buffer overrun could occur.  This way, checks are
-// as rare as required, but as often as necessary to ensure never crossing this bound.
-// Furthermore, buffers are only tested at most once per write(), so passing a very
-// large string into write() might have undesirable effects, but this is manageable by
-// the caller, so it is assumed to be safe.  Thus, a call to write() may, in the extreme
-// edge case, result in creating at most one complete copy of the string passed in.
-// Set to Infinity to have unlimited buffers.
-sax.MAX_BUFFER_LENGTH = 64 * 1024
-
-var buffers = [
-  "comment", "sgmlDecl", "textNode", "tagName", "doctype",
-  "procInstName", "procInstBody", "entity", "attribName",
-  "attribValue", "cdata", "script"
-]
-
-sax.EVENTS = // for discoverability.
-  [ "text"
-  , "processinginstruction"
-  , "sgmldeclaration"
-  , "doctype"
-  , "comment"
-  , "attribute"
-  , "opentag"
-  , "closetag"
-  , "opencdata"
-  , "cdata"
-  , "closecdata"
-  , "error"
-  , "end"
-  , "ready"
-  , "script"
-  , "opennamespace"
-  , "closenamespace"
-  ]
-
-function SAXParser (strict, opt) {
-  if (!(this instanceof SAXParser)) return new SAXParser(strict, opt)
-
-  var parser = this
-  clearBuffers(parser)
-  parser.q = parser.c = ""
-  parser.bufferCheckPosition = sax.MAX_BUFFER_LENGTH
-  parser.opt = opt || {}
-  parser.tagCase = parser.opt.lowercasetags ? "toLowerCase" : "toUpperCase"
-  parser.tags = []
-  parser.closed = parser.closedRoot = parser.sawRoot = false
-  parser.tag = parser.error = null
-  parser.strict = !!strict
-  parser.noscript = !!(strict || parser.opt.noscript)
-  parser.state = S.BEGIN
-  parser.ENTITIES = Object.create(sax.ENTITIES)
-  parser.attribList = []
-
-  // namespaces form a prototype chain.
-  // it always points at the current tag,
-  // which protos to its parent tag.
-  if (parser.opt.xmlns) parser.ns = Object.create(rootNS)
-
-  // mostly just for error reporting
-  parser.position = parser.line = parser.column = 0
-  emit(parser, "onready")
-}
-
-if (!Object.create) Object.create = function (o) {
-  function f () { this.__proto__ = o }
-  f.prototype = o
-  return new f
-}
-
-if (!Object.getPrototypeOf) Object.getPrototypeOf = function (o) {
-  return o.__proto__
-}
-
-if (!Object.keys) Object.keys = function (o) {
-  var a = []
-  for (var i in o) if (o.hasOwnProperty(i)) a.push(i)
-  return a
-}
-
-function checkBufferLength (parser) {
-  var maxAllowed = Math.max(sax.MAX_BUFFER_LENGTH, 10)
-    , maxActual = 0
-  for (var i = 0, l = buffers.length; i < l; i ++) {
-    var len = parser[buffers[i]].length
-    if (len > maxAllowed) {
-      // Text/cdata nodes can get big, and since they're buffered,
-      // we can get here under normal conditions.
-      // Avoid issues by emitting the text node now,
-      // so at least it won't get any bigger.
-      switch (buffers[i]) {
-        case "textNode":
-          closeText(parser)
-        break
-
-        case "cdata":
-          emitNode(parser, "oncdata", parser.cdata)
-          parser.cdata = ""
-        break
-
-        case "script":
-          emitNode(parser, "onscript", parser.script)
-          parser.script = ""
-        break
-
-        default:
-          error(parser, "Max buffer length exceeded: "+buffers[i])
-      }
-    }
-    maxActual = Math.max(maxActual, len)
-  }
-  // schedule the next check for the earliest possible buffer overrun.
-  parser.bufferCheckPosition = (sax.MAX_BUFFER_LENGTH - maxActual)
-                             + parser.position
-}
-
-function clearBuffers (parser) {
-  for (var i = 0, l = buffers.length; i < l; i ++) {
-    parser[buffers[i]] = ""
-  }
-}
-
-SAXParser.prototype =
-  { end: function () { end(this) }
-  , write: write
-  , resume: function () { this.error = null; return this }
-  , close: function () { return this.write(null) }
-  , end: function () { return this.write(null) }
-  }
-
-try {
-  var Stream = require("stream").Stream
-} catch (ex) {
-  var Stream = function () {}
-}
-
-
-var streamWraps = sax.EVENTS.filter(function (ev) {
-  return ev !== "error" && ev !== "end"
-})
-
-function createStream (strict, opt) {
-  return new SAXStream(strict, opt)
-}
-
-function SAXStream (strict, opt) {
-  if (!(this instanceof SAXStream)) return new SAXStream(strict, opt)
-
-  Stream.apply(me)
-
-  this._parser = new SAXParser(strict, opt)
-  this.writable = true
-  this.readable = true
-
-
-  var me = this
-
-  this._parser.onend = function () {
-    me.emit("end")
-  }
-
-  this._parser.onerror = function (er) {
-    me.emit("error", er)
-
-    // if didn't throw, then means error was handled.
-    // go ahead and clear error, so we can write again.
-    me._parser.error = null
-  }
-
-  streamWraps.forEach(function (ev) {
-    Object.defineProperty(me, "on" + ev, {
-      get: function () { return me._parser["on" + ev] },
-      set: function (h) {
-        if (!h) {
-          me.removeAllListeners(ev)
-          return me._parser["on"+ev] = h
-        }
-        me.on(ev, h)
-      },
-      enumerable: true,
-      configurable: false
-    })
-  })
-}
-
-SAXStream.prototype = Object.create(Stream.prototype,
-  { constructor: { value: SAXStream } })
-
-SAXStream.prototype.write = function (data) {
-  this._parser.write(data.toString())
-  this.emit("data", data)
-  return true
-}
-
-SAXStream.prototype.end = function (chunk) {
-  if (chunk && chunk.length) this._parser.write(chunk.toString())
-  this._parser.end()
-  return true
-}
-
-SAXStream.prototype.on = function (ev, handler) {
-  var me = this
-  if (!me._parser["on"+ev] && streamWraps.indexOf(ev) !== -1) {
-    me._parser["on"+ev] = function () {
-      var args = arguments.length === 1 ? [arguments[0]]
-               : Array.apply(null, arguments)
-      args.splice(0, 0, ev)
-      me.emit.apply(me, args)
-    }
-  }
-
-  return Stream.prototype.on.call(me, ev, handler)
-}
-
-
-
-// character classes and tokens
-var whitespace = "\r\n\t "
-  // this really needs to be replaced with character classes.
-  // XML allows all manner of ridiculous numbers and digits.
-  , number = "0124356789"
-  , letter = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
-  // (Letter | "_" | ":")
-  , nameStart = letter+"_:"
-  , nameBody = nameStart+number+"-."
-  , quote = "'\""
-  , entity = number+letter+"#"
-  , attribEnd = whitespace + ">"
-  , CDATA = "[CDATA["
-  , DOCTYPE = "DOCTYPE"
-  , XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
-  , XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
-  , rootNS = { xml: XML_NAMESPACE, xmlns: XMLNS_NAMESPACE }
-
-// turn all the string character sets into character class objects.
-whitespace = charClass(whitespace)
-number = charClass(number)
-letter = charClass(letter)
-nameStart = charClass(nameStart)
-nameBody = charClass(nameBody)
-quote = charClass(quote)
-entity = charClass(entity)
-attribEnd = charClass(attribEnd)
-
-function charClass (str) {
-  return str.split("").reduce(function (s, c) {
-    s[c] = true
-    return s
-  }, {})
-}
-
-function is (charclass, c) {
-  return charclass[c]
-}
-
-function not (charclass, c) {
-  return !charclass[c]
-}
-
-var S = 0
-sax.STATE =
-{ BEGIN                     : S++
-, TEXT                      : S++ // general stuff
-, TEXT_ENTITY               : S++ // &amp and such.
-, OPEN_WAKA                 : S++ // <
-, SGML_DECL                 : S++ // <!BLARG
-, SGML_DECL_QUOTED          : S++ // <!BLARG foo "bar
-, DOCTYPE                   : S++ // <!DOCTYPE
-, DOCTYPE_QUOTED            : S++ // <!DOCTYPE "//blah
-, DOCTYPE_DTD               : S++ // <!DOCTYPE "//blah" [ ...
-, DOCTYPE_DTD_QUOTED        : S++ // <!DOCTYPE "//blah" [ "foo
-, COMMENT_STARTING          : S++ // <!-
-, COMMENT                   : S++ // <!--
-, COMMENT_ENDING            : S++ // <!-- blah -
-, COMMENT_ENDED             : S++ // <!-- blah --
-, CDATA                     : S++ // <![CDATA[ something
-, CDATA_ENDING              : S++ // ]
-, CDATA_ENDING_2            : S++ // ]]
-, PROC_INST                 : S++ // <?hi
-, PROC_INST_BODY            : S++ // <?hi there
-, PROC_INST_QUOTED          : S++ // <?hi "there
-, PROC_INST_ENDING          : S++ // <?hi "there" ?
-, OPEN_TAG                  : S++ // <strong
-, OPEN_TAG_SLASH            : S++ // <strong /
-, ATTRIB                    : S++ // <a
-, ATTRIB_NAME               : S++ // <a foo
-, ATTRIB_NAME_SAW_WHITE     : S++ // <a foo _
-, ATTRIB_VALUE              : S++ // <a foo=
-, ATTRIB_VALUE_QUOTED       : S++ // <a foo="bar
-, ATTRIB_VALUE_UNQUOTED     : S++ // <a foo=bar
-, ATTRIB_VALUE_ENTITY_Q     : S++ // <foo bar="&quot;"
-, ATTRIB_VALUE_ENTITY_U     : S++ // <foo bar=&quot;
-, CLOSE_TAG                 : S++ // </a
-, CLOSE_TAG_SAW_WHITE       : S++ // </a   >
-, SCRIPT                    : S++ // <script> ...
-, SCRIPT_ENDING             : S++ // <script> ... <
-}
-
-sax.ENTITIES =
-{ "apos" : "'"
-, "quot" : "\""
-, "amp"  : "&"
-, "gt"   : ">"
-, "lt"   : "<"
-}
-
-for (var S in sax.STATE) sax.STATE[sax.STATE[S]] = S
-
-// shorthand
-S = sax.STATE
-
-function emit (parser, event, data) {
-  parser[event] && parser[event](data)
-}
-
-function emitNode (parser, nodeType, data) {
-  if (parser.textNode) closeText(parser)
-  emit(parser, nodeType, data)
-}
-
-function closeText (parser) {
-  parser.textNode = textopts(parser.opt, parser.textNode)
-  if (parser.textNode) emit(parser, "ontext", parser.textNode)
-  parser.textNode = ""
-}
-
-function textopts (opt, text) {
-  if (opt.trim) text = text.trim()
-  if (opt.normalize) text = text.replace(/\s+/g, " ")
-  return text
-}
-
-function error (parser, er) {
-  closeText(parser)
-  er += "\nLine: "+parser.line+
-        "\nColumn: "+parser.column+
-        "\nChar: "+parser.c
-  er = new Error(er)
-  parser.error = er
-  emit(parser, "onerror", er)
-  return parser
-}
-
-function end (parser) {
-  if (parser.state !== S.TEXT) error(parser, "Unexpected end")
-  closeText(parser)
-  parser.c = ""
-  parser.closed = true
-  emit(parser, "onend")
-  SAXParser.call(parser, parser.strict, parser.opt)
-  return parser
-}
-
-function strictFail (parser, message) {
-  if (parser.strict) error(parser, message)
-}
-
-function newTag (parser) {
-  if (!parser.strict) parser.tagName = parser.tagName[parser.tagCase]()
-  var parent = parser.tags[parser.tags.length - 1] || parser
-    , tag = parser.tag = { name : parser.tagName, attributes : {} }
-
-  // will be overridden if tag contails an xmlns="foo" or xmlns:foo="bar"
-  if (parser.opt.xmlns) tag.ns = parent.ns
-  parser.attribList.length = 0
-}
-
-function qname (name) {
-  var i = name.indexOf(":")
-    , qualName = i < 0 ? [ "", name ] : name.split(":")
-    , prefix = qualName[0]
-    , local = qualName[1]
-
-  // <x "xmlns"="http://foo">
-  if (name === "xmlns") {
-    prefix = "xmlns"
-    local = ""
-  }
-
-  return { prefix: prefix, local: local }
-}
-
-function attrib (parser) {
-  if (parser.opt.xmlns) {
-    var qn = qname(parser.attribName)
-      , prefix = qn.prefix
-      , local = qn.local
-
-    if (prefix === "xmlns") {
-      // namespace binding attribute; push the binding into scope
-      if (local === "xml" && parser.attribValue !== XML_NAMESPACE) {
-        strictFail( parser
-                  , "xml: prefix must be bound to " + XML_NAMESPACE + "\n"
-                  + "Actual: " + parser.attribValue )
-      } else if (local === "xmlns" && parser.attribValue !== XMLNS_NAMESPACE) {
-        strictFail( parser
-                  , "xmlns: prefix must be bound to " + XMLNS_NAMESPACE + "\n"
-                  + "Actual: " + parser.attribValue )
-      } else {
-        var tag = parser.tag
-          , parent = parser.tags[parser.tags.length - 1] || parser
-        if (tag.ns === parent.ns) {
-          tag.ns = Object.create(parent.ns)
-        }
-        tag.ns[local] = parser.attribValue
-      }
-    }
-
-    // defer onattribute events until all attributes have been seen
-    // so any new bindings can take effect; preserve attribute order
-    // so deferred events can be emitted in document order
-    parser.attribList.push([parser.attribName, parser.attribValue])
-  } else {
-    // in non-xmlns mode, we can emit the event right away
-    parser.tag.attributes[parser.attribName] = parser.attribValue
-    emitNode( parser
-            , "onattribute"
-            , { name: parser.attribName
-              , value: parser.attribValue } )
-  }
-
-  parser.attribName = parser.attribValue = ""
-}
-
-function openTag (parser, selfClosing) {
-  if (parser.opt.xmlns) {
-    // emit namespace binding events
-    var tag = parser.tag
-
-    // add namespace info to tag
-    var qn = qname(parser.tagName)
-    tag.prefix = qn.prefix
-    tag.local = qn.local
-    tag.uri = tag.ns[qn.prefix] || qn.prefix
-
-    if (tag.prefix && !tag.uri) {
-      strictFail(parser, "Unbound namespace prefix: "
-                       + JSON.stringify(parser.tagName))
-    }
-
-    var parent = parser.tags[parser.tags.length - 1] || parser
-    if (tag.ns && parent.ns !== tag.ns) {
-      Object.keys(tag.ns).forEach(function (p) {
-        emitNode( parser
-                , "onopennamespace"
-                , { prefix: p , uri: tag.ns[p] } )
-      })
-    }
-
-    // handle deferred onattribute events
-    for (var i = 0, l = parser.attribList.length; i < l; i ++) {
-      var nv = parser.attribList[i]
-      var name = nv[0]
-        , value = nv[1]
-        , qualName = qname(name)
-        , prefix = qualName.prefix
-        , local = qualName.local
-        , uri = tag.ns[prefix] || ""
-        , a = { name: name
-              , value: value
-              , prefix: prefix
-              , local: local
-              , uri: uri
-              }
-
-      // if there's any attributes with an undefined namespace,
-      // then fail on them now.
-      if (prefix && prefix != "xmlns" && !uri) {
-        strictFail(parser, "Unbound namespace prefix: "
-                         + JSON.stringify(prefix))
-        a.uri = prefix
-      }
-      parser.tag.attributes[name] = a
-      emitNode(parser, "onattribute", a)
-    }
-    parser.attribList.length = 0
-  }
-
-  // process the tag
-  parser.sawRoot = true
-  parser.tags.push(parser.tag)
-  emitNode(parser, "onopentag", parser.tag)
-  if (!selfClosing) {
-    // special case for <script> in non-strict mode.
-    if (!parser.noscript && parser.tagName.toLowerCase() === "script") {
-      parser.state = S.SCRIPT
-    } else {
-      parser.state = S.TEXT
-    }
-    parser.tag = null
-    parser.tagName = ""
-  }
-  parser.attribName = parser.attribValue = ""
-  parser.attribList.length = 0
-}
-
-function closeTag (parser) {
-  if (!parser.tagName) {
-    strictFail(parser, "Weird empty close tag.")
-    parser.textNode += "</>"
-    parser.state = S.TEXT
-    return
-  }
-  // first make sure that the closing tag actually exists.
-  // <a><b></c></b></a> will close everything, otherwise.
-  var t = parser.tags.length
-  var tagName = parser.tagName
-  if (!parser.strict) tagName = tagName[parser.tagCase]()
-  var closeTo = tagName
-  while (t --) {
-    var close = parser.tags[t]
-    if (close.name !== closeTo) {
-      // fail the first time in strict mode
-      strictFail(parser, "Unexpected close tag")
-    } else break
-  }
-
-  // didn't find it.  we already failed for strict, so just abort.
-  if (t < 0) {
-    strictFail(parser, "Unmatched closing tag: "+parser.tagName)
-    parser.textNode += "</" + parser.tagName + ">"
-    parser.state = S.TEXT
-    return
-  }
-  parser.tagName = tagName
-  var s = parser.tags.length
-  while (s --> t) {
-    var tag = parser.tag = parser.tags.pop()
-    parser.tagName = parser.tag.name
-    emitNode(parser, "onclosetag", parser.tagName)
-
-    var x = {}
-    for (var i in tag.ns) x[i] = tag.ns[i]
-
-    var parent = parser.tags[parser.tags.length - 1] || parser
-    if (parser.opt.xmlns && tag.ns !== parent.ns) {
-      // remove namespace bindings introduced by tag
-      Object.keys(tag.ns).forEach(function (p) {
-        var n = tag.ns[p]
-        emitNode(parser, "onclosenamespace", { prefix: p, uri: n })
-      })
-    }
-  }
-  if (t === 0) parser.closedRoot = true
-  parser.tagName = parser.attribValue = parser.attribName = ""
-  parser.attribList.length = 0
-  parser.state = S.TEXT
-}
-
-function parseEntity (parser) {
-  var entity = parser.entity.toLowerCase()
-    , num
-    , numStr = ""
-  if (parser.ENTITIES[entity]) return parser.ENTITIES[entity]
-  if (entity.charAt(0) === "#") {
-    if (entity.charAt(1) === "x") {
-      entity = entity.slice(2)
-      num = parseInt(entity, 16)
-      numStr = num.toString(16)
-    } else {
-      entity = entity.slice(1)
-      num = parseInt(entity, 10)
-      numStr = num.toString(10)
-    }
-  }
-  entity = entity.replace(/^0+/, "")
-  if (numStr.toLowerCase() !== entity) {
-    strictFail(parser, "Invalid character entity")
-    return "&"+parser.entity + ";"
-  }
-  return String.fromCharCode(num)
-}
-
-function write (chunk) {
-  var parser = this
-  if (this.error) throw this.error
-  if (parser.closed) return error(parser,
-    "Cannot write after close. Assign an onready handler.")
-  if (chunk === null) return end(parser)
-  var i = 0, c = ""
-  while (parser.c = c = chunk.charAt(i++)) {
-    parser.position ++
-    if (c === "\n") {
-      parser.line ++
-      parser.column = 0
-    } else parser.column ++
-    switch (parser.state) {
-
-      case S.BEGIN:
-        if (c === "<") parser.state = S.OPEN_WAKA
-        else if (not(whitespace,c)) {
-          // have to process this as a text node.
-          // weird, but happens.
-          strictFail(parser, "Non-whitespace before first tag.")
-          parser.textNode = c
-          parser.state = S.TEXT
-        }
-      continue
-
-      case S.TEXT:
-        if (parser.sawRoot && !parser.closedRoot) {
-          var starti = i-1
-          while (c && c!=="<" && c!=="&") {
-            c = chunk.charAt(i++)
-            if (c) {
-              parser.position ++
-              if (c === "\n") {
-                parser.line ++
-                parser.column = 0
-              } else parser.column ++
-            }
-          }
-          parser.textNode += chunk.substring(starti, i-1)
-        }
-        if (c === "<") parser.state = S.OPEN_WAKA
-        else {
-          if (not(whitespace, c) && (!parser.sawRoot || parser.closedRoot))
-            strictFail("Text data outside of root node.")
-          if (c === "&") parser.state = S.TEXT_ENTITY
-          else parser.textNode += c
-        }
-      continue
-
-      case S.SCRIPT:
-        // only non-strict
-        if (c === "<") {
-          parser.state = S.SCRIPT_ENDING
-        } else parser.script += c
-      continue
-
-      case S.SCRIPT_ENDING:
-        if (c === "/") {
-          emitNode(parser, "onscript", parser.script)
-          parser.state = S.CLOSE_TAG
-          parser.script = ""
-          parser.tagName = ""
-        } else {
-          parser.script += "<" + c
-          parser.state = S.SCRIPT
-        }
-      continue
-
-      case S.OPEN_WAKA:
-        // either a /, ?, !, or text is coming next.
-        if (c === "!") {
-          parser.state = S.SGML_DECL
-          parser.sgmlDecl = ""
-        } else if (is(whitespace, c)) {
-          // wait for it...
-        } else if (is(nameStart,c)) {
-          parser.startTagPosition = parser.position - 1
-          parser.state = S.OPEN_TAG
-          parser.tagName = c
-        } else if (c === "/") {
-          parser.startTagPosition = parser.position - 1
-          parser.state = S.CLOSE_TAG
-          parser.tagName = ""
-        } else if (c === "?") {
-          parser.state = S.PROC_INST
-          parser.procInstName = parser.procInstBody = ""
-        } else {
-          strictFail(parser, "Unencoded <")
-          parser.textNode += "<" + c
-          parser.state = S.TEXT
-        }
-      continue
-
-      case S.SGML_DECL:
-        if ((parser.sgmlDecl+c).toUpperCase() === CDATA) {
-          emitNode(parser, "onopencdata")
-          parser.state = S.CDATA
-          parser.sgmlDecl = ""
-          parser.cdata = ""
-        } else if (parser.sgmlDecl+c === "--") {
-          parser.state = S.COMMENT
-          parser.comment = ""
-          parser.sgmlDecl = ""
-        } else if ((parser.sgmlDecl+c).toUpperCase() === DOCTYPE) {
-          parser.state = S.DOCTYPE
-          if (parser.doctype || parser.sawRoot) strictFail(parser,
-            "Inappropriately located doctype declaration")
-          parser.doctype = ""
-          parser.sgmlDecl = ""
-        } else if (c === ">") {
-          emitNode(parser, "onsgmldeclaration", parser.sgmlDecl)
-          parser.sgmlDecl = ""
-          parser.state = S.TEXT
-        } else if (is(quote, c)) {
-          parser.state = S.SGML_DECL_QUOTED
-          parser.sgmlDecl += c
-        } else parser.sgmlDecl += c
-      continue
-
-      case S.SGML_DECL_QUOTED:
-        if (c === parser.q) {
-          parser.state = S.SGML_DECL
-          parser.q = ""
-        }
-        parser.sgmlDecl += c
-      continue
-
-      case S.DOCTYPE:
-        if (c === ">") {
-          parser.state = S.TEXT
-          emitNode(parser, "ondoctype", parser.doctype)
-          parser.doctype = true // just remember that we saw it.
-        } else {
-          parser.doctype += c
-          if (c === "[") parser.state = S.DOCTYPE_DTD
-          else if (is(quote, c)) {
-            parser.state = S.DOCTYPE_QUOTED
-            parser.q = c
-          }
-        }
-      continue
-
-      case S.DOCTYPE_QUOTED:
-        parser.doctype += c
-        if (c === parser.q) {
-          parser.q = ""
-          parser.state = S.DOCTYPE
-        }
-      continue
-
-      case S.DOCTYPE_DTD:
-        parser.doctype += c
-        if (c === "]") parser.state = S.DOCTYPE
-        else if (is(quote,c)) {
-          parser.state = S.DOCTYPE_DTD_QUOTED
-          parser.q = c
-        }
-      continue
-
-      case S.DOCTYPE_DTD_QUOTED:
-        parser.doctype += c
-        if (c === parser.q) {
-          parser.state = S.DOCTYPE_DTD
-          parser.q = ""
-        }
-      continue
-
-      case S.COMMENT:
-        if (c === "-") parser.state = S.COMMENT_ENDING
-        else parser.comment += c
-      continue
-
-      case S.COMMENT_ENDING:
-        if (c === "-") {
-          parser.state = S.COMMENT_ENDED
-          parser.comment = textopts(parser.opt, parser.comment)
-          if (parser.comment) emitNode(parser, "oncomment", parser.comment)
-          parser.comment = ""
-        } else {
-          parser.comment += "-" + c
-          parser.state = S.COMMENT
-        }
-      continue
-
-      case S.COMMENT_ENDED:
-        if (c !== ">") {
-          strictFail(parser, "Malformed comment")
-          // allow <!-- blah -- bloo --> in non-strict mode,
-          // which is a comment of " blah -- bloo "
-          parser.comment += "--" + c
-          parser.state = S.COMMENT
-        } else parser.state = S.TEXT
-      continue
-
-      case S.CDATA:
-        if (c === "]") parser.state = S.CDATA_ENDING
-        else parser.cdata += c
-      continue
-
-      case S.CDATA_ENDING:
-        if (c === "]") parser.state = S.CDATA_ENDING_2
-        else {
-          parser.cdata += "]" + c
-          parser.state = S.CDATA
-        }
-      continue
-
-      case S.CDATA_ENDING_2:
-        if (c === ">") {
-          if (parser.cdata) emitNode(parser, "oncdata", parser.cdata)
-          emitNode(parser, "onclosecdata")
-          parser.cdata = ""
-          parser.state = S.TEXT
-        } else if (c === "]") {
-          parser.cdata += "]"
-        } else {
-          parser.cdata += "]]" + c
-          parser.state = S.CDATA
-        }
-      continue
-
-      case S.PROC_INST:
-        if (c === "?") parser.state = S.PROC_INST_ENDING
-        else if (is(whitespace, c)) parser.state = S.PROC_INST_BODY
-        else parser.procInstName += c
-      continue
-
-      case S.PROC_INST_BODY:
-        if (!parser.procInstBody && is(whitespace, c)) continue
-        else if (c === "?") parser.state = S.PROC_INST_ENDING
-        else if (is(quote, c)) {
-          parser.state = S.PROC_INST_QUOTED
-          parser.q = c
-          parser.procInstBody += c
-        } else parser.procInstBody += c
-      continue
-
-      case S.PROC_INST_ENDING:
-        if (c === ">") {
-          emitNode(parser, "onprocessinginstruction", {
-            name : parser.procInstName,
-            body : parser.procInstBody
-          })
-          parser.procInstName = parser.procInstBody = ""
-          parser.state = S.TEXT
-        } else {
-          parser.procInstBody += "?" + c
-          parser.state = S.PROC_INST_BODY
-        }
-      continue
-
-      case S.PROC_INST_QUOTED:
-        parser.procInstBody += c
-        if (c === parser.q) {
-          parser.state = S.PROC_INST_BODY
-          parser.q = ""
-        }
-      continue
-
-      case S.OPEN_TAG:
-        if (is(nameBody, c)) parser.tagName += c
-        else {
-          newTag(parser)
-          if (c === ">") openTag(parser)
-          else if (c === "/") parser.state = S.OPEN_TAG_SLASH
-          else {
-            if (not(whitespace, c)) strictFail(
-              parser, "Invalid character in tag name")
-            parser.state = S.ATTRIB
-          }
-        }
-      continue
-
-      case S.OPEN_TAG_SLASH:
-        if (c === ">") {
-          openTag(parser, true)
-          closeTag(parser)
-        } else {
-          strictFail(parser, "Forward-slash in opening tag not followed by >")
-          parser.state = S.ATTRIB
-        }
-      continue
-
-      case S.ATTRIB:
-        // haven't read the attribute name yet.
-        if (is(whitespace, c)) continue
-        else if (c === ">") openTag(parser)
-        else if (c === "/") parser.state = S.OPEN_TAG_SLASH
-        else if (is(nameStart, c)) {
-          parser.attribName = c
-          parser.attribValue = ""
-          parser.state = S.ATTRIB_NAME
-        } else strictFail(parser, "Invalid attribute name")
-      continue
-
-      case S.ATTRIB_NAME:
-        if (c === "=") parser.state = S.ATTRIB_VALUE
-        else if (is(whitespace, c)) parser.state = S.ATTRIB_NAME_SAW_WHITE
-        else if (is(nameBody, c)) parser.attribName += c
-        else strictFail(parser, "Invalid attribute name")
-      continue
-
-      case S.ATTRIB_NAME_SAW_WHITE:
-        if (c === "=") parser.state = S.ATTRIB_VALUE
-        else if (is(whitespace, c)) continue
-        else {
-          strictFail(parser, "Attribute without value")
-          parser.tag.attributes[parser.attribName] = ""
-          parser.attribValue = ""
-          emitNode(parser, "onattribute",
-                   { name : parser.attribName, value : "" })
-          parser.attribName = ""
-          if (c === ">") openTag(parser)
-          else if (is(nameStart, c)) {
-            parser.attribName = c
-            parser.state = S.ATTRIB_NAME
-          } else {
-            strictFail(parser, "Invalid attribute name")
-            parser.state = S.ATTRIB
-          }
-        }
-      continue
-
-      case S.ATTRIB_VALUE:
-        if (is(whitespace, c)) continue
-        else if (is(quote, c)) {
-          parser.q = c
-          parser.state = S.ATTRIB_VALUE_QUOTED
-        } else {
-          strictFail(parser, "Unquoted attribute value")
-          parser.state = S.ATTRIB_VALUE_UNQUOTED
-          parser.attribValue = c
-        }
-      continue
-
-      case S.ATTRIB_VALUE_QUOTED:
-        if (c !== parser.q) {
-          if (c === "&") parser.state = S.ATTRIB_VALUE_ENTITY_Q
-          else parser.attribValue += c
-          continue
-        }
-        attrib(parser)
-        parser.q = ""
-        parser.state = S.ATTRIB
-      continue
-
-      case S.ATTRIB_VALUE_UNQUOTED:
-        if (not(attribEnd,c)) {
-          if (c === "&") parser.state = S.ATTRIB_VALUE_ENTITY_U
-          else parser.attribValue += c
-          continue
-        }
-        attrib(parser)
-        if (c === ">") openTag(parser)
-        else parser.state = S.ATTRIB
-      continue
-
-      case S.CLOSE_TAG:
-        if (!parser.tagName) {
-          if (is(whitespace, c)) continue
-          else if (not(nameStart, c)) strictFail(parser,
-            "Invalid tagname in closing tag.")
-          else parser.tagName = c
-        }
-        else if (c === ">") closeTag(parser)
-        else if (is(nameBody, c)) parser.tagName += c
-        else {
-          if (not(whitespace, c)) strictFail(parser,
-            "Invalid tagname in closing tag")
-          parser.state = S.CLOSE_TAG_SAW_WHITE
-        }
-      continue
-
-      case S.CLOSE_TAG_SAW_WHITE:
-        if (is(whitespace, c)) continue
-        if (c === ">") closeTag(parser)
-        else strictFail("Invalid characters in closing tag")
-      continue
-
-      case S.TEXT_ENTITY:
-      case S.ATTRIB_VALUE_ENTITY_Q:
-      case S.ATTRIB_VALUE_ENTITY_U:
-        switch(parser.state) {
-          case S.TEXT_ENTITY:
-            var returnState = S.TEXT, buffer = "textNode"
-          break
-
-          case S.ATTRIB_VALUE_ENTITY_Q:
-            var returnState = S.ATTRIB_VALUE_QUOTED, buffer = "attribValue"
-          break
-
-          case S.ATTRIB_VALUE_ENTITY_U:
-            var returnState = S.ATTRIB_VALUE_UNQUOTED, buffer = "attribValue"
-          break
-        }
-        if (c === ";") {
-          parser[buffer] += parseEntity(parser)
-          parser.entity = ""
-          parser.state = returnState
-        }
-        else if (is(entity, c)) parser.entity += c
-        else {
-          strictFail("Invalid character entity")
-          parser[buffer] += "&" + parser.entity + c
-          parser.entity = ""
-          parser.state = returnState
-        }
-      continue
-
-      default:
-        throw new Error(parser, "Unknown state: " + parser.state)
-    }
-  } // while
-  // cdata blocks can get very big under normal conditions. emit and move on.
-  // if (parser.state === S.CDATA && parser.cdata) {
-  //   emitNode(parser, "oncdata", parser.cdata)
-  //   parser.cdata = ""
-  // }
-  if (parser.position >= parser.bufferCheckPosition) checkBufferLength(parser)
-  return parser
-}
-
-})(typeof exports === "undefined" ? sax = {} : exports)
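
The file removed above is an evented, streaming XML parser: you create a parser with `sax.parser(strict, opt)`, assign handlers named `on` plus an event name from `sax.EVENTS`, and feed it text through `write()`/`close()`. A minimal usage sketch based only on the API visible in the deleted source; the XML string and handler bodies are illustrative:

```javascript
var sax = require("sax")          // resolves to lib/sax.js per the package.json below

var parser = sax.parser(true)     // strict mode
parser.onopentag = function (node) {
  // node is the tag object the parser builds: { name, attributes }
  console.log("open", node.name)
}
parser.ontext = function (text) {
  console.log("text", text)
}
parser.onerror = function (err) {
  console.error(err.message)
  parser.resume()                 // clear the stored error so writing can continue
}
parser.onend = function () {
  console.log("done")
}

parser.write("<root><hi>hello</hi></root>").close()
```

For stream-style use, `sax.createStream(strict, opt)` wraps the same parser in a writable/readable `SAXStream` that re-emits these handlers as Node stream events.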

http://git-wip-us.apache.org/repos/asf/cordova-windows/blob/5a6cb728/node_modules/elementtree/node_modules/sax/package.json
----------------------------------------------------------------------
diff --git a/node_modules/elementtree/node_modules/sax/package.json b/node_modules/elementtree/node_modules/sax/package.json
deleted file mode 100644
index 52c3b23..0000000
--- a/node_modules/elementtree/node_modules/sax/package.json
+++ /dev/null
@@ -1,89 +0,0 @@
-{
-  "name": "sax",
-  "description": "An evented streaming XML parser in JavaScript",
-  "author": {
-    "name": "Isaac Z. Schlueter",
-    "email": "i@izs.me",
-    "url": "http://blog.izs.me/"
-  },
-  "version": "0.3.5",
-  "main": "lib/sax.js",
-  "license": {
-    "type": "MIT",
-    "url": "https://raw.github.com/isaacs/sax-js/master/LICENSE"
-  },
-  "scripts": {
-    "test": "node test/index.js"
-  },
-  "repository": {
-    "type": "git",
-    "url": "git://github.com/isaacs/sax-js.git"
-  },
-  "_npmUser": {
-    "name": "isaacs",
-    "email": "i@izs.me"
-  },
-  "_id": "sax@0.3.5",
-  "contributors": [
-    {
-      "name": "Isaac Z. Schlueter",
-      "email": "i@izs.me"
-    },
-    {
-      "name": "Stein Martin Hustad",
-      "email": "stein@hustad.com"
-    },
-    {
-      "name": "Mikeal Rogers",
-      "email": "mikeal.rogers@gmail.com"
-    },
-    {
-      "name": "Laurie Harper",
-      "email": "laurie@holoweb.net"
-    },
-    {
-      "name": "Jann Horn",
-      "email": "jann@Jann-PC.fritz.box"
-    },
-    {
-      "name": "Elijah Insua",
-      "email": "tmpvar@gmail.com"
-    },
-    {
-      "name": "Henry Rawas",
-      "email": "henryr@schakra.com"
-    },
-    {
-      "name": "Justin Makeig",
-      "email": "jmpublic@makeig.com"
-    }
-  ],
-  "dependencies": {},
-  "devDependencies": {},
-  "engines": {
-    "node": "*"
-  },
-  "_engineSupported": true,
-  "_npmVersion": "1.1.0-beta-7",
-  "_nodeVersion": "v0.6.7-pre",
-  "_defaultsLoaded": true,
-  "dist": {
-    "shasum": "88fcfc1f73c0c8bbd5b7c776b6d3f3501eed073d",
-    "tarball": "http://registry.npmjs.org/sax/-/sax-0.3.5.tgz"
-  },
-  "maintainers": [
-    {
-      "name": "isaacs",
-      "email": "i@izs.me"
-    }
-  ],
-  "directories": {},
-  "_shasum": "88fcfc1f73c0c8bbd5b7c776b6d3f3501eed073d",
-  "_resolved": "https://registry.npmjs.org/sax/-/sax-0.3.5.tgz",
-  "_from": "sax@0.3.5",
-  "bugs": {
-    "url": "https://github.com/isaacs/sax-js/issues"
-  },
-  "readme": "ERROR: No README data found!",
-  "homepage": "https://github.com/isaacs/sax-js#readme"
-}

http://git-wip-us.apache.org/repos/asf/cordova-windows/blob/5a6cb728/node_modules/elementtree/package.json
----------------------------------------------------------------------
diff --git a/node_modules/elementtree/package.json b/node_modules/elementtree/package.json
index d712d2e..2be3d5d 100644
--- a/node_modules/elementtree/package.json
+++ b/node_modules/elementtree/package.json
@@ -1,75 +1,101 @@
 {
+  "_args": [
+    [
+      "elementtree@^0.1.6",
+      "D:\\Cordova\\cordova-windows"
+    ]
+  ],
+  "_from": "elementtree@>=0.1.6 <0.2.0",
+  "_id": "elementtree@0.1.6",
+  "_inCache": true,
+  "_installable": true,
+  "_location": "/elementtree",
+  "_npmUser": {
+    "email": "ryan@trolocsis.com",
+    "name": "rphillips"
+  },
+  "_npmVersion": "1.3.24",
+  "_phantomChildren": {},
+  "_requested": {
+    "name": "elementtree",
+    "raw": "elementtree@^0.1.6",
+    "rawSpec": "^0.1.6",
+    "scope": null,
+    "spec": ">=0.1.6 <0.2.0",
+    "type": "range"
+  },
+  "_requiredBy": [
+    "/",
+    "/cordova-common"
+  ],
+  "_resolved": "https://registry.npmjs.org/elementtree/-/elementtree-0.1.6.tgz",
+  "_shasum": "2ac4c46ea30516c8c4cbdb5e3ac7418e592de20c",
+  "_shrinkwrap": null,
+  "_spec": "elementtree@^0.1.6",
+  "_where": "D:\\Cordova\\cordova-windows",
   "author": {
     "name": "Rackspace US, Inc."
   },
+  "bugs": {
+    "url": "https://github.com/racker/node-elementtree/issues"
+  },
   "contributors": [
     {
-      "name": "Paul Querna",
-      "email": "paul.querna@rackspace.com"
+      "email": "paul.querna@rackspace.com",
+      "name": "Paul Querna"
     },
     {
-      "name": "Tomaz Muraus",
-      "email": "tomaz.muraus@rackspace.com"
+      "email": "tomaz.muraus@rackspace.com",
+      "name": "Tomaz Muraus"
     }
   ],
-  "name": "elementtree",
+  "dependencies": {
+    "sax": "0.3.5"
+  },
   "description": "XML Serialization and Parsing module based on Python's ElementTree.",
-  "version": "0.1.6",
-  "keywords": [
-    "xml",
-    "sax",
-    "parser",
-    "seralization",
-    "elementtree"
-  ],
-  "homepage": "https://github.com/racker/node-elementtree",
-  "repository": {
-    "type": "git",
-    "url": "git://github.com/racker/node-elementtree.git"
+  "devDependencies": {
+    "whiskey": "0.8.x"
   },
-  "main": "lib/elementtree.js",
   "directories": {
     "lib": "lib"
   },
-  "scripts": {
-    "test": "make test"
+  "dist": {
+    "shasum": "2ac4c46ea30516c8c4cbdb5e3ac7418e592de20c",
+    "tarball": "https://registry.npmjs.org/elementtree/-/elementtree-0.1.6.tgz"
   },
   "engines": {
     "node": ">= 0.4.0"
   },
-  "dependencies": {
-    "sax": "0.3.5"
-  },
-  "devDependencies": {
-    "whiskey": "0.8.x"
-  },
+  "homepage": "https://github.com/racker/node-elementtree",
+  "keywords": [
+    "xml",
+    "sax",
+    "parser",
+    "seralization",
+    "elementtree"
+  ],
   "licenses": [
     {
       "type": "Apache",
       "url": "http://www.apache.org/licenses/LICENSE-2.0.html"
     }
   ],
-  "bugs": {
-    "url": "https://github.com/racker/node-elementtree/issues"
-  },
-  "_id": "elementtree@0.1.6",
-  "dist": {
-    "shasum": "2ac4c46ea30516c8c4cbdb5e3ac7418e592de20c",
-    "tarball": "http://registry.npmjs.org/elementtree/-/elementtree-0.1.6.tgz"
-  },
-  "_from": "elementtree@>=0.1.6 <0.2.0",
-  "_npmVersion": "1.3.24",
-  "_npmUser": {
-    "name": "rphillips",
-    "email": "ryan@trolocsis.com"
-  },
+  "main": "lib/elementtree.js",
   "maintainers": [
     {
-      "name": "rphillips",
-      "email": "ryan@trolocsis.com"
+      "email": "ryan@trolocsis.com",
+      "name": "rphillips"
     }
   ],
-  "_shasum": "2ac4c46ea30516c8c4cbdb5e3ac7418e592de20c",
-  "_resolved": "https://registry.npmjs.org/elementtree/-/elementtree-0.1.6.tgz",
-  "readme": "ERROR: No README data found!"
+  "name": "elementtree",
+  "optionalDependencies": {},
+  "readme": "ERROR: No README data found!",
+  "repository": {
+    "type": "git",
+    "url": "git://github.com/racker/node-elementtree.git"
+  },
+  "scripts": {
+    "test": "make test"
+  },
+  "version": "0.1.6"
 }

http://git-wip-us.apache.org/repos/asf/cordova-windows/blob/5a6cb728/node_modules/elementtree/tests/data/xml1.xml
----------------------------------------------------------------------
diff --git a/node_modules/elementtree/tests/data/xml1.xml b/node_modules/elementtree/tests/data/xml1.xml
new file mode 100644
index 0000000..72c33ae
--- /dev/null
+++ b/node_modules/elementtree/tests/data/xml1.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0"?>
+<container name="test_container_1" xmlns:android="http://schemas.android.com/apk/res/android">
+  <object>dd
+    <name>test_object_1</name>
+    <hash>4281c348eaf83e70ddce0e07221c3d28</hash>
+    <bytes android:type="cool">14</bytes>
+    <content_type>application/octetstream</content_type>
+    <last_modified>2009-02-03T05:26:32.612278</last_modified>
+  </object>
+  <object>
+    <name>test_object_2</name>
+    <hash>b039efe731ad111bc1b0ef221c3849d0</hash>
+    <bytes android:type="lame">64</bytes>
+    <content_type>application/octetstream</content_type>
+    <last_modified>2009-02-03T05:26:32.612278</last_modified>
+  </object>
+</container>

http://git-wip-us.apache.org/repos/asf/cordova-windows/blob/5a6cb728/node_modules/elementtree/tests/data/xml2.xml
----------------------------------------------------------------------
diff --git a/node_modules/elementtree/tests/data/xml2.xml b/node_modules/elementtree/tests/data/xml2.xml
new file mode 100644
index 0000000..5f94bbd
--- /dev/null
+++ b/node_modules/elementtree/tests/data/xml2.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0"?>
+<object>
+    <title>
+        Hello World
+    </title>
+    <children>
+        <object id="obj1" />
+        <object id="obj2" />
+        <object id="obj3" />
+    </children>
+    <text><![CDATA[
+        Test & Test & Test
+    ]]></text>
+</object>

http://git-wip-us.apache.org/repos/asf/cordova-windows/blob/5a6cb728/node_modules/elementtree/tests/test-simple.js
----------------------------------------------------------------------
diff --git a/node_modules/elementtree/tests/test-simple.js b/node_modules/elementtree/tests/test-simple.js
new file mode 100644
index 0000000..1fc04b8
--- /dev/null
+++ b/node_modules/elementtree/tests/test-simple.js
@@ -0,0 +1,339 @@
+/**
+ *  Copyright 2011 Rackspace
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+var fs = require('fs');
+var path = require('path');
+
+var sprintf = require('./../lib/sprintf').sprintf;
+var et = require('elementtree');
+var XML = et.XML;
+var ElementTree = et.ElementTree;
+var Element = et.Element;
+var SubElement = et.SubElement;
+var SyntaxError = require('./../lib/errors').SyntaxError;
+
+function readFile(name) {
+  return fs.readFileSync(path.join(__dirname, '/data/', name), 'utf8');
+}
+
+exports['test_simplest'] = function(test, assert) {
+  /* Ported from <https://github.com/lxml/lxml/blob/master/src/lxml/tests/test_elementtree.py> */
+  var Element = et.Element;
+  var root = Element('root');
+  root.append(Element('one'));
+  root.append(Element('two'));
+  root.append(Element('three'));
+  assert.equal(3, root.len());
+  assert.equal('one', root.getItem(0).tag);
+  assert.equal('two', root.getItem(1).tag);
+  assert.equal('three', root.getItem(2).tag);
+  test.finish();
+};
+
+
+exports['test_attribute_values'] = function(test, assert) {
+  var XML = et.XML;
+  var root = XML('<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>');
+  assert.equal('Alpha', root.attrib['alpha']);
+  assert.equal('Beta', root.attrib['beta']);
+  assert.equal('Gamma', root.attrib['gamma']);
+  test.finish();
+};
+
+
+exports['test_findall'] = function(test, assert) {
+  var XML = et.XML;
+  var root = XML('<a><b><c/></b><b/><c><b/></c></a>');
+
+  assert.equal(root.findall("c").length, 1);
+  assert.equal(root.findall(".//c").length, 2);
+  assert.equal(root.findall(".//b").length, 3);
+  assert.equal(root.findall(".//b")[0]._children.length, 1);
+  assert.equal(root.findall(".//b")[1]._children.length, 0);
+  assert.equal(root.findall(".//b")[2]._children.length, 0);
+  assert.deepEqual(root.findall('.//b')[0], root.getchildren()[0]);
+
+  test.finish();
+};
+
+exports['test_find'] = function(test, assert) {
+  var a = Element('a');
+  var b = SubElement(a, 'b');
+  var c = SubElement(a, 'c');
+
+  assert.deepEqual(a.find('./b/..'), a);
+  test.finish();
+};
+
+exports['test_elementtree_find_qname'] = function(test, assert) {
+  var tree = new et.ElementTree(XML('<a><b><c/></b><b/><c><b/></c></a>'));
+  assert.deepEqual(tree.find(new et.QName('c')), tree.getroot()._children[2]);
+  test.finish();
+};
+
+exports['test_attrib_ns_clear'] = function(test, assert) {
+  var attribNS = '{http://foo/bar}x';
+
+  var par = Element('par');
+  par.set(attribNS, 'a');
+  var child = SubElement(par, 'child');
+  child.set(attribNS, 'b');
+
+  assert.equal('a', par.get(attribNS));
+  assert.equal('b', child.get(attribNS));
+
+  par.clear();
+  assert.equal(null, par.get(attribNS));
+  assert.equal('b', child.get(attribNS));
+  test.finish();
+};
+
+exports['test_create_tree_and_parse_simple'] = function(test, assert) {
+  var i = 0;
+  var e = new Element('bar', {});
+  var expected = "<?xml version='1.0' encoding='utf-8'?>\n" +
+    '<bar><blah a="11" /><blah a="12" /><gag a="13" b="abc">ponies</gag></bar>';
+
+  SubElement(e, "blah", {a: 11});
+  SubElement(e, "blah", {a: 12});
+  var se = et.SubElement(e, "gag", {a: '13', b: 'abc'});
+  se.text = 'ponies';
+
+  se.itertext(function(text) {
+    assert.equal(text, 'ponies');
+    i++;
+  });
+
+  assert.equal(i, 1);
+  var etree = new ElementTree(e);
+  var xml = etree.write();
+  assert.equal(xml, expected);
+  test.finish();
+};
+
+exports['test_write_with_options'] = function(test, assert) {
+  var i = 0;
+  var e = new Element('bar', {});
+  var expected1 = "<?xml version='1.0' encoding='utf-8'?>\n" +
+    '<bar>\n' +
+    '    <blah a="11">\n' +
+    '        <baz d="11">test</baz>\n' +
+    '    </blah>\n' +
+    '    <blah a="12" />\n' +
+    '    <gag a="13" b="abc">ponies</gag>\n' +
+    '</bar>\n';
+    var expected2 = "<?xml version='1.0' encoding='utf-8'?>\n" +
+    '<bar>\n' +
+    '  <blah a="11">\n' +
+    '    <baz d="11">test</baz>\n' +
+    '  </blah>\n' +
+    '  <blah a="12" />\n' +
+    '  <gag a="13" b="abc">ponies</gag>\n' +
+    '</bar>\n';
+
+    var expected3 = "<?xml version='1.0' encoding='utf-8'?>\n" +
+    '<object>\n' +
+    '    <title>\n' +
+    '        Hello World\n' +
+    '    </title>\n' +
+    '    <children>\n' +
+    '        <object id="obj1" />\n' +
+    '        <object id="obj2" />\n' +
+    '        <object id="obj3" />\n' +
+    '    </children>\n' +
+    '    <text>\n' +
+    '        Test &amp; Test &amp; Test\n' +
+    '    </text>\n' +
+    '</object>\n';
+
+  var se1 = SubElement(e, "blah", {a: 11});
+  var se2 = SubElement(se1, "baz", {d: 11});
+  se2.text = 'test';
+  SubElement(e, "blah", {a: 12});
+  var se = et.SubElement(e, "gag", {a: '13', b: 'abc'});
+  se.text = 'ponies';
+
+  se.itertext(function(text) {
+    assert.equal(text, 'ponies');
+    i++;
+  });
+
+  assert.equal(i, 1);
+  var etree = new ElementTree(e);
+  var xml1 = etree.write({'indent': 4});
+  var xml2 = etree.write({'indent': 2});
+  assert.equal(xml1, expected1);
+  assert.equal(xml2, expected2);
+
+  var file = readFile('xml2.xml');
+  var etree2 = et.parse(file);
+  var xml3 = etree2.write({'indent': 4});
+  assert.equal(xml3, expected3);
+  test.finish();
+};
+
+exports['test_parse_and_find_2'] = function(test, assert) {
+  var data = readFile('xml1.xml');
+  var etree = et.parse(data);
+
+  assert.equal(etree.findall('./object').length, 2);
+  assert.equal(etree.findall('[@name]').length, 1);
+  assert.equal(etree.findall('[@name="test_container_1"]').length, 1);
+  assert.equal(etree.findall('[@name=\'test_container_1\']').length, 1);
+  assert.equal(etree.findall('./object')[0].findtext('name'), 'test_object_1');
+  assert.equal(etree.findtext('./object/name'), 'test_object_1');
+  assert.equal(etree.findall('.//bytes').length, 2);
+  assert.equal(etree.findall('*/bytes').length, 2);
+  assert.equal(etree.findall('*/foobar').length, 0);
+
+  test.finish();
+};
+
+exports['test_namespaced_attribute'] = function(test, assert) {
+  var data = readFile('xml1.xml');
+  var etree = et.parse(data);
+
+  assert.equal(etree.findall('*/bytes[@android:type="cool"]').length, 1);
+
+  test.finish();
+}
+
+exports['test_syntax_errors'] = function(test, assert) {
+  var expressions = [ './/@bar', '[@bar', '[@foo=bar]', '[@', '/bar' ];
+  var errCount = 0;
+  var data = readFile('xml1.xml');
+  var etree = et.parse(data);
+
+  expressions.forEach(function(expression) {
+    try {
+      etree.findall(expression);
+    }
+    catch (err) {
+      errCount++;
+    }
+  });
+
+  assert.equal(errCount, expressions.length);
+  test.finish();
+};
+
+exports['test_register_namespace'] = function(test, assert){
+  var prefix = 'TESTPREFIX';
+  var namespace = 'http://seriously.unknown/namespace/URI';
+  var errCount = 0;
+
+  var etree = Element(sprintf('{%s}test', namespace));
+  assert.equal(et.tostring(etree, { 'xml_declaration': false}),
+               sprintf('<ns0:test xmlns:ns0="%s" />', namespace));
+
+  et.register_namespace(prefix, namespace);
+  var etree = Element(sprintf('{%s}test', namespace));
+  assert.equal(et.tostring(etree, { 'xml_declaration': false}),
+               sprintf('<%s:test xmlns:%s="%s" />', prefix, prefix, namespace));
+
+  try {
+    et.register_namespace('ns25', namespace);
+  }
+  catch (err) {
+    errCount++;
+  }
+
+  assert.equal(errCount, 1, 'Reserved prefix used, but exception was not thrown');
+  test.finish();
+};
+
+exports['test_tostring'] = function(test, assert) {
+  var a = Element('a');
+  var b = SubElement(a, 'b');
+  var c = SubElement(a, 'c');
+  c.text = 543;
+
+  assert.equal(et.tostring(a, { 'xml_declaration': false }), '<a><b /><c>543</c></a>');
+  assert.equal(et.tostring(c, { 'xml_declaration': false }), '<c>543</c>');
+  test.finish();
+};
+
+exports['test_escape'] = function(test, assert) {
+  var a = Element('a');
+  var b = SubElement(a, 'b');
+  b.text = '&&&&<>"\n\r';
+
+  assert.equal(et.tostring(a, { 'xml_declaration': false }), '<a><b>&amp;&amp;&amp;&amp;&lt;&gt;\"\n\r</b></a>');
+  test.finish();
+};
+
+exports['test_find_null'] = function(test, assert) {
+  var root = Element('root');
+  var node = SubElement(root, 'node');
+  var leaf  = SubElement(node, 'leaf');
+  leaf.text = 'ipsum';
+
+  assert.equal(root.find('node/leaf'), leaf);
+  assert.equal(root.find('no-such-node/leaf'), null);
+  test.finish();
+};
+
+exports['test_findtext_null'] = function(test, assert) {
+  var root = Element('root');
+  var node = SubElement(root, 'node');
+  var leaf  = SubElement(node, 'leaf');
+  leaf.text = 'ipsum';
+
+  assert.equal(root.findtext('node/leaf'), 'ipsum');
+  assert.equal(root.findtext('no-such-node/leaf'), null);
+  test.finish();
+};
+
+exports['test_remove'] = function(test, assert) {
+  var root = Element('root');
+  var node1 = SubElement(root, 'node1');
+  var node2 = SubElement(root, 'node2');
+  var node3 = SubElement(root, 'node3');
+
+  assert.equal(root.len(), 3);
+
+  root.remove(node2);
+
+  assert.equal(root.len(), 2);
+  assert.equal(root.getItem(0).tag, 'node1')
+  assert.equal(root.getItem(1).tag, 'node3')
+
+  test.finish();
+};
+
+exports['test_cdata_write'] = function(test, assert) {
+  var root, etree, xml, values, value, i;
+
+  values = [
+    'if(0>1) then true;',
+    '<test1>ponies hello</test1>',
+    ''
+  ];
+
+  for (i = 0; i < values.length; i++) {
+    value = values[i];
+
+    root = Element('root');
+    root.append(et.CData(value));
+    etree = new ElementTree(root);
+    xml = etree.write({'xml_declaration': false});
+
+    assert.equal(xml, sprintf('<root><![CDATA[%s]]></root>', value));
+  }
+
+  test.finish();
+};
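
The test suite above doubles as documentation for the elementtree API. As a compact reference, here is a sketch of the parse/query/serialize round trip the tests rely on (`et.parse`, `findall`, `findtext`, `write`); the XML literal is illustrative:

```javascript
var et = require('elementtree');

// Parse, query with ElementPath expressions, and re-serialize,
// mirroring test_parse_and_find_2 and test_write_with_options above.
var xml = '<container name="c1"><object><name>o1</name></object></container>';
var etree = et.parse(xml);

console.log(etree.findall('./object').length);  // 1
console.log(etree.findtext('./object/name'));   // 'o1'
console.log(etree.write({'indent': 2}));        // pretty-printed document with XML declaration
```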

http://git-wip-us.apache.org/repos/asf/cordova-windows/blob/5a6cb728/node_modules/glob/LICENSE
----------------------------------------------------------------------
diff --git a/node_modules/glob/LICENSE b/node_modules/glob/LICENSE
new file mode 100644
index 0000000..19129e3
--- /dev/null
+++ b/node_modules/glob/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

http://git-wip-us.apache.org/repos/asf/cordova-windows/blob/5a6cb728/node_modules/glob/README.md
----------------------------------------------------------------------
diff --git a/node_modules/glob/README.md b/node_modules/glob/README.md
new file mode 100644
index 0000000..063cf95
--- /dev/null
+++ b/node_modules/glob/README.md
@@ -0,0 +1,377 @@
+[![Build Status](https://travis-ci.org/isaacs/node-glob.svg?branch=master)](https://travis-ci.org/isaacs/node-glob/) [![Dependency Status](https://david-dm.org/isaacs/node-glob.svg)](https://david-dm.org/isaacs/node-glob) [![devDependency Status](https://david-dm.org/isaacs/node-glob/dev-status.svg)](https://david-dm.org/isaacs/node-glob#info=devDependencies) [![optionalDependency Status](https://david-dm.org/isaacs/node-glob/optional-status.svg)](https://david-dm.org/isaacs/node-glob#info=optionalDependencies)
+
+# Glob
+
+Match files using the patterns the shell uses, like stars and stuff.
+
+This is a glob implementation in JavaScript.  It uses the `minimatch`
+library to do its matching.
+
+![](oh-my-glob.gif)
+
+## Usage
+
+```javascript
+var glob = require("glob")
+
+// options is optional
+glob("**/*.js", options, function (er, files) {
+  // files is an array of filenames.
+  // If the `nonull` option is set, and nothing
+  // was found, then files is ["**/*.js"]
+  // er is an error object or null.
+})
+```
+
+## Glob Primer
+
+"Globs" are the patterns you type when you do stuff like `ls *.js` on
+the command line, or put `build/*` in a `.gitignore` file.
+
+Before parsing the path part patterns, braced sections are expanded
+into a set.  Braced sections start with `{` and end with `}`, with any
+number of comma-delimited sections within.  Braced sections may contain
+slash characters, so `a{/b/c,bcd}` would expand into `a/b/c` and `abcd`.
+
+The following characters have special magic meaning when used in a
+path portion:
+
+* `*` Matches 0 or more characters in a single path portion
+* `?` Matches 1 character
+* `[...]` Matches a range of characters, similar to a RegExp range.
+  If the first character of the range is `!` or `^` then it matches
+  any character not in the range.
+* `!(pattern|pattern|pattern)` Matches anything that does not match
+  any of the patterns provided.
+* `?(pattern|pattern|pattern)` Matches zero or one occurrence of the
+  patterns provided.
+* `+(pattern|pattern|pattern)` Matches one or more occurrences of the
+  patterns provided.
+* `*(a|b|c)` Matches zero or more occurrences of the patterns provided
+* `@(pattern|pat*|pat?erN)` Matches exactly one of the patterns
+  provided
+* `**` If a "globstar" is alone in a path portion, then it matches
+  zero or more directories and subdirectories searching for matches.
+  It does not crawl symlinked directories.
+
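As a quick illustration of brace expansion and the globstar (a sketch; the directory layout implied by the patterns is hypothetical), using the `glob.sync` entry point documented below:

```javascript
var glob = require("glob")

// Braced sections expand first: "lib/{a,b}/*.js" covers lib/a/*.js and lib/b/*.js.
var libFiles = glob.sync("lib/{a,b}/*.js")

// A lone globstar matches files at any directory depth (without following symlinks),
// so this finds every .js file anywhere under src/.
var srcFiles = glob.sync("src/**/*.js")

console.log(libFiles, srcFiles)
```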
+### Dots
+
+If a file or directory path portion has a `.` as the first character,
+then it will not match any glob pattern unless that pattern's
+corresponding path part also has a `.` as its first character.
+
+For example, the pattern `a/.*/c` would match the file at `a/.b/c`.
+However the pattern `a/*/c` would not, because `*` does not start with
+a dot character.
+
+You can make glob treat dots as normal characters by setting
+`dot:true` in the options.
+
+### Basename Matching
+
+If you set `matchBase:true` in the options, and the pattern has no
+slashes in it, then it will seek for any file anywhere in the tree
+with a matching basename.  For example, `*.js` would match
+`test/simple/basic.js`.
+
+### Negation
+
+The intent for negation would be for a pattern starting with `!` to
+match everything that *doesn't* match the supplied pattern.  However,
+the implementation is weird, and for the time being, this should be
+avoided.  The behavior is deprecated in version 5, and will be removed
+entirely in version 6.
+
+### Empty Sets
+
+If no matching files are found, then an empty array is returned.  This
+differs from the shell, where the pattern itself is returned.  For
+example:
+
+    $ echo a*s*d*f
+    a*s*d*f
+
+To get the bash-style behavior, set the `nonull:true` in the options.
+
+### See Also:
+
+* `man sh`
+* `man bash` (Search for "Pattern Matching")
+* `man 3 fnmatch`
+* `man 5 gitignore`
+* [minimatch documentation](https://github.com/isaacs/minimatch)
+
+## glob.hasMagic(pattern, [options])
+
+Returns `true` if there are any special characters in the pattern, and
+`false` otherwise.
+
+Note that the options affect the results.  If `noext:true` is set in
+the options object, then `+(a|b)` will not be considered a magic
+pattern.  If the pattern has a brace expansion, like `a/{b/c,x/y}`
+then that is considered magical, unless `nobrace:true` is set in the
+options.
+
+## glob(pattern, [options], cb)
+
+* `pattern` {String} Pattern to be matched
+* `options` {Object}
+* `cb` {Function}
+  * `err` {Error | null}
+  * `matches` {Array<String>} filenames found matching the pattern
+
+Perform an asynchronous glob search.
+
+## glob.sync(pattern, [options])
+
+* `pattern` {String} Pattern to be matched
+* `options` {Object}
+* return: {Array<String>} filenames found matching the pattern
+
+Perform a synchronous glob search.
+
+## Class: glob.Glob
+
+Create a Glob object by instantiating the `glob.Glob` class.
+
+```javascript
+var Glob = require("glob").Glob
+var mg = new Glob(pattern, options, cb)
+```
+
+It's an EventEmitter, and starts walking the filesystem to find matches
+immediately.
+
+### new glob.Glob(pattern, [options], [cb])
+
+* `pattern` {String} pattern to search for
+* `options` {Object}
+* `cb` {Function} Called when an error occurs, or matches are found
+  * `err` {Error | null}
+  * `matches` {Array<String>} filenames found matching the pattern
+
+Note that if the `sync` flag is set in the options, then matches will
+be immediately available on the `g.found` member.
+
+### Properties
+
+* `minimatch` The minimatch object that the glob uses.
+* `options` The options object passed in.
+* `aborted` Boolean which is set to true when calling `abort()`.  There
+  is no way at this time to continue a glob search after aborting, but
+  you can re-use the statCache to avoid having to duplicate syscalls.
+* `cache` Convenience object.  Each field has the following possible
+  values:
+  * `false` - Path does not exist
+  * `true` - Path exists
+  * `'DIR'` - Path exists, and is a directory
+  * `'FILE'` - Path exists, and is not a directory
+  * `[file, entries, ...]` - Path exists, is a directory, and the
+    array value is the results of `fs.readdir`
+* `statCache` Cache of `fs.stat` results, to prevent statting the same
+  path multiple times.
+* `symlinks` A record of which paths are symbolic links, which is
+  relevant in resolving `**` patterns.
+* `realpathCache` An optional object which is passed to `fs.realpath`
+  to minimize unnecessary syscalls.  It is stored on the instantiated
+  Glob object, and may be re-used.
+
+### Events
+
+* `end` When the matching is finished, this is emitted with all the
+  matches found.  If the `nonull` option is set, and no match was found,
+  then the `matches` list contains the original pattern.  The matches
+  are sorted, unless the `nosort` flag is set.
+* `match` Every time a match is found, this is emitted with the
+  matched filename.
+* `error` Emitted when an unexpected error is encountered, or whenever
+  any fs error occurs if `options.strict` is set.
+* `abort` When `abort()` is called, this event is raised.
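+
+A sketch of wiring these events up on a Glob instance:
+
+```javascript
+var Glob = require("glob").Glob
+
+var g = new Glob("**/*.js")
+g.on("match", function (file) { console.log("match:", file) })
+g.on("error", function (er) { console.error(er) })
+g.on("end", function (matches) { console.log("done:", matches.length) })
+```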
+
+### Methods
+
+* `pause` Temporarily stop the search
+* `resume` Resume the search
+* `abort` Stop the search forever
+
+### Options
+
+All the options that can be passed to Minimatch can also be passed to
+Glob to change pattern matching behavior.  Also, some have been added,
+or have glob-specific ramifications.
+
+All options are false by default, unless otherwise noted.
+
+All options are added to the Glob object, as well.
+
+If you are running many `glob` operations, you can pass a Glob object
+as the `options` argument to a subsequent operation to shortcut some
+`stat` and `readdir` calls.  At the very least, you may pass in shared
+`symlinks`, `statCache`, `realpathCache`, and `cache` options, so that
+parallel glob operations will be sped up by sharing information about
+the filesystem.
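+
+One way that sharing might look (the patterns here are only
+illustrative):
+
+```javascript
+var glob = require("glob")
+
+var g = new glob.Glob("**/*.js", { nodir: true }, function (er, jsFiles) {
+  if (er) throw er
+  // re-use the caches gathered by the first walk for the second one
+  glob("**/*.css", {
+    cache: g.cache,
+    statCache: g.statCache,
+    symlinks: g.symlinks,
+    realpathCache: g.realpathCache
+  }, function (er, cssFiles) {
+    if (er) throw er
+    console.log(jsFiles.length, cssFiles.length)
+  })
+})
+```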
+
+* `cwd` The current working directory in which to search.  Defaults
+  to `process.cwd()`.
+* `root` The place where patterns starting with `/` will be mounted
+  onto.  Defaults to `path.resolve(options.cwd, "/")` (`/` on Unix
+  systems, and `C:\` or some such on Windows.)
+* `dot` Include `.dot` files in normal matches and `globstar` matches.
+  Note that an explicit dot in a portion of the pattern will always
+  match dot files.
+* `nomount` By default, a pattern starting with a forward-slash will be
+  "mounted" onto the root setting, so that a valid filesystem path is
+  returned.  Set this flag to disable that behavior.
+* `mark` Add a `/` character to directory matches.  Note that this
+  requires additional stat calls.
+* `nosort` Don't sort the results.
+* `stat` Set to true to stat *all* results.  This reduces performance
+  somewhat, and is completely unnecessary, unless `readdir` is presumed
+  to be an untrustworthy indicator of file existence.
+* `silent` When an unusual error is encountered when attempting to
+  read a directory, a warning will be printed to stderr.  Set the
+  `silent` option to true to suppress these warnings.
+* `strict` When an unusual error is encountered when attempting to
+  read a directory, the process will just continue on in search of
+  other matches.  Set the `strict` option to raise an error in these
+  cases.
+* `cache` See `cache` property above.  Pass in a previously generated
+  cache object to save some fs calls.
+* `statCache` A cache of results of filesystem information, to prevent
+  unnecessary stat calls.  While it should not normally be necessary
+  to set this, you may pass the statCache from one glob() call to the
+  options object of another, if you know that the filesystem will not
+  change between calls.  (See "Race Conditions" below.)
+* `symlinks` A cache of known symbolic links.  You may pass in a
+  previously generated `symlinks` object to save `lstat` calls when
+  resolving `**` matches.
+* `sync` DEPRECATED: use `glob.sync(pattern, opts)` instead.
+* `nounique` In some cases, brace-expanded patterns can result in the
+  same file showing up multiple times in the result set.  By default,
+  this implementation prevents duplicates in the result set.  Set this
+  flag to disable that behavior.
+* `nonull` Set to never return an empty set, instead returning a set
+  containing the pattern itself.  This is the default in glob(3).
+* `debug` Set to enable debug logging in minimatch and glob.
+* `nobrace` Do not expand `{a,b}` and `{1..3}` brace sets.
+* `noglobstar` Do not match `**` against multiple filenames.  (That is,
+  treat it as a normal `*` instead.)
+* `noext` Do not match `+(a|b)` "extglob" patterns.
+* `nocase` Perform a case-insensitive match.  Note: on
+  case-insensitive filesystems, non-magic patterns will match by
+  default, since `stat` and `readdir` will not raise errors.
+* `matchBase` Perform a basename-only match if the pattern does not
+  contain any slash characters.  That is, `*.js` would be treated as
+  equivalent to `**/*.js`, matching all js files in all directories.
+* `nodir` Do not match directories, only files.  (Note: to match
+  *only* directories, simply put a `/` at the end of the pattern.)
+* `ignore` Add a pattern or an array of patterns to exclude matches.
+* `follow` Follow symlinked directories when expanding `**` patterns.
+  Note that this can result in a lot of duplicate references in the
+  presence of cyclic links.
+* `realpath` Set to true to call `fs.realpath` on all of the results.
+  In the case of a symlink that cannot be resolved, the full absolute
+  path to the matched entry is returned (though it will usually be a
+  broken symlink)
+* `nonegate` Suppress deprecated `negate` behavior.  (See below.)
+  Default=true
+* `nocomment` Suppress deprecated `comment` behavior.  (See below.)
+  Default=true
+
+## Comparisons to other fnmatch/glob implementations
+
+While strict compliance with the existing standards is a worthwhile
+goal, some discrepancies exist between node-glob and other
+implementations, and are intentional.
+
+The double-star character `**` is supported by default, unless the
+`noglobstar` flag is set.  This is supported in the manner of bsdglob
+and bash 4.3, where `**` only has special significance if it is the only
+thing in a path part.  That is, `a/**/b` will match `a/x/y/b`, but
+`a/**b` will not.
+
+Note that symlinked directories are not crawled as part of a `**`,
+though their contents may match against subsequent portions of the
+pattern.  This prevents infinite loops and duplicates and the like.
+
+If an escaped pattern has no matches, and the `nonull` flag is set,
+then glob returns the pattern as-provided, rather than
+interpreting the character escapes.  For example,
+`glob.match([], "\\*a\\?")` will return `"\\*a\\?"` rather than
+`"*a?"`.  This is akin to setting the `nullglob` option in bash, except
+that it does not resolve escaped pattern characters.
+
+If brace expansion is not disabled, then it is performed before any
+other interpretation of the glob pattern.  Thus, a pattern like
+`+(a|{b),c)}`, which would not be valid in bash or zsh, is expanded
+**first** into the set of `+(a|b)` and `+(a|c)`, and those patterns are
+checked for validity.  Since those two are valid, matching proceeds.
+
+### Comments and Negation
+
+**Note**: In version 5 of this module, negation and comments are
+**disabled** by default.  You can explicitly set `nonegate:false` or
+`nocomment:false` to re-enable them.  They are going away entirely in
+version 6.
+
+The intent for negation would be for a pattern starting with `!` to
+match everything that *doesn't* match the supplied pattern.  However,
+the implementation is weird.  It is better to use the `ignore` option
+to set a pattern or set of patterns to exclude from matches.  If you
+want the "everything except *x*" type of behavior, you can use `**` as
+the main pattern, and set an `ignore` for the things to exclude.
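+
+A hedged sketch of that "everything except" approach:
+
+```javascript
+var glob = require("glob")
+
+// everything except node_modules and minified files
+glob("**", { ignore: ["node_modules/**", "**/*.min.js"], nodir: true },
+  function (er, files) {
+    if (er) throw er
+    console.log(files)
+  })
+```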
+
+The comments feature is added in minimatch, primarily to more easily
+support use cases like ignore files, where a `#` at the start of a
+line makes the pattern "empty".  However, in the context of a
+straightforward filesystem globber, "comments" don't make much sense.
+
+## Windows
+
+**Please only use forward-slashes in glob expressions.**
+
+Though Windows uses either `/` or `\` as its path separator, only `/`
+characters are used by this glob implementation.  You must use
+forward-slashes **only** in glob expressions.  Back-slashes will always
+be interpreted as escape characters, not path separators.
+
+Results from absolute patterns such as `/foo/*` are mounted onto the
+root setting using `path.join`.  On Windows, this will by default result
+in `/foo/*` matching `C:\foo\bar.txt`.
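+
+If a pattern originates from a native Windows path, one simple way to
+normalize it before passing it to glob (a sketch, not part of this
+module's API):
+
+```javascript
+// back-slashes become forward-slashes so they are not treated as
+// escape characters by the glob engine
+function toGlobPattern (p) {
+  return p.replace(/\\/g, "/")
+}
+
+console.log(toGlobPattern("src\\main\\js\\*.js"))   // => "src/main/js/*.js"
+```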
+
+## Race Conditions
+
+Glob searching, by its very nature, is susceptible to race conditions,
+since it relies on directory walking and such.
+
+As a result, it is possible that a file that exists when glob looks for
+it may have been deleted or modified by the time it returns the result.
+
+As part of its internal implementation, this program caches all stat
+and readdir calls that it makes, in order to cut down on system
+overhead.  However, this also makes it even more susceptible to races,
+especially if the cache or statCache objects are reused between glob
+calls.
+
+Users are thus advised not to use a glob result as a guarantee of
+filesystem state in the face of rapid changes.  For the vast majority
+of operations, this is never a problem.
+
+## Contributing
+
+Any change to behavior (including bugfixes) must come with a test.
+
+Patches that fail tests or reduce performance will be rejected.
+
+```
+# to run tests
+npm test
+
+# to re-generate test fixtures
+npm run test-regen
+
+# to benchmark against bash/zsh
+npm run bench
+
+# to profile javascript
+npm run prof
+```

http://git-wip-us.apache.org/repos/asf/cordova-windows/blob/5a6cb728/node_modules/glob/common.js
----------------------------------------------------------------------
diff --git a/node_modules/glob/common.js b/node_modules/glob/common.js
new file mode 100644
index 0000000..e36a631
--- /dev/null
+++ b/node_modules/glob/common.js
@@ -0,0 +1,245 @@
+exports.alphasort = alphasort
+exports.alphasorti = alphasorti
+exports.setopts = setopts
+exports.ownProp = ownProp
+exports.makeAbs = makeAbs
+exports.finish = finish
+exports.mark = mark
+exports.isIgnored = isIgnored
+exports.childrenIgnored = childrenIgnored
+
+function ownProp (obj, field) {
+  return Object.prototype.hasOwnProperty.call(obj, field)
+}
+
+var path = require("path")
+var minimatch = require("minimatch")
+var isAbsolute = require("path-is-absolute")
+var Minimatch = minimatch.Minimatch
+
+function alphasorti (a, b) {
+  return a.toLowerCase().localeCompare(b.toLowerCase())
+}
+
+function alphasort (a, b) {
+  return a.localeCompare(b)
+}
+
+function setupIgnores (self, options) {
+  self.ignore = options.ignore || []
+
+  if (!Array.isArray(self.ignore))
+    self.ignore = [self.ignore]
+
+  if (self.ignore.length) {
+    self.ignore = self.ignore.map(ignoreMap)
+  }
+}
+
+function ignoreMap (pattern) {
+  var gmatcher = null
+  if (pattern.slice(-3) === '/**') {
+    var gpattern = pattern.replace(/(\/\*\*)+$/, '')
+    gmatcher = new Minimatch(gpattern)
+  }
+
+  return {
+    matcher: new Minimatch(pattern),
+    gmatcher: gmatcher
+  }
+}
+
+// copy the supported options onto `self` (the Glob instance) and build
+// the Minimatch object used for pattern matching
+function setopts (self, pattern, options) {
+  if (!options)
+    options = {}
+
+  // base-matching: just use globstar for that.
+  if (options.matchBase && -1 === pattern.indexOf("/")) {
+    if (options.noglobstar) {
+      throw new Error("base matching requires globstar")
+    }
+    pattern = "**/" + pattern
+  }
+
+  self.silent = !!options.silent
+  self.pattern = pattern
+  self.strict = options.strict !== false
+  self.realpath = !!options.realpath
+  self.realpathCache = options.realpathCache || Object.create(null)
+  self.follow = !!options.follow
+  self.dot = !!options.dot
+  self.mark = !!options.mark
+  self.nodir = !!options.nodir
+  if (self.nodir)
+    self.mark = true
+  self.sync = !!options.sync
+  self.nounique = !!options.nounique
+  self.nonull = !!options.nonull
+  self.nosort = !!options.nosort
+  self.nocase = !!options.nocase
+  self.stat = !!options.stat
+  self.noprocess = !!options.noprocess
+
+  self.maxLength = options.maxLength || Infinity
+  self.cache = options.cache || Object.create(null)
+  self.statCache = options.statCache || Object.create(null)
+  self.symlinks = options.symlinks || Object.create(null)
+
+  setupIgnores(self, options)
+
+  self.changedCwd = false
+  var cwd = process.cwd()
+  if (!ownProp(options, "cwd"))
+    self.cwd = cwd
+  else {
+    self.cwd = options.cwd
+    self.changedCwd = path.resolve(options.cwd) !== cwd
+  }
+
+  self.root = options.root || path.resolve(self.cwd, "/")
+  self.root = path.resolve(self.root)
+  if (process.platform === "win32")
+    self.root = self.root.replace(/\\/g, "/")
+
+  self.nomount = !!options.nomount
+
+  // disable comments and negation unless the user explicitly
+  // passes in false as the option.
+  options.nonegate = options.nonegate === false ? false : true
+  options.nocomment = options.nocomment === false ? false : true
+  deprecationWarning(options)
+
+  self.minimatch = new Minimatch(pattern, options)
+  self.options = self.minimatch.options
+}
+
+// TODO(isaacs): remove entirely in v6
+// exported to reset in tests
+exports.deprecationWarned
+function deprecationWarning(options) {
+  if (!options.nonegate || !options.nocomment) {
+    if (process.noDeprecation !== true && !exports.deprecationWarned) {
+      var msg = 'glob WARNING: comments and negation will be disabled in v6'
+      if (process.throwDeprecation)
+        throw new Error(msg)
+      else if (process.traceDeprecation)
+        console.trace(msg)
+      else
+        console.error(msg)
+
+      exports.deprecationWarned = true
+    }
+  }
+}
+
+// collect the raw matches into self.found: dedupe (unless nounique),
+// sort (unless nosort), mark directories, and apply ignore patterns
+function finish (self) {
+  var nou = self.nounique
+  var all = nou ? [] : Object.create(null)
+
+  for (var i = 0, l = self.matches.length; i < l; i ++) {
+    var matches = self.matches[i]
+    if (!matches || Object.keys(matches).length === 0) {
+      if (self.nonull) {
+        // do like the shell, and spit out the literal glob
+        var literal = self.minimatch.globSet[i]
+        if (nou)
+          all.push(literal)
+        else
+          all[literal] = true
+      }
+    } else {
+      // had matches
+      var m = Object.keys(matches)
+      if (nou)
+        all.push.apply(all, m)
+      else
+        m.forEach(function (m) {
+          all[m] = true
+        })
+    }
+  }
+
+  if (!nou)
+    all = Object.keys(all)
+
+  if (!self.nosort)
+    all = all.sort(self.nocase ? alphasorti : alphasort)
+
+  // at *some* point we statted all of these
+  if (self.mark) {
+    for (var i = 0; i < all.length; i++) {
+      all[i] = self._mark(all[i])
+    }
+    if (self.nodir) {
+      all = all.filter(function (e) {
+        return !(/\/$/.test(e))
+      })
+    }
+  }
+
+  if (self.ignore.length)
+    all = all.filter(function(m) {
+      return !isIgnored(self, m)
+    })
+
+  self.found = all
+}
+
+// add or remove a trailing slash on `p` so that it reflects whether the
+// cached entry is a directory
+function mark (self, p) {
+  var abs = makeAbs(self, p)
+  var c = self.cache[abs]
+  var m = p
+  if (c) {
+    var isDir = c === 'DIR' || Array.isArray(c)
+    var slash = p.slice(-1) === '/'
+
+    if (isDir && !slash)
+      m += '/'
+    else if (!isDir && slash)
+      m = m.slice(0, -1)
+
+    if (m !== p) {
+      var mabs = makeAbs(self, m)
+      self.statCache[mabs] = self.statCache[abs]
+      self.cache[mabs] = self.cache[abs]
+    }
+  }
+
+  return m
+}
+
+// lotta situps...
+// resolve `f` to an absolute path, honoring the root and cwd settings
+function makeAbs (self, f) {
+  var abs = f
+  if (f.charAt(0) === '/') {
+    abs = path.join(self.root, f)
+  } else if (isAbsolute(f) || f === '') {
+    abs = f
+  } else if (self.changedCwd) {
+    abs = path.resolve(self.cwd, f)
+  } else {
+    abs = path.resolve(f)
+  }
+  return abs
+}
+
+
+// Return true if the path is ignored.  If an ignore pattern ends with
+// globstar '**', the parent directory is ignored as well: e.g. for the
+// pattern 'node_modules/**', 'node_modules' itself is ignored along
+// with its contents.
+function isIgnored (self, path) {
+  if (!self.ignore.length)
+    return false
+
+  return self.ignore.some(function(item) {
+    return item.matcher.match(path) || !!(item.gmatcher && item.gmatcher.match(path))
+  })
+}
+
+// return true if everything under `path` is ignored, i.e. some ignore
+// pattern of the form 'dir/**' matches `path` itself
+function childrenIgnored (self, path) {
+  if (!self.ignore.length)
+    return false
+
+  return self.ignore.some(function(item) {
+    return !!(item.gmatcher && item.gmatcher.match(path))
+  })
+}

