3.1.4 (Faster crawling & negligible DOM XSS false positives)
- Negligible DOM XSS false positives
- x10 Faster crawling by
    - Removing additional request for detecting DOM XSS
    - Skipping testing of a parameter multiple times
s0md3v authored Apr 8, 2019
2 parents 7684889 + f9aee58 commit 4032e40
Showing 6 changed files with 64 additions and 48 deletions.
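Taken together, the diffs below change the call flow: photon() now takes the skipDOM flag and performs the DOM source/sink check while crawling, so crawl() no longer re-fetches each URL just for DOM scanning, and each form parameter is only fuzzed once per action URL. A simplified sketch of the resulting flow, run serially for brevity (the real xsstrike.py uses a thread pool, and the values below are placeholder examples, not the tool's defaults):

```python
# Simplified, illustrative call flow after this commit; the real xsstrike.py
# also prepares core.config.globalVariables (headers, checkedForms, etc.)
# before crawling starts.
from urllib.parse import urlparse

from core.photon import photon
from modes.crawl import crawl

target = 'http://example.com/page.php?q=query'   # hypothetical seed URL
headers, level, threadCount, delay, timeout = {}, 2, 10, 0, 10
skipDOM, blindXSS, blindPayload, encoding = False, False, '', None

scheme = urlparse(target).scheme
host = urlparse(target).netloc
main_url = scheme + '://' + host

# photon() now receives skipDOM and runs the DOM source/sink check on each
# response while crawling, instead of leaving it to a second request later.
crawlingResult = photon(target, headers, level, threadCount, delay, timeout, skipDOM)
forms = crawlingResult[0]

# crawl() no longer takes domURL or skipDOM; it only fuzzes reflected
# parameters, and the checkedForms bookkeeping stops repeat testing.
for form in forms:
    crawl(scheme, host, main_url, form, blindXSS, blindPayload,
          headers, delay, timeout, encoding)
```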
CHANGELOG.md (6 changes: 6 additions & 0 deletions)
@@ -1,3 +1,9 @@
### 3.1.4
- Negligible DOM XSS false positives
- x10 Faster crawling by
    - Removing additional request for detecting DOM XSS
    - Skipping testing of a parameter multiple times

### 3.1.3
- Removed browser engine emulation
- Fixed a few bugs
core/config.py (2 changes: 1 addition & 1 deletion)
@@ -1,4 +1,4 @@
changes = '''Removed browser engine emulation;Fixed a few bugs;Added a plugin to scan for outdated JS libraries;Improved crawling and DOM scanning'''
changes = '''Negligible DOM XSS false positives;x10 faster crawling'''
globalVariables = {} # it holds variables during runtime for collaboration across modules

defaultEditor = 'nano'
core/dom.py (5 changes: 4 additions & 1 deletion)
@@ -49,4 +49,7 @@ def dom(response):
num += 1
except MemoryError:
pass
return highlighted
if (yellow and red) in highlighted:
return highlighted
else:
return []
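The practical effect of the new return path is that dom() hands back the highlighted snippet only when the highlight check above passes, and an empty list otherwise; that empty list is what removes most of the DOM XSS false positives. A minimal caller-side sketch of relying on that contract, using an illustrative helper name (report_dom_findings is not part of the codebase):

```python
from core.dom import dom
from core.log import setup_logger

logger = setup_logger(__name__)


def report_dom_findings(url, response):
    """Log highlighted DOM source/sink lines for a response, if any.

    Illustrative helper: dom() now returns [] when the response did not
    yield the highlighted source/sink combination, so an empty result
    simply means "nothing to report" rather than a false positive.
    """
    highlighted = dom(response)
    if not highlighted:
        return False
    logger.good('Potentially vulnerable objects found at %s' % url)
    logger.red_line(level='good')
    for line in highlighted:
        logger.no_format(line, level='good')
    logger.red_line(level='good')
    return True
```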
core/photon.py (23 changes: 17 additions & 6 deletions)
@@ -1,25 +1,26 @@
import re
import concurrent.futures
from re import findall
from urllib.parse import urlparse


from plugins.retireJs import retireJs
from core.dom import dom
from core.log import setup_logger
from core.utils import getUrl, getParams
from core.requester import requester
from core.zetanize import zetanize
from core.log import setup_logger
from plugins.retireJs import retireJs

logger = setup_logger(__name__)


def photon(seedUrl, headers, level, threadCount, delay, timeout):
def photon(seedUrl, headers, level, threadCount, delay, timeout, skipDOM):
forms = [] # web forms
processed = set() # urls that have been crawled
storage = set() # urls that belong to the target i.e. in-scope
schema = urlparse(seedUrl).scheme # extract the scheme e.g. http or https
host = urlparse(seedUrl).netloc # extract the host e.g. example.com
main_url = schema + '://' + host # join scheme and host to make the root url
storage.add(seedUrl) # add the url to storage
checkedDOMs = []

def rec(target):
processed.add(target)
@@ -38,8 +39,18 @@ def rec(target):
forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
response = requester(url, params, headers, True, delay, timeout).text
retireJs(url, response)
if not skipDOM:
highlighted = dom(response)
clean_highlighted = ''.join([re.sub(r'^\d+\s+', '', line) for line in highlighted])
if highlighted and clean_highlighted not in checkedDOMs:
checkedDOMs.append(clean_highlighted)
logger.good('Potentially vulnerable objects found at %s' % url)
logger.red_line(level='good')
for line in highlighted:
logger.no_format(line, level='good')
logger.red_line(level='good')
forms.append(zetanize(response))
matches = findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
matches = re.findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
for link in matches: # iterate over the matches
# remove everything after a "#" to deal with in-page anchors
link = link.split('#')[0]
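The new in-crawl DOM check also deduplicates findings: the leading line numbers that dom() adds to each highlighted line are stripped before the snippet is compared against checkedDOMs, so the same vulnerable template served at many URLs is reported only once. A standalone sketch of that dedup step, with illustrative names (isNewDOMFinding is not in the codebase):

```python
import re

# Illustrative stand-in for the checkedDOMs list kept inside photon().
checkedDOMs = []


def isNewDOMFinding(highlighted):
    """Return True only the first time a highlighted DOM snippet is seen.

    dom() prefixes each highlighted line with its line number, so the same
    snippet found on two different pages would compare as different text;
    stripping the leading digits first makes the comparison content-based.
    """
    if not highlighted:
        return False
    normalized = ''.join(re.sub(r'^\d+\s+', '', line) for line in highlighted)
    if normalized in checkedDOMs:
        return False
    checkedDOMs.append(normalized)
    return True
```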
modes/crawl.py (67 changes: 31 additions & 36 deletions)
@@ -1,9 +1,9 @@
import copy
import re

import core.config
from core.colors import red, good, green, end
from core.config import xsschecker
from core.dom import dom
from core.filterChecker import filterChecker
from core.generator import generator
from core.htmlParser import htmlParser
@@ -13,16 +13,7 @@
logger = setup_logger(__name__)


def crawl(scheme, host, main_url, form, domURL, blindXSS, blindPayload, headers, delay, timeout, skipDOM, encoding):
if domURL and not skipDOM:
response = requester(domURL, {}, headers, True, delay, timeout).text
highlighted = dom(response)
if highlighted:
logger.good('Potentially vulnerable objects found at %s' % domURL)
logger.red_line(level='good')
for line in highlighted:
logger.no_format(line, level='good')
logger.red_line(level='good')
def crawl(scheme, host, main_url, form, blindXSS, blindPayload, headers, delay, timeout, encoding):
if form:
for each in form.values():
url = each['action']
@@ -35,35 +26,39 @@ def crawl(scheme, host, main_url, form, domURL, blindXSS, blindPayload, headers,
url = scheme + '://' + host + url
elif re.match(r'\w', url[0]):
url = scheme + '://' + host + '/' + url
if url not in core.config.globalVariables['checkedForms']:
core.config.globalVariables['checkedForms'][url] = []
method = each['method']
GET = True if method == 'get' else False
inputs = each['inputs']
paramData = {}
for one in inputs:
paramData[one['name']] = one['value']
for paramName in paramData.keys():
paramsCopy = copy.deepcopy(paramData)
paramsCopy[paramName] = xsschecker
response = requester(
url, paramsCopy, headers, GET, delay, timeout)
parsedResponse = htmlParser(response, encoding)
occurences = parsedResponse[0]
positions = parsedResponse[1]
efficiencies = filterChecker(
url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
vectors = generator(occurences, response.text)
if vectors:
for confidence, vects in vectors.items():
try:
payload = list(vects)[0]
logger.vuln('Vulnerable webpage: %s%s%s' %
(green, url, end))
logger.vuln('Vector for %s%s%s: %s' %
(green, paramName, end, payload))
break
except IndexError:
pass
if blindXSS and blindPayload:
paramsCopy[paramName] = blindPayload
requester(url, paramsCopy, headers,
GET, delay, timeout)
if paramName not in core.config.globalVariables['checkedForms'][url]:
core.config.globalVariables['checkedForms'][url].append(paramName)
paramsCopy = copy.deepcopy(paramData)
paramsCopy[paramName] = xsschecker
response = requester(
url, paramsCopy, headers, GET, delay, timeout)
parsedResponse = htmlParser(response, encoding)
occurences = parsedResponse[0]
positions = parsedResponse[1]
efficiencies = filterChecker(
url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
vectors = generator(occurences, response.text)
if vectors:
for confidence, vects in vectors.items():
try:
payload = list(vects)[0]
logger.vuln('Vulnerable webpage: %s%s%s' %
(green, url, end))
logger.vuln('Vector for %s%s%s: %s' %
(green, paramName, end, payload))
break
except IndexError:
pass
if blindXSS and blindPayload:
paramsCopy[paramName] = blindPayload
requester(url, paramsCopy, headers,
GET, delay, timeout)
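This is the second speedup from the changelog: crawl() records, per action URL, which parameters it has already fuzzed in core.config.globalVariables['checkedForms'] and skips repeats, so a parameter shared by many crawled forms is only tested once. The same bookkeeping in isolation, with an illustrative helper (shouldTest is not in the codebase):

```python
# Illustrative stand-in for core.config.globalVariables['checkedForms'],
# which xsstrike.py now initializes to {} before crawling starts.
checkedForms = {}  # action URL -> parameter names already fuzzed


def shouldTest(url, paramName):
    """Return True only the first time a (url, parameter) pair is seen."""
    tested = checkedForms.setdefault(url, [])
    if paramName in tested:
        return False      # this parameter was already fuzzed for this URL
    tested.append(paramName)
    return True
```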
xsstrike.py (9 changes: 5 additions & 4 deletions)
@@ -6,7 +6,7 @@

# Just a fancy ass banner
print('''%s
\tXSStrike %sv3.1.3
\tXSStrike %sv3.1.4
%s''' % (red, white, end))

try:
@@ -133,6 +133,7 @@

core.config.globalVariables['headers'] = headers
core.config.globalVariables['checkedScripts'] = set()
core.config.globalVariables['checkedForms'] = {}
core.config.globalVariables['definitions'] = json.loads('\n'.join(reader(sys.path[0] + '/db/definitions.json')))

if path:
@@ -181,7 +182,7 @@
host = urlparse(target).netloc
main_url = scheme + '://' + host
crawlingResult = photon(target, headers, level,
threadCount, delay, timeout)
threadCount, delay, timeout, skipDOM)
forms = crawlingResult[0]
domURLs = list(crawlingResult[1])
difference = abs(len(domURLs) - len(forms))
@@ -192,8 +193,8 @@
for i in range(difference):
domURLs.append(0)
threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=threadCount)
futures = (threadpool.submit(crawl, scheme, host, main_url, form, domURL,
blindXSS, blindPayload, headers, delay, timeout, skipDOM, encoding) for form, domURL in zip(forms, domURLs))
futures = (threadpool.submit(crawl, scheme, host, main_url, form,
blindXSS, blindPayload, headers, delay, timeout, encoding) for form, domURL in zip(forms, domURLs))
for i, _ in enumerate(concurrent.futures.as_completed(futures)):
if i + 1 == len(forms) or (i + 1) % threadCount == 0:
logger.info('Progress: %i/%i\r' % (i + 1, len(forms)))

1 comment on commit 4032e40

@xclydx12x
Lol
