mirror of
https://github.com/DeNNiiInc/UltyScan.git
synced 2026-04-17 18:26:00 +00:00
UltyScan Documentation Overhaul
This commit is contained in:
137
bin/github-subdomains.py
Normal file
137
bin/github-subdomains.py
Normal file
@@ -0,0 +1,137 @@
|
||||
#!/usr/bin/python3.5
|
||||
|
||||
# I don't believe in license.
|
||||
# You can do whatever you want with this program.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import time
|
||||
import requests
|
||||
import random
|
||||
import argparse
|
||||
from functools import partial
|
||||
from colored import fg, bg, attr
|
||||
from multiprocessing.dummy import Pool
|
||||
|
||||
|
||||
# Fallback token store: a ".tokens" file (one GitHub token per line)
# sitting next to this script; used when -t/--token is not supplied.
TOKENS_FILE = os.path.dirname(os.path.realpath(__file__))+'/.tokens'
def githubApiSearchCode( search, page ):
    """Query the GitHub code-search API for one page of results.

    search: raw search expression (a double-quoted domain); percent-encoded
            here before being placed in the query string.
    page:   1-based result page number.

    Returns the decoded JSON payload (a dict) on success, or False on any
    transport/JSON error. Relies on the module-level t_tokens list and
    authenticates with a randomly chosen token to spread rate limits.
    """
    from urllib.parse import quote

    # Rotate through the available tokens so one token's rate limit
    # is not exhausted as quickly.
    headers = {"Authorization":"token "+random.choice(t_tokens)}
    # Fix: the search expression contains double quotes (and may contain
    # other reserved characters); it must be percent-encoded to form a
    # valid URL instead of being concatenated in raw.
    url = 'https://api.github.com/search/code?s=indexed&type=Code&o=desc&q=' + quote(search) + '&page=' + str(page)
    # print(url)

    try:
        r = requests.get( url, headers=headers, timeout=5 )
        json = r.json()
        return json
    except Exception as e:
        # Broad catch on purpose: any network or decode error simply ends
        # paging; the caller treats False as "stop".
        print( "%s[-] error occurred: %s%s" % (fg('red'),e,attr(0)) )
        return False
def getRawUrl( result ):
    """Return the raw.githubusercontent.com URL for a search result.

    Takes one item from the GitHub code-search response and rewrites its
    'html_url' (a github.com /blob/ page) into the direct raw-content URL.
    """
    rewritten = result['html_url']
    # Two textual rewrites: swap the host, then drop the '/blob' path part.
    for old, new in (
        ('https://github.com/', 'https://raw.githubusercontent.com/'),
        ('/blob/', '/'),
    ):
        rewritten = rewritten.replace(old, new)
    return rewritten
def readCode( regexp, source, result ):
    """Fetch one search result's raw file and print every new subdomain found.

    regexp: compiled-or-string pattern with groups; group 0 of each match
            is the candidate subdomain.
    source: when truthy, append the GitHub page URL where it was found.
    result: one item dict from the code-search API response.

    Prints to stdout and records seen subdomains in the module-level
    t_history list to suppress duplicates.
    """
    code = doGetCode( getRawUrl( result ) )
    if not code:
        return

    for match in re.findall( regexp, code ):
        # findall() yields tuples here (the patterns contain groups);
        # index 0 is the full subdomain. Stripping '2F' removes leftovers
        # of url-encoded slashes (%2F) glued onto the hostname.
        candidate = match[0].replace('2F','').lower().strip()
        if not candidate or candidate in t_history:
            continue
        t_history.append( candidate )
        line = "%s" % candidate
        if source:
            line += "\t-> %s" % result['html_url']
        sys.stdout.write( line + "\n" )
def doGetCode( url ):
    """Download a raw file and return its text, or False on any error.

    Network failures are reported to stdout but never raised: the caller
    treats False as "nothing to scan" for this result.
    """
    try:
        response = requests.get( url, timeout=5 )
    except Exception as e:
        sys.stdout.write( "%s[-] error occurred: %s%s\n" % (fg('red'),e,attr(0)) )
        return False
    else:
        return response.text
# ---------------- command-line interface ----------------
parser = argparse.ArgumentParser()
parser.add_argument( "-t","--token",help="auth token (required)" )
parser.add_argument( "-d","--domain",help="domain you are looking for (required)" )
parser.add_argument( "-e","--extend",help="also look for <dummy>example.com", action="store_true" )
parser.add_argument( "-s","--source",help="display first url where subdomains are found", action="store_true" )
# Fix: parse_args() was previously called twice, with the first result
# silently discarded.
args = parser.parse_args()

# ---------------- auth tokens ----------------
# Tokens come from -t (comma separated) or from the .tokens file next to
# the script (one per line). Fix: blank entries are now dropped — an empty
# token produced an invalid "Authorization: token " header when randomly
# chosen by githubApiSearchCode().
t_tokens = []
if args.token:
    t_tokens = [tok.strip() for tok in args.token.split(',') if tok.strip()]
elif os.path.isfile(TOKENS_FILE):
    with open(TOKENS_FILE) as fp:
        t_tokens = [line.strip() for line in fp if line.strip()]

if not len(t_tokens):
    parser.error( 'auth token is missing' )

# store_true already yields a bool
_source = bool(args.source)

if args.domain:
    _domain = args.domain
else:
    parser.error( 'domain is missing' )

t_history = []
page = 1
_search = '"' + _domain + '"'

### this is a test, looks like we got more result that way
# Search on the registrable name only (e.g. "example" instead of
# "example.com") — returns more hits.
import tldextract
t_host_parse = tldextract.extract( _domain )
_search = '"' + t_host_parse.domain + '"'
# print( t_host_parse )
# exit()
###

# egrep -io "[0-9a-z_\-\.]+\.([0-9a-z_\-]+)?`echo $h|awk -F '.' '{print $(NF-1)}'`([0-9a-z_\-\.]+)?\.[a-z]{1,5}"

# Fix: raw strings throughout — '\.' inside a normal string literal is an
# invalid escape sequence (DeprecationWarning on Python 3.6+).
if args.extend:
    # loose match: anything containing the registrable name, on any TLD
    _regexp = r'([0-9a-z_\-\.]+\.([0-9a-z_\-]+)?' + t_host_parse.domain + r'([0-9a-z_\-\.]+)?\.[a-z]{1,5})'
else:
    # strict match: proper subdomains of the exact domain given
    _regexp = r'(([0-9a-zA-Z_\-\.]+)\.' + _domain.replace('.', r'\.') + ')'
# print(_regexp)

# Page through search results until GitHub stops returning items, or
# returns an error/rate-limit payload (which carries documentation_url).
while True:
    time.sleep( 1 )  # crude rate limiting between API calls
    t_json = githubApiSearchCode( _search, page )
    # print(t_json)
    page = page + 1

    if not t_json or 'documentation_url' in t_json or not 'items' in t_json or not len(t_json['items']):
        break

    # Scan this page's result files concurrently (thread pool: I/O bound).
    pool = Pool( 30 )
    pool.map( partial(readCode,_regexp,_source), t_json['items'] )
    pool.close()
    pool.join()
11145
bin/http-default-accounts-fingerprints-nndefaccts.lua
Normal file
11145
bin/http-default-accounts-fingerprints-nndefaccts.lua
Normal file
File diff suppressed because it is too large
Load Diff
281
bin/nmap-bootstrap.xsl
Normal file
281
bin/nmap-bootstrap.xsl
Normal file
@@ -0,0 +1,281 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<!--
|
||||
Nmap Bootstrap XSL
|
||||
Creative Commons BY-SA
|
||||
Andreas Hontzia (@honze_net)
|
||||
-->
|
||||
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
|
||||
<xsl:output method="html" encoding="utf-8" indent="yes" doctype-system="about:legacy-compat"/>
|
||||
<xsl:template match="/">
|
||||
<html lang="en">
|
||||
<head>
|
||||
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous"/>
|
||||
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap-theme.min.css" integrity="sha384-rHyoN1iRsVXV4nD0JutlnGaslCJuC7uwjduW9SVrLvRYooPp2bWYgmgJQIXwl/Sp" crossorigin="anonymous"/>
|
||||
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.19/css/dataTables.bootstrap.min.css"/>
|
||||
<script src="https://code.jquery.com/jquery-3.3.1.js"></script>
|
||||
<script src="https://cdn.datatables.net/1.10.19/js/jquery.dataTables.min.js"></script>
|
||||
<script src="https://cdn.datatables.net/1.10.19/js/dataTables.bootstrap.min.js"></script>
|
||||
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
|
||||
<style>
|
||||
.target:before {
|
||||
content: "";
|
||||
display: block;
|
||||
height: 50px;
|
||||
margin: -20px 0 0;
|
||||
}
|
||||
@media only screen and (min-width:1900px) {
|
||||
.container {
|
||||
width: 1800px;
|
||||
}
|
||||
}
|
||||
.footer {
|
||||
margin-top:60px;
|
||||
padding-top:60px;
|
||||
width: 100%;
|
||||
height: 180px;
|
||||
background-color: #f5f5f5;
|
||||
}
|
||||
</style>
|
||||
<title>Scan Report Nmap <xsl:value-of select="/nmaprun/@version"/></title>
|
||||
</head>
|
||||
<body>
|
||||
<nav class="navbar navbar-default navbar-fixed-top">
|
||||
<div class="container-fluid">
|
||||
<div class="navbar-header">
|
||||
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false">
|
||||
<span class="sr-only">Toggle navigation</span>
|
||||
<span class="icon-bar"></span>
|
||||
<span class="icon-bar"></span>
|
||||
<span class="icon-bar"></span>
|
||||
</button>
|
||||
<a class="navbar-brand" href="#"><span class="glyphicon glyphicon-home"></span></a>
|
||||
</div>
|
||||
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
|
||||
<ul class="nav navbar-nav">
|
||||
<li><a href="#scannedhosts">Scanned Hosts</a></li>
|
||||
<li><a href="#onlinehosts">Online Hosts</a></li>
|
||||
<li><a href="#openservices">Open Services</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
<div class="container">
|
||||
<div class="jumbotron">
|
||||
<h1>Scan Report<br/><small>Nmap <xsl:value-of select="/nmaprun/@version"/></small></h1>
|
||||
<pre style="white-space:pre-wrap; word-wrap:break-word;"><xsl:value-of select="/nmaprun/@args"/></pre>
|
||||
<p class="lead">
|
||||
<xsl:value-of select="/nmaprun/@startstr"/> – <xsl:value-of select="/nmaprun/runstats/finished/@timestr"/><br/>
|
||||
<xsl:value-of select="/nmaprun/runstats/hosts/@total"/> hosts scanned.
|
||||
<xsl:value-of select="/nmaprun/runstats/hosts/@up"/> hosts up.
|
||||
<xsl:value-of select="/nmaprun/runstats/hosts/@down"/> hosts down.
|
||||
</p>
|
||||
<div class="progress">
|
||||
<div class="progress-bar progress-bar-success" style="width: 0%">
|
||||
<xsl:attribute name="style">width:<xsl:value-of select="/nmaprun/runstats/hosts/@up div /nmaprun/runstats/hosts/@total * 100"/>%;</xsl:attribute>
|
||||
<xsl:value-of select="/nmaprun/runstats/hosts/@up"/>
|
||||
<span class="sr-only"></span>
|
||||
</div>
|
||||
<div class="progress-bar progress-bar-danger" style="width: 0%">
|
||||
<xsl:attribute name="style">width:<xsl:value-of select="/nmaprun/runstats/hosts/@down div /nmaprun/runstats/hosts/@total * 100"/>%;</xsl:attribute>
|
||||
<xsl:value-of select="/nmaprun/runstats/hosts/@down"/>
|
||||
<span class="sr-only"></span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<h2 id="scannedhosts" class="target">Scanned Hosts<xsl:if test="/nmaprun/runstats/hosts/@down > 1024"><small> (offline hosts are hidden)</small></xsl:if></h2>
|
||||
<div class="table-responsive">
|
||||
<table id="table-overview" class="table table-striped dataTable" role="grid">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>State</th>
|
||||
<th>Address</th>
|
||||
<th>Hostname</th>
|
||||
<th>TCP (open)</th>
|
||||
<th>UDP (open)</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<xsl:choose>
|
||||
<xsl:when test="/nmaprun/runstats/hosts/@down > 1024">
|
||||
<xsl:for-each select="/nmaprun/host[status/@state='up']">
|
||||
<tr>
|
||||
<td><span class="label label-danger"><xsl:if test="status/@state='up'"><xsl:attribute name="class">label label-success</xsl:attribute></xsl:if><xsl:value-of select="status/@state"/></span></td>
|
||||
<td><xsl:value-of select="address/@addr"/></td>
|
||||
<td><xsl:value-of select="hostnames/hostname/@name"/></td>
|
||||
<td><xsl:value-of select="count(ports/port[state/@state='open' and @protocol='tcp'])"/></td>
|
||||
<td><xsl:value-of select="count(ports/port[state/@state='open' and @protocol='udp'])"/></td>
|
||||
</tr>
|
||||
</xsl:for-each>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
<xsl:for-each select="/nmaprun/host">
|
||||
<tr>
|
||||
<td><span class="label label-danger"><xsl:if test="status/@state='up'"><xsl:attribute name="class">label label-success</xsl:attribute></xsl:if><xsl:value-of select="status/@state"/></span></td>
|
||||
<td><xsl:value-of select="address/@addr"/></td>
|
||||
<td><xsl:value-of select="hostnames/hostname/@name"/></td>
|
||||
<td><xsl:value-of select="count(ports/port[state/@state='open' and @protocol='tcp'])"/></td>
|
||||
<td><xsl:value-of select="count(ports/port[state/@state='open' and @protocol='udp'])"/></td>
|
||||
</tr>
|
||||
</xsl:for-each>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<script>
|
||||
$(document).ready(function() {
|
||||
$('#table-overview').DataTable();
|
||||
});
|
||||
</script>
|
||||
<h2 id="onlinehosts" class="target">Online Hosts</h2>
|
||||
<xsl:for-each select="/nmaprun/host[status/@state='up']">
|
||||
<div class="panel panel-default">
|
||||
<div class="panel-heading">
|
||||
<h3 class="panel-title"><xsl:value-of select="address/@addr"/><xsl:if test="count(hostnames/hostname) > 0"> - <xsl:value-of select="hostnames/hostname/@name"/></xsl:if></h3>
|
||||
</div>
|
||||
<div class="panel-body">
|
||||
<xsl:if test="count(hostnames/hostname) > 0">
|
||||
<h4>Hostnames</h4>
|
||||
<ul>
|
||||
<xsl:for-each select="hostnames/hostname">
|
||||
<li><xsl:value-of select="@name"/> (<xsl:value-of select="@type"/>)</li>
|
||||
</xsl:for-each>
|
||||
</ul>
|
||||
</xsl:if>
|
||||
<h4>Ports</h4>
|
||||
<div class="table-responsive">
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Port</th>
|
||||
<th>Protocol</th>
|
||||
<th>State<br/>Reason</th>
|
||||
<th>Service</th>
|
||||
<th>Product</th>
|
||||
<th>Version</th>
|
||||
<th>Extra Info</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<xsl:for-each select="ports/port">
|
||||
<xsl:choose>
|
||||
<xsl:when test="state/@state = 'open'">
|
||||
<tr class="success">
|
||||
<td title="Port"><xsl:value-of select="@portid"/></td>
|
||||
<td title="Protocol"><xsl:value-of select="@protocol"/></td>
|
||||
<td title="State / Reason"><xsl:value-of select="state/@state"/><br/><xsl:value-of select="state/@reason"/></td>
|
||||
<td title="Service"><xsl:value-of select="service/@name"/></td>
|
||||
<td title="Product"><xsl:value-of select="service/@product"/></td>
|
||||
<td title="Version"><xsl:value-of select="service/@version"/></td>
|
||||
<td title="Extra Info"><xsl:value-of select="service/@extrainfo"/></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td colspan="7">
|
||||
<a><xsl:attribute name="href">https://nvd.nist.gov/vuln/search/results?form_type=Advanced&amp;cves=on&amp;cpe_version=<xsl:value-of select="service/cpe"/></xsl:attribute><xsl:value-of select="service/cpe"/></a>
|
||||
<xsl:for-each select="script">
|
||||
<h5><xsl:value-of select="@id"/></h5>
|
||||
<pre style="white-space:pre-wrap; word-wrap:break-word;"><xsl:value-of select="@output"/></pre>
|
||||
</xsl:for-each>
|
||||
</td>
|
||||
</tr>
|
||||
</xsl:when>
|
||||
<xsl:when test="state/@state = 'filtered'">
|
||||
<tr class="warning">
|
||||
<td><xsl:value-of select="@portid"/></td>
|
||||
<td><xsl:value-of select="@protocol"/></td>
|
||||
<td><xsl:value-of select="state/@state"/><br/><xsl:value-of select="state/@reason"/></td>
|
||||
<td><xsl:value-of select="service/@name"/></td>
|
||||
<td><xsl:value-of select="service/@product"/></td>
|
||||
<td><xsl:value-of select="service/@version"/></td>
|
||||
<td><xsl:value-of select="service/@extrainfo"/></td>
|
||||
</tr>
|
||||
</xsl:when>
|
||||
<xsl:when test="state/@state = 'closed'">
|
||||
<tr class="active">
|
||||
<td><xsl:value-of select="@portid"/></td>
|
||||
<td><xsl:value-of select="@protocol"/></td>
|
||||
<td><xsl:value-of select="state/@state"/><br/><xsl:value-of select="state/@reason"/></td>
|
||||
<td><xsl:value-of select="service/@name"/></td>
|
||||
<td><xsl:value-of select="service/@product"/></td>
|
||||
<td><xsl:value-of select="service/@version"/></td>
|
||||
<td><xsl:value-of select="service/@extrainfo"/></td>
|
||||
</tr>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
<tr class="info">
|
||||
<td><xsl:value-of select="@portid"/></td>
|
||||
<td><xsl:value-of select="@protocol"/></td>
|
||||
<td><xsl:value-of select="state/@state"/><br/><xsl:value-of select="state/@reason"/></td>
|
||||
<td><xsl:value-of select="service/@name"/></td>
|
||||
<td><xsl:value-of select="service/@product"/></td>
|
||||
<td><xsl:value-of select="service/@version"/></td>
|
||||
<td><xsl:value-of select="service/@extrainfo"/></td>
|
||||
</tr>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:for-each>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<xsl:if test="count(hostscript/script) > 0">
|
||||
<h4>Host Script</h4>
|
||||
</xsl:if>
|
||||
<xsl:for-each select="hostscript/script">
|
||||
<h5><xsl:value-of select="@id"/></h5>
|
||||
<pre style="white-space:pre-wrap; word-wrap:break-word;"><xsl:value-of select="@output"/></pre>
|
||||
</xsl:for-each>
|
||||
</div>
|
||||
</div>
|
||||
</xsl:for-each>
|
||||
<h2 id="openservices" class="target">Open Services</h2>
|
||||
<div class="table-responsive">
|
||||
<table id="table-services" class="table table-striped dataTable" role="grid">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Address</th>
|
||||
<th>Port</th>
|
||||
<th>Protocol</th>
|
||||
<th>Service</th>
|
||||
<th>Product</th>
|
||||
<th>Version</th>
|
||||
<th>CPE</th>
|
||||
<th>Extra info</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<xsl:for-each select="/nmaprun/host">
|
||||
<xsl:for-each select="ports/port[state/@state='open']">
|
||||
<tr>
|
||||
<td><xsl:value-of select="../../address/@addr"/><xsl:if test="count(../../hostnames/hostname) > 0"> - <xsl:value-of select="../../hostnames/hostname/@name"/></xsl:if></td>
|
||||
<td><xsl:value-of select="@portid"/></td>
|
||||
<td><xsl:value-of select="@protocol"/></td>
|
||||
<td><xsl:value-of select="service/@name"/></td>
|
||||
<td><xsl:value-of select="service/@product"/></td>
|
||||
<td><xsl:value-of select="service/@version"/></td>
|
||||
<td><xsl:value-of select="service/cpe"/></td>
|
||||
<td><xsl:value-of select="service/@extrainfo"/></td>
|
||||
</tr>
|
||||
</xsl:for-each>
|
||||
</xsl:for-each>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<script>
|
||||
$(document).ready(function() {
|
||||
$('#table-services').DataTable();
|
||||
});
|
||||
</script>
|
||||
</div>
|
||||
<footer class="footer">
|
||||
<div class="container">
|
||||
<p class="text-muted">
|
||||
This report was generated with <a href="https://github.com/honze-net/nmap-bootstrap-xsl">Nmap Bootstrap XSL</a>.<br/>
|
||||
Licensed under <a href="https://creativecommons.org/licenses/by-sa/4.0/">Creative Commons BY-SA</a>.<br/>
|
||||
Designed and built by Andreas Hontzia (<a href="https://www.twitter.com/honze_net">@honze_net</a>).<br/>
|
||||
</p>
|
||||
</div>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
</xsl:template>
|
||||
</xsl:stylesheet>
|
||||
601
bin/pyText2pdf.py
Normal file
601
bin/pyText2pdf.py
Normal file
@@ -0,0 +1,601 @@
|
||||
#! /usr/bin/env python
|
||||
"""
|
||||
pyText2Pdf - Python script to convert plain text files into Adobe
|
||||
Acrobat PDF files with support for arbitrary page breaks etc.
|
||||
|
||||
Version 2.0
|
||||
|
||||
Author: Anand B Pillai <abpillai at gmail dot com>
|
||||
|
||||
"""
|
||||
|
||||
# Derived from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189858
|
||||
|
||||
import sys, os
|
||||
import string
|
||||
import time
|
||||
import optparse
|
||||
import re
|
||||
|
||||
# Extra bytes to add to the tracked output-file position for every '\n'
# written (0 here: a line feed is emitted as a single byte).
LF_EXTRA=0
# Carriage-return character (octal 015, i.e. '\r').
LINE_END='\015'
# form feed character (^L)
FF=chr(12)
ENCODING_STR = """\
|
||||
/Encoding <<
|
||||
/Differences [ 0 /.notdef /.notdef /.notdef /.notdef
|
||||
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
|
||||
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
|
||||
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
|
||||
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
|
||||
/.notdef /.notdef /.notdef /.notdef /space /exclam
|
||||
/quotedbl /numbersign /dollar /percent /ampersand
|
||||
/quoteright /parenleft /parenright /asterisk /plus /comma
|
||||
/hyphen /period /slash /zero /one /two /three /four /five
|
||||
/six /seven /eight /nine /colon /semicolon /less /equal
|
||||
/greater /question /at /A /B /C /D /E /F /G /H /I /J /K /L
|
||||
/M /N /O /P /Q /R /S /T /U /V /W /X /Y /Z /bracketleft
|
||||
/backslash /bracketright /asciicircum /underscore
|
||||
/quoteleft /a /b /c /d /e /f /g /h /i /j /k /l /m /n /o /p
|
||||
/q /r /s /t /u /v /w /x /y /z /braceleft /bar /braceright
|
||||
/asciitilde /.notdef /.notdef /.notdef /.notdef /.notdef
|
||||
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
|
||||
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
|
||||
/dotlessi /grave /acute /circumflex /tilde /macron /breve
|
||||
/dotaccent /dieresis /.notdef /ring /cedilla /.notdef
|
||||
/hungarumlaut /ogonek /caron /space /exclamdown /cent
|
||||
/sterling /currency /yen /brokenbar /section /dieresis
|
||||
/copyright /ordfeminine /guillemotleft /logicalnot /hyphen
|
||||
/registered /macron /degree /plusminus /twosuperior
|
||||
/threesuperior /acute /mu /paragraph /periodcentered
|
||||
/cedilla /onesuperior /ordmasculine /guillemotright
|
||||
/onequarter /onehalf /threequarters /questiondown /Agrave
|
||||
/Aacute /Acircumflex /Atilde /Adieresis /Aring /AE
|
||||
/Ccedilla /Egrave /Eacute /Ecircumflex /Edieresis /Igrave
|
||||
/Iacute /Icircumflex /Idieresis /Eth /Ntilde /Ograve
|
||||
/Oacute /Ocircumflex /Otilde /Odieresis /multiply /Oslash
|
||||
/Ugrave /Uacute /Ucircumflex /Udieresis /Yacute /Thorn
|
||||
/germandbls /agrave /aacute /acircumflex /atilde /adieresis
|
||||
/aring /ae /ccedilla /egrave /eacute /ecircumflex
|
||||
/edieresis /igrave /iacute /icircumflex /idieresis /eth
|
||||
/ntilde /ograve /oacute /ocircumflex /otilde /odieresis
|
||||
/divide /oslash /ugrave /uacute /ucircumflex /udieresis
|
||||
/yacute /thorn /ydieresis ]
|
||||
>>
|
||||
"""
|
||||
|
||||
INTRO="""\
|
||||
%prog [options] filename
|
||||
|
||||
PyText2Pdf makes a 7-bit clean PDF file from any input file.
|
||||
|
||||
It reads from a named file, and writes the PDF file to a file specified by
|
||||
the user, otherwise to a file with '.pdf' appended to the input file.
|
||||
|
||||
Author: Anand B Pillai."""
|
||||
|
||||
|
||||
class PyText2Pdf(object):
|
||||
""" Text2pdf converter in pure Python """
|
||||
|
||||
def __init__(self):
    """Initialize converter state.

    Defaults: single column, 80 cols x 60 lines, 10pt /Courier on a
    612x792pt (US letter) portrait page. parse_args() overrides these
    from the command line before convert() is called.
    """
    # version number
    self._version="1.3"
    # iso encoding flag
    self._IsoEnc=False
    # formfeeds flag (True: treat ^L as a page break trigger)
    self._doFFs=False
    self._progname="PyText2Pdf"
    self._appname = " ".join((self._progname,str(self._version)))
    # default font (PDF name, hence the leading slash)
    self._font="/Courier"
    # default font size (points)
    self._ptSize=10
    # default vert space (leading, points per line)
    self._vertSpace=12
    # lines per page; 0 means "derive from page height" in convert()
    self._lines=0
    # number of characters in a row
    self._cols=80
    self._columns=1
    # page ht (points)
    self._pageHt=792
    # page wd (points)
    self._pageWd=612
    # input file
    self._ifile=""
    # output file; empty means "input name with .pdf suffix"
    self._ofile=""
    # default tab width (spaces per tab)
    self._tab=4
    # input file descriptor
    self._ifs=None
    # output file descriptor
    self._ofs=None
    # landscape flag
    self._landscape=False
    # Subject (PDF /Subject metadata)
    self._subject = ''
    # Author (PDF /Author metadata)
    self._author = ''
    # Keywords (PDF /Keywords metadata)
    self._keywords = []
    # Custom regexp for page breaks (None: break on line count only)
    self._pagebreakre = None

    # marker objects: PDF object numbering starts past the 5 fixed
    # objects written by writeheader()
    self._curobj = 5
    # _pageObs[pageNo] -> object number of that page's /Page object
    self._pageObs = [0]
    # _locations[objNo] -> byte offset of that object in the output
    # file; NOTE(review): presumably consumed by writerest() to emit
    # the xref table (not visible in this view) — confirm
    self._locations = [0,0,0,0,0,0]
    self._pageNo=0

    # file position marker (byte offset of the next write)
    self._fpos=0
def parse_args(self):

    """Parse command-line options into instance state.

    With no arguments at all, injects '-h' so optparse prints the help
    text and exits. Exactly one positional argument (the input file) is
    required. Numeric options are clamped to sane minimums. Ends by
    echoing the effective configuration to stdout.
    """

    # no args at all -> force the help screen
    if len(sys.argv)<2:
        sys.argv.append('-h')

    parser = optparse.OptionParser(usage=INTRO)
    parser.add_option('-o','--output',dest='outfile',help='Direct output to file OUTFILE',metavar='OUTFILE')
    parser.add_option('-f','--font',dest='font',help='Use Postscript font FONT (must be in standard 14, default: Courier)',
                      default='Courier')
    parser.add_option('-I','--isolatin',dest='isolatin',help='Use ISO latin-1 encoding',default=False,action='store_true')
    parser.add_option('-s','--size',dest='fontsize',help='Use font at PTSIZE points (default=>10)',metavar='PTSIZE',default=10)
    parser.add_option('-v','--linespace',dest='linespace',help='Use line spacing LINESPACE (deault 12)',metavar='LINESPACE',default=12)
    parser.add_option('-l','--lines',dest='lines',help='Lines per page (default 60, determined automatically if unspecified)',default=60, metavar=None)
    parser.add_option('-c','--chars',dest='chars',help='Maximum characters per line (default 80)',default=80,metavar=None)
    parser.add_option('-t','--tab',dest='tabspace',help='Spaces per tab character (default 4)',default=4,metavar=None)
    parser.add_option('-F','--ignoreff',dest='formfeed',help='Ignore formfeed character ^L (i.e, accept formfeed characters as pagebreaks)',default=False,action='store_true')
    parser.add_option('-P','--papersize',dest='papersize',help='Set paper size (default is letter, accepted values are "A4" or "A3")')
    parser.add_option('-W','--width',dest='width',help='Independent paper width in points',metavar=None,default=612)
    parser.add_option('-H','--height',dest='height',help='Independent paper height in points',metavar=None,default=792)
    parser.add_option('-2','--twocolumns',dest='twocolumns',help='Format as two columns',metavar=None,default=False,action='store_true')
    parser.add_option('-L','--landscape',dest='landscape',help='Format in landscape mode',metavar=None,default=False,action='store_true')
    parser.add_option('-R','--regexp',dest='pageregexp',help='Regular expression string to determine page breaks (if supplied, this will be used to split text into pages, instead of using line count)',metavar=None)
    parser.add_option('-S','--subject',dest='subject',help='Optional subject for the document',metavar=None)
    parser.add_option('-A','--author',dest='author',help='Optional author for the document',metavar=None)
    parser.add_option('-K','--keywords',dest='keywords',help='Optional list of keywords for the document (separated by commas)',metavar=None)

    optlist, args = parser.parse_args()
    # print optlist.__dict__, args

    # exactly one positional argument: the input file
    if len(args)==0:
        sys.exit('Error: input file argument missing')
    elif len(args)>1:
        sys.exit('Error: Too many arguments')

    self._ifile = args[0]

    # read options through the options object's attribute dict
    d = optlist.__dict__
    if d.get('isolatin'): self._IsoEnc=True
    if d.get('formfeed'): self._doFFs = True
    if d.get('twocolumns'): self._columns = 2
    if d.get('landscape'): self._landscape = True

    # PDF font names carry a leading slash
    self._font = '/' + d.get('font')
    # paper size presets override the point defaults; -W/-H below can
    # override again
    psize = d.get('papersize')
    if psize=='A4':
        self._pageWd=595
        self._pageHt=842
    elif psize=='A3':
        self._pageWd=842
        self._pageHt=1190

    # clamp numeric options to their minimum usable values
    fsize = int(d.get('fontsize'))
    if fsize < 1: fsize = 1
    self._ptSize = fsize

    lspace = int(d.get('linespace'))
    if lspace<1: lspace = 1
    self._vertSpace = lspace

    lines = int(d.get('lines'))
    if lines<1: lines = 1
    self._lines = int(lines)

    chars = int(d.get('chars'))
    if chars<4: chars = 4
    self._cols = chars

    tab = int(d.get('tabspace'))
    if tab<1: tab = 1
    self._tab = tab

    # 72 points = 1 inch: minimum page dimension
    w = int(d.get('width'))
    if w<72: w=72
    self._pageWd = w

    h = int(d.get('height'))
    if h<72: h=72
    self._pageHt = h

    # Very optional args (PDF metadata)
    author = d.get('author')
    if author: self._author = author

    subject = d.get('subject')
    if subject: self._subject = subject

    keywords = d.get('keywords')
    if keywords:
        self._keywords = keywords.split(',')

    # optional page-break pattern, matched against each stripped line
    pagebreak = d.get('pageregexp')
    if pagebreak:
        self._pagebreakre = re.compile(pagebreak, re.UNICODE|re.IGNORECASE)

    outfile = d.get('outfile')
    if outfile: self._ofile = outfile

    # echo the effective configuration
    if self._landscape:
        print 'Landscape option on...'
    if self._columns==2:
        print 'Printing in two columns...'
    if self._doFFs:
        print 'Ignoring form feed character...'
    if self._IsoEnc:
        print 'Using ISO Latin Encoding...'

    print 'Using font',self._font[1:],'size =', self._ptSize
def writestr(self, str):
    """ Write string to output file descriptor.
    All output operations go through this function.
    We keep the current file position also here.

    Returns 0 on success, -1 on an IOError (logged, not raised).
    NOTE: the parameter shadows the builtin str; kept for
    byte-compatibility with the original code.
    """

    # update current file position; the tracked offset feeds the
    # _locations table used for PDF object offsets
    self._fpos += len(str)
    # LF_EXTRA accounts for platforms/configs where a line feed costs
    # extra bytes in the file (0 by default)
    for x in range(0, len(str)):
        if str[x] == '\n':
            self._fpos += LF_EXTRA
    try:
        self._ofs.write(str)
    except IOError, e:
        print e
        return -1

    return 0
def convert(self):
    """ Perform the actual conversion.

    Opens the input text file and the output PDF file (defaulting to
    the input name with a .pdf suffix), then writes header, pages and
    trailer. Exits the process with status 3 if either file cannot be
    opened. Returns 0 on success.
    """

    if self._landscape:
        # swap page width & height
        tmp = self._pageHt
        self._pageHt = self._pageWd
        self._pageWd = tmp

    # derive lines-per-page from the page height when not set
    # explicitly (72pt total top+bottom margin assumed)
    if self._lines==0:
        self._lines = (self._pageHt - 72)/self._vertSpace
        if self._lines < 1:
            self._lines=1

    try:
        self._ifs=open(self._ifile)
    except IOError, (strerror, errno):
        print 'Error: Could not open file to read --->', self._ifile
        sys.exit(3)

    # default output name: input name with its extension swapped to .pdf
    if self._ofile=="":
        self._ofile = os.path.splitext(self._ifile)[0] + '.pdf'

    try:
        # binary mode: PDF byte offsets must not be altered by newline
        # translation
        self._ofs = open(self._ofile, 'wb')
    except IOError, (strerror, errno):
        print 'Error: Could not open file to write --->', self._ofile
        sys.exit(3)

    print 'Input file=>',self._ifile
    print 'Writing pdf file',self._ofile, '...'
    self.writeheader()
    self.writepages()
    self.writerest()

    print 'Wrote file', self._ofile
    self._ifs.close()
    self._ofs.close()
    return 0
def writeheader(self):
    """Write the PDF header.

    Emits the PDF version line plus the fixed leading objects:
    object 1 (document info dictionary), object 2 (/Catalog),
    object 4 (/Font) and object 5 (page /Resources). Object 3, the
    /Pages tree referenced by the catalog, is emitted later.
    Records each object's byte offset in self._locations.
    """

    ws = self.writestr

    # default /Title is the input filename; overridden by -S subject
    title = self._ifile

    # PDF date format: D:YYYYMMDDHHmmSS
    t=time.localtime()
    timestr=str(time.strftime("D:%Y%m%d%H%M%S", t))
    ws("%PDF-1.4\n")
    # object 1: document information dictionary
    self._locations[1] = self._fpos
    ws("1 0 obj\n")
    ws("<<\n")

    buf = "".join(("/Creator (", self._appname, " By Anand B Pillai )\n"))
    ws(buf)
    buf = "".join(("/CreationDate (", timestr, ")\n"))
    ws(buf)
    # \\251 is the copyright sign in PDF string escape notation
    buf = "".join(("/Producer (", self._appname, "(\\251 Anand B Pillai))\n"))
    ws(buf)
    if self._subject:
        title = self._subject
        buf = "".join(("/Subject (",self._subject,")\n"))
        ws(buf)
    if self._author:
        buf = "".join(("/Author (",self._author,")\n"))
        ws(buf)
    if self._keywords:
        buf = "".join(("/Keywords (",' '.join(self._keywords),")\n"))
        ws(buf)

    if title:
        buf = "".join(("/Title (", title, ")\n"))
        ws(buf)

    ws(">>\n")
    ws("endobj\n")

    # object 2: document catalog, pointing at the /Pages tree (obj 3)
    self._locations[2] = self._fpos

    ws("2 0 obj\n")
    ws("<<\n")
    ws("/Type /Catalog\n")
    ws("/Pages 3 0 R\n")
    ws(">>\n")
    ws("endobj\n")

    # object 4: the single Type1 font used for all text
    self._locations[4] = self._fpos
    ws("4 0 obj\n")
    ws("<<\n")
    buf = "".join(("/BaseFont ", str(self._font), " /Encoding /WinAnsiEncoding /Name /F1 /Subtype /Type1 /Type /Font >>\n"))
    ws(buf)

    # optional ISO Latin-1 /Differences table (-I flag)
    if self._IsoEnc:
        ws(ENCODING_STR)

    ws(">>\n")
    ws("endobj\n")

    # object 5: shared page resources (font F1 + procsets)
    self._locations[5] = self._fpos

    ws("5 0 obj\n")
    ws("<<\n")
    ws(" /Font << /F1 4 0 R >>\n")
    ws(" /ProcSet [ /PDF /Text ]\n")
    ws(">>\n")
    ws("endobj\n")
def startpage(self):
    """ Start a page of data.

    Emits the /Page object (allocating a new object number) and opens
    the following content-stream object, writing the begin-text setup
    (font, text matrix, leading). The stream's /Length is deferred to
    an indirect reference resolved by endpage().

    Returns the byte offset of the start of the stream data, which
    endpage() uses to compute the stream length.
    """

    ws = self.writestr

    self._pageNo += 1
    self._curobj += 1

    # record this /Page object's byte offset
    self._locations.append(self._fpos)
    self._locations[self._curobj]=self._fpos

    # remember which object number serves page _pageNo
    self._pageObs.append(self._curobj)
    self._pageObs[self._pageNo] = self._curobj

    buf = "".join((str(self._curobj), " 0 obj\n"))

    ws(buf)
    ws("<<\n")
    ws("/Type /Page\n")
    ws("/Parent 3 0 R\n")
    ws("/Resources 5 0 R\n")

    # next object number is the page's content stream
    self._curobj += 1
    buf = "".join(("/Contents ", str(self._curobj), " 0 R\n"))
    ws(buf)
    ws(">>\n")
    ws("endobj\n")

    # content-stream object starts here
    self._locations.append(self._fpos)
    self._locations[self._curobj] = self._fpos

    buf = "".join((str(self._curobj), " 0 obj\n"))
    ws(buf)
    ws("<<\n")

    # /Length is an indirect reference to the *next* object, which
    # endpage() writes once the stream size is known
    buf = "".join(("/Length ", str(self._curobj + 1), " 0 R\n"))
    ws(buf)
    ws(">>\n")
    ws("stream\n")
    strmPos = self._fpos

    # begin text: select font F1, position the text matrix 40pt below
    # the top edge at x=50, and set the line leading
    ws("BT\n");
    buf = "".join(("/F1 ", str(self._ptSize), " Tf\n"))
    ws(buf)
    buf = "".join(("1 0 0 1 50 ", str(self._pageHt - 40), " Tm\n"))
    ws(buf)
    buf = "".join((str(self._vertSpace), " TL\n"))
    ws(buf)

    return strmPos
def endpage(self, streamStart):
    """Close the page opened by startpage(): end the text object and the
    content stream, then emit the deferred /Length object whose value is
    the stream size in bytes (current offset minus *streamStart*)."""

    write = self.writestr

    write("ET\n")
    streamEnd = self._fpos
    write("endstream\n")
    write("endobj\n")

    # The /Length object promised by startpage() gets the next number.
    self._curobj += 1
    self._locations.append(self._fpos)
    self._locations[self._curobj] = self._fpos

    write("".join((str(self._curobj), " 0 obj\n")))
    write("".join((str(streamEnd - streamStart), '\n')))
    write('endobj\n')
def writepages(self):
    """Write pages as PDF"""

    ws = self.writestr

    # Loop state: current char/line/column counters, tab padding and
    # an end-of-file flag driving the outer page loop.
    beginstream=0
    lineNo, charNo=0,0
    ch, column=0,0
    padding,i=0,0
    atEOF=0
    linebuf = ''

    while not atEOF:
        # One PDF page per iteration; remember where its stream starts.
        beginstream = self.startpage()
        column=1

        while column <= self._columns:
            column += 1
            atFF=0
            atBOP=0
            lineNo=0
            # Special flag for regexp page break
            pagebreak = False

            while lineNo < self._lines and not atFF and not atEOF and not pagebreak:
                linebuf = ''
                lineNo += 1
                # Each text line is a PDF string shown with the ' operator.
                ws("(")
                charNo=0

                while charNo < self._cols:
                    charNo += 1
                    ch = self._ifs.read(1)
                    # Keep reading while not end-of-line, not a form feed
                    # (when FF handling is on) and not EOF.
                    cond = ((ch != '\n') and not(ch==FF and self._doFFs) and (ch != ''))
                    if not cond:
                        # See if this dude matches the pagebreak regexp
                        if self._pagebreakre and self._pagebreakre.search(linebuf.strip()):
                            pagebreak = True

                        linebuf = ''
                        break
                    else:
                        linebuf = linebuf + ch

                    if ord(ch) >= 32 and ord(ch) <= 127:
                        # Escape the PDF string delimiters and backslash.
                        if ch == '(' or ch == ')' or ch == '\\':
                            ws("\\")
                        ws(ch)
                    else:
                        if ord(ch) == 9:
                            # Expand tabs to spaces up to the next tab stop.
                            padding =self._tab - ((charNo - 1) % self._tab)
                            for i in range(padding):
                                ws(" ")
                            charNo += (padding -1)
                        else:
                            if ch != FF:
                                # write \xxx form for dodgy character
                                buf = "".join(('\\', ch))
                                ws(buf)
                            else:
                                # dont print anything for a FF
                                charNo -= 1

                # Close the string and show it on the next text line.
                ws(")'\n")
                if ch == FF:
                    atFF=1
                if lineNo == self._lines:
                    atBOP=1

            if atBOP:
                # Bottom of page: peek ahead to detect EOF / swallow an FF.
                pos=0
                ch = self._ifs.read(1)
                pos= self._ifs.tell()
                if ch == FF:
                    ch = self._ifs.read(1)
                    pos=self._ifs.tell()
                # python's EOF signature
                if ch == '':
                    atEOF=1
                else:
                    # push position back by one char
                    self._ifs.seek(pos-1)

            elif atFF:
                # Form feed ended the column: check for EOF right after it.
                ch = self._ifs.read(1)
                pos=self._ifs.tell()
                if ch == '':
                    atEOF=1
                else:
                    self._ifs.seek(pos-1)

            if column < self._columns:
                # Move the text matrix to the top of the next column.
                buf = "".join(("1 0 0 1 ",
                               str((self._pageWd/2 + 25)),
                               " ",
                               str(self._pageHt - 40),
                               " Tm\n"))
                ws(buf)

        self.endpage(beginstream)
def writerest(self):
    """Finish the file.

    Emits the trailing PDF structures: the /Pages tree (object 3), the
    cross-reference table, the trailer dictionary and the startxref
    pointer, ending with the %%EOF marker.
    """

    ws = self.writestr
    # Object 3 was referenced earlier as the page-tree root; record its
    # real byte offset now that it is about to be written.
    self._locations[3] = self._fpos

    ws("3 0 obj\n")
    ws("<<\n")
    ws("/Type /Pages\n")
    buf = "".join(("/Count ", str(self._pageNo), "\n"))
    ws(buf)
    buf = "".join(("/MediaBox [ 0 0 ", str(self._pageWd), " ", str(self._pageHt), " ]\n"))
    ws(buf)
    ws("/Kids [ ")

    # Reference every page object recorded by startpage().
    for i in range(1, self._pageNo+1):
        buf = "".join((str(self._pageObs[i]), " 0 R "))
        ws(buf)

    ws("]\n")
    ws(">>\n")
    ws("endobj\n")

    # Cross-reference table: one fixed-width entry per object, starting
    # with the mandatory free entry for object 0.
    xref = self._fpos
    ws("xref\n")
    buf = "".join(("0 ", str((self._curobj) + 1), "\n"))
    ws(buf)
    buf = "".join(("0000000000 65535 f ", str(LINE_END)))
    ws(buf)

    for i in range(1, self._curobj + 1):
        val = self._locations[i]
        # BUGFIX: str.zfill replaces string.zfill(), a deprecated helper
        # removed from the standard library in Python 3; the built-in
        # method pads identically and needs no 'string' import.
        buf = "".join((str(val).zfill(10), " 00000 n ", str(LINE_END)))
        ws(buf)

    ws("trailer\n")
    ws("<<\n")
    buf = "".join(("/Size ", str(self._curobj + 1), "\n"))
    ws(buf)
    ws("/Root 2 0 R\n")
    ws("/Info 1 0 R\n")
    ws(">>\n")

    # Byte offset of the xref table, then the end-of-file marker.
    ws("startxref\n")
    buf = "".join((str(xref), "\n"))
    ws(buf)
    ws("%%EOF\n")
def main():
    """Command-line entry point: build the converter, parse the
    arguments and run the text-to-PDF conversion."""
    converter = PyText2Pdf()
    converter.parse_args()
    converter.convert()


if __name__ == "__main__":
    main()
2
bin/report.py
Normal file
2
bin/report.py
Normal file
@@ -0,0 +1,2 @@
|
||||
# Render the Sn1per HTML report to PDF via pdfkit (wkhtmltopdf wrapper).
import pdfkit

# NOTE(review): the input path is hard-coded to the 'hulu' workspace and
# the output lands in the current working directory -- confirm intended.
pdfkit.from_url('/usr/share/sniper/loot/workspace/hulu/sniper-report.html', 'out.pdf')
201
bin/samrdump.py
Normal file
201
bin/samrdump.py
Normal file
@@ -0,0 +1,201 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2003-2015 CORE Security Technologies
|
||||
#
|
||||
# This software is provided under under a slightly modified version
|
||||
# of the Apache Software License. See the accompanying LICENSE file
|
||||
# for more information.
|
||||
#
|
||||
# Description: DCE/RPC SAMR dumper.
|
||||
#
|
||||
# Author:
|
||||
# Javier Kohen <jkohen@coresecurity.com>
|
||||
# Alberto Solino (@agsolino)
|
||||
#
|
||||
# Reference for:
|
||||
# DCE/RPC for SAMR
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import argparse
|
||||
|
||||
from impacket.examples import logger
|
||||
from impacket import version
|
||||
from impacket.nt_errors import STATUS_MORE_ENTRIES
|
||||
from impacket.dcerpc.v5 import transport, samr
|
||||
from impacket.dcerpc.v5.rpcrt import DCERPCException
|
||||
|
||||
|
||||
class ListUsersException(Exception):
    """Raised when the SAMR user-enumeration RPC sequence fails."""
    pass
||||
class SAMRDump:
    """Dump the user list of a remote host over MS-SAMR (DCE/RPC).

    NOTE: this module uses Python 2 syntax (print statements,
    'except X, e') and requires a Python 2 interpreter.
    """

    # Transport string template and default port for each SMB binding.
    KNOWN_PROTOCOLS = {
        '139/SMB': (r'ncacn_np:%s[\pipe\samr]', 139),
        '445/SMB': (r'ncacn_np:%s[\pipe\samr]', 445),
        }


    def __init__(self, protocols = None,
                 username = '', password = '', domain = '', hashes = None, aesKey=None, doKerberos = False):
        # Fall back to trying every known transport when none is given.
        if not protocols:
            self.__protocols = SAMRDump.KNOWN_PROTOCOLS.keys()
        else:
            self.__protocols = [protocols]

        self.__username = username
        self.__password = password
        self.__domain = domain
        self.__lmhash = ''
        self.__nthash = ''
        self.__aesKey = aesKey
        self.__doKerberos = doKerberos
        # hashes is the "LMHASH:NTHASH" pair produced by secretsdump-style tools.
        if hashes is not None:
            self.__lmhash, self.__nthash = hashes.split(':')


    def dump(self, addr):
        """Dumps the list of users and shares registered present at
        addr. Addr is a valid host name or IP address.
        """

        logging.info('Retrieving endpoint list from %s' % addr)

        # Try all requested protocols until one works.
        entries = []
        for protocol in self.__protocols:
            protodef = SAMRDump.KNOWN_PROTOCOLS[protocol]
            port = protodef[1]

            logging.info("Trying protocol %s..." % protocol)
            rpctransport = transport.SMBTransport(addr, port, r'\samr', self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash, self.__aesKey, doKerberos = self.__doKerberos)

            try:
                entries = self.__fetchList(rpctransport)
            except Exception, e:
                # Log and fall through to the next transport.
                logging.critical(str(e))
            else:
                # Got a response. No need for further iterations.
                break

        # Display results.

        for entry in entries:
            (username, uid, user) = entry
            base = "%s (%d)" % (username, uid)
            print base + '/FullName:', user['FullName']
            print base + '/UserComment:', user['UserComment']
            print base + '/PrimaryGroupId:', user['PrimaryGroupId']
            print base + '/BadPasswordCount:', user['BadPasswordCount']
            print base + '/LogonCount:', user['LogonCount']

        if entries:
            num = len(entries)
            if 1 == num:
                logging.info('Received one entry.')
            else:
                logging.info('Received %d entries.' % num)
        else:
            logging.info('No entries received.')


    def __fetchList(self, rpctransport):
        # Enumerate all users of the server's first domain and return
        # (name, rid, USER_ALL_INFORMATION) tuples.
        dce = rpctransport.get_dce_rpc()

        entries = []

        dce.connect()
        dce.bind(samr.MSRPC_UUID_SAMR)

        try:
            resp = samr.hSamrConnect(dce)
            serverHandle = resp['ServerHandle']

            resp = samr.hSamrEnumerateDomainsInSamServer(dce, serverHandle)
            domains = resp['Buffer']['Buffer']

            print 'Found domain(s):'
            for domain in domains:
                print " . %s" % domain['Name']

            # Only the first (non-Builtin) domain is enumerated.
            logging.info("Looking up users in domain %s" % domains[0]['Name'])

            resp = samr.hSamrLookupDomainInSamServer(dce, serverHandle,domains[0]['Name'] )

            resp = samr.hSamrOpenDomain(dce, serverHandle = serverHandle, domainId = resp['DomainId'])
            domainHandle = resp['DomainHandle']

            # The server pages results; keep asking while it reports
            # STATUS_MORE_ENTRIES and resume from EnumerationContext.
            status = STATUS_MORE_ENTRIES
            enumerationContext = 0
            while status == STATUS_MORE_ENTRIES:
                try:
                    resp = samr.hSamrEnumerateUsersInDomain(dce, domainHandle, enumerationContext = enumerationContext)
                except DCERPCException, e:
                    if str(e).find('STATUS_MORE_ENTRIES') < 0:
                        raise
                    # Partial page delivered along with the error.
                    resp = e.get_packet()

                for user in resp['Buffer']['Buffer']:
                    r = samr.hSamrOpenUser(dce, domainHandle, samr.MAXIMUM_ALLOWED, user['RelativeId'])
                    print "Found user: %s, uid = %d" % (user['Name'], user['RelativeId'] )
                    info = samr.hSamrQueryInformationUser2(dce, r['UserHandle'],samr.USER_INFORMATION_CLASS.UserAllInformation)
                    entry = (user['Name'], user['RelativeId'], info['Buffer']['All'])
                    entries.append(entry)
                    samr.hSamrCloseHandle(dce, r['UserHandle'])

                enumerationContext = resp['EnumerationContext']
                status = resp['ErrorCode']

        except ListUsersException, e:
            logging.critical("Error listing users: %s" % e)

        dce.disconnect()

        return entries
|
||||
# Process command-line arguments.
if __name__ == '__main__':
    # Init the example's logger theme
    logger.init()
    print version.BANNER

    parser = argparse.ArgumentParser(add_help = True, description = "This script downloads the list of users for the target system.")

    parser.add_argument('target', action='store', help='[[domain/]username[:password]@]<targetName or address>')
    parser.add_argument('protocol', choices=SAMRDump.KNOWN_PROTOCOLS.keys(), nargs='?', default='445/SMB', help='transport protocol (default 445/SMB)')
    parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')

    group = parser.add_argument_group('authentication')

    group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
    group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
    group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file (KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the ones specified in the command line')
    group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication (128 or 256 bits)')

    # No arguments at all: show usage and bail out.
    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)

    options = parser.parse_args()

    if options.debug is True:
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.INFO)

    import re

    # Split "[[domain/]username[:password]@]target" into its pieces;
    # groups default to '' when absent.
    domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(options.target).groups('')

    if domain is None:
        domain = ''

    # An AES key implies Kerberos authentication.
    if options.aesKey is not None:
        options.k = True

    # Prompt for a password only when no other credential was supplied.
    if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
        from getpass import getpass
        password = getpass("Password:")

    dumper = SAMRDump(options.protocol, username, password, domain, options.hashes, options.aesKey, options.k)
    dumper.dump(address)
17
bin/slack.sh
Normal file
17
bin/slack.sh
Normal file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
# Slack API Integration script for Sn1per
# By @xer0dayz - https://sn1persecurity.com
#
# Usage:
#   slack.sh "<message>"             post a text message via the webhook
#   slack.sh postfile <filename>     upload a file via the Slack files API

# Pull in channel/token/webhook settings; missing files are ignored on purpose.
source /usr/share/sniper/sniper.conf 2> /dev/null
source /root/.sniper.conf 2> /dev/null
source /root/.sniper_api_keys.conf 2> /dev/null

MESSAGE="$1"

if [ "$MESSAGE" == "postfile" ]; then
	FILENAME="$2"
	curl -F "file=@$FILENAME" -F "initial_comment=$FILENAME" -F "channels=$SLACK_CHANNEL" -H "Authorization: Bearer $SLACK_API_TOKEN" https://slack.com/api/files.upload 2> /dev/null > /dev/null
else
	# BUGFIX: quote the webhook URL so an unset or unusual value cannot
	# be word-split or glob-expanded by the shell.
	curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"$MESSAGE\"}" "$SLACK_WEBHOOK_URL" 2> /dev/null > /dev/null
fi
47
bin/waybackrobots.py
Normal file
47
bin/waybackrobots.py
Normal file
@@ -0,0 +1,47 @@
|
||||
import requests
|
||||
import re
|
||||
import sys
|
||||
from multiprocessing.dummy import Pool
|
||||
|
||||
|
||||
def robots(host):
    """Query the Wayback Machine CDX API for archived robots.txt
    snapshots of *host*.

    Returns a list of [timestamp, original_url] pairs; empty when the
    archive has nothing for this host.
    """
    # Build the URL via implicit string concatenation instead of an
    # in-string backslash line continuation: the original form embeds
    # the source line's leading whitespace into the URL if the code is
    # ever re-indented, silently breaking the query.
    url = ('https://web.archive.org/cdx/search/cdx'
           '?url=%s/robots.txt&output=json&fl=timestamp,original'
           '&filter=statuscode:200&collapse=digest' % host)
    r = requests.get(url)
    results = r.json()
    if len(results) == 0:  # might find nothing
        return []
    results.pop(0)  # The first item is ['timestamp', 'original']
    return results
|
||||
def getpaths(snapshot):
    """Download one archived robots.txt snapshot and return the paths
    it mentions (anything starting with '/'); [] when the snapshot does
    not look like a real robots.txt."""
    timestamp, original = snapshot[0], snapshot[1]
    url = 'https://web.archive.org/web/{0}/{1}'.format(timestamp, original)
    body = requests.get(url).text
    # Only bodies containing a Disallow directive are treated as genuine
    # robots.txt content (filters out 404/interstitial pages).
    if 'Disallow:' not in body:
        return []
    return re.findall('/.*', body)
|
||||
|
||||
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('Usage:\n\tpython3 waybackrobots.py <domain-name>')
        sys.exit()

    host = sys.argv[1]

    # All archived robots.txt snapshots for the target host.
    snapshots = robots(host)
    print('Found %s unique results' % len(snapshots))
    if len(snapshots) == 0:
        sys.exit()
    print('This may take some time...')
    # Fetch snapshots concurrently with a small thread pool.
    pool = Pool(4)
    paths = pool.map(getpaths, snapshots)
    # Flatten the per-snapshot lists and de-duplicate the paths.
    unique_paths = set()
    for i in paths:
        unique_paths.update(i)
    filename = '%s-robots.txt' % host
    with open(filename, 'w') as f:
        f.write('\n'.join(unique_paths))
    print('[*] Saved results to %s' % filename)
35
bin/waybackurls.py
Normal file
35
bin/waybackurls.py
Normal file
@@ -0,0 +1,35 @@
|
||||
import requests
|
||||
import sys
|
||||
import json
|
||||
|
||||
|
||||
def waybackurls(host, with_subs):
    """Return the unique archived URLs for *host* from the Wayback
    Machine CDX API; *with_subs* widens the match to subdomains."""
    if with_subs:
        url = 'http://web.archive.org/cdx/search/cdx?url=*.%s/*&output=json&fl=original&collapse=urlkey' % host
    else:
        url = 'http://web.archive.org/cdx/search/cdx?url=%s/*&output=json&fl=original&collapse=urlkey' % host
    response = requests.get(url)
    records = response.json()
    # The first row is the CDX column header (['original']); skip it.
    return records[1:]
|
||||
|
||||
if __name__ == '__main__':
    argc = len(sys.argv)
    if argc < 2:
        print('Usage:\n\tpython3 waybackurls.py <url> <include_subdomains:optional>')
        sys.exit()

    host = sys.argv[1]
    # BUGFIX: any second argument enables subdomain matching.  The
    # original test was `argc > 3`, which can never be true for the
    # documented usage (script + host + flag gives argc == 3), so the
    # optional flag was silently ignored.
    with_subs = argc > 2

    urls = waybackurls(host, with_subs)
    json_urls = json.dumps(urls)
    if urls:
        filename = '%s-waybackurls.json' % host
        with open(filename, 'w') as f:
            f.write(json_urls)
        print('[*] Saved results to %s' % filename)
    else:
        print('[-] Found nothing')
168
bin/webscreenshot.js
Normal file
168
bin/webscreenshot.js
Normal file
@@ -0,0 +1,168 @@
|
||||
/***
|
||||
# This file is part of webscreenshot.
|
||||
#
|
||||
# Copyright (C) 2014, Thomas Debize <tdebize at mail.com>
|
||||
# All rights reserved.
|
||||
#
|
||||
# webscreenshot is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# webscreenshot is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with webscreenshot. If not, see <http://www.gnu.org/licenses/>.
|
||||
***/
|
||||
|
||||
// Factory for a PhantomJS page wrapper.  Returns an object whose
// render(url, file) opens the URL, waits for the network to settle and
// screenshots it, exiting phantom with 0 / 1 / httpAuthErrorCode.
var Page = (function(custom_headers, http_username, http_password) {
    // Viewport size, settle timeouts (ms) and the exit code used to
    // signal an HTTP basic-auth failure back to the Python driver.
    var opts = {
        width: 1200,
        height: 800,
        ajaxTimeout: 400,
        maxTimeout: 800,
        httpAuthErrorCode: 2
    };

    // In-flight resource counter; rendering fires once it drains to 0.
    var requestCount = 0;
    var forceRenderTimeout;
    var ajaxRenderTimeout;

    var page = require('webpage').create();
    page.viewportSize = {
        width: opts.width,
        height: opts.height
    };

    // Desktop Chrome user agent plus HTTP basic-auth credentials.
    page.settings.userAgent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36';
    page.settings.userName = http_username;
    page.settings.password = http_password;

    page.customHeaders = custom_headers;

    // NOTE(review): clears the custom headers as soon as the page is
    // initialized -- presumably so they only apply to the initial
    // request; confirm against the driver's expectations.
    page.onInitialized = function() {
        page.customHeaders = {};
    };
    // Silence confirmation messages and errors
    page.onConfirm = page.onPrompt = page.onError = noop;

    page.onResourceRequested = function(request) {
        requestCount += 1;
        // A new request cancels any pending "network quiet" render.
        clearTimeout(ajaxRenderTimeout);
    };

    page.onResourceReceived = function(response) {
        // Remember a 401 so render() can exit with the dedicated code.
        if (response.stage && response.stage == 'end' && response.status == '401') {
            page.failReason = '401';
        }

        if (!response.stage || response.stage === 'end') {
            requestCount -= 1;
            if (requestCount === 0) {
                // Network went quiet: render after a short settle delay.
                ajaxRenderTimeout = setTimeout(renderAndExit, opts.ajaxTimeout);
            }
        }
    };

    var api = {};

    // Open *url* and screenshot it into *file*; never returns normally
    // (every path ends in phantom.exit).
    api.render = function(url, file) {
        opts.file = file;

        page.open(url, function(status) {
            if (status !== "success") {
                if (page.failReason && page.failReason == '401') {
                    // Specific 401 HTTP code hint
                    phantom.exit(opts.httpAuthErrorCode);
                } else {
                    // All other failures
                    phantom.exit(1);
                }
            } else {
                // Hard cap: render even if requests never fully settle.
                forceRenderTimeout = setTimeout(renderAndExit, opts.maxTimeout);
            }
        });
    };

    function renderAndExit() {
        // Trick to avoid transparent background
        page.evaluate(function() {
            document.body.bgColor = 'white';
        });

        page.render(opts.file);
        phantom.exit(0);
    }

    function noop() {}

    return api;
});
||||
// Parse the key=value style CLI arguments supplied by webscreenshot.py
// and kick off a single capture.
function main() {

    var system = require('system');
    var p_url = new RegExp('url_capture=(.*)');
    var p_outfile = new RegExp('output_file=(.*)');
    var p_header = new RegExp('header=(.*)');

    var p_http_username = new RegExp('http_username=(.*)');
    var http_username = '';

    var p_http_password = new RegExp('http_password=(.*)');
    var http_password = '';

    var temp_custom_headers = {
        // Nullify Accept-Encoding header to disable compression (https://github.com/ariya/phantomjs/issues/10930)
        'Accept-Encoding': ' '
    };

    for(var i = 0; i < system.args.length; i++) {
        if (p_url.test(system.args[i]) === true)
        {
            var URL = p_url.exec(system.args[i])[1];
        }

        if (p_outfile.test(system.args[i]) === true)
        {
            var output_file = p_outfile.exec(system.args[i])[1];
        }

        if (p_http_username.test(system.args[i]) === true)
        {
            http_username = p_http_username.exec(system.args[i])[1];
        }

        if (p_http_password.test(system.args[i]) === true)
        {
            http_password = p_http_password.exec(system.args[i])[1];
        }

        // Repeatable header=<Name: value> arguments accumulate into the
        // custom header map sent with the initial request.
        if (p_header.test(system.args[i]) === true)
        {
            var header = p_header.exec(system.args[i]);
            var p_header_split = header[1].split(': ', 2);
            var header_name = p_header_split[0];
            var header_value = p_header_split[1];

            temp_custom_headers[header_name] = header_value;

        }
    }

    // URL and output file are mandatory; everything else is optional.
    if (typeof(URL) === 'undefined' || URL.length == 0 || typeof(output_file) === 'undefined' || output_file.length == 0) {
        console.log("Usage: phantomjs [options] webscreenshot.js url_capture=<URL> output_file=<output_file.png> [header=<custom header> http_username=<HTTP basic auth username> http_password=<HTTP basic auth password>]");
        console.log('Please specify an URL to capture and an output png filename !');

        phantom.exit(1);
    }
    else {
        var page = Page(temp_custom_headers, http_username, http_password);
        page.render(URL, output_file);
    }
}

main();
||||
432
bin/webscreenshot.py
Normal file
432
bin/webscreenshot.py
Normal file
@@ -0,0 +1,432 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# This file is part of webscreenshot.
|
||||
#
|
||||
# Copyright (C) 2018, Thomas Debize <tdebize at mail.com>
|
||||
# All rights reserved.
|
||||
#
|
||||
# webscreenshot is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# webscreenshot is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with webscreenshot. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import datetime
|
||||
import time
|
||||
import signal
|
||||
import multiprocessing
|
||||
import itertools
|
||||
import shlex
|
||||
import logging
|
||||
import errno
|
||||
|
||||
# Script version
VERSION = '2.2.1'

# OptionParser imports
from optparse import OptionParser
from optparse import OptionGroup

# Options definition
parser = OptionParser(usage="usage: %prog [options] URL")

main_grp = OptionGroup(parser, 'Main parameters')
main_grp.add_option('-i', '--input-file', help = '<INPUT_FILE>: text file containing the target list. Ex: list.txt', nargs = 1)
main_grp.add_option('-o', '--output-directory', help = '<OUTPUT_DIRECTORY> (optional): screenshots output directory (default \'./screenshots/\')', nargs = 1)
main_grp.add_option('-r', '--renderer', help = '<RENDERER> (optional): renderer to use among \'phantomjs\' (legacy but best results), \'chrome\', \'chromium\' (version > 57) (default \'phantomjs\')', choices = ['phantomjs', 'chrome', 'chromium'], default = 'phantomjs', nargs = 1)
main_grp.add_option('-w', '--workers', help = '<WORKERS> (optional): number of parallel execution workers (default 2)', default = 2, nargs = 1)
main_grp.add_option('-v', '--verbosity', help = '<VERBOSITY> (optional): verbosity level, repeat it to increase the level { -v INFO, -vv DEBUG } (default verbosity ERROR)', action = 'count', default = 0)

proc_grp = OptionGroup(parser, 'Input processing parameters')
proc_grp.add_option('-p', '--port', help = '<PORT> (optional): use the specified port for each target in the input list. Ex: -p 80', nargs = 1)
proc_grp.add_option('-s', '--ssl', help = '<SSL> (optional): enforce ssl for every connection', action = 'store_true', default = False)
proc_grp.add_option('-m', '--multiprotocol', help = '<MULTIPROTOCOL> (optional): perform screenshots over HTTP and HTTPS for each target', action = 'store_true', default = False)

http_grp = OptionGroup(parser, 'HTTP parameters')
http_grp.add_option('-c', '--cookie', help = '<COOKIE_STRING> (optional): cookie string to add. Ex: -c "JSESSIONID=1234; YOLO=SWAG"', nargs = 1)
http_grp.add_option('-a', '--header', help = '<HEADER> (optional): custom or additional header. Repeat this option for every header. Ex: -a "Host: localhost" -a "Foo: bar"', action = 'append')

http_grp.add_option('-u', '--http-username', help = '<HTTP_USERNAME> (optional): specify a username for HTTP Basic Authentication.')
http_grp.add_option('-b', '--http-password', help = '<HTTP_PASSWORD> (optional): specify a password for HTTP Basic Authentication.')

conn_grp = OptionGroup(parser, 'Connection parameters')
conn_grp.add_option('-P', '--proxy', help = '<PROXY> (optional): specify a proxy. Ex: -P http://proxy.company.com:8080')
conn_grp.add_option('-A', '--proxy-auth', help = '<PROXY_AUTH> (optional): provides authentication information for the proxy. Ex: -A user:password')
conn_grp.add_option('-T', '--proxy-type', help = '<PROXY_TYPE> (optional): specifies the proxy type, "http" (default), "none" (disable completely), or "socks5". Ex: -T socks')
conn_grp.add_option('-t', '--timeout', help = '<TIMEOUT> (optional): renderer execution timeout in seconds (default 30 sec)', default = 30, nargs = 1)

# NOTE(review): extends the parser's internal option_groups list directly
# instead of calling parser.add_option_group() -- verify that the groups
# still render in --help output as intended.
parser.option_groups.extend([main_grp, proc_grp, http_grp, conn_grp])

# renderer binaries, hoping to find it in a $PATH directory
## Be free to change them to your own full-path location
PHANTOMJS_BIN = 'phantomjs'
CHROME_BIN = 'google-chrome'
CHROMIUM_BIN = 'chromium'

WEBSCREENSHOT_JS = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), './webscreenshot.js'))
# NOTE(review): os.getcwdu() exists only on Python 2 -- this module will
# fail at import time under Python 3; confirm the intended interpreter.
SCREENSHOTS_DIRECTORY = os.path.abspath(os.path.join(os.getcwdu(), './screenshots/'))

# Logger definition: -v count maps to these levels.
LOGLEVELS = {0 : 'ERROR', 1 : 'INFO', 2 : 'DEBUG'}
logger_output = logging.StreamHandler(sys.stdout)
logger_output.setFormatter(logging.Formatter('[%(levelname)s][%(name)s] %(message)s'))

logger_gen = logging.getLogger("General")
logger_gen.addHandler(logger_output)

# Macros: shell_exec() return values and phantomjs' auth exit code.
SHELL_EXECUTION_OK = 0
SHELL_EXECUTION_ERROR = -1
PHANTOMJS_HTTP_AUTH_ERROR_CODE = 2

# Handful patterns: building blocks for the target-line regexes below.
p_ipv4_elementary = '(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})'
p_domain = '[a-z0-9]+([\-\.]{1}[a-z0-9]+)*\.[a-z]{2,6}'
p_port = '\d{0,5}'
p_resource = '(?:/(?P<res>.*))?'

# Accepted input formats for a target line (URI, host[:port], ip[:port],
# or "host port" CSV-style); each exposes named groups consumed later.
full_uri_domain = re.compile('^(?P<protocol>http(?:|s))://(?P<host>%s|%s)(?::(?P<port>%s))?%s$' % (p_domain, p_ipv4_elementary, p_port, p_resource))

fqdn_and_port = re.compile('^(?P<host>%s):(?P<port>%s)%s$' % (p_domain, p_port, p_resource))
fqdn_only = re.compile('^(?P<host>%s)%s$' % (p_domain, p_resource))

ipv4_and_port = re.compile('^(?P<host>%s):(?P<port>%s)%s' % (p_ipv4_elementary, p_port, p_resource))
ipv4_only = re.compile('^(?P<host>%s)%s$' % (p_ipv4_elementary, p_resource))

entry_from_csv = re.compile('^(?P<host>%s|%s)\s+(?P<port>\d+)$' % (p_domain, p_ipv4_elementary))
def init_worker():
    """
    Tell the workers to ignore a global SIGINT interruption
    """
    # Pool workers inherit this handler, so only the parent process
    # reacts to CTRL-C (see kill_em_all).
    signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
def kill_em_all(signal, frame):
    """
    Terminate all processes while capturing a SIGINT from the user
    """
    # Parameters follow the signal-handler signature; note the local
    # name 'signal' shadows the signal module inside this function.
    logger_gen.info('CTRL-C received, exiting')
    sys.exit(0)
|
||||
def shell_exec(url, command, options):
    """
    Execute a shell command following a timeout

    Polls the child every 100ms and kills it once options.timeout
    seconds have elapsed.  Returns SHELL_EXECUTION_OK on a clean exit,
    SHELL_EXECUTION_ERROR on timeout, abnormal exit code or launch
    failure.
    Taken from http://howto.pui.ch/post/37471155682/set-timeout-for-a-shell-command-in-python
    """
    logger_url = logging.getLogger("%s" % url)
    logger_url.setLevel(options.log_level)

    timeout = int(options.timeout)
    start = datetime.datetime.now()

    try:
        p = subprocess.Popen(shlex.split(command), shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        # binaries timeout
        while p.poll() is None:
            time.sleep(0.1)
            now = datetime.datetime.now()
            if (now - start).seconds > timeout:
                logger_url.debug("Shell command PID %s reached the timeout, killing it now" % p.pid)
                logger_url.error("Screenshot somehow failed\n")

                # SIGKILL does not exist on Windows; use SIGTERM there.
                if sys.platform == 'win32':
                    p.send_signal(signal.SIGTERM)
                else:
                    p.send_signal(signal.SIGKILL)

                return SHELL_EXECUTION_ERROR

        retval = p.poll()
        if retval != SHELL_EXECUTION_OK:
            if retval == PHANTOMJS_HTTP_AUTH_ERROR_CODE:
                # HTTP Authentication request
                logger_url.error("HTTP Authentication requested, try to pass credentials with -u and -b options")
            else:
                # Phantomjs general error
                logger_url.error("Shell command PID %s returned an abnormal error code: '%s'" % (p.pid,retval))
                logger_url.error("Screenshot somehow failed\n")

            return SHELL_EXECUTION_ERROR

        else:
            # Phantomjs ok
            logger_url.debug("Shell command PID %s ended normally" % p.pid)
            logger_url.info("Screenshot OK\n")
            return SHELL_EXECUTION_OK

    except Exception as e:
        # BUGFIX: the original accessed e.errno unconditionally, which
        # raises AttributeError for exceptions that carry no errno
        # attribute; getattr() keeps the ENOENT hint without crashing.
        if getattr(e, 'errno', None) == errno.ENOENT:
            logger_url.error('renderer binary could not have been found in your current PATH environment variable, exiting')
        else:
            logger_gen.error('Unknown error: %s, exiting' % e )
        return SHELL_EXECUTION_ERROR
||||
def filter_bad_filename_chars(filename):
    """Turn a URL into a filesystem-safe name.

    Strips the http(s) scheme prefix, then substitutes every character
    outside [word, '-', '_', '.', ' '] with the literal marker '-port'.
    """
    for scheme in ('http://', 'https://'):
        filename = filename.replace(scheme, '')
    return re.sub('[^\w\-_\. ]', '-port', filename)
||||
def extract_all_matched_named_groups(regex, match):
    """
    Return a set of all extractable matched parameters.
    >>> full_uri_domain.groupindex
    {'domain': 1, 'port': 3}
    >>>full_uri_domain.match('http://8.8.8.8:80').group('domain')
    '8.8.8.8'
    >>>extract_all_matched_named_groups() => {'domain': '8.8.8.8', 'port': '80'}

    """
    # Keep only the named groups that actually captured something.
    return {name: match.group(name)
            for name in regex.groupindex
            if match.group(name) is not None}
||||
def entry_format_validator(line):
    """
    Validate the current line against several regexes and return matched parameters (ip, domain, port etc.)
    """
    # Candidate input formats.  The first regex that matches wins, so
    # the iteration order of this dict decides format precedence.
    tab = { 'full_uri_domain' : full_uri_domain,
            'fqdn_only' : fqdn_only,
            'fqdn_and_port' : fqdn_and_port,
            'ipv4_and_port' : ipv4_and_port,
            'ipv4_only' : ipv4_only,
            'entry_from_csv' : entry_from_csv
    }

    for name, regex in tab.items():
        validator = regex.match(line)
        if validator:
            return extract_all_matched_named_groups(regex, validator)
    # Implicitly returns None when no format matched.
|
||||
def parse_targets(options, arguments):
    """
    Parse list and convert each target to valid URI with port(protocol://foobar:port)

    :param options: parsed command-line options (input_file, ssl, port,
                    multiprotocol flags are read here)
    :param arguments: positional command-line arguments, used as the target
                      list when no input file is given
    :return: list of normalized target URIs
    """

    target_list = []

    # Targets come either from a UTF-8 input file (one per line) or from the
    # positional arguments
    if options.input_file != None:
        with open(options.input_file,'rb') as fd_input:
            try:
                # Read as bytes then decode so a non-UTF-8 file is detected
                # explicitly instead of producing mojibake
                lines = [l.decode('utf-8').lstrip().rstrip().strip() for l in fd_input.readlines()]
            except UnicodeDecodeError as e:
                logger_gen.error('Your input file is not UTF-8 encoded, please encode it before using this script')
                sys.exit(0)
    else:
        lines = arguments

    for index, line in enumerate(lines, start=1):
        matches = entry_format_validator(line)

        # pass if line can be recognized as a correct input, or if no 'host' group could be found with all the regexes
        if matches == None or not('host' in matches.keys()):
            # NOTE(review): logger.warn is deprecated in favor of logger.warning
            logger_gen.warn("Line %s '%s' could not have been recognized as a correct input" % (index, line))
            pass
        else:
            host = matches['host']

            # Protocol is 'http' by default, unless ssl is forced
            if options.ssl == True:
                protocol = 'https'
            elif 'protocol' in matches.keys():
                protocol = str(matches['protocol'])
            else:
                protocol = 'http'

            # Port is ('80' for http) or ('443' for https) by default, unless a specific port is supplied
            if options.port != None:
                port = options.port
            elif 'port' in matches.keys():
                port = int(matches['port'])

                # if port is 443, assume protocol is https if is not specified
                protocol = 'https' if port == 443 else protocol
            else:
                port = 443 if protocol == 'https' else 80

            # No resource URI by default
            if 'res' in matches.keys():
                res = str(matches['res'])
            else:
                res = None

            # perform screenshots over HTTP and HTTPS for each target
            if options.multiprotocol:
                # NOTE(review): the multiprotocol branch ignores the forced
                # --ssl/--port options computed above and uses the parsed
                # port (or 80/443) for both schemes — confirm this is intended
                final_uri_http_port = int(matches['port']) if 'port' in matches.keys() else 80
                final_uri_http = '%s://%s:%s' % ('http', host, final_uri_http_port)
                target_list.append(final_uri_http)
                logger_gen.info("'%s' has been formatted as '%s' with supplied overriding options" % (line, final_uri_http))

                final_uri_https_port = int(matches['port']) if 'port' in matches.keys() else 443
                final_uri_https = '%s://%s:%s' % ('https', host, final_uri_https_port)
                target_list.append(final_uri_https)
                logger_gen.info("'%s' has been formatted as '%s' with supplied overriding options" % (line, final_uri_https))

            else:
                final_uri = '%s://%s:%s' % (protocol, host, port)
                # Append the resource path only when one was captured
                final_uri = final_uri + '/%s' % res if res != None else final_uri
                target_list.append(final_uri)

                logger_gen.info("'%s' has been formatted as '%s' with supplied overriding options" % (line, final_uri))

    return target_list
||||
def craft_cmd(url_and_options):
    """
    Craft the correct command with url and options

    :param url_and_options: (url, options) tuple — a single argument so the
                            function can be used with Pool.imap
    :return: (execution_retval, url) tuple; retval is SHELL_EXECUTION_OK or
             SHELL_EXECUTION_ERROR from shell_exec
    """
    global logger_output, PHANTOMJS_BIN, WEBSCREENSHOT_JS, SCREENSHOTS_DIRECTORY, SHELL_EXECUTION_OK, SHELL_EXECUTION_ERROR

    url, options = url_and_options

    # Per-URL logger so each worker's output is tagged with its target
    logger_url = logging.getLogger("%s" % url)
    logger_url.addHandler(logger_output)
    logger_url.setLevel(options.log_level)

    #output_filename = os.path.join(SCREENSHOTS_DIRECTORY, ('%s.png' % filter_bad_filename_chars(url)))
    output_filename = os.path.join(SCREENSHOTS_DIRECTORY, ('%s.jpg' % filter_bad_filename_chars(url)))

    # PhantomJS renderer
    if options.renderer == 'phantomjs':
        # If you ever want to add some voodoo options to the phantomjs command to be executed, that's here right below
        cmd_parameters = [ PHANTOMJS_BIN,
                           '--ignore-ssl-errors true',
                           '--ssl-protocol any',
                           '--ssl-ciphers ALL'
                         ]

        # Plain 'if' statements instead of conditional expressions evaluated
        # only for their append() side effect
        if options.proxy != None:
            cmd_parameters.append("--proxy %s" % options.proxy)
        if options.proxy_auth != None:
            cmd_parameters.append("--proxy-auth %s" % options.proxy_auth)
        if options.proxy_type != None:
            cmd_parameters.append("--proxy-type %s" % options.proxy_type)

        cmd_parameters.append('"%s" url_capture="%s" output_file="%s"' % (WEBSCREENSHOT_JS, url, output_filename))

        # Trailing ';' is stripped so the JS side can re-append headers safely
        if options.cookie != None:
            cmd_parameters.append('header="Cookie: %s"' % options.cookie.rstrip(';'))

        if options.http_username != None:
            cmd_parameters.append('http_username="%s"' % options.http_username)
        if options.http_password != None:
            cmd_parameters.append('http_password="%s"' % options.http_password)

        if options.header:
            for header in options.header:
                cmd_parameters.append('header="%s"' % header.rstrip(';'))

    # Chrome and chromium renderers
    else:
        cmd_parameters = [ CHROME_BIN ] if options.renderer == 'chrome' else [ CHROMIUM_BIN ]
        cmd_parameters += [ '--allow-running-insecure-content',
                            '--ignore-certificate-errors',
                            '--ignore-urlfetcher-cert-requests',
                            '--reduce-security-for-testing',
                            '--no-sandbox',
                            '--headless',
                            '--disable-gpu',
                            '--hide-scrollbars',
                            '--incognito',
                            '-screenshot="%s"' % output_filename,
                            '--window-size=1200,800',
                            '"%s"' % url
                          ]
        if options.proxy != None:
            cmd_parameters.append('--proxy-server="%s"' % options.proxy)

    cmd = " ".join(cmd_parameters)

    logger_url.debug("Shell command to be executed\n'%s'\n" % cmd)

    execution_retval = shell_exec(url, cmd, options)

    return execution_retval, url
||||
def take_screenshot(url_list, options):
    """
    Launch the screenshot workers

    Thanks http://noswap.com/blog/python-multiprocessing-keyboardinterrupt

    :param url_list: list of normalized target URIs (from parse_targets)
    :param options: parsed command-line options (workers count is read here)
    :return: None (results are reported on stdout)
    """
    global SHELL_EXECUTION_OK, SHELL_EXECUTION_ERROR

    screenshot_number = len(url_list)
    print "[+] %s URLs to be screenshot" % screenshot_number

    # init_worker presumably makes children ignore SIGINT so the parent can
    # handle Ctrl-C for the whole pool — TODO confirm (defined elsewhere)
    pool = multiprocessing.Pool(processes=int(options.workers), initializer=init_worker)

    # Python 2 izip lazily pairs each URL with the shared options object;
    # craft_cmd returns (retval, url) for each target
    taken_screenshots = [r for r in pool.imap(func=craft_cmd, iterable=itertools.izip(url_list, itertools.repeat(options)))]

    # Split results into failures (for listing) and a success count
    screenshots_error_url = [url for retval, url in taken_screenshots if retval == SHELL_EXECUTION_ERROR]
    screenshots_error = sum(retval == SHELL_EXECUTION_ERROR for retval, url in taken_screenshots)
    screenshots_ok = int(screenshot_number - screenshots_error)

    print "[+] %s actual URLs screenshot" % screenshots_ok
    print "[+] %s error(s)" % screenshots_error

    # List every URL whose renderer invocation failed
    if screenshots_error != 0:
        for url in screenshots_error_url:
            print "    %s" % url

    return None
||||
def main():
    """
    Entry point: parse CLI options, validate them, prepare the screenshot
    output directory, build the target list and launch the workers.
    """
    global VERSION, SCREENSHOTS_DIRECTORY, LOGLEVELS
    # kill_em_all (defined elsewhere) handles Ctrl-C for the whole pool
    signal.signal(signal.SIGINT, kill_em_all)

    print 'webscreenshot.py version %s\n' % VERSION

    options, arguments = parser.parse_args()

    # Map the -v count to a logging level; reject out-of-range verbosity
    try :
        options.log_level = LOGLEVELS[options.verbosity]
        logger_gen.setLevel(options.log_level)
    except :
        parser.error("Please specify a valid log level")

    # Exactly one target source is allowed: a single URL argument XOR an input file
    if (options.input_file == None and (len(arguments) > 1 or len(arguments) == 0)):
        parser.error('Please specify a valid input file or a valid URL')

    if (options.input_file != None and len(arguments) == 1):
        parser.error('Please specify either an input file or an URL')

    # os.getcwdu() is Python-2-only (unicode cwd)
    if (options.output_directory != None):
        SCREENSHOTS_DIRECTORY = os.path.abspath(os.path.join(os.getcwdu(), options.output_directory))

    logger_gen.debug("Options: %s\n" % options)
    if not os.path.exists(SCREENSHOTS_DIRECTORY):
        logger_gen.info("'%s' does not exist, will then be created" % SCREENSHOTS_DIRECTORY)
        os.makedirs(SCREENSHOTS_DIRECTORY)

    url_list = parse_targets(options, arguments)

    take_screenshot(url_list, options)

    return None

if __name__ == "__main__" :
    main()
||||
510
bin/zap-scan.py
Normal file
510
bin/zap-scan.py
Normal file
@@ -0,0 +1,510 @@
|
||||
#!/usr/bin/env python3

'''
This script aims to be the most generic and the most explicit possible.
It works with OWASP ZAP API Python client.
To use it, you have to load the Python API client module and start ZAP

Before starting this script for the first time: Open ZAP, go to
Tools -> Options -> API -> Generate random Key, copy and paste the key in the
variable "apiKey" of the configuration area

This script is divided into two parts : a configuration area, where you have to
change variables according to your needs, and the part with API calls.

Author : aine-rb on Github, from Sopra Steria - modified for Sn1per by @xer0dayz
'''

import time
from pprint import pprint
from zapv2 import ZAPv2
import sys, getopt

# Target URL comes from the first positional command-line argument.
# NOTE(review): no argument-count check — running without an argument raises
# IndexError; confirm callers always supply the target.
targetURL = str(sys.argv[1])
||||
#######################################
### BEGINNING OF CONFIGURATION AREA ###
#######################################
## The user only needs to change variable values below to make the script
## work according to his/her needs. MANDATORY parameters must not be empty

# MANDATORY. Define the API key generated by ZAP and used to verify actions.
apiKey=''

# MANDATORY. Define the listening address of ZAP instance
localProxy = {"http": "http://127.0.0.1:8081", "https": "http://127.0.0.1:8081"}

# MANDATORY. True to create another ZAP session (overwrite the former if the
# same name already exists), False to use an existing one
isNewSession = True
# MANDATORY. ZAP Session name
sessionName = 'WebgoatSession'

# Define the list of global exclude URL regular expressions. List can be empty.
# The expressions must follow the java.util.regex.Pattern class syntax
# The following example excludes every single URL except http://localhost:8081
globalExcludeUrl = ['^(?:(?!http:\/\/localhost:8081).*).$']

# MANDATORY. Define if an outgoing proxy server is used
useProxyChain = False
# MANDATORY only if useProxyChain is True, ignored otherwise.
# Outgoing proxy address and port
proxyAddress = 'my.corp.proxy'
proxyPort = '8080'
# Define the addresses to skip in case useProxyChain is True. Ignored
# otherwise. List can be empty. (Semicolon-separated string, per ZAP API.)
skipProxyAddresses = ('127.0.0.1;'
                      'localhost')
# MANDATORY only if useProxyChain is True. Ignored otherwise.
# Define if proxy server needs authentication
useProxyChainAuth = False
# MANDATORY only if useProxyChainAuth is True. Ignored otherwise
proxyUsername = ''
proxyPassword = ''
proxyRealm = ''

# MANDATORY. Determine if a proxy script must be loaded. Proxy scripts are
# executed for every request traversing ZAP
useProxyScript = False
# MANDATORY only if useProxyScript is True. Ignored otherwise
proxyScriptName = 'proxyScript.js'
# Script engine values: "Oracle Nashorn" for Javascript,
# "jython" for python, "JSR 223 JRuby Engine" for ruby
proxyScriptEngine = 'Oracle Nashorn'
# Absolute local path
proxyScriptFileName = '/zap/scripts/proxy/proxyScript.js'
proxyScriptDescription = 'This is a description'

# MANDATORY. Determine if context must be configured then used during scans.
# You have to set this parameter to True if you want that ZAP performs scans
# from the point of view of a specific user
useContextForScan = False

# MANDATORY only if useContextForScan is True. Ignored otherwise. Set value to
# True to define a new context. Set value to False to use an existing one.
defineNewContext = False
# MANDATORY only if defineNewContext is True. Ignored otherwise
contextName = 'WebGoat_script-based'
# MANDATORY only if defineNewContext is False. Disregarded otherwise.
# Corresponds to the ID of the context to use
contextId = 0
# Define Context Include URL regular expressions. Ignored if useContextForScan
# is False. You have to put the URL you want to test in this list.
contextIncludeURL = [targetURL + '.*']
# Define Context Exclude URL regular expressions. Ignored if useContextForScan
# is False. List can be empty.
contextExcludeURL = ['http://localhost:8081/WebGoat/j_spring_security_logout',
                     'http://localhost:8081/WebGoat/logout.mvc']

# MANDATORY only if useContextForScan is True. Ignored otherwise. Define the
# session management method for the context. Possible values are:
# "cookieBasedSessionManagement"; "httpAuthSessionManagement"
sessionManagement = 'cookieBasedSessionManagement'

# MANDATORY only if useContextForScan is True. Ignored otherwise. Define
# authentication method for the context. Possible values are:
# "manualAuthentication"; "scriptBasedAuthentication"; "httpAuthentication";
# "formBasedAuthentication"
authMethod = 'scriptBasedAuthentication'

# MANDATORY only if authMethod is set to scriptBasedAuthentication.
# Ignored otherwise
authScriptName = 'TwoStepAuthentication.js'
# Script engine values: Oracle Nashorn for Javascript
# jython for python, JSR 223 JRuby Engine for ruby
authScriptEngine = 'Oracle Nashorn'
# Absolute local path
authScriptFileName = '/zap/scripts/authentication/TwoStepAuthentication.js'
authScriptDescription = 'This is a description'

# MANDATORY only if useContextForScan is True. Ignored otherwise. Each
# name/value pair of authParams are expected to be "x-www-form-urlencoded"
# Here is an example for scriptBasedAuthentication method:
authParams = ('scriptName=' + authScriptName + '&'
              'Submission Form URL=http://localhost:8081/WebGoat/j_spring_security_check&'
              'Username field=username&'
              'Password field=password&'
              'Target URL=http://localhost:8081/WebGoat/welcome.mvc')
## Here is an example for formBasedAuthentication method:
#authParams = ('loginUrl=http://localhost:8081/WebGoat/j_spring_security_check&'
#              'loginRequestData=username%3D%7B%25username%25%7D%26'
#              'password%3D%7B%25password%25%7D')
##Here is an example for httpAuthentication method:
#authParams = ('hostname=http://www.example.com&'
#              'realm=CORP\\administrator&'
#              'port=80')

# MANDATORY only if useContextForScan is True. Ignored otherwise.
# Set the value to True if a loggedin indicator must be used. False if it's a
# logged out indicator that must be used
isLoggedInIndicator = False
# MANDATORY only if useContextForScan is True. Ignored otherwise.
# Define either a loggedin or a loggedout indicator regular expression.
# It allows ZAP to see if the user is always authenticated during scans.
indicatorRegex = '\QLocation: http://localhost:8081/WebGoat/login.mvc\E'


# MANDATORY only if useContextForScan is True. Ignored otherwise.
# Set value to True to create new users, False otherwise
createUser = False
# MANDATORY only if createUser is True. Ignored otherwise. Define the list of
# users, with name and credentials (in x-www-form-urlencoded format)
## Here is an example with the script NashornTwoStepAuthentication.js:
userList = [
    {'name': 'guest', 'credentials': 'Username=guest&Password=guest'},
    {'name': 'webgoat', 'credentials': 'Username=webgoat&Password=webgoat'}
]
## Here is an example with formBasedAuthentication:
#userList = [
#    {'name': 'guest', 'credentials': 'username=guest&password=guest'},
#    {'name': 'webgoat', 'credentials': 'username=webgoat&password=webgoat'}
#]

# MANDATORY only if useContextForScan is True. Ignored otherwise. List can be
# empty. Define the userid list. Created users will be added to this list later
userIdList = []

# MANDATORY. Define the target site to test
#target = 'http://10.0.0.19/'
target = targetURL
# You can specify other URL in order to help ZAP discover more site locations
# List can be empty
applicationURL = ['']

# MANDATORY. Set value to True if you want to customize and use a scan policy
useScanPolicy = False
# MANDATORY only if useScanPolicy is True. Ignored otherwise. Set a policy name
scanPolicyName = 'SQL Injection and XSS'
# MANDATORY only if useScanPolicy is True. Ignored otherwise.
# Set value to True to disable all scan types except the ones set in ascanIds,
# False to enable all scan types except the ones set in ascanIds.
isWhiteListPolicy = False
# MANDATORY only if useScanPolicy is True. Ignored otherwise. Set the scan IDs
# to use with the policy. Other scan types will be disabled if
# isWhiteListPolicy is True, enabled if isWhiteListPolicy is False.
# Use zap.ascan.scanners() to list all ascan IDs.
## In the example below, the first line corresponds to SQL Injection scan IDs,
## the second line corresponds to some XSS scan IDs
ascanIds = [40018, 40019, 40020, 40021, 40022, 40024, 90018,
            40012, 40014, 40016, 40017]
# MANDATORY only if useScanPolicy is True. Ignored otherwise. Set the alert
# Threshold and the attack strength of enabled active scans.
# Currently, possible values are:
# Low, Medium and High for alert Threshold
# Low, Medium, High and Insane for attack strength
alertThreshold = 'Medium'
attackStrength = 'Low'

# MANDATORY. Set True to use Ajax Spider, False otherwise.
useAjaxSpider = True

# MANDATORY. Set True to shutdown ZAP once finished, False otherwise
shutdownOnceFinished = False

#################################
### END OF CONFIGURATION AREA ###
#################################
# All subsequent print/pprint output lands in this report file because stdout
# is redirected here. NOTE(review): path is hard-coded to the Sn1per install
# location — confirm it exists before running this script standalone.
sys.stdout = open("/usr/share/sniper/bin/zap-report.txt", "w")
|
||||
# Connect ZAP API client to the listening address of ZAP instance
zap = ZAPv2(proxies=localProxy, apikey=apiKey)

# Start the ZAP session: create a fresh one or load an existing one by name
core = zap.core
if isNewSession:
    pprint('Create ZAP session: ' + sessionName + ' -> ' +
           core.new_session(name=sessionName, overwrite=True))
else:
    pprint('Load ZAP session: ' + sessionName + ' -> ' +
           core.load_session(name=sessionName))

# Configure ZAP global Exclude URL option
print('Add Global Exclude URL regular expressions:')
for regex in globalExcludeUrl:
    pprint(regex + ' ->' + core.exclude_from_proxy(regex=regex))

# Configure ZAP outgoing proxy server connection option
pprint('Enable outgoing proxy chain: ' + str(useProxyChain) + ' -> ' +
       core.set_option_use_proxy_chain(boolean=useProxyChain))
if useProxyChain:
    pprint('Set outgoing proxy name: ' + proxyAddress + ' -> ' +
           core.set_option_proxy_chain_name(string=proxyAddress))
    pprint('Set outgoing proxy port: ' + proxyPort + ' -> ' +
           core.set_option_proxy_chain_port(integer=proxyPort))
    pprint('Skip names for outgoing proxy: ' + skipProxyAddresses + ' -> ' +
           core.set_option_proxy_chain_skip_name(string=skipProxyAddresses))

# Configure ZAP outgoing proxy server authentication
pprint('Set outgoing proxy chain authentication: ' +
       str(useProxyChainAuth) + ' -> ' +
       core.set_option_use_proxy_chain_auth(boolean=useProxyChainAuth))
if useProxyChainAuth:
    pprint('Set outgoing proxy username -> ' +
           core.set_option_proxy_chain_user_name(string=proxyUsername))
    pprint('Set outgoing proxy password -> ' +
           core.set_option_proxy_chain_password(string=proxyPassword))
    pprint('Set outgoing proxy realm: ' + proxyRealm + ' -> ' +
           core.set_option_proxy_chain_realm(string=proxyRealm))

# Optionally (re)load and enable a proxy script that runs on every request
if useProxyScript:
    script = zap.script
    # Remove any previously loaded copy before reloading
    script.remove(scriptname=proxyScriptName)
    pprint('Load proxy script: ' + proxyScriptName + ' -> ' +
           script.load(scriptname=proxyScriptName, scripttype='proxy',
                       scriptengine=proxyScriptEngine,
                       filename=proxyScriptFileName,
                       scriptdescription=proxyScriptDescription))
    pprint('Enable proxy script: ' + proxyScriptName + ' -> ' +
           script.enable(scriptname=proxyScriptName))
||||
if useContextForScan:
    # Define the ZAP context
    context = zap.context
    if defineNewContext:
        contextId = context.new_context(contextname=contextName)
    # NOTE(review): when defineNewContext is False, contextId is the int 0
    # from the configuration area and this string concatenation would raise
    # TypeError — confirm existing-context usage is exercised.
    pprint('Use context ID: ' + contextId)

    # Include URL in the context
    print('Include URL in context:')
    for url in contextIncludeURL:
        pprint(url + ' -> ' +
               context.include_in_context(contextname=contextName,
                                          regex=url))

    # Exclude URL in the context
    print('Exclude URL from context:')
    for url in contextExcludeURL:
        pprint(url + ' -> ' +
               context.exclude_from_context(contextname=contextName,
                                            regex=url))

    # Setup session management for the context.
    # There is no methodconfigparams to provide for both current methods
    pprint('Set session management method: ' + sessionManagement + ' -> ' +
           zap.sessionManagement.set_session_management_method(
               contextid=contextId, methodname=sessionManagement,
               methodconfigparams=None))

    ## In case we use the scriptBasedAuthentication method, load the script
    if authMethod == 'scriptBasedAuthentication':
        script = zap.script
        # Remove any previously loaded copy before reloading
        script.remove(scriptname=authScriptName)
        pprint('Load script: ' + authScriptName + ' -> ' +
               script.load(scriptname=authScriptName,
                           scripttype='authentication',
                           scriptengine=authScriptEngine,
                           filename=authScriptFileName,
                           scriptdescription=authScriptDescription))

    # Define an authentication method with parameters for the context
    auth = zap.authentication
    pprint('Set authentication method: ' + authMethod + ' -> ' +
           auth.set_authentication_method(contextid=contextId,
                                          authmethodname=authMethod,
                                          authmethodconfigparams=authParams))
    # Define either a loggedin indicator or a loggedout indicator regexp
    # It allows ZAP to see if the user is always authenticated during scans
    if isLoggedInIndicator:
        pprint('Define Loggedin indicator: ' + indicatorRegex + ' -> ' +
               auth.set_logged_in_indicator(contextid=contextId,
                                            loggedinindicatorregex=indicatorRegex))
    else:
        pprint('Define Loggedout indicator: ' + indicatorRegex + ' -> ' +
               auth.set_logged_out_indicator(contextid=contextId,
                                             loggedoutindicatorregex=indicatorRegex))

    # Define the users: create each configured user, remember its ID for the
    # scan phase, and set name/credentials/enabled in one reported call
    users = zap.users
    if createUser:
        for user in userList:
            userName = user.get('name')
            print('Create user ' + userName + ':')
            userId = users.new_user(contextid=contextId, name=userName)
            userIdList.append(userId)
            pprint('User ID: ' + userId + '; username -> ' +
                   users.set_user_name(contextid=contextId, userid=userId,
                                       name=userName) +
                   '; credentials -> ' +
                   users.set_authentication_credentials(contextid=contextId,
                       userid=userId,
                       authcredentialsconfigparams=user.get('credentials')) +
                   '; enabled -> ' +
                   users.set_user_enabled(contextid=contextId, userid=userId,
                                          enabled=True))
||||
# Enable all passive scanners (it's possible to do a more specific policy by
# setting needed scan ID: Use zap.pscan.scanners() to list all passive scanner
# IDs, then use zap.scan.enable_scanners(ids) to enable what you want
pprint('Enable all passive scanners -> ' +
       zap.pscan.enable_all_scanners())

ascan = zap.ascan
# Define if a new scan policy is used
if useScanPolicy:
    # Drop any stale policy with the same name, then recreate it
    ascan.remove_scan_policy(scanpolicyname=scanPolicyName)
    pprint('Add scan policy ' + scanPolicyName + ' -> ' +
           ascan.add_scan_policy(scanpolicyname=scanPolicyName))
    # Policy IDs 0-4 cover the scanner categories exposed by the ZAP API
    for policyId in range(0, 5):
        # Set alert Threshold for all scans
        ascan.set_policy_alert_threshold(id=policyId,
                                         alertthreshold=alertThreshold,
                                         scanpolicyname=scanPolicyName)
        # Set attack strength for all scans
        ascan.set_policy_attack_strength(id=policyId,
                                         attackstrength=attackStrength,
                                         scanpolicyname=scanPolicyName)
    if isWhiteListPolicy:
        # Disable all active scanners in order to enable only what you need
        pprint('Disable all scanners -> ' +
               ascan.disable_all_scanners(scanpolicyname=scanPolicyName))
        # Enable some active scanners
        pprint('Enable given scan IDs -> ' +
               ascan.enable_scanners(ids=ascanIds,
                                     scanpolicyname=scanPolicyName))
    else:
        # Enable all active scanners
        pprint('Enable all scanners -> ' +
               ascan.enable_all_scanners(scanpolicyname=scanPolicyName))
        # Disable some active scanners
        pprint('Disable given scan IDs -> ' +
               ascan.disable_scanners(ids=ascanIds,
                                      scanpolicyname=scanPolicyName))
else:
    print('No custom policy used for scan')
    # None makes later ascan.scan calls fall back to the default policy
    scanPolicyName = None
||||
# Open URL inside ZAP
pprint('Access target URL ' + target)
core.access_url(url=target, followredirects=True)
for url in applicationURL:
    pprint('Access URL ' + url)
    core.access_url(url=url, followredirects=True)
# Give the sites tree a chance to get updated
time.sleep(2)

# Launch Spider, Ajax Spider (if useAjaxSpider is set to true) and
# Active scans, with a context and users or not
forcedUser = zap.forcedUser
spider = zap.spider
ajax = zap.ajaxSpider
scanId = 0
print('Starting Scans on target: ' + target)
if useContextForScan:
    # Run the full spider + scan sequence once per configured user
    for userId in userIdList:
        print('Starting scans with User ID: ' + userId)

        # Spider the target and recursively scan every site node found
        scanId = spider.scan_as_user(contextid=contextId, userid=userId,
            url=target, maxchildren=None, recurse=True, subtreeonly=None)
        print('Start Spider scan with user ID: ' + userId +
              '. Scan ID equals: ' + scanId)
        # Give the spider a chance to start
        time.sleep(2)
        # Poll until the spider reports 100% completion
        while (int(spider.status(scanId)) < 100):
            print('Spider progress: ' + spider.status(scanId) + '%')
            time.sleep(2)
        print('Spider scan for user ID ' + userId + ' completed')

        if useAjaxSpider:
            # Prepare Ajax Spider scan: force all traffic to run as this user
            pprint('Set forced user mode enabled -> ' +
                   forcedUser.set_forced_user_mode_enabled(boolean=True))
            pprint('Set user ID: ' + userId + ' for forced user mode -> ' +
                   forcedUser.set_forced_user(contextid=contextId,
                                              userid=userId))
            # Ajax Spider the target URL
            pprint('Ajax Spider the target with user ID: ' + userId + ' -> ' +
                   ajax.scan(url=target, inscope=None))
            # Give the Ajax spider a chance to start
            time.sleep(10)
            # ajax.status is a property string, polled until 'stopped'
            while (ajax.status != 'stopped'):
                print('Ajax Spider is ' + ajax.status)
                time.sleep(5)
            for url in applicationURL:
                # Ajax Spider every url configured
                pprint('Ajax Spider the URL: ' + url + ' with user ID: ' +
                       userId + ' -> ' +
                       ajax.scan(url=url, inscope=None))
                # Give the Ajax spider a chance to start
                time.sleep(10)
                while (ajax.status != 'stopped'):
                    print('Ajax Spider is ' + ajax.status)
                    time.sleep(5)
            pprint('Set forced user mode disabled -> ' +
                   forcedUser.set_forced_user_mode_enabled(boolean=False))
            print('Ajax Spider scan for user ID ' + userId + ' completed')

        # Launch Active Scan with the configured policy on the target url
        # and recursively scan every site node
        scanId = ascan.scan_as_user(url=target, contextid=contextId,
            userid=userId, recurse=True, scanpolicyname=scanPolicyName,
            method=None, postdata=True)
        print('Start Active Scan with user ID: ' + userId +
              '. Scan ID equals: ' + scanId)
        # Give the scanner a chance to start
        time.sleep(2)
        while (int(ascan.status(scanId)) < 100):
            print('Active Scan progress: ' + ascan.status(scanId) + '%')
            time.sleep(2)
        print('Active Scan for user ID ' + userId + ' completed')

else:
    # Spider the target and recursively scan every site node found
    scanId = spider.scan(url=target, maxchildren=None, recurse=True,
                         contextname=None, subtreeonly=None)
    print('Scan ID equals ' + scanId)
    # Give the Spider a chance to start
    time.sleep(2)
    while (int(spider.status(scanId)) < 100):
        print('Spider progress ' + spider.status(scanId) + '%')
        time.sleep(2)
    print('Spider scan completed')

    if useAjaxSpider:
        # Ajax Spider the target URL
        pprint('Start Ajax Spider -> ' + ajax.scan(url=target, inscope=None))
        # Give the Ajax spider a chance to start
        time.sleep(10)
        while (ajax.status != 'stopped'):
            print('Ajax Spider is ' + ajax.status)
            time.sleep(5)
        for url in applicationURL:
            # Ajax Spider every url configured
            pprint('Ajax Spider the URL: ' + url + ' -> ' +
                   ajax.scan(url=url, inscope=None))
            # Give the Ajax spider a chance to start
            time.sleep(10)
            while (ajax.status != 'stopped'):
                print('Ajax Spider is ' + ajax.status)
                time.sleep(5)
        print('Ajax Spider scan completed')

    # Launch Active scan with the configured policy on the target url and
    # recursively scan every site node
    scanId = zap.ascan.scan(url=target, recurse=True, inscopeonly=None,
                            scanpolicyname=scanPolicyName, method=None, postdata=True)
    print('Start Active scan. Scan ID equals ' + scanId)
    while (int(ascan.status(scanId)) < 100):
        print('Active Scan progress: ' + ascan.status(scanId) + '%')
        time.sleep(5)
    print('Active Scan completed')
||||
# Give the passive scanner a chance to finish
time.sleep(5)

# If you want to retrieve alerts:
## pprint(zap.core.alerts(baseurl=target, start=None, count=None))

# Reports are pprint-ed, so they end up in the zap-report.txt file that
# stdout was redirected to in the configuration area
print('HTML report:')
pprint(core.htmlreport())

# To retrieve ZAP report in XML or HTML format
print('XML report')
pprint(core.xmlreport())

if shutdownOnceFinished:
    # Shutdown ZAP once finished
    pprint('Shutdown ZAP -> ' + core.shutdown())

# Close the report file explicitly
sys.stdout.close()
|
||||
Reference in New Issue
Block a user