diff -urN src.orig/cert_auth.py src/cert_auth.py --- src.orig/cert_auth.py 2014-12-21 18:21:49.000000000 +0100 +++ src/cert_auth.py 2014-12-17 00:13:00.000000000 +0100 @@ -4,6 +4,22 @@ import hashlib import random import time +import logging + + +# from mitmproxy: netlib/certutils.py +import OpenSSL + +DEFAULT_EXP = 62208000 # =24 * 60 * 60 * 720 +# Generated with "openssl dhparam". It's too slow to generate this on startup. +DEFAULT_DHPARAM = """-----BEGIN DH PARAMETERS----- +MIGHAoGBAOdPzMbYgoYfO3YBYauCLRlE8X1XypTiAjoeCFD0qWRx8YUsZ6Sj20W5 +zsfQxlZfKovo3f2MftjkDkbI/C/tDgxoe0ZPbjy5CjdOhkzxn0oTbKTs16Rw8DyK +1LjTR65sQJkJEdgsX8TSi/cicCftJZl9CaZEaObF2bdgSgGK+PezAgEC +-----END DH PARAMETERS-----""" + + + class CertAndKeyContainer(object): """ @@ -41,21 +57,54 @@ this wil create one. Else it will load the CA and Key into self.ca_cert and self.ca_key """ + self.log = logging.getLogger("mallorymain") + + self.exp = DEFAULT_EXP self.store_of_certs = {} if (not os.path.exists("ca/ca.cer")): self.ca_cert, self.ca_pkey = self.ca() self.ca_cert_file = open ("ca/ca.cer","w") - self.ca_cert_file.write(self.ca_cert.as_pem()) + self.ca_cert_file.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, self.ca_cert)) self.ca_cert_file.close() self.ca_key_file = open ("ca/ca.key","w") - self.ca_key_file.write(self.ca_pkey.as_pem(None)) + self.ca_key_file.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, self.ca_pkey)) self.ca_key_file.close() else: - self.ca_cert = M2Crypto.X509.load_cert_string( + self.ca_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, open("ca/ca.cer","r").read()) - self.ca_pkey = M2Crypto.EVP.load_key_string( + self.ca_pkey = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, open("ca/ca.key","r").read()) + # from mitmproxy: netlib/certutils.py + def create_ca(self, o, cn, exp): + key = OpenSSL.crypto.PKey() + key.generate_key(OpenSSL.crypto.TYPE_RSA, 1024) + cert = OpenSSL.crypto.X509() + 
cert.set_serial_number(int(time.time()*10000)) + cert.set_version(2) + cert.get_subject().CN = cn + cert.get_subject().O = o + cert.gmtime_adj_notBefore(-3600*48) + cert.gmtime_adj_notAfter(exp) + cert.set_issuer(cert.get_subject()) + cert.set_pubkey(key) + cert.add_extensions([ + OpenSSL.crypto.X509Extension("basicConstraints", True, + "CA:TRUE"), + OpenSSL.crypto.X509Extension("nsCertType", False, + "sslCA"), + OpenSSL.crypto.X509Extension("extendedKeyUsage", False, + "serverAuth,clientAuth,emailProtection,timeStamping,msCodeInd,msCodeCom,msCTLSign,msSGC,msEFS,nsSGC" + ), + OpenSSL.crypto.X509Extension("keyUsage", True, + "keyCertSign, cRLSign"), + OpenSSL.crypto.X509Extension("subjectKeyIdentifier", False, "hash", + subject=cert), + ]) + cert.sign(key, "sha1") + return cert, key + + def ca(self): """ @@ -63,10 +112,14 @@ It returns the cert as M2Crypt.X509 and key as M2Crypto.EVP.PKEY """ - key = self.generate_rsa_key() - pkey = self.make_pkey(key) - cert = self.make_ca_cert(pkey) - return (cert, pkey) + #key = self.generate_rsa_key() + #pkey = self.make_pkey(key) + #cert = self.make_ca_cert(pkey) + #return (cert, pkey) + # use code from mitmproxy + #return self.create_ca("VeriSign, Inc", "VeriSign Clas 3 Secure Server CA - GG", self.exp); + # after change of ca name (cn, o) the content of src/ca/ has to be removed to trigger regeneration of ca files + return self.create_ca("rssbs security", "rssbs security ca", self.exp); def generate_rsa_key(self): return M2Crypto.RSA.gen_key(1024, M2Crypto.m2.RSA_F4) @@ -97,10 +150,11 @@ name.O = "VeriSign, Inc" name.CN = "VeriSign Clas 3 Secure Server CA - GG" + # This is here becuase the real cert has it however the # cert that we verified to work on iOS does not. I should # try to add it in later. 
- #name.OU = "Verisign Trust Networks" + name.OU = "Verisign Trust Networks" cert = M2Crypto.X509.X509() cert.set_serial_number(1) @@ -126,9 +180,9 @@ # The proper way to make the id extention # Too bad we cant use it because of M2 bug in ubuntu - #modulus = cert.get_pubkey().get_modulus() - #sha_hash = hashlib.sha1(modulus).digest() - #sub_key_id = ":".join(["%02X"%ord(byte) for byte in sha_hash]) + modulus = cert.get_pubkey().get_modulus() + sha_hash = hashlib.sha1(modulus).digest() + sub_key_id = ":".join(["%02X"%ord(byte) for byte in sha_hash]) # Start of the hack to make Ubuntu M2 work # Hard code SKID to match with the sub cert exts @@ -160,6 +214,43 @@ cert.sign(ca_pkey, 'sha1') return cert + # import from mitmproxy netlib/certutils.py + def dummy_cert(self, privkey, cacert, commonname, sans, peer_iss): + """ + Generates a dummy certificate. + + privkey: CA private key + cacert: CA certificate + commonname: Common name for the generated certificate. + sans: A list of Subject Alternate Names. + + Returns cert if operation succeeded, None if not. 
+ """ + ss = [] + for i in sans: + ss.append("DNS: %s"%i) + ss = ", ".join(ss) + + cert = OpenSSL.crypto.X509() + cert.gmtime_adj_notBefore(-3600*48) + cert.gmtime_adj_notAfter(60 * 60 * 24 * 30) + cert.set_issuer(cacert.get_subject()) + cert.get_subject().CN = commonname + self.log.debug("fake cert: cn: %s o:%s" % + (cert.get_subject().CN, cert.get_subject().O)) + cert.set_serial_number(int(time.time()*10000)) + if ss: + cert.set_version(2) + cert.add_extensions([OpenSSL.crypto.X509Extension("subjectAltName", False, ss)]) + cert.set_pubkey(cacert.get_pubkey()) + self.log.debug("cacert pkey bits: %s ############" % self.ca_pkey.bits()) + cert.sign(privkey, "sha1") + return cert + + + + + def cert (self, peer_sub, peer_iss, peer_not_after, peer_not_before, peer_serial): """ @@ -171,12 +262,14 @@ @param peer_serialt: peer cert serial @return cert and key """ - key = self.generate_rsa_key() - peer_key = self.make_pkey(key) - peer_cert = self.make_peer_cert(peer_sub, peer_iss, peer_not_after, - peer_not_before, peer_serial, - peer_key) - return (peer_cert, peer_key) + # from mitmproxy + return (self.dummy_cert(self.ca_pkey, self.ca_cert, peer_sub.CN, [], peer_iss), self.ca_pkey) + #key = self.generate_rsa_key() + #peer_key = self.make_pkey(key) + #peer_cert = self.make_peer_cert(peer_sub, peer_iss, peer_not_after, + # peer_not_before, peer_serial, + # peer_key) + #return (peer_cert, peer_key) def make_peer_cert(self, peer_sub, peer_iss, peer_not_after, peer_not_before, @@ -220,37 +313,56 @@ cert.sign(self.ca_pkey,'sha1') return cert - def get_fake_cert_and_key(self, real_cert): - peer_sub = real_cert.get_subject() + #def get_fake_cert_and_key(self, real_cert): + def get_fake_cert_and_key_helper(self, real_cert, sn=None): + peer_sub = real_cert.get_subject() + if sn != None: + peer_sub.CN = sn peer_iss = real_cert.get_issuer() - peer_not_after = real_cert.get_not_after() - peer_not_before = real_cert.get_not_before() + #peer_not_after = real_cert.get_not_after() + 
#peer_not_before = real_cert.get_not_before() + peer_not_after = real_cert.get_notAfter() + peer_not_before = real_cert.get_notBefore() peer_serial = real_cert.get_serial_number() fake_cert, fake_key = self.cert(peer_sub, peer_iss, peer_not_after, peer_not_before, peer_serial) return (fake_cert, fake_key) - def get_fake_cert_and_key_filename(self, real_cert): - real_cert = M2Crypto.X509.load_cert_der_string(real_cert) - cert_subject = real_cert.get_subject().as_text() + def get_fake_cert_and_key(self, real_cert, sn=None): + #def get_fake_cert_and_key(self, real_cert_string): + if real_cert == None: + return (None, None) + if type(real_cert) != OpenSSL.crypto.X509: + #real_cert = M2Crypto.X509.load_cert_der_string(real_cert) + real_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_ASN1, real_cert) + #cert_subject = real_cert.get_subject().as_text() + if sn == None: + cert_subject = real_cert.get_subject().CN + else: + cert_subject = sn if cert_subject in self.store_of_certs: cert_container = self.store_of_certs[cert_subject] - return cert_container.cert_file_name, cert_container.key_file_name + #return cert_container.cert_file_name, cert_container.key_file_name + return cert_container.cert, cert_container.key else: - fake_cert, fake_key = self.get_fake_cert_and_key(real_cert) + #fake_cert, fake_key = self.get_fake_cert_and_key(real_cert) + fake_cert, fake_key = self.get_fake_cert_and_key_helper(real_cert, sn) temp_cert_file = tempfile.NamedTemporaryFile(delete=False) - temp_cert_file.write(fake_cert.as_text()+'\n'+fake_cert.as_pem()) + #temp_cert_file.write(fake_cert.as_text()+'\n'+fake_cert.as_pem()) + temp_cert_file.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, fake_cert)) temp_cert_file.flush() temp_key_file = tempfile.NamedTemporaryFile(delete=False) - temp_key_file.write(fake_key.as_pem(None)) + #temp_key_file.write(fake_key.as_pem(None)) + temp_key_file.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, 
fake_key)) temp_key_file.flush() self.store_of_certs[cert_subject] = CertAndKeyContainer(fake_cert, fake_key, temp_cert_file.name, temp_key_file.name) - return (temp_cert_file.name, temp_key_file.name) + #return (temp_cert_file.name, temp_key_file.name) + return (fake_cert, fake_key) ca = CertAuth() diff -urN src.orig/certffi.py src/certffi.py --- src.orig/certffi.py 1970-01-01 01:00:00.000000000 +0100 +++ src/certffi.py 2014-12-03 21:08:11.000000000 +0100 @@ -0,0 +1,41 @@ +from __future__ import (absolute_import, print_function, division) +import cffi +import OpenSSL + +xffi = cffi.FFI() +xffi.cdef(""" + struct rsa_meth_st { + int flags; + ...; + }; + struct rsa_st { + int pad; + long version; + struct rsa_meth_st *meth; + ...; + }; +""") +xffi.verify( + """#include """, + extra_compile_args=['-w'] +) + + +def handle(privkey): + new = xffi.new("struct rsa_st*") + newbuf = xffi.buffer(new) + rsa = OpenSSL.SSL._lib.EVP_PKEY_get1_RSA(privkey._pkey) + oldbuf = OpenSSL.SSL._ffi.buffer(rsa) + newbuf[:] = oldbuf[:] + return new + + +def set_flags(privkey, val): + hdl = handle(privkey) + hdl.meth.flags = val + return privkey + + +def get_flags(privkey): + hdl = handle(privkey) + return hdl.meth.flags diff -urN src.orig/cmdlineopts.py src/cmdlineopts.py --- src.orig/cmdlineopts.py 2014-12-21 18:21:49.000000000 +0100 +++ src/cmdlineopts.py 2014-12-01 00:29:01.000000000 +0100 @@ -1,4 +1,5 @@ import optparse +import os class CmdLineOpts(object): def __init__(self): @@ -44,5 +45,12 @@ "by the real destination, port 80 for HTTP, while " \ "the traffic is still sent to the proxy.") + # default db dir + current_dir = os.path.dirname(os.path.realpath(__file__)) + default_datadir = os.path.realpath(current_dir + "/../db/") + + parser.add_option("-D", "--datadir", dest="datadir", + help="Specify the data directory name.", + default=default_datadir) (self.options, self.args) = parser.parse_args() \ Kein Zeilenumbruch am Dateiende. 
diff -urN src.orig/config_if.py src/config_if.py --- src.orig/config_if.py 2014-12-21 18:21:49.000000000 +0100 +++ src/config_if.py 2014-11-30 23:24:16.000000000 +0100 @@ -8,6 +8,7 @@ from ctypes import * import subprocess +from config import MalloryConfig # Structures as defined by: # http://www.kernel.org/doc/man-pages/online/pages/man3/getifaddrs.3.html @@ -41,6 +42,7 @@ self.mitm_interfaces = [] self.outbound_interfaces = [] self.banned_interfaces = ['lo'] + self.saved_iptable_rule_file = "mallory_iptables_backup" def set_interfaces(self, interfaces): self.interfaces = interfaces @@ -117,6 +119,9 @@ self.interfaces = [] self.mitm_interfaces = [] self.outbound_interfaces = [] + + def get_iptables_backup_file(self): + return MalloryConfig.get("datadir") + "/" + self.saved_iptable_rule_file def save(self): """ @@ -128,7 +133,25 @@ a malicious interface name onto your system to sneak into these shell commands you were already in trouble. Probably owned by an APT. """ + + # Do nothing if outbound interface not set + if len(self.outbound_interfaces) < 1: + print "[*] ConfigInterfaces.save: Outbound interface not set" + self.restore() + print "[*] ConfigInterfaces.save: iptables restored" + return False + + # Do nothing if MiTM interfaces is empty + if len(self.get_mitm()) < 1: + print "[*] ConfigInterfaces.save: MiTM interface not set" + self.restore() + print "[*] ConfigInterfaces.save: iptables restored" + return False + cmds = [] + + # save rules + cmds.append("test -e "+self.get_iptables_backup_file()+" || iptables-save > " + self.get_iptables_backup_file()); # Turn on ip_forwarding. 
Linux only cmds.append("echo 1 > /proc/sys/net/ipv4/ip_forward") @@ -149,16 +172,52 @@ ("iptables -t nat -A POSTROUTING -o " "%s -j MASQUERADE") % self.outbound_interfaces[0] ) + # Mallory listen port + port = MalloryConfig.get("listen") + print "[*] ConfigInterfaces: Mallory is listening at TCP/UDP (%s)" % port for interface in self.get_mitm(): cmds.append( ("iptables -t nat -A PREROUTING -j REDIRECT -i " - "%s -p tcp -m tcp --to-ports 20755") % interface) + "%s -p tcp -m tcp --to-ports %s") % (interface, port)) cmds.append( ("iptables -t nat -A PREROUTING -j REDIRECT -i " - "%s -p udp -m udp --to-ports 20755") % interface) + "%s -p udp -m udp --to-ports %s") % (interface, port)) for cmd in cmds: subprocess.call(cmd, shell=True) print cmds + + def restore(self): + + """ + This method restores the previously saved rules of proxy box. + """ + cmds = [] + try: + with open(self.get_iptables_backup_file()): + # Delete all iptables rules and set it to + cmds.append("iptables -F") + cmds.append("iptables -X") + cmds.append("iptables -t nat -F") + cmds.append("iptables -t nat -X") + cmds.append("iptables -t mangle -F") + cmds.append("iptables -t mangle -X") + cmds.append("iptables -P INPUT ACCEPT") + cmds.append("iptables -P FORWARD ACCEPT") + cmds.append("iptables -P OUTPUT ACCEPT") + + # Restore from rules + c = "iptables-restore < "+self.get_iptables_backup_file() + " && rm -f "+self.get_iptables_backup_file() + cmds.append(c) + + for cmd in cmds: + subprocess.call(cmd, shell=True) + + print cmds + print '[*] ConfigInterfaces: restored from %s' % self.get_iptables_backup_file() + except IOError: + print '[*] ConfigInterfaces: %s not found' % self.get_iptables_backup_file() + + def __str__(self): return ("ifs:%s, mitm_ifs:%s, outbound_ifs:%s" % (self.interfaces, self.mitm_interfaces, diff -urN src.orig/config.py src/config.py --- src.orig/config.py 2014-12-21 18:21:49.000000000 +0100 +++ src/config.py 2014-12-03 17:11:58.000000000 +0100 @@ -1,11 +1,12 @@ import logging 
import sys +import json class Config(object): def __init__(self): self.debug = 1 self.http = {} - self.http['flip_images'] = True + self.http['flip_images'] = False def logsetup(self, log): """ @@ -27,4 +28,66 @@ log.setLevel(logging.DEBUG) log.addHandler(console) - log.info("Logging setup complete") \ Kein Zeilenumbruch am Dateiende. + log.info("Logging setup complete") + + +class MalloryConfigObject(): + def __init__(self, dict): + for field in dict.iterkeys(): + setattr(self, field, dict[field]) + + +class MalloryConfigVars(): + vars = ["listen", "dbname", "datadir"] + + def __init__(self): + pass + + @staticmethod + def get_vars(): + return MalloryConfigVars.vars + + +class MalloryConfig(): + debugger_uri = "PYROLOC://127.0.0.1:7766/debugger" + config_protocols_uri = "PYROLOC://127.0.0.1:7766/config_proto" + config_rules_uri = "PYROLOC://127.0.0.1:7766/config_rules" + + def __init__(self): + pass + + @staticmethod + def get_object(): + return MalloryConfigObject(MalloryConfig.get_dict()) + + @staticmethod + def get_dict(): + config = {} + + for field in MalloryConfigVars.get_vars(): + config[field] = MalloryConfig.get(field) + + return config + + @staticmethod + def set_dict(dict): + for field in dict.iterkeys(): + MalloryConfig.set(field, dict[field]) + + @staticmethod + def get_json(): + return json.dumps(MalloryConfig.get_dict()) + + @staticmethod + def set_json(json_string): + dict = json.loads(json_string) + MalloryConfig.set_dict(dict) + + @staticmethod + def get(key): + return getattr(MalloryConfig, key, "") + @staticmethod + def set(key, val): + setattr(MalloryConfig, key, val) + + diff -urN src.orig/debug.py src/debug.py --- src.orig/debug.py 2014-12-21 18:21:49.000000000 +0100 +++ src/debug.py 2014-11-30 22:40:32.000000000 +0100 @@ -6,6 +6,7 @@ import binascii import base64 import pickle +from config import MalloryConfig # import Pyro here @@ -62,6 +63,13 @@ def getdatabase(self): return self.dbname + + def save_config(self, config_json): + 
self.config_json = config_json + + def get_config(self): + return self.config_json + def setdebug(self, state): """ This method updates the state of debugging and notifies all observers, diff -urN src.orig/gui/DbGui.py src/gui/DbGui.py --- src.orig/gui/DbGui.py 2014-12-21 18:21:49.000000000 +0100 +++ src/gui/DbGui.py 2014-11-30 22:43:20.000000000 +0100 @@ -9,7 +9,7 @@ from protocol import http, sslproto - +from config import MalloryConfig FLOWS_QUERY = """ select c.connCount, c.serverIp, c.serverPort, c.clientIp, c.clientPort, @@ -31,7 +31,7 @@ """ def __init__(self, table_dbview, btn_exec_sql, btn_set_flows, text_db_sql, - splitter_db,dbname): + splitter_db): self.table_dbview = table_dbview self.btn_exec_sql = btn_exec_sql self.btn_set_flows = btn_set_flows @@ -60,7 +60,8 @@ self.connect_handlers() - self.db.setDatabaseName("../db/%s" % (dbname)) + self.db.setDatabaseName("%s/%s" % (MalloryConfig.get("datadir"), MalloryConfig.get("dbname"))) + open = self.db.open() def connect_handlers(self): diff -urN src.orig/gui/guimain.py src/gui/guimain.py --- src.orig/gui/guimain.py 2014-12-21 18:21:49.000000000 +0100 +++ src/gui/guimain.py 2014-11-30 23:00:32.000000000 +0100 @@ -24,9 +24,12 @@ import logging import AboutDialog -from config import Config +from config import Config, MalloryConfig + # Pyro imports go here +import config_if + class keyPressEvent(QtCore.QObject): def __init__(self, parent): super(keyPressEvent, self).__init__(parent) @@ -57,10 +60,12 @@ self.rulegui = None self.dbgui = None - debugger_uri = "PYROLOC://127.0.0.1:7766/debugger" - self.remote_debugger = Pyro.core.getProxyForURI(debugger_uri) + #debugger_uri = "PYROLOC://127.0.0.1:7766/debugger" + self.remote_debugger = Pyro.core.getProxyForURI(MalloryConfig.get("debugger_uri")) + j = self.remote_debugger.get_config() + MalloryConfig.set_json(j) - self.main.dbname = self.remote_debugger.getdatabase() + #self.main.dbname = self.remote_debugger.getdatabase() #self.proxy = 
xmlrpclib.ServerProxy("http://localhost:20757") #self.objectproxy = xmlrpclib.ServerProxy("http://localhost:20758") self.curdebugevent = "" @@ -135,7 +140,7 @@ self.updateStatusBar() def updateStatusBar(self): - self.main.statusbar.showMessage("Intercept: %s Autosend: %s Database: %s" % (str(self.main.btnicept.isChecked()), str(self.main.btnauto.isChecked()),self.main.dbname)) + self.main.statusbar.showMessage("Intercept: %s Autosend: %s Database: %s" % (str(self.main.btnicept.isChecked()), str(self.main.btnauto.isChecked()),MalloryConfig.get("dbname"))) def eventFilter(self, object, event): if event.type() == QtCore.QEvent.KeyPress: @@ -242,6 +247,7 @@ def check_for_de(self): print "[*] MalloryGui: Launching event check thread" + errorcnt = 0 eventcnt = 0 while True: try: @@ -294,6 +300,28 @@ print "[*] MalloryGui: check_for_de: exception in de check loop" print sys.exc_info() traceback.print_exc() + errorcnt += 1 + # Sleep every 5 times + if errorcnt == 5: + print "[*] MalloryGui: problems encountered, please check" + errorcnt = 0 + time.sleep(5) + + def closeEvent(self, event): + print "[*] MalloryGui: Closing event begin" + quit_msg = "Are you sure you want to exit the program?" 
+ reply = QtGui.QMessageBox.question(self, 'Message', quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) + + if reply == QtGui.QMessageBox.Yes: + if_cfg = config_if.ConfigInterfaces() + if_cfg.restore() + print "[*] MalloryGui: Closing event Interface Config Restored" + event.accept() + else: + event.ignore() + + print "[*] MalloryGui: Closing event end" + class StreamListDelegate(QtGui.QItemDelegate): def __init__(self, parent, model): @@ -466,7 +494,7 @@ window.main.btndbexec, window.main.btndbflowsq, window.main.textdbsql, - window.main.splitter_db,window.main.dbname) + window.main.splitter_db) window.rulegui = RuleGui.RuleEdit(window.main) diff -urN src.orig/gui/InterfacesGui.py src/gui/InterfacesGui.py --- src.orig/gui/InterfacesGui.py 2014-12-21 18:21:49.000000000 +0100 +++ src/gui/InterfacesGui.py 2014-11-30 22:44:20.000000000 +0100 @@ -1,4 +1,5 @@ from PyQt4 import QtGui, QtCore, Qt +from config import MalloryConfig import config_if diff -urN src.orig/gui/ProtocolsGui.py src/gui/ProtocolsGui.py --- src.orig/gui/ProtocolsGui.py 2014-12-21 18:21:49.000000000 +0100 +++ src/gui/ProtocolsGui.py 2014-11-30 22:47:55.000000000 +0100 @@ -2,6 +2,7 @@ import Pyro.core from protocol import http, sslproto +from config import MalloryConfig class ProtocolsGui(object): """ @@ -19,9 +20,9 @@ self.splitter_proto.setSizes([200, 100]) # Remote protocol configuration object - config_protocols_uri = "PYROLOC://127.0.0.1:7766/config_proto" + #config_protocols_uri = "PYROLOC://127.0.0.1:7766/config_proto" self.remote_proto = \ - Pyro.core.getProxyForURI(config_protocols_uri) + Pyro.core.getProxyForURI(MalloryConfig.get("config_protocols_uri")) # Setup model for table view self.protocols_model = ProtocolsTableModel(self.remote_proto) @@ -179,7 +180,9 @@ if index.column() == PROTO_DEBUG: debuggable = spacing + "No" + spacing - if proto.__class__ == "TcpProtocol": +# if proto.__class__ == "TcpProtocol": + from protocol.base import TcpProtocol, UdpProtocol + if isinstance( proto, 
TcpProtocol ): debuggable = spacing + "Yes" + spacing return debuggable diff -urN src.orig/gui/RuleGui.py src/gui/RuleGui.py --- src.orig/gui/RuleGui.py 2014-12-21 18:21:49.000000000 +0100 +++ src/gui/RuleGui.py 2014-11-30 22:50:09.000000000 +0100 @@ -7,6 +7,7 @@ import Pyro from PyQt4 import QtGui, QtCore +from config import MalloryConfig RULEUP = -1 RULEDOWN = 1 @@ -31,9 +32,9 @@ self.main = main # Remote protocol configuration object - config_rules_uri = "PYROLOC://127.0.0.1:7766/config_rules" + #config_rules_uri = "PYROLOC://127.0.0.1:7766/config_rules" self.remote_rule = \ - Pyro.core.getProxyForURI(config_rules_uri) + Pyro.core.getProxyForURI(MalloryConfig.get("config_rules_uri")) rules = self.remote_rule.get_rules() diff -urN src.orig/malloryevt.py src/malloryevt.py --- src.orig/malloryevt.py 2014-12-21 18:21:50.000000000 +0100 +++ src/malloryevt.py 2014-12-20 11:20:21.000000000 +0100 @@ -2,6 +2,8 @@ # Server Socket Create SSCREATE = 1 +# Server Socket Create +SSCONNECT = 6 # Client Socket Accept CSACCEPT = 2 # Client Socket After Server Socket diff -urN src.orig/mallory.py src/mallory.py --- src.orig/mallory.py 2014-12-21 18:21:50.000000000 +0100 +++ src/mallory.py 2014-12-20 11:37:16.000000000 +0100 @@ -94,14 +94,17 @@ from debug import DebugEvent, Debugger from binascii import hexlify, unhexlify import cert_auth +from config import MalloryConfig # These protocols have no dependencies and are safe to import from protocol import base, dnsp try: # These protocols have dependencies and may not be safe to import - from protocol import sslproto, http, ssh, https +# from protocol import sslproto, http, ssh, https + from protocol import sslproto, ssh, http, https from plugin_managers import http_plugin_manager + from plugin_managers import https_plugin_manager except ImportError: print "ImportError: Trouble importing protocols with dependencies. " \ "Proceeding with minimal protocol support." 
@@ -171,6 +174,7 @@ self.protoinstances = [] self.opts = options.options self.dbname = self.opts.trafficdb + self.datadir = self.opts.datadir self.debugon = False self.debugger = Debugger() self.config_protocols = config_proto.ConfigProtocols() @@ -236,7 +240,27 @@ elif mevt == malloryevt.SSCREATE: protoinst.configure_server_socket() - + def connect_socket(self, mevt, **kwargs): + """ + Private method. Hook for actions after connect of destination socket. + + @param mevt: This is a mallory event from module malloryevt + @param kwargs: keyworded arguments. Currently expects one named + argument, protoinst. The protoinst must be a protocol instance. + @type kwargs: protoinst=L{Protocol} + """ + protoinst = kwargs["protoinst"] + + if not protoinst: + return + + for proto in self.configured_protos: + if mevt in proto.supports: + if proto.serverPort == protoinst.serverPort: + if mevt == malloryevt.SSCONNECT: + protoinst.connect_server_socket() + + def forward(self, protoinst, conndata): """ Internal method for setting up data pumps for sockets. 
@@ -297,9 +321,14 @@ - A new thread for processing the DB Queue is created - The proxy begins listening for incoming connections """ - dbConn = TrafficDb(self.dbname) + dbConn = TrafficDb(self.dbname, self.datadir) self.dbname = dbConn.getDbName() #get the trafficDb being used self.debugger.setdatabase(self.dbname) + + MalloryConfig.set("dbname", self.dbname) + MalloryConfig.set("datadir", self.datadir) + MalloryConfig.set("listen", self.opts.listen) + self.debugger.save_config(MalloryConfig.get_json()) # Kick off a thread for the debugger #thread.start_new_thread(self.debugger.rpcserver, ()) @@ -438,6 +467,7 @@ # Set server sock data in protocol instance then config it protoinst.destination = ssock protoinst.serverPort = sport + protoinst.serverHost = shost self.configure_socket(malloryevt.SSCREATE, protoinst=protoinst) @@ -447,7 +477,10 @@ # Connect the server socket + self.log.debug("Mallory.main: connect %s : %d" % (shost, int(sport))) protoinst.destination.connect((shost, int(sport))) + self.connect_socket(malloryevt.SSCONNECT, + protoinst=protoinst) # Client socket configuration after server socket creation protoinst.source = csock @@ -514,6 +547,7 @@ mallory.add_plugin_manager(http_plugin_manager.HttpPluginManager()) + mallory.add_plugin_manager(https_plugin_manager.HttpsPluginManager()) # Pull in the protocol configured on the command line for use with the # no-transparent option when the proxy is not being used transparently diff -urN src.orig/plugin_managers/http_plugin_manager.py src/plugin_managers/http_plugin_manager.py --- src.orig/plugin_managers/http_plugin_manager.py 2014-12-21 18:21:50.000000000 +0100 +++ src/plugin_managers/http_plugin_manager.py 2014-12-01 00:01:00.000000000 +0100 @@ -5,6 +5,7 @@ from plugin_managers import base from plugin.session_hijack import SessionHijack +from plugin.spb_test import SpbTest from plugin.image_flip import ImageFlip from plugin.image_invert import ImageInvert @@ -28,6 +29,7 @@ #Will support persistent, non 
persistent, plugins plugs = [] plugs.append(SessionHijack ()) + plugs.append(SpbTest()) plugs.append(ImageFlip()) plugs.append(ImageInvert()) diff -urN src.orig/plugin_managers/https_plugin_manager.py src/plugin_managers/https_plugin_manager.py --- src.orig/plugin_managers/https_plugin_manager.py 1970-01-01 01:00:00.000000000 +0100 +++ src/plugin_managers/https_plugin_manager.py 2014-12-07 17:42:19.000000000 +0100 @@ -0,0 +1,42 @@ + +import config + +import thread + +from plugin_managers import base +from plugin.session_hijack import SessionHijack +from plugin.spb_test import SpbTest +#from plugin.image_flip import ImageFlip +#from plugin.image_invert import ImageInvert + +#try: +# from plugin_managers.plugin.edit_object import ObjectEditor +# oedit_imported = True +#except ImportError: +# print "ImportError: Trouble importing object editor. Check for twisted" \ +# " dependency (did it get installed?)" +# oedit_imported = False + +class HttpsPluginManager (base.Base): + def __init__(self, rules = [], config = config.Config()): + base.Base.__init__(self) + self.server_port = 443 + self.plugin_config() + + + def plugin_config (self): + #Make this more generic + #Will support persistent, non persistent, plugins + plugs = [] + plugs.append(SessionHijack ()) + plugs.append(SpbTest()) + #plugs.append(ImageFlip()) + #plugs.append(ImageInvert()) + + #if oedit_imported: + # plugs.append(ObjectEditor()) + #plugs.append(edit_object.ObjectEditor()) + for plug in plugs: + if plug.persistent == 1: + thread.start_new_thread (plug.runp, ()) + self.plugins.append(plug) diff -urN src.orig/plugin_managers/plugin/image_flip.py src/plugin_managers/plugin/image_flip.py --- src.orig/plugin_managers/plugin/image_flip.py 2014-12-21 18:21:50.000000000 +0100 +++ src/plugin_managers/plugin/image_flip.py 2014-12-01 00:50:15.000000000 +0100 @@ -15,7 +15,8 @@ for header in response.msg.headers: if "Content-Type" in header and "image" in header: try: - response.clean_body = 
self.flip_image(response.clean_body) + self.log.debug ("===================== flip NOT ACTIVE ===========================") + #response.clean_body = self.flip_image(response.clean_body) except: pass kwargs['data'] = response diff -urN src.orig/plugin_managers/plugin/image_invert.py src/plugin_managers/plugin/image_invert.py --- src.orig/plugin_managers/plugin/image_invert.py 2014-12-21 18:21:50.000000000 +0100 +++ src/plugin_managers/plugin/image_invert.py 2014-12-01 00:49:12.000000000 +0100 @@ -16,7 +16,8 @@ for header in response.msg.headers: if "Content-Type" in header and "image" in header: try: - response.clean_body = self.invert_image(response.clean_body) + self.log.debug ("====== NOT ACTIVE ==============") + #response.clean_body = self.invert_image(response.clean_body) except: pass kwargs['data'] = response diff -urN src.orig/plugin_managers/plugin/spb_test.py src/plugin_managers/plugin/spb_test.py --- src.orig/plugin_managers/plugin/spb_test.py 1970-01-01 01:00:00.000000000 +0100 +++ src/plugin_managers/plugin/spb_test.py 2014-12-07 17:57:25.000000000 +0100 @@ -0,0 +1,32 @@ +import config +import re +import StringIO +from base import Base + +class SpbTest (Base): + def __init__(self, rules = [], config = config.Config()): + Base.__init__(self) + self.persistent = 0 + + def do (self, **kwargs): + #Check to see if this came of the dameon we expected + self.log.debug ("BBAAAAAAAAAAAAAAAA %s" % kwargs['event']) + if kwargs['event'] == "HTTP:s2c": + response = kwargs['data'] + for header in response.msg.headers: + if "Content-Type" in header and "text/html" in header: + try: + self.log.debug ("BBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") + response.clean_body = self.change_text(response.clean_body) + except: + pass + kwargs['data'] = response + return kwargs + + def change_text(self, textin): + # string prefixed with r: raw string: do not process \ + outstr = re.sub(r"(9)(ab)", r"\2\1", textin) + # trailing argument 2: only replace 2 occurences: + 
outstr = re.sub("2010", "2110", outstr, 2) + outstr = re.sub("Ostheimer", "Ortheimer", outstr, 2) + return outstr \ Kein Zeilenumbruch am Dateiende. diff -urN src.orig/protocol/base.py src/protocol/base.py --- src.orig/protocol/base.py 2014-12-21 18:21:50.000000000 +0100 +++ src/protocol/base.py 2014-12-21 10:57:34.000000000 +0100 @@ -20,6 +20,9 @@ from debug import DebugEvent from binascii import hexlify, unhexlify, crc32 +#from OpenSSL import SSL +#from errno import EAGAIN, EWOULDBLOCK + class ConnData(): """This class encapsulates all of the information about a connection it is mostly designed to be a data holding class and provide convenience @@ -595,6 +598,7 @@ return msgCnt = msgCnt+1 + #################################### ### Rules Processing diff -urN src.orig/protocol/http.py src/protocol/http.py --- src.orig/protocol/http.py 1970-01-01 01:00:00.000000000 +0100 +++ src/protocol/http.py 2014-12-21 11:26:21.000000000 +0100 @@ -0,0 +1,1180 @@ +from array import array +from base import TcpProtocol +from sys import py3kwarning +from urlparse import urlsplit + +import socket +import select +import urlparse +import warnings +import zlib +import gzip +import time +import Image +import sys +import StringIO +import logging +import traceback +import cgi + +from OpenSSL import SSL + +import malloryevt + + +with warnings.catch_warnings(): + if py3kwarning: + warnings.filterwarnings("ignore", ".*mimetools has been removed", + DeprecationWarning) + import mimetools + +#try: +# from cStringIO import StringIO +#except ImportError: +# from StringIO import StringIO + + +HTTP_PORT = 80 +HTTPS_PORT = 443 + +_UNKNOWN = 'UNKNOWN' + +# connection states +_CS_IDLE = 'Idle' +_CS_REQ_STARTED = 'Request-started' +_CS_REQ_SENT = 'Request-sent' + +# status codes +# informational +CONTINUE = 100 +SWITCHING_PROTOCOLS = 101 +PROCESSING = 102 + +# successful +OK = 200 +CREATED = 201 +ACCEPTED = 202 +NON_AUTHORITATIVE_INFORMATION = 203 +NO_CONTENT = 204 +RESET_CONTENT = 205 +PARTIAL_CONTENT 
= 206 +MULTI_STATUS = 207 +IM_USED = 226 + +# redirection +MULTIPLE_CHOICES = 300 +MOVED_PERMANENTLY = 301 +FOUND = 302 +SEE_OTHER = 303 +NOT_MODIFIED = 304 +USE_PROXY = 305 +TEMPORARY_REDIRECT = 307 + +# client error +BAD_REQUEST = 400 +UNAUTHORIZED = 401 +PAYMENT_REQUIRED = 402 +FORBIDDEN = 403 +NOT_FOUND = 404 +METHOD_NOT_ALLOWED = 405 +NOT_ACCEPTABLE = 406 +PROXY_AUTHENTICATION_REQUIRED = 407 +REQUEST_TIMEOUT = 408 +CONFLICT = 409 +GONE = 410 +LENGTH_REQUIRED = 411 +PRECONDITION_FAILED = 412 +REQUEST_ENTITY_TOO_LARGE = 413 +REQUEST_URI_TOO_LONG = 414 +UNSUPPORTED_MEDIA_TYPE = 415 +REQUESTED_RANGE_NOT_SATISFIABLE = 416 +EXPECTATION_FAILED = 417 +UNPROCESSABLE_ENTITY = 422 +LOCKED = 423 +FAILED_DEPENDENCY = 424 +UPGRADE_REQUIRED = 426 + +# server error +INTERNAL_SERVER_ERROR = 500 +NOT_IMPLEMENTED = 501 +BAD_GATEWAY = 502 +SERVICE_UNAVAILABLE = 503 +GATEWAY_TIMEOUT = 504 +HTTP_VERSION_NOT_SUPPORTED = 505 +INSUFFICIENT_STORAGE = 507 +NOT_EXTENDED = 510 + +MAXAMOUNT = 1024*1024 + +# Mapping status codes to official W3C names +responses = { + 100: 'Continue', + 101: 'Switching Protocols', + + 200: 'OK', + 201: 'Created', + 202: 'Accepted', + 203: 'Non-Authoritative Information', + 204: 'No Content', + 205: 'Reset Content', + 206: 'Partial Content', + + 300: 'Multiple Choices', + 301: 'Moved Permanently', + 302: 'Found', + 303: 'See Other', + 304: 'Not Modified', + 305: 'Use Proxy', + 306: '(Unused)', + 307: 'Temporary Redirect', + + 400: 'Bad Request', + 401: 'Unauthorized', + 402: 'Payment Required', + 403: 'Forbidden', + 404: 'Not Found', + 405: 'Method Not Allowed', + 406: 'Not Acceptable', + 407: 'Proxy Authentication Required', + 408: 'Request Timeout', + 409: 'Conflict', + 410: 'Gone', + 411: 'Length Required', + 412: 'Precondition Failed', + 413: 'Request Entity Too Large', + 414: 'Request-URI Too Long', + 415: 'Unsupported Media Type', + 416: 'Requested Range Not Satisfiable', + 417: 'Expectation Failed', + + 500: 'Internal Server Error', + 501: 'Not 
Implemented', + 502: 'Bad Gateway', + 503: 'Service Unavailable', + 504: 'Gateway Timeout', + 505: 'HTTP Version Not Supported', +} + +# from mitmproxy: helper classes for https: ssl socket does not support .makefile (=file descriptor for socket) +class NetLibError(Exception): pass +class NetLibDisconnect(NetLibError): pass +class NetLibTimeout(NetLibError): pass +class NetLibSSLError(NetLibError): pass + +def close_socket(sock): + """ + Does a hard close of a socket, without emitting a RST. + """ + try: + # We already indicate that we close our end. + # If we close RD, any further received bytes would result in a RST being set, which we want to avoid + # for our purposes + sock.shutdown(socket.SHUT_WR) # may raise "Transport endpoint is not connected" on Linux + + # Section 4.2.2.13 of RFC 1122 tells us that a close() with any + # pending readable data could lead to an immediate RST being sent (which is the case on Windows). + # http://ia600609.us.archive.org/22/items/TheUltimateSo_lingerPageOrWhyIsMyTcpNotReliable/the-ultimate-so_linger-page-or-why-is-my-tcp-not-reliable.html + # + # However, we cannot rely on the shutdown()-followed-by-read()-eof technique proposed by the page above: + # Some remote machines just don't send a TCP FIN, which would leave us in the unfortunate situation that + # recv() would block infinitely. + # As a workaround, we set a timeout here even if we are in blocking mode. + # Please let us know if you have a better solution to this problem. + + sock.settimeout(sock.gettimeout() or 20) + # may raise a timeout/disconnect exception. + while sock.recv(4096): # pragma: no cover + pass + + except socket.error: + pass + + sock.close() + + +class _FileLike: + BLOCKSIZE = 1024 * 32 + def __init__(self, o): + self.o = o + self._log = None + self.first_byte_timestamp = None + + def set_descriptor(self, o): + self.o = o + + def __getattr__(self, attr): + return getattr(self.o, attr) + + def start_log(self): + """ + Starts or resets the log. 
+ + This will store all bytes read or written. + """ + self._log = [] + + def stop_log(self): + """ + Stops the log. + """ + self._log = None + + def is_logging(self): + return self._log is not None + + def get_log(self): + """ + Returns the log as a string. + """ + if not self.is_logging(): + raise ValueError("Not logging!") + return "".join(self._log) + + def add_log(self, v): + if self.is_logging(): + self._log.append(v) + + def reset_timestamps(self): + self.first_byte_timestamp = None + + +class Writer(_FileLike): + def flush(self): + """ + May raise NetLibDisconnect + """ + if hasattr(self.o, "flush"): + try: + self.o.flush() + except (socket.error, IOError), v: + raise NetLibDisconnect(str(v)) + + def write(self, v): + """ + May raise NetLibDisconnect + """ + if v: + try: + if hasattr(self.o, "sendall"): + self.add_log(v) + return self.o.sendall(v) + else: + r = self.o.write(v) + self.add_log(v[:r]) + return r + except (SSL.Error, socket.error), v: + raise NetLibDisconnect(str(v)) + + +class Reader(_FileLike): + def read(self, length): + """ + If length is -1, we read until connection closes. 
+ """ + result = '' + start = time.time() + while length == -1 or length > 0: + if length == -1 or length > self.BLOCKSIZE: + rlen = self.BLOCKSIZE + else: + rlen = length + try: + data = self.o.read(rlen) + except SSL.ZeroReturnError: + break + except SSL.WantReadError: + if (time.time() - start) < self.o.gettimeout(): + time.sleep(0.1) + continue + else: + raise NetLibTimeout + except socket.timeout: + raise NetLibTimeout + except socket.error: + raise NetLibDisconnect + except SSL.SysCallError as e: + if e.args == (-1, 'Unexpected EOF'): + break + raise NetLibDisconnect + except SSL.Error, v: + raise NetLibSSLError(v.message) + self.first_byte_timestamp = self.first_byte_timestamp or time.time() + if not data: + break + result += data + if length != -1: + length -= len(data) + self.add_log(result) + return result + + def readline(self, size = None): + result = '' + bytes_read = 0 + while True: + if size is not None and bytes_read >= size: + break + try: + ch = self.read(1) + except NetLibDisconnect: + break + bytes_read += 1 + if not ch: + break + else: + result += ch + if ch == '\n': + break + return result + + + + +class HTTP(TcpProtocol): + """ + This module interprets HTTP responses and gives an object oriented + interface to an HTTP response. There is an external dependency on + the Python Imaging Library (PIL). However, it is wrapped in exception + handling code that will not prevent the module from operating properly. + If the dependency is not met it will, however, generate a lot of errors. 
+ """ + def __init__(self, trafficdb, source, destination): + TcpProtocol.__init__(self, trafficdb, source, destination) + self.friendly_name = "HTTP" + self.serverPort = 80 + self.log = logging.getLogger("mallorymain") + self.log.info("HTTP protocol handler initialized") + self.supports = {malloryevt.STARTS2C:True, malloryevt.STARTC2S:True} + + + + def forward_c2s(self, conndata): + try: + self.log.debug("HTTP: starting http request") + request = HTTPRequest(self.source) + request.begin() + #RAJRAJRAJ + #This is where the first notify for plugs are... need to be able to + #distinguish between blogking and non blocking plugsin.. + #The events will represent where in proto notify was sent + #Each proto will have diff hooks points. + #STill need to think about blocking and non blocking problem + if self.plugin_manager != None: + request = self.plugin_manager.process(event="HTTP:c2s", data=request) + + if self.config.debug > 1: + self.log.debug("HTTP: c2s: Request: \r\n%s" % (repr(str(request)))) + self.log.debug("HTTP: c2s: Request: \r\n%s" % (str(request.headers))) + + str_request = str(request) + self.trafficdb.qFlow.put((conndata.conncount, \ + conndata.direction, 0, time.time(), repr(str_request))) + self.destination.sendall(str(request)) + + except: + traceback.print_exc() + + def forward_s2c(self, conndata): + """ + This method is intended to interpret HTTP responses from the victim's + server. It encapsulates the HTTP response in an object and provides + a high level interface to manipulate HTTP content. 
+ """ + self.log.debug("HTTP: starting s2c") + response = HTTPResponse(self.destination) + + response.begin() + responsebody = response.read() + response.dirty_body = responsebody + + #isimage = False + isgzip = False + isdeflate = False + remove_header = [] + + # Process headers + for header in response.msg.headers: + + # Get rid of this to avoid having to do math + if "Content-Length" in header: + remove_header.append(header) + + # Clean up and deal with compression + if "Content-Encoding" in header: + encoding = response.msg.getheader("content-encoding") + + # Content will not be compressed + if encoding == "gzip": + isgzip = True + remove_header.append(header) + if encoding == "deflate": + isdeflate = True + + # Content will not be chunked + if "Transfer-Encoding" in header or "Transfer-encoding" in header: + + remove_header.append(header) + + # Take out headers that are not needed + for header in remove_header: + response.msg.headers.remove(header) + + remove_header = [] + + if isgzip: + decompress = gzip.GzipFile("", "rb", 9, \ + StringIO.StringIO(responsebody)) + responsebody = decompress.read() + + response.clean_body = responsebody + + if self.plugin_manager != None: + response = self.plugin_manager.process(event="HTTP:s2c", data=response) + + + + # Push the response off to the victim + str_status = responses[response.status] + + # HTTP status line + responseline = "HTTP/1.1 %d %s\r\n" % \ + (response.status, str_status) + self.source.sendall(responseline) + + # HTTP Headers + self.source.sendall(str(response.msg) + "\r\n") + + # Some response types will not have a body + if len(response.clean_body) > 0: + self.source.sendall(response.clean_body) + + + # Make sure nothing bad has happened reading the response + assert response.will_close != 'UNKNOWN' + + str_response = responseline + str(response.msg) + "\r\n" + responsebody + + self.trafficdb.qFlow.put((conndata.conncount, \ + conndata.direction, 0, time.time(), repr(str_response))) + + 
self.log.debug("HTTP.forward_s2c(): cc:%s dir:%s mc:%s time:%s bytes:%d" \ + " peek:%s" % (conndata.conncount, conndata.direction, + 1, time.time(), len(str_response), repr(str_response[0:24]))) + #self.log.debug("HTTP.forward_s2c: %s" %(str_response)) + if type(self.destination) != SSL.Connection: + #self.destination.shutdown(socket.SHUT_RD) + close_socket(self.destination) + else: + close_socket(self.destination._socket) + if type(self.source) != SSL.Connection: + #self.source.shutdown(socket.SHUT_WR) + close_socket(self.source) + else: + close_socket(self.source._socket) + + + def flip_image(self, imagein): + outstr = "" + outfile = StringIO.StringIO(outstr) + img = Image.open(StringIO.StringIO(imagein)) + out = img.transpose(Image.ROTATE_180) + out.save(outfile, img.format) + return outfile.getvalue() + +class HTTPException(Exception): + # Subclasses that define an __init__ must call Exception.__init__ + # or define self.args. Otherwise, str() will fail. + pass + +class ImproperConnectionState(HTTPException): + pass + +class CannotSendRequest(ImproperConnectionState): + pass + +class CannotSendHeader(ImproperConnectionState): + pass + +class ResponseNotReady(ImproperConnectionState): + pass + +class BadStatusLine(HTTPException): + def __init__(self, line): + self.args = line, + self.line = line + + +class IncompleteRead(HTTPException): + def __init__(self, partial, expected=None): + self.args = partial, + self.partial = partial + self.expected = expected + def __repr__(self): + if self.expected is not None: + e = ', %i more expected' % self.expected + else: + e = '' + return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e) + def __str__(self): + return repr(self) + + +class HTTPRequest: + def __init__(self, sock): + self.msg = None + self.sock = sock + if type(self.sock) !=SSL.Connection: + self.fp = self.sock.makefile('rb', 0) + else: + self.fp = Reader(self.sock) + #self.fp = sock.makefile('rb', 0) + self.command = "" + self.path = "" + 
return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+ if message is None: + message = short + explain = long + self.log_error("code %d, message %s", code, message) + # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201) + content = (self.error_message_format % + {'code': code, 'message': self._quote_html(message), 'explain': explain}) + self.send_response(code, message) + self.send_header("Content-Type", self.error_content_type) + self.send_header('Connection', 'close') + self.end_headers() + if self.command != 'HEAD' and code >= 200 and code not in (204, 304): + self.wfile.write(content) + + + def begin(self): + self.raw_requestline = self.fp.readline() + self.parse_request() + self.log.info("HTTPRequest: %s : %s : %s" % \ + (self.request_version, self.command, self.path)) + + def parse_request(self): + """Parse a request (internal). + + The request should be stored in self.raw_requestline; the results + are in self.command, self.path, self.request_version and + self.headers. + + Return True for success, False for failure; on failure, an + error is sent back. + + """ + self.command = None # set in case of error on the first line + self.request_version = version = self.default_request_version + self.close_connection = 1 + requestline = self.raw_requestline + if requestline[-2:] == '\r\n': + requestline = requestline[:-2] + elif requestline[-1:] == '\n': + requestline = requestline[:-1] + self.requestline = requestline + words = requestline.split() + if len(words) == 3: + [command, path, version] = words + if version[:5] != 'HTTP/': + self.send_error(400, "Bad request version (%r)" % version) + return False + try: + base_version_number = version.split('/', 1)[1] + version_number = base_version_number.split(".") + # RFC 2145 section 3.1 says there can be only one "." and + # - major and minor numbers MUST be treated as + # separate integers; + # - HTTP/2.4 is a lower version than HTTP/2.13, which in + # turn is lower than HTTP/12.3; + # - Leading zeros MUST be ignored by recipients. 
+ if len(version_number) != 2: + raise ValueError + version_number = int(version_number[0]), int(version_number[1]) + self.request_version = version_number + except (ValueError, IndexError): + self.send_error(400, "Bad request version (%r)" % version) + return False + # + # Mallory is a very liberal proxy and is accepting of ideals that + # others might reject + # + # if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1": + # self.close_connection = 0 + # if version_number >= (2, 0): + # self.send_error(505, + # "Invalid HTTP Version (%s)" % base_version_number) + # return False + + + elif len(words) == 2: + [command, path] = words + self.close_connection = 1 + if command != 'GET': + self.send_error(400, + "Bad HTTP/0.9 request type (%r)" % command) + return False + elif not words: + return False + else: + self.send_error(400, "Bad request syntax (%r)" % requestline) + return False + self.command, self.path, self.request_version = command, path, version + + # Examine the headers and look for a Connection directive + self.headers = HTTPMessage(self.fp, 0) + + # TODO: We need better support of the message body here. Currently + # it is not really interpreted at all. This will be problematic for + # multi-part form posts. + if self.command == "POST": + if self.headers["Content-Length"]: + self.body = self.fp.read(int(self.headers["Content-Length"])) + +class HTTPResponse: + + # strict: If true, raise BadStatusLine if the status line can't be + # parsed as a valid HTTP/1.0 or 1.1 status line. By default it is + # false because it prevents clients from talking to HTTP/0.9 + # servers. Note that a response with a sufficiently corrupted + # status line will look like an HTTP/0.9 response. + + # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details. 
+ + def __init__(self, sock, debuglevel=1, strict=0, method=None): + self.sock = sock + if type(self.sock) !=SSL.Connection: + self.fp = self.sock.makefile('rb', 0) + else: + self.fp = Reader(self.sock) + self.debuglevel = debuglevel + self.strict = strict + self._method = method + self.log = logging.getLogger("mallorymain") + #Body + self.dirty_body = None + self.clean_body = None + #Headers + self.msg = None + + # from the Status-Line of the response + self.version = _UNKNOWN # HTTP-Version + self.status = _UNKNOWN # Status-Code + self.reason = _UNKNOWN # Reason-Phrase + + self.chunked = _UNKNOWN # is "chunked" being used? + self.chunk_left = _UNKNOWN # bytes left to read in current chunk + self.length = _UNKNOWN # number of bytes left in response + self.will_close = _UNKNOWN # conn will close at end of response + + def _read_status(self): + # Initialize with Simple-Response defaults + line = self.fp.readline() + if self.debuglevel > 1: + self.log.debug("reply: %s" % repr(line)) + + if not line: + # Presumably, the server closed the connection before + # sending a valid response. + raise BadStatusLine(line) + try: + [version, status, reason] = line.split(None, 2) + except ValueError: + try: + [version, status] = line.split(None, 1) + reason = "" + except ValueError: + # empty version will cause next test to fail and status + # will be treated as 0.9 response. 
+ version = "" + if not version.startswith('HTTP/'): + if self.strict: + self.close() + raise BadStatusLine(line) + else: + # assume it's a Simple-Response from an 0.9 server + self.fp = LineAndFileWrapper(line, self.fp) + return "HTTP/0.9", 200, "" + + # The status code is a three-digit number + try: + status = int(status) + if status < 100 or status > 999: + raise BadStatusLine(line) + except ValueError: + raise BadStatusLine(line) + return version, status, reason + + def begin(self): + if self.msg is not None: + # we've already started reading the response + return + + # read until we get a non-100 response + while True: + version, status, reason = self._read_status() + if status != CONTINUE: + break + # skip the header from the 100 response + while True: + skip = self.fp.readline().strip() + if not skip: + break + if self.debuglevel > 1: + print "header:", skip + + self.status = status + self.reason = reason.strip() + if version == 'HTTP/1.0': + self.version = 10 + elif version.startswith('HTTP/1.'): + self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1 + elif version == 'HTTP/0.9': + self.version = 9 + else: + #raise UnknownProtocol(version) + print "Things are going downhill fast my dear." + + if self.version == 9: + self.length = None + self.chunked = 0 + self.will_close = 1 + self.msg = HTTPMessage(StringIO.StringIO()) + return + + self.msg = HTTPMessage(self.fp, 0) + if self.debuglevel > 1: + for hdr in self.msg.headers: + print "header:", hdr, + + # don't let the msg keep an fp + self.msg.fp = None + + # are we using the chunked-style of transfer encoding? + tr_enc = self.msg.getheader('transfer-encoding') + if tr_enc and tr_enc.lower() == "chunked": + self.chunked = 1 + self.chunk_left = None + else: + self.chunked = 0 + + # will the connection close at the end of the response? + self.will_close = self._check_close() + + # do we have a Content-Length? 
+ # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked" + length = self.msg.getheader('content-length') + if length and not self.chunked: + try: + self.length = int(length) + except ValueError: + self.length = None + else: + if self.length < 0: # ignore nonsensical negative lengths + self.length = None + else: + self.length = None + + # does the body have a fixed length? (of zero) + if (status == NO_CONTENT or status == NOT_MODIFIED or + 100 <= status < 200 or # 1xx codes + self._method == 'HEAD'): + self.length = 0 + + # if the connection remains open, and we aren't using chunked, and + # a content-length was not provided, then assume that the connection + # WILL close. + if not self.will_close and \ + not self.chunked and \ + self.length is None: + self.will_close = 1 + + def _check_close(self): + conn = self.msg.getheader('connection') + if self.version == 11: + # An HTTP/1.1 proxy is assumed to stay open unless + # explicitly closed. + conn = self.msg.getheader('connection') + if conn and "close" in conn.lower(): + return True + return False + + # Some HTTP/1.0 implementations have support for persistent + # connections, using rules different than HTTP/1.1. + + # For older HTTP, Keep-Alive indicates persistent connection. + if self.msg.getheader('keep-alive'): + return False + + # At least Akamai returns a "Connection: Keep-Alive" header, + # which was supposed to be sent by the client. + if conn and "keep-alive" in conn.lower(): + return False + + # Proxy-Connection is a netscape hack. + pconn = self.msg.getheader('proxy-connection') + if pconn and "keep-alive" in pconn.lower(): + return False + + # otherwise, assume it will close + return True + + def close(self): + if self.fp: + self.fp.close() + self.fp = None + + def isclosed(self): + # NOTE: it is possible that we will not ever call self.close(). This + # case occurs when will_close is TRUE, length is None, and we + # read up to the last byte, but NOT past it. 
+ # + # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be + # called, meaning self.isclosed() is meaningful. + return self.fp is None + + # XXX It would be nice to have readline and __iter__ for this, too. + + def read(self, amt=None): + if self.fp is None: + return '' + + if self.chunked: + return self._read_chunked(amt) + + if amt is None: + # unbounded read + if self.length is None: + s = self.fp.read() + else: + s = self._safe_read(self.length) + self.length = 0 + self.close() # we read everything + return s + + if self.length is not None: + if amt > self.length: + # clip the read to the "end of response" + amt = self.length + + # we do not use _safe_read() here because this may be a .will_close + # connection, and the user is reading more bytes than will be provided + # (for example, reading in 1k chunks) + s = self.fp.read(amt) + if self.length is not None: + self.length -= len(s) + if not self.length: + self.close() + return s + + def _read_chunked(self, amt): + assert self.chunked != _UNKNOWN + chunk_left = self.chunk_left + value = [] + while True: + if chunk_left is None: + line = self.fp.readline() + i = line.find(';') + if i >= 0: + line = line[:i] # strip chunk-extensions + try: + chunk_left = int(line, 16) + except ValueError: + # close the connection as protocol synchronisation is + # probably lost + self.close() + raise IncompleteRead(''.join(value)) + if chunk_left == 0: + break + if amt is None: + value.append(self._safe_read(chunk_left)) + elif amt < chunk_left: + value.append(self._safe_read(amt)) + self.chunk_left = chunk_left - amt + return ''.join(value) + elif amt == chunk_left: + value.append(self._safe_read(amt)) + self._safe_read(2) # toss the CRLF at the end of the chunk + self.chunk_left = None + return ''.join(value) + else: + value.append(self._safe_read(chunk_left)) + amt -= chunk_left + + # we read the whole chunk, get another + self._safe_read(2) # toss the CRLF at the end of the chunk + chunk_left = None + + # read 
and discard trailer up to the CRLF terminator + ### note: we shouldn't have any trailers! + while True: + line = self.fp.readline() + if not line: + # a vanishingly small number of sites EOF without + # sending the trailer + break + if line == '\r\n': + break + + # we read everything; close the "file" + self.close() + + return ''.join(value) + + def _safe_read(self, amt): + """Read the number of bytes requested, compensating for partial reads. + + Normally, we have a blocking socket, but a read() can be interrupted + by a signal (resulting in a partial read). + + Note that we cannot distinguish between EOF and an interrupt when zero + bytes have been read. IncompleteRead() will be raised in this + situation. + + This function should be used when bytes "should" be present for + reading. If the bytes are truly not available (due to EOF), then the + IncompleteRead exception can be used to detect the problem. + """ + s = [] + while amt > 0: + chunk = self.fp.read(min(amt, MAXAMOUNT)) + if not chunk: + raise IncompleteRead(''.join(s), amt) + s.append(chunk) + amt -= len(chunk) + return ''.join(s) + + def getheader(self, name, default=None): + if self.msg is None: + raise ResponseNotReady() + return self.msg.getheader(name, default) + + def getheaders(self): + """Return list of (header, value) tuples.""" + if self.msg is None: + raise ResponseNotReady() + return self.msg.items() + + def toDict(self): + return {"status" : self.status, "reason":self.reason, + "version": self.version, "msg":self.msg.dict, + "body":self.body, "clean_body":self.clean_body, "TEST":"TEST"} + +class LineAndFileWrapper: + """A limited file-like object for HTTP/0.9 responses.""" + + # The status-line parsing code calls readline(), which normally + # get the HTTP status line. For a 0.9 response, however, this is + # actually the first line of the body! Clients need to get a + # readable file object that contains that line. 
+ + def __init__(self, line, file): + self._line = line + self._file = file + self._line_consumed = 0 + self._line_offset = 0 + self._line_left = len(line) + + def __getattr__(self, attr): + return getattr(self._file, attr) + + def _done(self): + # called when the last byte is read from the line. After the + # call, all read methods are delegated to the underlying file + # object. + self._line_consumed = 1 + self.read = self._file.read + self.readline = self._file.readline + self.readlines = self._file.readlines + + def read(self, amt=None): + if self._line_consumed: + return self._file.read(amt) + assert self._line_left + if amt is None or amt > self._line_left: + s = self._line[self._line_offset:] + self._done() + if amt is None: + return s + self._file.read() + else: + return s + self._file.read(amt - len(s)) + else: + assert amt <= self._line_left + i = self._line_offset + j = i + amt + s = self._line[i:j] + self._line_offset = j + self._line_left -= amt + if self._line_left == 0: + self._done() + return s + + def readline(self): + if self._line_consumed: + return self._file.readline() + assert self._line_left + s = self._line[self._line_offset:] + self._done() + return s + + def readlines(self, size=None): + if self._line_consumed: + return self._file.readlines(size) + assert self._line_left + L = [self._line[self._line_offset:]] + self._done() + if size is None: + return L + self._file.readlines() + else: + return L + self._file.readlines(size) + + +class HTTPMessage(mimetools.Message): + + def addheader(self, key, value): + """Add header for field key handling repeats.""" + prev = self.dict.get(key) + if prev is None: + self.dict[key] = value + else: + combined = ", ".join((prev, value)) + self.dict[key] = combined + + def addcontinue(self, key, more): + """Add more field data from a continuation line.""" + prev = self.dict[key] + self.dict[key] = prev + "\n " + more + + def toDict(self): + return self.dict + + def readheaders(self): + """Read header lines. 
+ + Read header lines up to the entirely blank line that terminates them. + The (normally blank) line that ends the headers is skipped, but not + included in the returned list. If a non-header line ends the headers, + (which is an error), an attempt is made to backspace over it; it is + never included in the returned list. + + The variable self.status is set to the empty string if all went well, + otherwise it is an error message. The variable self.headers is a + completely uninterpreted list of lines contained in the header (so + printing them will reproduce the header exactly as it appears in the + file). + + If multiple header fields with the same name occur, they are combined + according to the rules in RFC 2616 sec 4.2: + + Appending each subsequent field-value to the first, each separated + by a comma. The order in which header fields with the same field-name + are received is significant to the interpretation of the combined + field value. + """ + # XXX The implementation overrides the readheaders() method of + # rfc822.Message. The base class design isn't amenable to + # customized behavior here so the method here is a copy of the + # base class code with a few small changes. + + self.dict = {} + self.unixfrom = '' + self.headers = hlist = [] + self.status = '' + headerseen = "" + firstline = 1 + startofline = unread = tell = None + if hasattr(self.fp, 'unread'): + unread = self.fp.unread + elif self.seekable: + tell = self.fp.tell + while True: + if tell: + try: + startofline = tell() + except IOError: + startofline = tell = None + self.seekable = 0 + line = self.fp.readline() + if not line: + self.status = 'EOF in headers' + break + # Skip unix From name time lines + if firstline and line.startswith('From '): + self.unixfrom = self.unixfrom + line + continue + firstline = 0 + if headerseen and line[0] in ' \t': + # XXX Not sure if continuation lines are handled properly + # for http and/or for repeating headers + # It's a continuation line. 
+ hlist.append(line) + self.addcontinue(headerseen, line.strip()) + continue + elif self.iscomment(line): + # It's a comment. Ignore it. + continue + elif self.islast(line): + # Note! No pushback here! The delimiter line gets eaten. + break + headerseen = self.isheader(line) + if headerseen: + # It's a legal header line, save it. + hlist.append(line) + self.addheader(headerseen, line[len(headerseen)+1:].strip()) + continue + else: + # It's not a header line; throw it back and stop here. + if not self.dict: + self.status = 'No headers' + else: + self.status = 'Non-header line where header expected' + # Try to undo the read. + if unread: + unread(line) + elif tell: + self.fp.seek(startofline) + else: + self.status = self.status + '; bad seek' + break diff -urN src.orig/protocol/https.py src/protocol/https.py --- src.orig/protocol/https.py 2014-12-21 18:21:50.000000000 +0100 +++ src/protocol/https.py 2014-12-21 11:22:36.000000000 +0100 @@ -14,4 +14,4 @@ self.name = "HTTPS" self.log.debug("HTTPS: Initializing") self.supports = {malloryevt.STARTS2C:True, malloryevt.STARTC2S:True, - malloryevt.CSAFTERSS:True, malloryevt.SSCREATE:True} \ Kein Zeilenumbruch am Dateiende. + malloryevt.CSAFTERSS:True, malloryevt.SSCONNECT:True} \ Kein Zeilenumbruch am Dateiende. diff -urN src.orig/protocol/sslproto.py src/protocol/sslproto.py --- src.orig/protocol/sslproto.py 2014-12-21 18:21:50.000000000 +0100 +++ src/protocol/sslproto.py 2014-12-21 11:08:19.000000000 +0100 @@ -2,6 +2,7 @@ import os import os.path +import time import socket import ssl import malloryevt @@ -15,6 +16,22 @@ from x509 import * from pyasn1.codec.der import decoder +from binascii import hexlify, unhexlify, crc32 + +from OpenSSL import SSL +from OpenSSL import crypto +from errno import EAGAIN, EWOULDBLOCK + +# Generated with "openssl dhparam". It's too slow to generate this on startup. 
+DEFAULT_DHPARAM = """-----BEGIN DH PARAMETERS----- +MIGHAoGBAOdPzMbYgoYfO3YBYauCLRlE8X1XypTiAjoeCFD0qWRx8YUsZ6Sj20W5 +zsfQxlZfKovo3f2MftjkDkbI/C/tDgxoe0ZPbjy5CjdOhkzxn0oTbKTs16Rw8DyK +1LjTR65sQJkJEdgsX8TSi/cicCftJZl9CaZEaObF2bdgSgGK+PezAgEC +-----END DH PARAMETERS-----""" + +dhparams_global = None # setting dhparams as class variable breaks serialization by pickle (pyro) used in mallory gui + + class SSLProtocol(TcpProtocol): """This class knows how to wrap a socket using SSL and create fake @@ -22,38 +39,354 @@ def __init__(self, trafficdb, source, destination): TcpProtocol.__init__(self, trafficdb, source, destination) + #self.config.debug = 2 self.friendly_name = "SSL Base" self.serverPort = 443 self.name = "SSL" self.log = logging.getLogger("mallorymain") self.log.debug("SSLProtocol: Initializing") - self.supports = {malloryevt.CSAFTERSS:True, malloryevt.SSCREATE:True} + #self.supports = {malloryevt.CSAFTERSS:True, malloryevt.SSCREATE:True, malloryevt.SSCONNECT:True} + self.supports = {malloryevt.CSAFTERSS:True, malloryevt.SSCONNECT:True} + self.sni = None + dhparams_global = self.set_dhparams() + + def set_dhparams(self): + return None + dhpath = "ca/dh.par" + if not os.path.exists(dhpath): + with open(dhpath, "wb") as f: + f.write(DEFAULT_DHPARAM) + + bio = SSL._lib.BIO_new_file(dhpath, b"r") + if bio != SSL._ffi.NULL: + bio = SSL._ffi.gc(bio, SSL._lib.BIO_free) + dhparams = SSL._lib.PEM_read_bio_DHparams( + bio, SSL._ffi.NULL, SSL._ffi.NULL, SSL._ffi.NULL + ) + return SSL._ffi.gc(dhparams, SSL._lib.DH_free) + + def close_socket(self, sock): + """ + Does a hard close of a socket, without emitting a RST. + """ + try: + # We already indicate that we close our end. 
+ # If we close RD, any further received bytes would result in a RST being set, which we want to avoid + # for our purposes + sock.shutdown(socket.SHUT_WR) # may raise "Transport endpoint is not connected" on Linux + + # Section 4.2.2.13 of RFC 1122 tells us that a close() with any + # pending readable data could lead to an immediate RST being sent (which is the case on Windows). + # http://ia600609.us.archive.org/22/items/TheUltimateSo_lingerPageOrWhyIsMyTcpNotReliable/the-ultimate-so_linger-page-or-why-is-my-tcp-not-reliable.html + # + # However, we cannot rely on the shutdown()-followed-by-read()-eof technique proposed by the page above: + # Some remote machines just don't send a TCP FIN, which would leave us in the unfortunate situation that + # recv() would block infinitely. + # As a workaround, we set a timeout here even if we are in blocking mode. + # Please let us know if you have a better solution to this problem. + + sock.settimeout(sock.gettimeout() or 20) + # may raise a timeout/disconnect exception. + while sock.recv(4096): # pragma: no cover + pass + + except socket.error: + pass + + sock.close() + + def create_ssl_context(self, cert, key, method=SSL.SSLv23_METHOD, options=None, + handle_sni=None, request_client_cert=None, cipher_list=None, + dhparams=None, chain_file=None): + ctx = SSL.Context(SSL.TLSv1_METHOD) + if not options is None: + ctx.set_options(options) + ctx.load_verify_locations(chain_file) + if cipher_list: + try: + ctx.set_cipher_list(cipher_list) + except SSL.Error, v: + self.log.debug("SSL cipher specification error: %s"%str(v)) + traceback.print_exc() + if handle_sni: + # SNI callback happens during do_handshake() + ctx.set_tlsext_servername_callback(handle_sni) + ctx.use_privatekey(key) + ctx.use_certificate(cert) + if dhparams: + SSL._lib.SSL_CTX_set_tmp_dh(ctx._context, dhparams) + return ctx + + + def handle_sni(self, connection): + """ + This callback gets called during the SSL handshake with the client. 
+ The client has just sent the Server Name Indication (SNI). We now connect upstream to + figure out which certificate needs to be served. + """ + try: + sn = connection.get_servername() + if sn and sn != self.sni: + self.sni = sn.decode("utf8").encode("idna") + self.log.debug("SNI received: %s (get_servername: %s)" % (self.sni, sn)) + # Now, change client context to reflect changed certificate: + cert_from_remote_server = self.destination.get_peer_certificate() + fake_cert, fake_key = cert_auth.ca.get_fake_cert_and_key(cert_from_remote_server, self.sni) + new_context = self.create_ssl_context( + fake_cert, fake_key, + #method=SSL.TLSv1_METHOD, # currently fails with google.com + method=SSL.SSLv23_METHOD, + options=None, + cipher_list=None, + dhparams=dhparams_global, + chain_file="ca/ca.cer" + ) + connection.set_tlsext_host_name(self.sni) # https://github.com/msabramo/pyOpenSSL/blob/master/examples/sni/client.py + connection.set_context(new_context) + self.log.debug("SSL destination SNI host %s wanted" % self.sni) + # An unhandled exception in this method will core dump PyOpenSSL, so + # make dang sure it doesn't happen. 
+ except: # pragma: no cover + import traceback + self.log.debug("Error in handle_sni:\r\n %s" % traceback.format_exc()) def configure_client_socket(self): """This is the socket from mallory to the victim""" self.log.debug("SSLProto: Getting common name from socket") - cert_from_remote_server = self.destination.getpeercert(True) - - fake_cert, fake_key = cert_auth.ca.get_fake_cert_and_key_filename(cert_from_remote_server) + cert_from_remote_server = self.destination.get_peer_certificate() + if cert_from_remote_server == None: + self.log.debug("SSL 2: FAILED to get remote server:\r\n") + else: + self.log.debug("SSL 2: remote server:\r\n %s" % cert_from_remote_server.get_subject().CN) + fake_cert, fake_key = cert_auth.ca.get_fake_cert_and_key(cert_from_remote_server) + self.log.debug("SSL 2: fake cert:\r\n %s" % fake_cert) self.log.debug("SSLProto: Starting Socket") + + # from libmproxy -> netlib/tcp.py BaseHandler _create_ssl_context + ctx = self.create_ssl_context( + fake_cert, fake_key, + method=SSL.TLSv1_METHOD, + options=None, + handle_sni=self.handle_sni, + cipher_list=None, + dhparams=dhparams_global, + chain_file="ca/ca.cer" + ) + self.source = SSL.Connection(ctx, self.source) + self.source.set_accept_state() try: - self.source = ssl.wrap_socket(self.source, - server_side=True, - certfile=fake_cert, - keyfile=fake_key, - ssl_version=ssl.PROTOCOL_SSLv23) - except: - self.log.debug("SSLProto: Client Closed SSL Connection") + self.source.do_handshake() + except SSL.Error, v: + self.log.debug("SSL source handshake error: %s"%repr(v)) traceback.print_exc() - self.log.debug("SSLProto: WoWzer!!") + +# def verify_cb(self, conn, cert_x509, errno, errdepth, retval): +# return True + + # unused def configure_server_socket(self): """This is the socket from mallory to the server""" self.log.debug("SSLProto: configure_server_socket") - self.destination = ssl.wrap_socket(self.destination) + """ + ssl.wrap_socket (https://docs.python.org/2/library/ssl.html) + For client-side 
sockets, the context construction is lazy; if the underlying socket isn't connected yet, + the context construction will be performed after connect() is called on the socket. + For server-side sockets, if the socket has no remote peer, it is assumed to be a listening socket, + and the server-side SSL wrapping is automatically performed on client connections + accepted via the accept() method. + wrap_socket() may raise SSLError. + """ + #self.destination = ssl.wrap_socket(self.destination) # module ssl does not support sni in python2.7 + #use module OpenSSL ("pyopenssl") which allows explicit control of the connection return None, None - + + def connect_server_socket(self): + """ called from main loop immediately after connect() of server socket (mallory.py) """ + self.log.debug("SSLProto: connect_server_socket") + #ctx = SSL.Context(SSL.TLSv1_METHOD) + ctx = SSL.Context(SSL.SSLv23_METHOD) + #ctx.set_verify(SSL.VERIFY_NONE, self.verify_cb) + self.destination = SSL.Connection(ctx, self.destination) + if type(self.destination) == SSL.Connection: + self.destination.set_connect_state() + if True: + try: + self.destination.do_handshake() + except SSL.Error, v: + self.log.debug("SSL destination handshake error: %s"%repr(v)) + traceback.print_exc() + + return None, None + + def forward_any(self, conndata): + """Generic forwarding method. 
Use this method if both the client to
+ server and server to client handling can be done with the same method"""
+ string = ' '
+ msgCnt = 0
+
+ source = self.source
+ destination = self.destination
+
+ # pre condition: source, conndata and destination must be valid
+ if not source or not destination or not conndata:
+ raise Exception("TcpProtocol was improperly configured")
+ return
+
+
+ self.log.info("TcpProtocol.forward_any(): Setting up forward for "
+ "client-->server %s-->%s"
+ % (source.getpeername(), destination.getpeername()))
+ if self.config.debug > 1:
+ self.log.debug("TcpProtocol.forward_any: before switch: direction:%s, source: %s dest:%s" % \
+ (conndata.direction, source.getpeername(), destination.getpeername()))
+
+ # Swapping source and destination so method can work in both directions
+ if conndata.direction == "s2c":
+ source = self.destination
+ destination = self.source
+
+ if self.config.debug > 1:
+ self.log.debug("TcpProtocol.forward_any: after switch: direction:%s, source: %s dest:%s" % \
+ (conndata.direction, source.getpeername(), destination.getpeername()))
+
+
+ if self.config.debug > 1:
+ self.log.debug("TcpProtocol: Begin read loop direction:%s, source: %s dest:%s" % \
+ (conndata.direction, source.getpeername(), destination.getpeername()))
+
+ while string:
+ if self.config.debug > 1:
+ self.log.debug("TcpProtocol: recv bytes:%s" % (conndata.direction))
+
+
+ source.setblocking(False)
+ while True:
+ try:
+ # There needs to be some exception catching here. 
+ string = source.recv(8192) + except (SSL.WantReadError, SSL.WantWriteError): + time.sleep(0.1) # prevent 100% CPU usage + pass + except SSL.SysCallError, (errnum, errstr): + if errnum == EAGAIN or errnum == EWOULDBLOCK: # socket has no data + time.sleep(0.1) # prevent 100% CPU usage + pass + #self.done = True + #string = "" + #self.log.debug("SSL recv error: EAGAIN or EWOULDBLOCK") + #break + else: + self.log.debug("SSL recv error: %i %s" % (errnum, errstr)) + string = "" + break + except SSL.ZeroReturnError: + self.done = True + string = "" + self.log.debug("SSL recv: ssl connection has been closed") + break + except SSL.Error, v: + self.log.debug("SSL recv error: %s"%repr(v)) + string = "" + break + except: + self.done = True + string = "" + raise Exception("base.TcpProtocol: error with source.recv") + else: + break + + + crc1 = crc32(string) + if string: + if self.config.debug > 1: + self.log.debug("TcpProtocol: sendall:%s" % (conndata.direction)) + + sorig = string + + if self.config.debug > 2: + self.log.debug("CRC: %08x" % (crc32(sorig))) + + shoulddebug = True + + + if self.rules is not None: + shoulddebug, string = self.processrules(string, conndata, msgCnt) + + # Potentially pause here while waiting for incoming data + if shoulddebug: + # self.debugon must also be true. 
Allows debugger to + # mallory-wide turn debugging on/off + string = self.waitfordebug(string, conndata) + + if self.config.debug > 2: + crc2 = crc32(string) + if crc1 != crc2: + self.log.debug("TcpProtocol: Internal CRC FAIL: %08x - %08x" % (crc1, crc2)) + self.log.debug("TcpProtocol: %s****\n\n****%s" % (repr(sorig), repr(string))) + + destination.setblocking(False) + # http://stackoverflow.com/questions/20715408/solved-pyopenssl-ssl3-write-pendingbad-write-retry-return-self-sslobj-write + import StringIO + io = StringIO.StringIO() + io.write(string) + buffer = io.getvalue() + while 1: + try: + #destination.sendall(string) + destination.send(buffer) + except (SSL.WantReadError, SSL.WantWriteError), v: + time.sleep(0.1) # prevent 100% CPU usage + pass + except SSL.SysCallError, (errnum, errstr): + if errnum == EAGAIN or errnum == EWOULDBLOCK: # socket has no data + time.sleep(0.1) # prevent 100% CPU usage + pass + else: + self.log.debug("SSL sendall error: %i %s" % (errnum, errstr)) + self.done = True + break + except SSL.Error, v: + self.log.debug("SSL sendall error: %s"%repr(v)) + self.done = True + destination.flush() + break + except v: + self.done = True + raise Exception("TcpProtocol.forward_any: sendall error: %s"%repr(v)) + break + else: + break + + # Store the stream data + self.trafficdb.qFlow.put((conndata.conncount, \ + conndata.direction, msgCnt, time.time(), repr(string))) + + self.log.debug("forward_any(): cc:%s dir:%s mc:%s time:%s bytes:%d" \ + " peek:%s" % (conndata.conncount, \ + conndata.direction, msgCnt, time.time(), len(string), \ + repr(string[0:24]))) + + else: + if self.config.debug == 1: + self.log.debug("forward_any(): [%s] CLOSE" % (conndata.direction)) + self.log.debug("forward_any(): conndata:%s" % (conndata)) + try: + if self.config.debug > 1: + self.log.debug("TcpProtocol: shutting down SOURCE.READ and DESTINATION.WRITE direction:%s, source: %s dest:%s" % \ + (conndata.direction, source.getpeername(), destination.getpeername())) + 
+ source.shutdown(socket.SHUT_RD) + destination.shutdown(socket.SHUT_WR) + self.done = True + return + except: + return + + msgCnt = msgCnt+1 + + # def supports(self, mevt): # if mevt == malloryevt.CSAFTERSS: # return True diff -urN src.orig/ruleconfig.py src/ruleconfig.py --- src.orig/ruleconfig.py 2014-12-21 18:21:50.000000000 +0100 +++ src/ruleconfig.py 2014-11-30 16:29:32.000000000 +0100 @@ -32,6 +32,12 @@ "direction":"s2c", "passthru":"True" }, +#{ +# "name":"spbTest", +# "action":rule.Muck(["2010/2110/g"]), +# "direction":"s2c", +# "passthru":"True" +#}, { "name":"default", "action":rule.Debug() diff -urN src.orig/trafficdb.py src/trafficdb.py --- src.orig/trafficdb.py 2014-12-21 18:21:50.000000000 +0100 +++ src/trafficdb.py 2014-12-01 00:25:00.000000000 +0100 @@ -95,7 +95,7 @@ try: os.mkdir(self.dbPath) except: - print "Failed to create the directory for the DB" + print "Failed to create the directory -- " + self.dbPath + " -- for the DB" return existed = os.path.exists(dbName)