Python script for importing the FortiDeceptor image into AWS as an AMI.

To view the help message for this script, run it with the -h option.

import boto3
import time, sys, os,traceback
import json
import pprint
from datetime import datetime
from types import SimpleNamespace

# Default AWS configuration; the __main__ block overrides these from CLI args.
global_region_name="us-west-2"  # region for every boto3 client/resource
global_aws_access_key_id=""
global_aws_secret_access_key=""
global_bucket=""  # S3 bucket used to stage the VHD upload

class DatetimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime/date values to ISO-style strings.

    Needed because boto3 responses contain datetime objects that the stock
    encoder rejects.
    """

    def default(self, obj):
        # Local import: the module top imports only `datetime`, so the
        # original `isinstance(obj, date)` raised NameError.
        from datetime import date
        # datetime is a subclass of date, so it must be tested first.
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
        if isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)

def check_return(resp):
    """Return 0 when the boto3 response reports HTTP 200, otherwise -1."""
    if resp is None:
        return -1
    status = resp['ResponseMetadata']['HTTPStatusCode']
    return 0 if status == 200 else -1
    
def list_bucket():
    """Return the names of all S3 buckets visible to the configured credentials.

    Fix: the original referenced an undefined global `s3` and raised
    NameError; build the resource from the module-level settings instead,
    matching the other functions in this script.
    """
    s3 = boto3.resource('s3', region_name=global_region_name,
                        aws_access_key_id=global_aws_access_key_id,
                        aws_secret_access_key=global_aws_secret_access_key)
    return [bucket.name for bucket in s3.buckets.all()]

def resp2obj(resp):
    """Convert a boto3 response dict into nested attribute-style namespaces."""
    serialized = json.dumps(resp, cls=DatetimeEncoder)

    def _to_namespace(mapping):
        return SimpleNamespace(**mapping)

    return json.loads(serialized, object_hook=_to_namespace)

def bucket_exists(s3s, fk):
    """Return True if the S3 resource *s3s* owns a bucket named *fk*."""
    return any(bucket.name == fk for bucket in s3s.buckets.all())

def import_as_AMI(filename, imagename, arch, size):
    """Upload a local VHD image to S3, import it as an EBS snapshot, and
    register the snapshot as an AMI.

    Args:
        filename: Path of the local VHD file to upload.
        imagename: S3 object key, and the Name tag for the snapshot/AMI.
        arch: AMI architecture string (e.g. 'x86_64').
        size: Root volume size in GB (int, or a string accepted by int()).

    Side effects: may create the staging bucket named by global_bucket;
    uploads and later deletes the S3 object; exits the process with -1 when
    import_snapshot fails (usually a missing 'vmimport' service role).

    Fixes vs. original: removed the no-op self-assignments (`arch = arch`,
    `size = size`) and the redundant second `bucket = s3s.Bucket(buck)`;
    dropped the unused exception variable.
    """
    if filename is None:
        print("Incorrect parameter")
        return

    fn = filename
    fk = imagename
    s3s = boto3.resource('s3', region_name=global_region_name, aws_access_key_id=global_aws_access_key_id, aws_secret_access_key=global_aws_secret_access_key)
    s3c = boto3.client('s3', region_name=global_region_name, aws_access_key_id=global_aws_access_key_id, aws_secret_access_key=global_aws_secret_access_key)
    buck = global_bucket
    if not bucket_exists(s3s, buck):
        bucket = s3s.create_bucket(ACL='private', Bucket=buck, CreateBucketConfiguration={'LocationConstraint':global_region_name})
        if bucket != None:
            bucket.wait_until_exists()
        else:
            print("Failed to create bucket %s" % (buck))
            return
    else:
        bucket = s3s.Bucket(buck)
    # Drop any stale object under the same key, then upload the image file.
    s3c.delete_object(Bucket=buck, Key=fk)
    bucket.upload_file(fn, fk)
    ec2 = boto3.client('ec2', region_name=global_region_name, aws_access_key_id=global_aws_access_key_id, aws_secret_access_key=global_aws_secret_access_key)
    try:
        resp = ec2.import_snapshot(
            Description='import FDC image snapshot',
            DiskContainer={
                'Format': 'VHD',
                'UserBucket': {
                    'S3Bucket': buck,
                    'S3Key': fk
                }
            })
        r = resp2obj(resp)
    except Exception:
        # import_snapshot most commonly fails because the 'vmimport'
        # service role is missing or under-privileged.
        print('''Please make sure you have the service role 'vmimport' with below permissions:
            -- Resource to s3:your-bucket
            *) s3:ListBucket
            *) s3:GetBucketLocation
            *) s3:GetObject
            -- Resource to ec2:*
            *) ec2:ModifySnapshotAttribute
            *) ec2:CopySnapshot
            *) ec2:RegisterImage
            *) ec2:Describe*

            For more information, please refer to https://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html , section 'Required service role'
        ''')
        print(traceback.format_exc())
        sys.exit(-1)

    print("Importing image: taskid={}".format(r.ImportTaskId))
    # Poll the import task every 10s until it completes (or is deleted).
    while True:
        time.sleep(10)
        resp = ec2.describe_import_snapshot_tasks(ImportTaskIds=[r.ImportTaskId])
        if check_return(resp) == 0:
            taskdetail = resp['ImportSnapshotTasks'][0]
            st = taskdetail['SnapshotTaskDetail']['Status']
            print("Importing image: {}".format(st))
            if st == 'completed':
                break
            elif st == "deleted":
                print(taskdetail)
                return

    print("Imported image successfully")
    r = resp2obj(resp)
    ec2s = boto3.resource('ec2', region_name=global_region_name, aws_access_key_id=global_aws_access_key_id, aws_secret_access_key=global_aws_secret_access_key)
    snapshot = ec2s.Snapshot(r.ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId)
    snapshot.create_tags(Tags=[{'Key':'Name', 'Value':fk}])
    # Register the AMI: the imported snapshot as the root disk plus a 50GB
    # empty data disk, both deleted on instance termination.
    resp = ec2.register_image(Name=fk, Architecture=arch, RootDeviceName='/dev/sda1',
        BlockDeviceMappings=[{'DeviceName':'/dev/sda1',
                                'Ebs':{'SnapshotId':snapshot.id,'VolumeType':'gp2','VolumeSize':int(size),'DeleteOnTermination':True}},
                                {'DeviceName':'/dev/sdb',
                                'Ebs':{'VolumeType':'gp2','VolumeSize':50,'DeleteOnTermination':True}},],
                                VirtualizationType='hvm', EnaSupport=True)
    if check_return(resp) == 0:
        print("Registered image successfully")
    else:
        print("Failed to register image")
        print(resp)
    r = resp2obj(resp)
    image = ec2s.Image(r.ImageId)
    image.create_tags(Tags=[{'Key':'Name', 'Value':fk}])
    # Clean up the staged VHD; the snapshot/AMI no longer need it.
    s3c.delete_object(Bucket=buck, Key=fk)
    print("Deleted the image file from bucket {}".format(buck))

              
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()

    parser.add_argument("-r", "--region_name", help="region_name")
    parser.add_argument("-i", "--aws_access_key_id", help="aws_access_key_id")
    parser.add_argument("-k", "--aws_secret_access_key", help="aws_secret_access_key")
    parser.add_argument("-b", "--bucket", help="The bucket name")
    parser.add_argument("-f", "--filename", help="The FDC AWS vhd full file name")
    parser.add_argument("-n", "--imagename", help="The AMI image name on AWS")
    parser.add_argument("-a", "--arch", help="Optional: default is x86_64")
    parser.add_argument("-s", "--size", help="Optional: The size of the image file, default is 1GB. ")
    args = parser.parse_args()

    # Keep the module default region unless -r was actually supplied; the
    # original unconditional assignment clobbered "us-west-2" with None.
    if args.region_name:
        global_region_name = args.region_name
    # Credentials may remain None, letting boto3 use its default chain.
    global_aws_access_key_id = args.aws_access_key_id
    global_aws_secret_access_key = args.aws_secret_access_key

    global_bucket = "fdcbucket"  # S3 bucket names must be lowercase
    if args.bucket:
        global_bucket = args.bucket

    filename = args.filename
    imagename = args.imagename
    arch = "x86_64"
    if args.arch:
        arch = args.arch
    size = 1  # GB; import_as_AMI converts strings with int()
    if args.size:
        size = args.size

    import_as_AMI(filename, imagename, arch, size)

Python script for importing the FortiDeceptor image into AWS as an AMI.

To view the help message for this script, run it with the -h option.

import boto3
import time, sys, os,traceback
import json
import pprint
from datetime import datetime
from types import SimpleNamespace

# Default AWS configuration; the __main__ block overrides these from CLI args.
global_region_name="us-west-2"  # region for every boto3 client/resource
global_aws_access_key_id=""
global_aws_secret_access_key=""
global_bucket=""  # S3 bucket used to stage the VHD upload

class DatetimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime/date values to ISO-style strings.

    Needed because boto3 responses contain datetime objects that the stock
    encoder rejects.
    """

    def default(self, obj):
        # Local import: the module top imports only `datetime`, so the
        # original `isinstance(obj, date)` raised NameError.
        from datetime import date
        # datetime is a subclass of date, so it must be tested first.
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
        if isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)

def check_return(resp):
    """Return 0 when the boto3 response reports HTTP 200, otherwise -1."""
    if resp is None:
        return -1
    status = resp['ResponseMetadata']['HTTPStatusCode']
    return 0 if status == 200 else -1
    
def list_bucket():
    """Return the names of all S3 buckets visible to the configured credentials.

    Fix: the original referenced an undefined global `s3` and raised
    NameError; build the resource from the module-level settings instead,
    matching the other functions in this script.
    """
    s3 = boto3.resource('s3', region_name=global_region_name,
                        aws_access_key_id=global_aws_access_key_id,
                        aws_secret_access_key=global_aws_secret_access_key)
    return [bucket.name for bucket in s3.buckets.all()]

def resp2obj(resp):
    """Convert a boto3 response dict into nested attribute-style namespaces."""
    serialized = json.dumps(resp, cls=DatetimeEncoder)

    def _to_namespace(mapping):
        return SimpleNamespace(**mapping)

    return json.loads(serialized, object_hook=_to_namespace)

def bucket_exists(s3s, fk):
    """Return True if the S3 resource *s3s* owns a bucket named *fk*."""
    return any(bucket.name == fk for bucket in s3s.buckets.all())

def import_as_AMI(filename, imagename, arch, size):
    """Upload a local VHD image to S3, import it as an EBS snapshot, and
    register the snapshot as an AMI.

    Args:
        filename: Path of the local VHD file to upload.
        imagename: S3 object key, and the Name tag for the snapshot/AMI.
        arch: AMI architecture string (e.g. 'x86_64').
        size: Root volume size in GB (int, or a string accepted by int()).

    Side effects: may create the staging bucket named by global_bucket;
    uploads and later deletes the S3 object; exits the process with -1 when
    import_snapshot fails (usually a missing 'vmimport' service role).

    Fixes vs. original: removed the no-op self-assignments (`arch = arch`,
    `size = size`) and the redundant second `bucket = s3s.Bucket(buck)`;
    dropped the unused exception variable.
    """
    if filename is None:
        print("Incorrect parameter")
        return

    fn = filename
    fk = imagename
    s3s = boto3.resource('s3', region_name=global_region_name, aws_access_key_id=global_aws_access_key_id, aws_secret_access_key=global_aws_secret_access_key)
    s3c = boto3.client('s3', region_name=global_region_name, aws_access_key_id=global_aws_access_key_id, aws_secret_access_key=global_aws_secret_access_key)
    buck = global_bucket
    if not bucket_exists(s3s, buck):
        bucket = s3s.create_bucket(ACL='private', Bucket=buck, CreateBucketConfiguration={'LocationConstraint':global_region_name})
        if bucket != None:
            bucket.wait_until_exists()
        else:
            print("Failed to create bucket %s" % (buck))
            return
    else:
        bucket = s3s.Bucket(buck)
    # Drop any stale object under the same key, then upload the image file.
    s3c.delete_object(Bucket=buck, Key=fk)
    bucket.upload_file(fn, fk)
    ec2 = boto3.client('ec2', region_name=global_region_name, aws_access_key_id=global_aws_access_key_id, aws_secret_access_key=global_aws_secret_access_key)
    try:
        resp = ec2.import_snapshot(
            Description='import FDC image snapshot',
            DiskContainer={
                'Format': 'VHD',
                'UserBucket': {
                    'S3Bucket': buck,
                    'S3Key': fk
                }
            })
        r = resp2obj(resp)
    except Exception:
        # import_snapshot most commonly fails because the 'vmimport'
        # service role is missing or under-privileged.
        print('''Please make sure you have the service role 'vmimport' with below permissions:
            -- Resource to s3:your-bucket
            *) s3:ListBucket
            *) s3:GetBucketLocation
            *) s3:GetObject
            -- Resource to ec2:*
            *) ec2:ModifySnapshotAttribute
            *) ec2:CopySnapshot
            *) ec2:RegisterImage
            *) ec2:Describe*

            For more information, please refer to https://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html , section 'Required service role'
        ''')
        print(traceback.format_exc())
        sys.exit(-1)

    print("Importing image: taskid={}".format(r.ImportTaskId))
    # Poll the import task every 10s until it completes (or is deleted).
    while True:
        time.sleep(10)
        resp = ec2.describe_import_snapshot_tasks(ImportTaskIds=[r.ImportTaskId])
        if check_return(resp) == 0:
            taskdetail = resp['ImportSnapshotTasks'][0]
            st = taskdetail['SnapshotTaskDetail']['Status']
            print("Importing image: {}".format(st))
            if st == 'completed':
                break
            elif st == "deleted":
                print(taskdetail)
                return

    print("Imported image successfully")
    r = resp2obj(resp)
    ec2s = boto3.resource('ec2', region_name=global_region_name, aws_access_key_id=global_aws_access_key_id, aws_secret_access_key=global_aws_secret_access_key)
    snapshot = ec2s.Snapshot(r.ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId)
    snapshot.create_tags(Tags=[{'Key':'Name', 'Value':fk}])
    # Register the AMI: the imported snapshot as the root disk plus a 50GB
    # empty data disk, both deleted on instance termination.
    resp = ec2.register_image(Name=fk, Architecture=arch, RootDeviceName='/dev/sda1',
        BlockDeviceMappings=[{'DeviceName':'/dev/sda1',
                                'Ebs':{'SnapshotId':snapshot.id,'VolumeType':'gp2','VolumeSize':int(size),'DeleteOnTermination':True}},
                                {'DeviceName':'/dev/sdb',
                                'Ebs':{'VolumeType':'gp2','VolumeSize':50,'DeleteOnTermination':True}},],
                                VirtualizationType='hvm', EnaSupport=True)
    if check_return(resp) == 0:
        print("Registered image successfully")
    else:
        print("Failed to register image")
        print(resp)
    r = resp2obj(resp)
    image = ec2s.Image(r.ImageId)
    image.create_tags(Tags=[{'Key':'Name', 'Value':fk}])
    # Clean up the staged VHD; the snapshot/AMI no longer need it.
    s3c.delete_object(Bucket=buck, Key=fk)
    print("Deleted the image file from bucket {}".format(buck))

              
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()

    parser.add_argument("-r", "--region_name", help="region_name")
    parser.add_argument("-i", "--aws_access_key_id", help="aws_access_key_id")
    parser.add_argument("-k", "--aws_secret_access_key", help="aws_secret_access_key")
    parser.add_argument("-b", "--bucket", help="The bucket name")
    parser.add_argument("-f", "--filename", help="The FDC AWS vhd full file name")
    parser.add_argument("-n", "--imagename", help="The AMI image name on AWS")
    parser.add_argument("-a", "--arch", help="Optional: default is x86_64")
    parser.add_argument("-s", "--size", help="Optional: The size of the image file, default is 1GB. ")
    args = parser.parse_args()

    # Keep the module default region unless -r was actually supplied; the
    # original unconditional assignment clobbered "us-west-2" with None.
    if args.region_name:
        global_region_name = args.region_name
    # Credentials may remain None, letting boto3 use its default chain.
    global_aws_access_key_id = args.aws_access_key_id
    global_aws_secret_access_key = args.aws_secret_access_key

    global_bucket = "fdcbucket"  # S3 bucket names must be lowercase
    if args.bucket:
        global_bucket = args.bucket

    filename = args.filename
    imagename = args.imagename
    arch = "x86_64"
    if args.arch:
        arch = args.arch
    size = 1  # GB; import_as_AMI converts strings with int()
    if args.size:
        size = args.size

    import_as_AMI(filename, imagename, arch, size)