#!/usr/bin/env python3
"""
CS 451 Data-Intensive Distributed Computing:
Assignment 4 public check script

Sample usage:
$ ./check_assignment4_public.py 
"""

import sys
import socket
from subprocess import call
import argparse

def check_a4(env, iterations, questions, sources):
    spark_cmd = ["spark-submit"]
    input_path = "data/p2p-Gnutella08-adj.txt"
    output_paths = {1: "cs451-a4-PageRank", 2: "cs451-a4-PersonalizedPageRank"}
    partitions = 2
    if env != 'linux':
        # Cluster run: request executors and use the full Wikipedia adjacency list.
        spark_cmd = ["spark-submit", "--num-executors", "2", "--executor-cores", "4", "--executor-memory", "24G"]
        input_path = '/data/cs451/enwiki-20180901-adj.txt'
        partitions = 8
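    # For reference, the assembled cluster command looks roughly like this
    # (Q1 case, shown with the default iteration count):
    #   spark-submit --num-executors 2 --executor-cores 4 --executor-memory 24G \
    #     --class ca.uwaterloo.cs451.a4.PageRank target/assignments-1.0.jar \
    #     --input /data/cs451/enwiki-20180901-adj.txt --output cs451-a4-PageRank \
    #     --iterations 20 --partitions 8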

    call(["mvn","clean","package"])
    result = {}
    
    if 1 in questions:
        print("# Running Question 1 - Standard PageRank - " + str(iterations) + " iterations")
        with open("q1.out", "w") as of, open("q1.err", "w") as ef:
            result[1] = call(spark_cmd + [
                "--class", "ca.uwaterloo.cs451.a4.PageRank",
                "target/assignments-1.0.jar", "--input", input_path,
                "--output", output_paths[1],
                "--iterations", str(iterations),
                "--partitions", str(partitions)], stdout=of, stderr=ef)

    if 2 in questions:
        print("# Running Question 2 - Personalized PageRank - " + str(iterations) + " iterations, sources=" + str(sources))
        with open("q2.out", "w") as of, open("q2.err", "w") as ef:
            result[2] = call(spark_cmd + [
                "--class", "ca.uwaterloo.cs451.a4.PersonalizedPageRank",
                "target/assignments-1.0.jar", "--input", input_path,
                "--output", output_paths[2],
                "--iterations", str(iterations),
                "--partitions", str(partitions),
                "--sources"] + list(map(str, sources)), stdout=of, stderr=ef)
    print("Return codes:", result)
    for q in questions:
        if q not in result:
            # Guard against question numbers we never ran (e.g. -q 3).
            continue
        if result[q] == 0:
            print(f"Q{q} ran successfully. Standard output (top 5 lines via head) - if you don't see anything, check your output format!")
            call(f"grep -P '^\\d+\\t(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?$' q{q}.out | head -5", shell=True)
            print("PageRank output file (sample):")
            call(f"hdfs dfs -head {output_paths[q]}/part-00000 | head -n 5", shell=True)
        else:
            print(f"Q{q} did not terminate successfully, errors:")
            call(["cat", f"q{q}.err"])
    


if __name__ == "__main__":
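    # Hostname check: 'datasci-login' is presumably the shared cluster's login
    # node; any other hostname is treated as a local Linux student environment.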
    env = 'datasci' if socket.gethostname() == 'datasci-login' else 'linux'
    parser = argparse.ArgumentParser(description="CS 451 2021 Fall Assignment 4 Public Test Script")
    parser.add_argument('-i', '--iterations', help='Number of iterations', type=int, default=20)
    parser.add_argument('-q', '--questions', help='Question(s) to evaluate', type=int, nargs='+', default=[1, 2])
    parser.add_argument('-s', '--sources', help='Sources for Q2 (ignored if not running Q2)', type=int, nargs='+',
                        default=[367, 249, 145] if env == 'linux' else [73273, 73276])
    args = parser.parse_args()
    try:
        check_a4(env, args.iterations, args.questions, args.sources)
    except Exception as e:
        # Report the failure on stderr and exit nonzero instead of swallowing it.
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
