# Advanced performance evaluator demo

from definitions import *
from pyopus.evaluator.performance import PerformanceEvaluator
from pyopus.design.sensitivity import Sensitivity, screen
from pyopus.parallel.mpi import MPI
from pyopus.parallel.cooperative import cOS
from pprint import pprint
import numpy as np

if __name__=='__main__':
	# Statistical parameters dictionary with every parameter set to the
	# mean of its distribution (nominal statistical corner)
	nominalStat={ name: desc['dist'].mean() for name, desc in statParams.items() }
	
	# Operating parameters dictionary with nominal (initial) values
	nominalOp={ name: desc['init'] for name, desc in opParams.items() }
	
	# The initial design could also be taken from definitions.py:
	# initialDesign={ name: desc['init'] for name, desc in designParams.items() }
	
	# Result of a previous design across corners
	initialDesign={
		'c_out':   3.4227551453164880e-13, 
		'diff_l':  7.4308192182875534e-07, 
		'diff_w':  1.2789110397149770e-06, 
		'load_l':  2.1097689639936013e-06, 
		'load_w':  2.3180449555941561e-06, 
		'mirr_l':  4.0529113121872304e-07, 
		'mirr_ld': 1.3311984218957902e-06, 
		'mirr_w':  3.4036562767439648e-05, 
		'mirr_wd': 2.3745920801687639e-06, 
		'mirr_wo': 2.1285465666785311e-05, 
		'out_l':   5.3631175887258056e-07, 
		'out_w':   2.6406246699146450e-05, 
		'r_out':   3.0372450962642072e+00, 
	}
	
	# Sorted names give a fixed, reproducible ordering of the entries in
	# the sensitivity difference vectors
	designParamNames=sorted(initialDesign.keys())
	
	# Parameters of the nominal corner: statistical + operating parameters
	nominalParams={}
	nominalParams.update(nominalStat)
	nominalParams.update(nominalOp)
	
	# Prepare one corner
	#  defined for all simulator setups
	#  module 'mc' defines the typical Monte Carlo model
	#  operating parameters are set to nominal values
	#  statistical parameters are set to their nominal (mean) values
	corners={
		'nom': {
			'params': nominalParams, 
			'modules': ['mc']
		}
	}
	
	# Prepare parallel evaluation environment, cOS will use MPI as its backend.
	# Remote tasks will be started in their own local folders. Before a task 
	# is started all files in the current folder of the machine that spawns 
	# the remote task will be copied to that task's local folder (this is 
	# specified by the mirrorMap argument to the MPI constructor). 
	cOS.setVM(MPI(mirrorMap={'*':'.'}))
	
	# In definitions.py measures have no corners listed, 
	# therefore they will be evaluated across all specified corners. 
	# Only vgs_drv and cmrr are activated to keep the evaluation fast. 
	pe=PerformanceEvaluator(
		heads, analyses, measures, corners, variables=variables, 
		activeMeasures=['vgs_drv', 'cmrr'], 
		debug=0
	)
	
	# Sensitivity setup for the design parameters
	# (relPerturbHiLo=0.05 sets the relative perturbation size; 
	# presumably relative to the parameter's lo..hi range — see PyOPUS docs)
	sens=Sensitivity(pe, designParams, designParamNames, debug=2, relPerturbHiLo=0.05)
	
	# Compute finite differences, perturbations, and the analysis count
	# at the initial design point
	diffs, deltas, anCount = sens(initialDesign)
	
	# Extract the difference vector for cmrr in corner 'nom'
	dv=sens.diffVector('cmrr', 'nom')
	
	# Parameter screening for cmrr
	print("\nSignificant design parameters influencing cmrr")
	outNdx, inNdx = screen(
		dv, deltas, designParamNames, 
		contribThreshold=0.01, cumulativeThreshold=0.25, useSens=False, squared=True, 
		debug=2
	)
	print([designParamNames[ii] for ii in inNdx])
	
	# Extract the difference vector for vgs_drv in corner 'nom'
	# (vgs_drv is a vector measure, hence dv is 2-dimensional)
	dv=sens.diffVector('vgs_drv', 'nom')
	
	# Screening for the first component of vgs_drv
	print("\nSignificant design parameters influencing vgs_drv[0]")
	outNdx, inNdx = screen(
		dv[:,0], deltas, designParamNames, 
		contribThreshold=0.01, cumulativeThreshold=0.25, useSens=False, squared=True, 
		debug=2
	)
	print([designParamNames[ii] for ii in inNdx])
	
	# Release the first evaluator's temporary files before rebinding pe;
	# otherwise they would never be cleaned up (only the last evaluator
	# was finalized previously).
	pe.finalize()
	
	# Prepare for evaluating sensitivities of cmrr to statistical parameters.
	# Now the corner fixes operating and design parameters while the 
	# statistical parameters are perturbed. 
	nominalParams={}
	nominalParams.update(nominalOp)
	nominalParams.update(initialDesign)
	corners={
		'nom': {
			'params': nominalParams, 
			'modules': ['mc']
		}
	}
	# To make evaluations faster, evaluate only cmrr
	pe=PerformanceEvaluator(
		heads, analyses, measures, corners, variables=variables, 
		activeMeasures=['cmrr'], 
		debug=0
	)
	# Statistical parameters are perturbed around their mean within [-10, 10]
	sensParams={
		name: {
			'lo': -10, 
			'hi': 10, 
			'init': desc['dist'].mean(), 
		} for name, desc in statParams.items()
	}
	sensNames=list(sensParams.keys())
	# Central differences with a relative perturbation of 1/64
	sens=Sensitivity(pe, sensParams, sensNames, debug=0, relPerturbHiLo=1/64, diffType="central")
	diffs, deltas, anCount = sens()
	dv=sens.diffVector('cmrr', 'nom')
	# Parameter screening for cmrr
	print("\nSignificant statistical parameters influencing cmrr")
	outNdx, inNdx = screen(
		dv, deltas, sensNames, 
		contribThreshold=0.01, cumulativeThreshold=0.25, useSens=False, squared=True, 
		debug=2
	)
	print([sensNames[ii] for ii in inNdx])
	
	# Cleanup temporary files generated by the evaluator
	pe.finalize()
	
	# Finalize cOS parallel environment
	cOS.finalize()
