from pyopus.evaluator.performance import PerformanceEvaluator, updateAnalysisCount
from pyopus.evaluator.aggregate import *
from pyopus.optimizer import optimizerClass
from pyopus.parallel.cooperative import cOS
import numpy as np

# Simulator setup: one SPICE OPUS head named 'opus' that loads the
# amplifier netlist module. 'options' and 'params' stay empty so the
# simulator defaults apply.
heads = {
	'opus': {
		# Use the SPICE OPUS batch-mode simulator interface.
		'simulator': 'SpiceOpus',
		'settings': {
			# 0 = no simulator-interface debug output.
			'debug': 0
		},
		'moddefs': {
			# Module 'def' pulls in the amplifier description.
			'def': {'file': 'bjtamp.inc'},
		},
		# No extra simulator options.
		'options': {},
		# No head-level netlist parameters.
		'params': {}
	}
}

# Analyses the evaluator runs: a single DC sweep of current source i1
# from -100uA to 100uA in 100 linear steps, using the 'opus' head and
# the 'def' netlist module.
analyses = {
	'dc': {
		'head': 'opus',
		'modules': ['def'],
		# Command string is handed verbatim to the simulator interface.
		'command': "dc(-100e-6, 100e-6, 'lin', 100, 'i1', 'dc')"
	},
}

# Performance measures extracted from the DC sweep.
# Design requirements: gain above 20 kV/A, nonlinearity below 80 mV.
# The expression strings are executed by the evaluator in a context where
# scale(), v() and np are available; each must assign a variable named
# after its measure.

# Slope of the line through the first and last point of the DC transfer curve.
_gainExpr = """
# Get response
x, y = scale(), v('out')

# Construct line from first to last point, extract gain
gain=(y[-1]-y[0])/(x[-1]-x[0])
"""

# Maximal deviation of the transfer curve from that first-to-last line.
_nonlinearityExpr = """
# Get response
x, y = scale(), v('out')

# Construct line from first to last point
ylin=y[0]+(x-x[0])*(y[-1]-y[0])/(x[-1]-x[0])

# Maximal deviation from line
nonlinearity=(np.abs(y-ylin)).max()
"""

measures = {
	'gain': {'analysis': 'dc', 'expression': _gainExpr},
	'nonlinearity': {'analysis': 'dc', 'expression': _nonlinearityExpr},
}

# Aggregate cost function definition: one contribution per measure.
# Each entry maps a raw measure value to a normalized contribution that
# the Aggregator sums into a single scalar cost.
costDefinition = [
	{
		'measure': 'gain', 
		# Normalization of the gain requirement (gain must be ABOVE 20e3):
		#   anything below 20e3 is negative, while everything above is positive
		#   a change of 1e3 corresponds to contribution of size 1 to the aggregate cost function
		#   a failed measurement results in contribution 10000.0 (default value)
		'norm': Nabove(20e3, 1e3, 10000.0),	
		# Shape of the contribution is piecewise linear. It is obtained by multiplying 
		#   negative normalized values (requirement met) with 0 and 
		#   positive normalized values (requirement violated) with 1
		'shape': Slinear2(1.0,0.0),
		# The contribution is computed from the value in the worst corner.
		'reduce': Rworst()
	},
	{
		'measure': 'nonlinearity', 
		# Nonlinearity must be BELOW 80 mV; norm (unit contribution step) is 10 mV;
		# a failed measurement contributes 10000.0.
		'norm': Nbelow(80e-3, 10e-3, 10000.0),	
		# Same piecewise-linear shape and worst-corner reduction as for gain.
		'shape': Slinear2(1.0,0.0),
		'reduce': Rworst()
	},
]
	
# Corners in which the measures are evaluated. Only the nominal
# operating point is used: 12 V supply at 25 degrees C, no extra modules.
corners = {
	'nominal': {
		# Simulator head(s) this corner applies to.
		'heads': ['opus'],
		# Corner-level netlist parameters.
		'params': {'vcc': 12, 'temperature': 25},
		# No additional netlist modules in this corner.
		'modules': []
	}
}
		
if __name__=='__main__':
	# Evaluate in parallel across MPI hosts via the cOS cooperative
	# framework. NOTE: unlike what the original comment suggested, these
	# two lines are active — comment them out for a single-process run.
	from pyopus.parallel.mpi import MPI
	cOS.setVM(MPI())

	# Performance evaluator: runs the analyses and extracts the measures
	# in every corner for a given set of design parameter values.
	pe=PerformanceEvaluator(heads, analyses, measures, corners, debug=0)
	
	# Design parameter names, lower/upper bounds, and initial point
	# (r1 and r2 resistor values in ohms).
	xnames=np.array(['r1', 'r2'])
	xlow=  np.array([5e3,  20e3])
	xhi=   np.array([50e3, 200e3])
	xinit= np.array([45e3, 195e3])
	
	# Aggregate cost function combining pe's measures per costDefinition.
	ce=Aggregator(pe, costDefinition, xnames, debug=0)
	
	# Optimizer: parallel self-adaptive differential evolution, limited
	# to 1000 iterations.
	opt=optimizerClass("ParallelSADE")(ce, xlo=xlow, xhi=xhi, maxiter=1000)
	
	# Set initial point. Must be a numpy array. 
	opt.reset(xinit)
	
	# Install reporter plugin. 
	# Print cost. Also print performance every time the cost is decreased. 
	opt.installPlugin(ce.getReporter())
	
	# Install stopper plugin. 
	# Stop run when all requirements are satisfied (all cost contributions are 0 or less) 
	opt.installPlugin(ce.getStopWhenAllSatisfied())
	
	# Run the optimization.
	opt.run()

	# Optimization result: best point found and the iteration it came from.
	xresult=opt.x
	iterresult=opt.bestIter
		
	# Final evaluation at xresult so ce/pe hold the results for the best point.
	cf=ce(xresult)
	
	# Print results. 
	print("\n\nFinal cost: "+str(cf)+", found in iter "+str(iterresult)+", total "+str(opt.niter)+" iteration(s)")
	print(ce.formatParameters())
	print(ce.formatResults(nMeasureName=12, nCornerName=15))
	print("")
	# ce was used by pe for evaluation, so all the performance measure values are there
	print("Performance in corners")
	print(pe.formatResults(['gain', 'nonlinearity'], nMeasureName=12, nCornerName=15))
	print("")
	
	# Every call to pe results in the same analysis count. 
	# Therefore we can multiply the analysis count of a single pe call
	# with the number of calls (opt.niter + 1 for the final evaluation)
	# to get the actual analysis count. 
	acnt={}
	updateAnalysisCount(acnt, pe.analysisCount, opt.niter+1)
	print("Analysis count: "+str(acnt))
	
	# Cleanup intermediate simulator files.
	pe.finalize()
	
	# Finalize cOS parallel environment
	cOS.finalize()
	
	
