- """
- Unit tests for optimization routines from optimize.py
- Authors:
- Ed Schofield, Nov 2005
- Andrew Straw, April 2008
- To run it in its simplest form::
- nosetests test_optimize.py
- """
- import itertools
- import platform
- import numpy as np
- from numpy.testing import (assert_allclose, assert_equal,
- assert_almost_equal,
- assert_no_warnings, assert_warns,
- assert_array_less, suppress_warnings)
- import pytest
- from pytest import raises as assert_raises
- from scipy import optimize
- from scipy.optimize._minimize import Bounds, NonlinearConstraint
- from scipy.optimize._minimize import MINIMIZE_METHODS, MINIMIZE_SCALAR_METHODS
- from scipy.optimize._linprog import LINPROG_METHODS
- from scipy.optimize._root import ROOT_METHODS
- from scipy.optimize._root_scalar import ROOT_SCALAR_METHODS
- from scipy.optimize._qap import QUADRATIC_ASSIGNMENT_METHODS
- from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS
- from scipy.optimize._optimize import MemoizeJac, show_options
def test_check_grad():
    # Verify that check_grad is able to estimate the derivative of the
    # expit (logistic sigmoid) function (a sketch of the quantity it
    # computes follows this test).

    def expit(x):
        return 1 / (1 + np.exp(-x))

    def der_expit(x):
        return np.exp(-x) / (1 + np.exp(-x))**2

    x0 = np.array([1.5])

    r = optimize.check_grad(expit, der_expit, x0)
    assert_almost_equal(r, 0)
    r = optimize.check_grad(expit, der_expit, x0,
                            direction='random', seed=1234)
    assert_almost_equal(r, 0)

    r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6)
    assert_almost_equal(r, 0)
    r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6,
                            direction='random', seed=1234)
    assert_almost_equal(r, 0)

    # Check if the epsilon parameter is being considered.
    r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1) - 0)
    assert r > 1e-7
    r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1,
                                direction='random', seed=1234) - 0)
    assert r > 1e-7

    def x_sinx(x):
        return (x*np.sin(x)).sum()

    def der_x_sinx(x):
        return np.sin(x) + x*np.cos(x)

    x0 = np.arange(0, 2, 0.2)
    r = optimize.check_grad(x_sinx, der_x_sinx, x0,
                            direction='random', seed=1234)
    assert_almost_equal(r, 0)

    assert_raises(ValueError, optimize.check_grad,
                  x_sinx, der_x_sinx, x0,
                  direction='random_projection', seed=1234)

    # checking can be done for derivatives of vector valued functions
    r = optimize.check_grad(himmelblau_grad, himmelblau_hess, himmelblau_x0,
                            direction='all', seed=1234)
    assert r < 5e-7
class CheckOptimize:
    """ Base test case for a simple constrained entropy maximization problem
    (the machine translation example of Berger et al in
    Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
    (An illustrative gradient-consistency sketch follows the class.)
    """

    def setup_method(self):
        self.F = np.array([[1, 1, 1],
                           [1, 1, 0],
                           [1, 0, 1],
                           [1, 0, 0],
                           [1, 0, 0]])
        self.K = np.array([1., 0.3, 0.5])
        self.startparams = np.zeros(3, np.float64)
        self.solution = np.array([0., -0.524869316, 0.487525860])
        self.maxiter = 1000
        self.funccalls = 0
        self.gradcalls = 0
        self.trace = []

    def func(self, x):
        self.funccalls += 1
        if self.funccalls > 6000:
            raise RuntimeError("too many iterations in optimization routine")
        log_pdot = np.dot(self.F, x)
        logZ = np.log(sum(np.exp(log_pdot)))
        f = logZ - np.dot(self.K, x)
        self.trace.append(np.copy(x))
        return f

    def grad(self, x):
        self.gradcalls += 1
        log_pdot = np.dot(self.F, x)
        logZ = np.log(sum(np.exp(log_pdot)))
        p = np.exp(log_pdot - logZ)
        return np.dot(self.F.transpose(), p) - self.K

    def hess(self, x):
        log_pdot = np.dot(self.F, x)
        logZ = np.log(sum(np.exp(log_pdot)))
        p = np.exp(log_pdot - logZ)
        return np.dot(self.F.T,
                      np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))

    def hessp(self, x, p):
        return np.dot(self.hess(x), p)
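
# Illustrative sketch (ours, not part of the original suite): func() above is
# the maxent dual objective f(x) = log Z(x) - K.x with Z(x) = sum_i exp(F_i.x),
# whose gradient is E_p[F] - K, which is exactly what grad() computes. A quick
# consistency check of the analytic gradient (the `_demo_*` helper name is
# hypothetical, for exposition only):
def _demo_maxent_grad_consistency():
    problem = CheckOptimize()
    problem.setup_method()
    # expected to be ~0 if grad() matches func()
    return optimize.check_grad(problem.func, problem.grad,
                               problem.startparams)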
class CheckOptimizeParameterized(CheckOptimize):

    def test_cg(self):
        # conjugate gradient optimization routine
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='CG', jac=self.grad,
                                    options=opts)
            params, fopt, func_calls, grad_calls, warnflag = \
                res['x'], res['fun'], res['nfev'], res['njev'], res['status']
        else:
            retval = optimize.fmin_cg(self.func, self.startparams,
                                      self.grad, (), maxiter=self.maxiter,
                                      full_output=True, disp=self.disp,
                                      retall=False)
            (params, fopt, func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # SciPy 0.7.0. Don't allow them to increase.
        assert self.funccalls == 9, self.funccalls
        assert self.gradcalls == 7, self.gradcalls

        # Ensure that the function behaves the same; this is from SciPy 0.7.0
        assert_allclose(self.trace[2:4],
                        [[0, -0.5, 0.5],
                         [0, -5.05700028e-01, 4.95985862e-01]],
                        atol=1e-14, rtol=1e-7)

    def test_cg_cornercase(self):
        def f(r):
            return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2

        # Check several initial guesses. (Too far away from the
        # minimum, the function ends up in the flat region of exp.)
        for x0 in np.linspace(-0.75, 3, 71):
            sol = optimize.minimize(f, [x0], method='CG')
            assert sol.success
            assert_allclose(sol.x, [0.5], rtol=1e-5)

    def test_bfgs(self):
        # Broyden-Fletcher-Goldfarb-Shanno optimization routine
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams,
                                    jac=self.grad, method='BFGS', args=(),
                                    options=opts)

            params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = (
                res['x'], res['fun'], res['jac'], res['hess_inv'],
                res['nfev'], res['njev'], res['status'])
        else:
            retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
                                        args=(), maxiter=self.maxiter,
                                        full_output=True, disp=self.disp,
                                        retall=False)
            (params, fopt, gopt, Hopt,
             func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # SciPy 0.7.0. Don't allow them to increase.
        assert self.funccalls == 10, self.funccalls
        assert self.gradcalls == 8, self.gradcalls

        # Ensure that the function behaves the same; this is from SciPy 0.7.0
        assert_allclose(self.trace[6:8],
                        [[0, -5.25060743e-01, 4.87748473e-01],
                         [0, -5.24885582e-01, 4.87530347e-01]],
                        atol=1e-14, rtol=1e-7)

    def test_bfgs_infinite(self):
        # Test corner case where -Inf is the minimum. See gh-2019.
        func = lambda x: -np.e**-x
        fprime = lambda x: -func(x)
        x0 = [0]
        with np.errstate(over='ignore'):
            if self.use_wrapper:
                opts = {'disp': self.disp}
                x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
                                      args=(), options=opts)['x']
            else:
                x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp)
            assert not np.isfinite(func(x))

    def test_bfgs_xrtol(self):
        # test the xrtol parameter; see gh-17345
        x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
        res = optimize.minimize(optimize.rosen,
                                x0, method='bfgs', options={'xrtol': 1e-3})
        ref = optimize.minimize(optimize.rosen,
                                x0, method='bfgs', options={'gtol': 1e-3})
        assert res.nit != ref.nit
    def test_powell(self):
        # Powell (direction set) optimization routine
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='Powell', options=opts)
            params, fopt, direc, numiter, func_calls, warnflag = (
                res['x'], res['fun'], res['direc'], res['nit'],
                res['nfev'], res['status'])
        else:
            retval = optimize.fmin_powell(self.func, self.startparams,
                                          args=(), maxiter=self.maxiter,
                                          full_output=True, disp=self.disp,
                                          retall=False)
            (params, fopt, direc, numiter, func_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)
        # params[0] does not affect the objective function
        assert_allclose(params[1:], self.solution[1:], atol=5e-6)

        # Ensure that function call counts are 'known good'; these are from
        # SciPy 0.7.0. Don't allow them to increase.
        #
        # However, some leeway must be added: the exact evaluation
        # count is sensitive to numerical error, and floating-point
        # computations are not bit-for-bit reproducible across
        # machines; factors such as MKL usage and data alignment
        # affect the rounding error.
        #
        assert self.funccalls <= 116 + 20, self.funccalls
        assert self.gradcalls == 0, self.gradcalls

    @pytest.mark.xfail(reason="This part of test_powell fails on some "
                              "platforms, but the solution returned by "
                              "powell is still valid.")
    def test_powell_gh14014(self):
        # This part of test_powell started failing on some CI platforms;
        # see gh-14014. Since the solution is still correct and the comments
        # in test_powell suggest that small differences in the bits are known
        # to change the "trace" of the solution, it seems safe to xfail to
        # get CI green now and investigate later.

        # Powell (direction set) optimization routine
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='Powell', options=opts)
            params, fopt, direc, numiter, func_calls, warnflag = (
                res['x'], res['fun'], res['direc'], res['nit'],
                res['nfev'], res['status'])
        else:
            retval = optimize.fmin_powell(self.func, self.startparams,
                                          args=(), maxiter=self.maxiter,
                                          full_output=True, disp=self.disp,
                                          retall=False)
            (params, fopt, direc, numiter, func_calls, warnflag) = retval

        # Ensure that the function behaves the same; this is from SciPy 0.7.0
        assert_allclose(self.trace[34:39],
                        [[0.72949016, -0.44156936, 0.47100962],
                         [0.72949016, -0.44156936, 0.48052496],
                         [1.45898031, -0.88313872, 0.95153458],
                         [0.72949016, -0.44156936, 0.47576729],
                         [1.72949016, -0.44156936, 0.47576729]],
                        atol=1e-14, rtol=1e-7)
    def test_powell_bounded(self):
        # Powell (direction set) optimization routine
        # same as test_powell above, but with bounds
        bounds = [(-np.pi, np.pi) for _ in self.startparams]
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    bounds=bounds,
                                    method='Powell', options=opts)
            params, fopt, direc, numiter, func_calls, warnflag = (
                res['x'], res['fun'], res['direc'], res['nit'],
                res['nfev'], res['status'])

            assert func_calls == self.funccalls
            assert_allclose(self.func(params), self.func(self.solution),
                            atol=1e-6, rtol=1e-5)

            # The exact evaluation count is sensitive to numerical error,
            # and floating-point computations are not bit-for-bit
            # reproducible across machines; factors such as MKL usage and
            # data alignment affect the rounding error.
            # It takes 155 calls on my machine, but we can add the same +20
            # margin as is used in `test_powell`
            assert self.funccalls <= 155 + 20
            assert self.gradcalls == 0

    def test_neldermead(self):
        # Nelder-Mead simplex algorithm
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='Nelder-mead', options=opts)
            params, fopt, numiter, func_calls, warnflag = (
                res['x'], res['fun'], res['nit'], res['nfev'],
                res['status'])
        else:
            retval = optimize.fmin(self.func, self.startparams,
                                   args=(), maxiter=self.maxiter,
                                   full_output=True, disp=self.disp,
                                   retall=False)
            (params, fopt, numiter, func_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # SciPy 0.7.0. Don't allow them to increase.
        assert self.funccalls == 167, self.funccalls
        assert self.gradcalls == 0, self.gradcalls

        # Ensure that the function behaves the same; this is from SciPy 0.7.0
        assert_allclose(self.trace[76:78],
                        [[0.1928968, -0.62780447, 0.35166118],
                         [0.19572515, -0.63648426, 0.35838135]],
                        atol=1e-14, rtol=1e-7)

    def test_neldermead_initial_simplex(self):
        # Nelder-Mead simplex algorithm
        simplex = np.zeros((4, 3))
        simplex[...] = self.startparams
        for j in range(3):
            simplex[j+1, j] += 0.1

        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': True, 'initial_simplex': simplex}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='Nelder-mead', options=opts)
            params, fopt, numiter, func_calls, warnflag = (res['x'],
                                                           res['fun'],
                                                           res['nit'],
                                                           res['nfev'],
                                                           res['status'])
            assert_allclose(res['allvecs'][0], simplex[0])
        else:
            retval = optimize.fmin(self.func, self.startparams,
                                   args=(), maxiter=self.maxiter,
                                   full_output=True, disp=False, retall=False,
                                   initial_simplex=simplex)
            (params, fopt, numiter, func_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # SciPy 0.17.0. Don't allow them to increase.
        assert self.funccalls == 100, self.funccalls
        assert self.gradcalls == 0, self.gradcalls

        # Ensure that the function behaves the same; this is from SciPy 0.15.0
        assert_allclose(self.trace[50:52],
                        [[0.14687474, -0.5103282, 0.48252111],
                         [0.14474003, -0.5282084, 0.48743951]],
                        atol=1e-14, rtol=1e-7)
    def test_neldermead_initial_simplex_bad(self):
        # Check that it fails with bad simplices
        bad_simplices = []

        simplex = np.zeros((3, 2))
        simplex[...] = self.startparams[:2]
        for j in range(2):
            simplex[j+1, j] += 0.1
        bad_simplices.append(simplex)

        simplex = np.zeros((3, 3))
        bad_simplices.append(simplex)

        for simplex in bad_simplices:
            if self.use_wrapper:
                opts = {'maxiter': self.maxiter, 'disp': False,
                        'return_all': False, 'initial_simplex': simplex}
                assert_raises(ValueError,
                              optimize.minimize,
                              self.func,
                              self.startparams,
                              args=(),
                              method='Nelder-mead',
                              options=opts)
            else:
                assert_raises(ValueError, optimize.fmin,
                              self.func, self.startparams,
                              args=(), maxiter=self.maxiter,
                              full_output=True, disp=False, retall=False,
                              initial_simplex=simplex)
    def test_ncg_negative_maxiter(self):
        # Regression test for gh-8241
        opts = {'maxiter': -1}
        result = optimize.minimize(self.func, self.startparams,
                                   method='Newton-CG', jac=self.grad,
                                   args=(), options=opts)
        assert result.status == 1

    def test_ncg(self):
        # line-search Newton conjugate gradient optimization routine
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=self.disp,
                                       retall=False)

        params = retval
        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # SciPy 0.7.0. Don't allow them to increase.
        assert self.funccalls == 7, self.funccalls
        assert self.gradcalls <= 22, self.gradcalls  # 0.13.0
        # assert self.gradcalls <= 18, self.gradcalls  # 0.9.0
        # assert self.gradcalls == 18, self.gradcalls  # 0.8.0
        # assert self.gradcalls == 22, self.gradcalls  # 0.7.0

        # Ensure that the function behaves the same; this is from SciPy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7)

    def test_ncg_hess(self):
        # Newton conjugate gradient with Hessian
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       hess=self.hess,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       fhess=self.hess,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=self.disp,
                                       retall=False)

        params = retval
        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # SciPy 0.7.0. Don't allow them to increase.
        assert self.funccalls <= 7, self.funccalls  # gh10673
        assert self.gradcalls <= 18, self.gradcalls  # 0.9.0
        # assert self.gradcalls == 18, self.gradcalls  # 0.8.0
        # assert self.gradcalls == 22, self.gradcalls  # 0.7.0

        # Ensure that the function behaves the same; this is from SciPy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7)

    def test_ncg_hessp(self):
        # Newton conjugate gradient with Hessian times a vector p.
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       hessp=self.hessp,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       fhess_p=self.hessp,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=self.disp,
                                       retall=False)

        params = retval
        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # SciPy 0.7.0. Don't allow them to increase.
        assert self.funccalls <= 7, self.funccalls  # gh10673
        assert self.gradcalls <= 18, self.gradcalls  # 0.9.0
        # assert self.gradcalls == 18, self.gradcalls  # 0.8.0
        # assert self.gradcalls == 22, self.gradcalls  # 0.7.0

        # Ensure that the function behaves the same; this is from SciPy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7)
def test_maxfev_test():
    rng = np.random.default_rng(271707100830272976862395227613146332411)

    def cost(x):
        return rng.random(1) * 1000  # never-converging problem

    for imaxfev in [1, 10, 50]:
        # "TNC" and "L-BFGS-B" also support a maximum number of function
        # evaluations, but they may exceed the limit because they evaluate
        # gradients by numerical differentiation. See the discussion in
        # PR #14805.
        for method in ['Powell', 'Nelder-Mead']:
            result = optimize.minimize(cost, rng.random(10),
                                       method=method,
                                       options={'maxfev': imaxfev})
            assert result["nfev"] == imaxfev
def test_wrap_scalar_function_with_validation():

    def func_(x):
        return x

    fcalls, func = optimize._optimize.\
        _wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5)

    for i in range(5):
        func(np.asarray(i))
        assert fcalls[0] == i+1

    msg = "Too many function calls"
    with assert_raises(optimize._optimize._MaxFuncCallError, match=msg):
        func(np.asarray(i))  # exceeded maximum function call

    fcalls, func = optimize._optimize.\
        _wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5)

    msg = "The user-provided objective function must return a scalar value."
    with assert_raises(ValueError, match=msg):
        func(np.array([1, 1]))


def test_obj_func_returns_scalar():
    match = ("The user-provided "
             "objective function must "
             "return a scalar value.")
    with assert_raises(ValueError, match=match):
        optimize.minimize(lambda x: x, np.array([1, 1]), method='BFGS')


def test_neldermead_iteration_num():
    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    res = optimize._minimize._minimize_neldermead(optimize.rosen, x0,
                                                  xatol=1e-8)
    assert res.nit <= 339


def test_neldermead_xatol_fatol():
    # gh4484
    # test we can call with fatol, xatol specified
    func = lambda x: x[0]**2 + x[1]**2

    optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2,
                                            xatol=1e-3, fatol=1e-3)


def test_neldermead_adaptive():
    func = lambda x: np.sum(x**2)
    p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159,
          0.32308456, 0.9692297, 0.4471682, 0.77411992, 0.80441652,
          0.35994957, 0.75487856, 0.99973421, 0.65063887, 0.09626474]

    res = optimize.minimize(func, p0, method='Nelder-Mead')
    assert_equal(res.success, False)

    res = optimize.minimize(func, p0, method='Nelder-Mead',
                            options={'adaptive': True})
    assert_equal(res.success, True)
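
# Background note (ours, not part of the original suite): the 'adaptive'
# option switches Nelder-Mead to the dimension-dependent reflection,
# expansion, contraction and shrink coefficients of Gao and Han,
# "Implementing the Nelder-Mead simplex algorithm with adaptive parameters"
# (2012), which is why the 15-dimensional problem above only converges with
# adaptive=True.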
def test_bounded_powell_outsidebounds():
    # With the bounded Powell method, if you start outside the bounds the
    # final solution should still be within the bounds (provided that the
    # user doesn't make a bad choice for the `direc` argument).
    func = lambda x: np.sum(x**2)
    bounds = (-1, 1), (-1, 1), (-1, 1)
    x0 = [-4, .5, -.8]

    # we're starting outside the bounds, so we should get a warning
    with assert_warns(optimize.OptimizeWarning):
        res = optimize.minimize(func, x0, bounds=bounds, method="Powell")
    assert_allclose(res.x, np.array([0.] * len(x0)), atol=1e-6)
    assert_equal(res.success, True)
    assert_equal(res.status, 0)

    # However, if we change the `direc` argument such that the set of
    # vectors does not span the parameter space, then we may not end up
    # back within the bounds. Here we see that the first parameter cannot
    # be updated! (See the illustrative note after this test.)
    direc = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]
    # we're starting outside the bounds, so we should get a warning
    with assert_warns(optimize.OptimizeWarning):
        res = optimize.minimize(func, x0,
                                bounds=bounds, method="Powell",
                                options={'direc': direc})
    assert_allclose(res.x, np.array([-4., 0, 0]), atol=1e-6)
    assert_equal(res.success, False)
    assert_equal(res.status, 4)
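
# Illustrative note (ours, not part of the original suite): `direc` holds the
# search directions that Powell cycles through. If those vectors do not span
# the full parameter space, any component of x outside their span can never
# change, no matter how many iterations run. For example:
#
#     direc = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]   # x[0] is frozen
#     direc = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]   # spans R^3; all free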
def test_bounded_powell_vs_powell():
    # here we test an example where the bounded Powell method
    # will return a different result than the standard Powell
    # method.

    # first we test a simple example where the minimum is at
    # the origin and the minimum that is within the bounds is
    # larger than the minimum at the origin.
    func = lambda x: np.sum(x**2)
    bounds = (-5, -1), (-10, -0.1), (1, 9.2), (-4, 7.6), (-15.9, -2)
    x0 = [-2.1, -5.2, 1.9, 0, -2]

    options = {'ftol': 1e-10, 'xtol': 1e-10}

    res_powell = optimize.minimize(func, x0, method="Powell", options=options)
    assert_allclose(res_powell.x, 0., atol=1e-6)
    assert_allclose(res_powell.fun, 0., atol=1e-6)

    res_bounded_powell = optimize.minimize(func, x0, options=options,
                                           bounds=bounds,
                                           method="Powell")
    p = np.array([-1, -0.1, 1, 0, -2])
    assert_allclose(res_bounded_powell.x, p, atol=1e-6)
    assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)

    # now we test bounded Powell but with a mix of inf bounds.
    bounds = (None, -1), (-np.inf, -.1), (1, np.inf), (-4, None), (-15.9, -2)
    res_bounded_powell = optimize.minimize(func, x0, options=options,
                                           bounds=bounds,
                                           method="Powell")
    p = np.array([-1, -0.1, 1, 0, -2])
    assert_allclose(res_bounded_powell.x, p, atol=1e-6)
    assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)

    # next we test an example where the global minimum is within
    # the bounds, but the bounded Powell method performs better
    # than the standard Powell method.
    def func(x):
        t = np.sin(-x[0]) * np.cos(x[1]) * np.sin(-x[0] * x[1]) * np.cos(x[1])
        t -= np.cos(np.sin(x[1] * x[2]) * np.cos(x[2]))
        return t**2

    bounds = [(-2, 5)] * 3
    x0 = [-0.5, -0.5, -0.5]

    res_powell = optimize.minimize(func, x0, method="Powell")
    res_bounded_powell = optimize.minimize(func, x0,
                                           bounds=bounds,
                                           method="Powell")
    assert_allclose(res_powell.fun, 0.007136253919761627, atol=1e-6)
    assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)

    # next we test the previous example, but this time we provide Powell
    # with (-inf, inf) bounds and compare that to providing Powell with
    # no bounds. They should end up the same.
    bounds = [(-np.inf, np.inf)] * 3

    res_bounded_powell = optimize.minimize(func, x0,
                                           bounds=bounds,
                                           method="Powell")
    assert_allclose(res_powell.fun, res_bounded_powell.fun, atol=1e-6)
    assert_allclose(res_powell.nfev, res_bounded_powell.nfev, atol=1e-6)
    assert_allclose(res_powell.x, res_bounded_powell.x, atol=1e-6)

    # now test when x0 starts outside of the bounds.
    x0 = [45.46254415, -26.52351498, 31.74830248]
    bounds = [(-2, 5)] * 3
    # we're starting outside the bounds, so we should get a warning
    with assert_warns(optimize.OptimizeWarning):
        res_bounded_powell = optimize.minimize(func, x0,
                                               bounds=bounds,
                                               method="Powell")
    assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)
def test_onesided_bounded_powell_stability():
    # When the Powell method is bounded on only one side, a
    # np.tan transform is done in order to convert it into a
    # completely bounded problem (see the sketch after this test).
    # Here we do some simple tests of one-sided bounded Powell
    # where the optimal solutions are large, to test the stability
    # of the transformation.
    kwargs = {'method': 'Powell',
              'bounds': [(-np.inf, 1e6)] * 3,
              'options': {'ftol': 1e-8, 'xtol': 1e-8}}
    x0 = [1, 1, 1]

    # df/dx is constant.
    f = lambda x: -np.sum(x)
    res = optimize.minimize(f, x0, **kwargs)
    assert_allclose(res.fun, -3e6, atol=1e-4)

    # df/dx gets smaller and smaller.
    def f(x):
        return -np.abs(np.sum(x)) ** (0.1) * (1 if np.all(x > 0) else -1)

    res = optimize.minimize(f, x0, **kwargs)
    assert_allclose(res.fun, -(3e6) ** (0.1))

    # df/dx gets larger and larger.
    def f(x):
        return -np.abs(np.sum(x)) ** 10 * (1 if np.all(x > 0) else -1)

    res = optimize.minimize(f, x0, **kwargs)
    assert_allclose(res.fun, -(3e6) ** 10, rtol=1e-7)

    # df/dx gets larger for some of the variables and smaller for others.
    def f(x):
        t = -np.abs(np.sum(x[:2])) ** 5 - np.abs(np.sum(x[2:])) ** (0.1)
        t *= (1 if np.all(x > 0) else -1)
        return t

    kwargs['bounds'] = [(-np.inf, 1e3)] * 3
    res = optimize.minimize(f, x0, **kwargs)
    assert_allclose(res.fun, -(2e3) ** 5 - (1e6) ** (0.1), rtol=1e-7)
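
# Illustrative sketch (ours; the transform actually used internally may
# differ in detail): the comment in the test above refers to trading a
# one-sided interval such as (-inf, ub] for a bounded parameter range via
# np.tan. One such mapping:
def _demo_one_sided_tan_transform(t, ub=1e6):
    # t in [0, pi/2) maps onto x in (-inf, ub]; near large optimal x the
    # mapping is extremely steep, which is what the stability tests probe.
    return ub - np.tan(t)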
class TestOptimizeWrapperDisp(CheckOptimizeParameterized):
    use_wrapper = True
    disp = True


class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized):
    use_wrapper = True
    disp = False


class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized):
    use_wrapper = False
    disp = True


class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized):
    use_wrapper = False
    disp = False
class TestOptimizeSimple(CheckOptimize):

    def test_bfgs_nan(self):
        # Test corner case where nan is fed to optimizer. See gh-2067.
        func = lambda x: x
        fprime = lambda x: np.ones_like(x)
        x0 = [np.nan]
        with np.errstate(over='ignore', invalid='ignore'):
            x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
            assert np.isnan(func(x))

    def test_bfgs_nan_return(self):
        # Test corner cases where fun returns NaN. See gh-4793.

        # First case: NaN from first call.
        func = lambda x: np.nan
        with np.errstate(invalid='ignore'):
            result = optimize.minimize(func, 0)

        assert np.isnan(result['fun'])
        assert result['success'] is False

        # Second case: NaN from second call.
        func = lambda x: 0 if x == 0 else np.nan
        fprime = lambda x: np.ones_like(x)  # Steer away from zero.
        with np.errstate(invalid='ignore'):
            result = optimize.minimize(func, 0, jac=fprime)

        assert np.isnan(result['fun'])
        assert result['success'] is False

    def test_bfgs_numerical_jacobian(self):
        # BFGS with numerical Jacobian and a vector epsilon parameter.
        # define the epsilon parameter using a random vector
        epsilon = np.sqrt(np.spacing(1.)) * np.random.rand(len(self.solution))

        params = optimize.fmin_bfgs(self.func, self.startparams,
                                    epsilon=epsilon, args=(),
                                    maxiter=self.maxiter, disp=False)

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

    def test_finite_differences_jac(self):
        methods = ['BFGS', 'CG', 'TNC']
        jacs = ['2-point', '3-point', None]
        for method, jac in itertools.product(methods, jacs):
            result = optimize.minimize(self.func, self.startparams,
                                       method=method, jac=jac)
            assert_allclose(self.func(result.x), self.func(self.solution),
                            atol=1e-6)

    def test_finite_differences_hess(self):
        # test that all the methods that require hess can use
        # finite differences.
        # For Newton-CG, trust-ncg and trust-krylov the FD-estimated hessian
        # is wrapped in a hessp function.
        # dogleg and trust-exact actually require true hessians at the
        # moment, so they're excluded.
        methods = ['trust-constr', 'Newton-CG', 'trust-ncg', 'trust-krylov']
        hesses = FD_METHODS + (optimize.BFGS,)
        for method, hess in itertools.product(methods, hesses):
            if hess is optimize.BFGS:
                hess = hess()
            result = optimize.minimize(self.func, self.startparams,
                                       method=method, jac=self.grad,
                                       hess=hess)
            assert result.success

        # check that the methods demand some sort of Hessian specification
        # Newton-CG creates its own hessp, and trust-constr doesn't need a
        # hess specified either
        methods = ['trust-ncg', 'trust-krylov', 'dogleg', 'trust-exact']
        for method in methods:
            with pytest.raises(ValueError):
                optimize.minimize(self.func, self.startparams,
                                  method=method, jac=self.grad,
                                  hess=None)
    def test_bfgs_gh_2169(self):
        def f(x):
            if x < 0:
                return 1.79769313e+308
            else:
                return x + 1./x
        xs = optimize.fmin_bfgs(f, [10.], disp=False)
        assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4)

    def test_bfgs_double_evaluations(self):
        # check BFGS does not evaluate twice in a row at the same point
        def f(x):
            xp = x[0]
            assert xp not in seen
            seen.add(xp)
            return 10*x**2, 20*x

        seen = set()
        optimize.minimize(f, -100, method='bfgs', jac=True, tol=1e-7)

    def test_l_bfgs_b(self):
        # limited-memory bound-constrained BFGS algorithm
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        self.grad, args=(),
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # SciPy 0.7.0. Don't allow them to increase.
        assert self.funccalls == 7, self.funccalls
        assert self.gradcalls == 5, self.gradcalls

        # Ensure that the function behaves the same; this is from SciPy 0.7.0
        # test fixed in gh10673
        assert_allclose(self.trace[3:5],
                        [[8.117083e-16, -5.196198e-01, 4.897617e-01],
                         [0., -0.52489628, 0.48753042]],
                        atol=1e-14, rtol=1e-7)

    def test_l_bfgs_b_numjac(self):
        # L-BFGS-B with numerical Jacobian
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        approx_grad=True,
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

    def test_l_bfgs_b_funjac(self):
        # L-BFGS-B with combined objective function and Jacobian
        def fun(x):
            return self.func(x), self.grad(x)

        retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

    def test_l_bfgs_b_maxiter(self):
        # gh7854
        # Ensure that no more than maxiter iterations are ever run.
        class Callback:
            def __init__(self):
                self.nit = 0
                self.fun = None
                self.x = None

            def __call__(self, x):
                self.x = x
                self.fun = optimize.rosen(x)
                self.nit += 1

        c = Callback()
        res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b',
                                callback=c, options={'maxiter': 5})

        assert_equal(res.nit, 5)
        assert_almost_equal(res.x, c.x)
        assert_almost_equal(res.fun, c.fun)
        assert_equal(res.status, 1)
        assert res.success is False
        assert_equal(res.message,
                     'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT')
    def test_minimize_l_bfgs_b(self):
        # Minimize with L-BFGS-B method
        opts = {'disp': False, 'maxiter': self.maxiter}
        r = optimize.minimize(self.func, self.startparams,
                              method='L-BFGS-B', jac=self.grad,
                              options=opts)
        assert_allclose(self.func(r.x), self.func(self.solution),
                        atol=1e-6)
        assert self.gradcalls == r.njev

        self.funccalls = self.gradcalls = 0
        # approximate jacobian
        ra = optimize.minimize(self.func, self.startparams,
                               method='L-BFGS-B', options=opts)
        # check that function evaluations in approximate jacobian are counted
        # assert_(ra.nfev > r.nfev)
        assert self.funccalls == ra.nfev
        assert_allclose(self.func(ra.x), self.func(self.solution),
                        atol=1e-6)

        self.funccalls = self.gradcalls = 0
        # approximate jacobian
        ra = optimize.minimize(self.func, self.startparams, jac='3-point',
                               method='L-BFGS-B', options=opts)
        assert self.funccalls == ra.nfev
        assert_allclose(self.func(ra.x), self.func(self.solution),
                        atol=1e-6)

    def test_minimize_l_bfgs_b_ftol(self):
        # Check that the `ftol` parameter in l_bfgs_b works as expected
        v0 = None
        for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
            opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol}
            sol = optimize.minimize(self.func, self.startparams,
                                    method='L-BFGS-B', jac=self.grad,
                                    options=opts)
            v = self.func(sol.x)

            if v0 is None:
                v0 = v
            else:
                assert v < v0

            assert_allclose(v, self.func(self.solution), rtol=tol)

    def test_minimize_l_bfgs_maxls(self):
        # check that the maxls is passed down to the Fortran routine
        sol = optimize.minimize(optimize.rosen, np.array([-1.2, 1.0]),
                                method='L-BFGS-B', jac=optimize.rosen_der,
                                options={'disp': False, 'maxls': 1})
        assert not sol.success

    def test_minimize_l_bfgs_b_maxfun_interruption(self):
        # gh-6162
        f = optimize.rosen
        g = optimize.rosen_der
        values = []
        x0 = np.full(7, 1000)

        def objfun(x):
            value = f(x)
            values.append(value)
            return value

        # Look for an interesting test case.
        # Request a maxfun that stops at a particularly bad function
        # evaluation somewhere between 100 and 300 evaluations.
        low, medium, high = 30, 100, 300
        optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
        v, k = max((y, i) for i, y in enumerate(values[medium:]))
        maxfun = medium + k
        # If the minimization strategy is reasonable,
        # the minimize() result should not be worse than the best
        # of the first 30 function evaluations.
        target = min(values[:low])
        xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
        assert_array_less(fmin, target)
    def test_custom(self):
        # This function comes from the documentation example.
        def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
                    maxiter=100, callback=None, **options):
            bestx = x0
            besty = fun(x0)
            funcalls = 1
            niter = 0
            improved = True
            stop = False

            while improved and not stop and niter < maxiter:
                improved = False
                niter += 1
                for dim in range(np.size(x0)):
                    for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
                        testx = np.copy(bestx)
                        testx[dim] = s
                        testy = fun(testx, *args)
                        funcalls += 1
                        if testy < besty:
                            besty = testy
                            bestx = testx
                            improved = True
                    if callback is not None:
                        callback(bestx)
                    if maxfev is not None and funcalls >= maxfev:
                        stop = True
                        break

            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
                                           nfev=funcalls,
                                           success=(niter > 1))

        x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
        res = optimize.minimize(optimize.rosen, x0, method=custmin,
                                options=dict(stepsize=0.05))
        assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)

    @pytest.mark.xfail(reason="output not reliable on all platforms")
    def test_gh13321(self, capfd):
        # gh-13321 reported issues with console output in fmin_l_bfgs_b;
        # check that iprint=0 works.
        kwargs = {'func': optimize.rosen, 'x0': [4, 3],
                  'fprime': optimize.rosen_der, 'bounds': ((3, 5), (3, 5))}

        # "L-BFGS-B" is always in the output; it should show when iprint >= 0
        # "At iterate" is per-iterate info; it should show when iprint >= 1
        optimize.fmin_l_bfgs_b(**kwargs, iprint=-1)
        out, _ = capfd.readouterr()
        assert "L-BFGS-B" not in out and "At iterate" not in out

        optimize.fmin_l_bfgs_b(**kwargs, iprint=0)
        out, _ = capfd.readouterr()
        assert "L-BFGS-B" in out and "At iterate" not in out

        optimize.fmin_l_bfgs_b(**kwargs, iprint=1)
        out, _ = capfd.readouterr()
        assert "L-BFGS-B" in out and "At iterate" in out

        # `disp is not None` overrides the `iprint` behavior:
        # `disp=0` should suppress all output;
        # `disp=1` should be the same as `iprint=1`
        optimize.fmin_l_bfgs_b(**kwargs, iprint=1, disp=False)
        out, _ = capfd.readouterr()
        assert "L-BFGS-B" not in out and "At iterate" not in out

        optimize.fmin_l_bfgs_b(**kwargs, iprint=-1, disp=True)
        out, _ = capfd.readouterr()
        assert "L-BFGS-B" in out and "At iterate" in out

    def test_gh10771(self):
        # check that minimize passes bounds and constraints to a custom
        # minimizer without altering them.
        bounds = [(-2, 2), (0, 3)]
        constraints = 'constraints'

        def custmin(fun, x0, **options):
            assert options['bounds'] is bounds
            assert options['constraints'] is constraints
            return optimize.OptimizeResult()

        x0 = [1, 1]
        optimize.minimize(optimize.rosen, x0, method=custmin,
                          bounds=bounds, constraints=constraints)
    def test_minimize_tol_parameter(self):
        # Check that the minimize() tol= argument does something
        def func(z):
            x, y = z
            return x**2*y**2 + x**4 + 1

        def dfunc(z):
            x, y = z
            return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])

        for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
                       'newton-cg', 'l-bfgs-b', 'tnc',
                       'cobyla', 'slsqp']:
            if method in ('nelder-mead', 'powell', 'cobyla'):
                jac = None
            else:
                jac = dfunc

            sol1 = optimize.minimize(func, [1, 1], jac=jac, tol=1e-10,
                                     method=method)
            sol2 = optimize.minimize(func, [1, 1], jac=jac, tol=1.0,
                                     method=method)
            assert func(sol1.x) < func(sol2.x), \
                "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x))

    @pytest.mark.parametrize('method',
                             ['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs',
                              'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc',
                              'fmin_slsqp'] + MINIMIZE_METHODS)
    def test_minimize_callback_copies_array(self, method):
        # Check that arrays passed to callbacks are not modified
        # inplace by the optimizer afterward
        if method in ('fmin_tnc', 'fmin_l_bfgs_b'):
            func = lambda x: (optimize.rosen(x), optimize.rosen_der(x))
        else:
            func = optimize.rosen
            jac = optimize.rosen_der
            hess = optimize.rosen_hess

        x0 = np.zeros(10)

        # Set options
        kwargs = {}
        if method.startswith('fmin'):
            routine = getattr(optimize, method)
            if method == 'fmin_slsqp':
                kwargs['iter'] = 5
            elif method == 'fmin_tnc':
                kwargs['maxfun'] = 100
            elif method in ('fmin', 'fmin_powell'):
                kwargs['maxiter'] = 3500
            else:
                kwargs['maxiter'] = 5
        else:
            def routine(*a, **kw):
                kw['method'] = method
                return optimize.minimize(*a, **kw)

            if method == 'tnc':
                kwargs['options'] = dict(maxfun=100)
            else:
                kwargs['options'] = dict(maxiter=5)

        if method in ('fmin_ncg',):
            kwargs['fprime'] = jac
        elif method in ('newton-cg',):
            kwargs['jac'] = jac
        elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
                        'trust-constr'):
            kwargs['jac'] = jac
            kwargs['hess'] = hess

        # Run with callback
        results = []

        def callback(x, *args, **kwargs):
            results.append((x, np.copy(x)))

        routine(func, x0, callback=callback, **kwargs)

        # Check returned arrays coincide with their copies
        # and have no memory overlap
        assert len(results) > 2
        assert all(np.all(x == y) for x, y in results)
        assert not any(np.may_share_memory(x[0], y[0])
                       for x, y in itertools.combinations(results, 2))
    @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg',
                                        'bfgs', 'newton-cg', 'l-bfgs-b',
                                        'tnc', 'cobyla', 'slsqp'])
    def test_no_increase(self, method):
        # Check that the solver doesn't return a value worse than the
        # initial point.
        def func(x):
            return (x - 1)**2

        def bad_grad(x):
            # purposefully invalid gradient function, simulates a case
            # where line searches start failing
            return 2*(x - 1) * (-1) - 2

        x0 = np.array([2.0])
        f0 = func(x0)
        jac = bad_grad
        options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20)
        if method in ['nelder-mead', 'powell', 'cobyla']:
            jac = None
        sol = optimize.minimize(func, x0, jac=jac, method=method,
                                options=options)
        assert_equal(func(sol.x), sol.fun)

        if method == 'slsqp':
            pytest.xfail("SLSQP returns slightly worse")
        assert func(sol.x) <= f0

    def test_slsqp_respect_bounds(self):
        # Regression test for gh-3108
        def f(x):
            return sum((x - np.array([1., 2., 3., 4.]))**2)

        def cons(x):
            a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
            return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])

        x0 = np.array([0.5, 1., 1.5, 2.])
        res = optimize.minimize(f, x0, method='slsqp',
                                constraints={'type': 'ineq', 'fun': cons})
        assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)

    @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', 'CG', 'BFGS',
                                        'Newton-CG', 'L-BFGS-B', 'SLSQP',
                                        'trust-constr', 'dogleg', 'trust-ncg',
                                        'trust-exact', 'trust-krylov'])
    def test_respect_maxiter(self, method):
        # Check that the number of iterations equals maxiter, assuming
        # convergence is not reached first
        MAXITER = 4

        x0 = np.zeros(10)

        sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,
                            optimize.rosen_hess, None, None)

        # Set options
        kwargs = {'method': method, 'options': dict(maxiter=MAXITER)}

        if method in ('Newton-CG',):
            kwargs['jac'] = sf.grad
        elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
                        'trust-constr'):
            kwargs['jac'] = sf.grad
            kwargs['hess'] = sf.hess

        sol = optimize.minimize(sf.fun, x0, **kwargs)
        assert sol.nit == MAXITER
        assert sol.nfev >= sf.nfev
        if hasattr(sol, 'njev'):
            assert sol.njev >= sf.ngev

        # method-specific tests
        if method == 'SLSQP':
            assert sol.status == 9  # Iteration limit reached
    @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell',
                                        'fmin', 'fmin_powell'])
    def test_runtime_warning(self, method):
        x0 = np.zeros(10)
        sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,
                            optimize.rosen_hess, None, None)
        options = {"maxiter": 1, "disp": True}
        with pytest.warns(RuntimeWarning,
                          match=r'Maximum number of iterations'):
            if method.startswith('fmin'):
                routine = getattr(optimize, method)
                routine(sf.fun, x0, **options)
            else:
                optimize.minimize(sf.fun, x0, method=method, options=options)

    def test_respect_maxiter_trust_constr_ineq_constraints(self):
        # special case of minimization with trust-constr and inequality
        # constraints to check that the maxiter limit is obeyed when using
        # the internal method 'tr_interior_point'
        MAXITER = 4
        f = optimize.rosen
        jac = optimize.rosen_der
        hess = optimize.rosen_hess

        fun = lambda x: np.array([0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])
        cons = ({'type': 'ineq',
                 'fun': fun},)

        x0 = np.zeros(10)
        sol = optimize.minimize(f, x0, constraints=cons, jac=jac, hess=hess,
                                method='trust-constr',
                                options=dict(maxiter=MAXITER))
        assert sol.nit == MAXITER

    def test_minimize_automethod(self):
        def f(x):
            return x**2

        def cons(x):
            return x - 2

        x0 = np.array([10.])
        sol_0 = optimize.minimize(f, x0)
        sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq',
                                                       'fun': cons}])
        sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
        sol_3 = optimize.minimize(f, x0,
                                  constraints=[{'type': 'ineq', 'fun': cons}],
                                  bounds=[(5, 10)])
        sol_4 = optimize.minimize(f, x0,
                                  constraints=[{'type': 'ineq', 'fun': cons}],
                                  bounds=[(1, 10)])
        for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
            assert sol.success
        assert_allclose(sol_0.x, 0, atol=1e-7)
        assert_allclose(sol_1.x, 2, atol=1e-7)
        assert_allclose(sol_2.x, 5, atol=1e-7)
        assert_allclose(sol_3.x, 5, atol=1e-7)
        assert_allclose(sol_4.x, 2, atol=1e-7)

    def test_minimize_coerce_args_param(self):
        # Regression test for gh-3503
        def Y(x, c):
            return np.sum((x-c)**2)

        def dY_dx(x, c=None):
            return 2*(x-c)

        c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])
        xinit = np.random.randn(len(c))
        optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS")

    def test_initial_step_scaling(self):
        # Check that optimizer initial step is not huge even if the
        # function and gradients are
        scales = [1e-50, 1, 1e50]
        methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG']

        def f(x):
            if first_step_size[0] is None and x[0] != x0[0]:
                first_step_size[0] = abs(x[0] - x0[0])
            if abs(x).max() > 1e4:
                raise AssertionError("Optimization stepped far away!")
            return scale*(x[0] - 1)**2

        def g(x):
            return np.array([scale*(x[0] - 1)])

        for scale, method in itertools.product(scales, methods):
            if method in ('CG', 'BFGS'):
                options = dict(gtol=scale*1e-8)
            else:
                options = dict()

            if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'):
                # XXX: return initial point if they see small gradient
                continue

            x0 = [-1.0]
            first_step_size = [None]
            res = optimize.minimize(f, x0, jac=g, method=method,
                                    options=options)

            err_msg = "{0} {1}: {2}: {3}".format(method, scale,
                                                 first_step_size,
                                                 res)

            assert res.success, err_msg
            assert_allclose(res.x, [1.0], err_msg=err_msg)
            assert res.nit <= 3, err_msg

            if scale > 1e-10:
                if method in ('CG', 'BFGS'):
                    assert_allclose(first_step_size[0], 1.01,
                                    err_msg=err_msg)
                else:
                    # Newton-CG and L-BFGS-B use different logic for the first
                    # step, but are both scaling invariant with step sizes ~ 1
                    assert (first_step_size[0] > 0.5 and
                            first_step_size[0] < 3), err_msg
            else:
                # step size has upper bound of ||grad||, so line
                # search makes many small steps
                pass
    @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs',
                                        'newton-cg', 'l-bfgs-b', 'tnc',
                                        'cobyla', 'slsqp', 'trust-constr',
                                        'dogleg', 'trust-ncg', 'trust-exact',
                                        'trust-krylov'])
    def test_nan_values(self, method):
        # Check that nan values result in a failed exit status
        np.random.seed(1234)

        count = [0]

        def func(x):
            return np.nan

        def func2(x):
            count[0] += 1
            if count[0] > 2:
                return np.nan
            else:
                return np.random.rand()

        def grad(x):
            return np.array([1.0])

        def hess(x):
            return np.array([[1.0]])

        x0 = np.array([1.0])

        needs_grad = method in ('newton-cg', 'trust-krylov', 'trust-exact',
                                'trust-ncg', 'dogleg')
        needs_hess = method in ('trust-krylov', 'trust-exact', 'trust-ncg',
                                'dogleg')

        funcs = [func, func2]
        grads = [grad] if needs_grad else [grad, None]
        hesss = [hess] if needs_hess else [hess, None]
        options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20)

        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.*")
            sup.filter(RuntimeWarning, ".*does not use Hessian.*")
            sup.filter(RuntimeWarning, ".*does not use gradient.*")

            for f, g, h in itertools.product(funcs, grads, hesss):
                count = [0]
                sol = optimize.minimize(f, x0, jac=g, hess=h, method=method,
                                        options=options)
                assert_equal(sol.success, False)
    @pytest.mark.parametrize('method', ['nelder-mead', 'cg', 'bfgs',
                                        'l-bfgs-b', 'tnc',
                                        'cobyla', 'slsqp', 'trust-constr',
                                        'dogleg', 'trust-ncg', 'trust-exact',
                                        'trust-krylov'])
    def test_duplicate_evaluations(self, method):
        # check that there are no duplicate evaluations for any method
        jac = hess = None
        if method in ('newton-cg', 'trust-krylov', 'trust-exact',
                      'trust-ncg', 'dogleg'):
            jac = self.grad
        if method in ('trust-krylov', 'trust-exact', 'trust-ncg',
                      'dogleg'):
            hess = self.hess

        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
            # for trust-constr
            sup.filter(UserWarning, "delta_grad == 0.*")
            optimize.minimize(self.func, self.startparams,
                              method=method, jac=jac, hess=hess)

        for i in range(1, len(self.trace)):
            if np.array_equal(self.trace[i - 1], self.trace[i]):
                raise RuntimeError(
                    "Duplicate evaluations made by {}".format(method))
@pytest.mark.parametrize(
    'method',
    ['l-bfgs-b', 'tnc', 'Powell', 'Nelder-Mead']
)
def test_minimize_with_scalar(method):
    # checks that minimize works with a scalar being provided to it.
    def f(x):
        return np.sum(x ** 2)

    res = optimize.minimize(f, 17, bounds=[(-100, 100)], method=method)
    assert res.success
    assert_allclose(res.x, [0.0], atol=1e-5)
class TestLBFGSBBounds:
    def setup_method(self):
        self.bounds = ((1, None), (None, None))
        self.solution = (1, 0)

    def fun(self, x, p=2.0):
        return 1.0 / p * (x[0]**p + x[1]**p)

    def jac(self, x, p=2.0):
        return x**(p - 1)

    def fj(self, x, p=2.0):
        return self.fun(x, p), self.jac(x, p)

    def test_l_bfgs_b_bounds(self):
        x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1],
                                         fprime=self.jac,
                                         bounds=self.bounds)
        assert d['warnflag'] == 0, d['task']
        assert_allclose(x, self.solution, atol=1e-6)

    def test_l_bfgs_b_funjac(self):
        # L-BFGS-B with fun and jac combined and extra arguments
        x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),
                                         bounds=self.bounds)
        assert d['warnflag'] == 0, d['task']
        assert_allclose(x, self.solution, atol=1e-6)

    def test_minimize_l_bfgs_b_bounds(self):
        # Minimize with method='L-BFGS-B' with bounds
        res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
                                jac=self.jac, bounds=self.bounds)
        assert res['success'], res['message']
        assert_allclose(res.x, self.solution, atol=1e-6)

    @pytest.mark.parametrize('bounds', [
        ([(10, 1), (1, 10)]),
        ([(1, 10), (10, 1)]),
        ([(10, 1), (10, 1)])
    ])
    def test_minimize_l_bfgs_b_incorrect_bounds(self, bounds):
        with pytest.raises(ValueError, match='.*bounds.*'):
            optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
                              jac=self.jac, bounds=bounds)

    def test_minimize_l_bfgs_b_bounds_FD(self):
        # test that an initial value outside the bounds doesn't raise an
        # error (the value is clipped into the feasible region).
        # test all the finite-difference combos, with and without args
        jacs = ['2-point', '3-point', None]
        argss = [(2.,), ()]
        for jac, args in itertools.product(jacs, argss):
            res = optimize.minimize(self.fun, [0, -1], args=args,
                                    method='L-BFGS-B',
                                    jac=jac, bounds=self.bounds,
                                    options={'finite_diff_rel_step': None})
            assert res['success'], res['message']
            assert_allclose(res.x, self.solution, atol=1e-6)
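

# Illustrative aside: the legacy fmin_l_bfgs_b interface exercised above and
# minimize(..., method='L-BFGS-B') drive the same underlying routine. A
# minimal sketch of the correspondence, assuming the module-level imports;
# the helper name is hypothetical and not part of the suite:
def _lbfgsb_interfaces_sketch():
    fun = lambda x: (x**2).sum()
    jac = lambda x: 2 * x
    x_legacy, f_legacy, info = optimize.fmin_l_bfgs_b(fun, [2.0, -3.0],
                                                      fprime=jac)
    res = optimize.minimize(fun, [2.0, -3.0], jac=jac, method='L-BFGS-B')
    # both should land on the unconstrained minimum at the origin
    assert np.allclose(x_legacy, res.x, atol=1e-6)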
class TestOptimizeScalar:
    def setup_method(self):
        self.solution = 1.5

    def fun(self, x, a=1.5):
        """Objective function"""
        return (x - a)**2 - 0.8

    def test_brent(self):
        x = optimize.brent(self.fun)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.brent(self.fun, brack=(-3, -2))
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.brent(self.fun, full_output=True)
        assert_allclose(x[0], self.solution, atol=1e-6)

        x = optimize.brent(self.fun, brack=(-15, -1, 15))
        assert_allclose(x, self.solution, atol=1e-6)

        message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)"
        with pytest.raises(ValueError, match=message):
            optimize.brent(self.fun, brack=(-1, 0, 1))

        message = r"\(xa < xb\) and \(xb < xc\)"
        with pytest.raises(ValueError, match=message):
            optimize.brent(self.fun, brack=(0, -1, 1))

    def test_golden(self):
        x = optimize.golden(self.fun)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, brack=(-3, -2))
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, full_output=True)
        assert_allclose(x[0], self.solution, atol=1e-6)

        x = optimize.golden(self.fun, brack=(-15, -1, 15))
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, tol=0)
        assert_allclose(x, self.solution)

        maxiter_test_cases = [0, 1, 5]
        for maxiter in maxiter_test_cases:
            x0 = optimize.golden(self.fun, maxiter=0, full_output=True)
            x = optimize.golden(self.fun, maxiter=maxiter, full_output=True)
            nfev0, nfev = x0[2], x[2]
            assert_equal(nfev - nfev0, maxiter)

        message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)"
        with pytest.raises(ValueError, match=message):
            optimize.golden(self.fun, brack=(-1, 0, 1))

        message = r"\(xa < xb\) and \(xb < xc\)"
        with pytest.raises(ValueError, match=message):
            optimize.golden(self.fun, brack=(0, -1, 1))

    def test_fminbound(self):
        x = optimize.fminbound(self.fun, 0, 1)
        assert_allclose(x, 1, atol=1e-4)

        x = optimize.fminbound(self.fun, 1, 5)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
        assert_allclose(x, self.solution, atol=1e-6)

        assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)

    def test_fminbound_scalar(self):
        with pytest.raises(ValueError, match='.*must be finite scalars.*'):
            optimize.fminbound(self.fun, np.zeros((1, 2)), 1)

        x = optimize.fminbound(self.fun, 1, np.array(5))
        assert_allclose(x, self.solution, atol=1e-6)

    def test_gh11207(self):
        def fun(x):
            return x**2

        optimize.fminbound(fun, 0, 0)
    def test_minimize_scalar(self):
        # combine all tests above for the minimize_scalar wrapper
        x = optimize.minimize_scalar(self.fun).x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, method='Brent')
        assert x.success

        x = optimize.minimize_scalar(self.fun, method='Brent',
                                     options=dict(maxiter=3))
        assert not x.success

        x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
                                     args=(1.5, ), method='Brent').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, method='Brent',
                                     args=(1.5,)).x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
                                     args=(1.5, ), method='Brent').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
                                     args=(1.5, ), method='golden').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, method='golden',
                                     args=(1.5,)).x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
                                     args=(1.5, ), method='golden').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
                                     method='Bounded').x
        assert_allclose(x, 1, atol=1e-4)

        x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
                                     method='bounded').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
                                                       np.array([5])),
                                     args=(np.array([1.5]), ),
                                     method='bounded').x
        assert_allclose(x, self.solution, atol=1e-6)

        assert_raises(ValueError, optimize.minimize_scalar, self.fun,
                      bounds=(5, 1), method='bounded', args=(1.5, ))

        assert_raises(ValueError, optimize.minimize_scalar, self.fun,
                      bounds=(np.zeros(2), 1), method='bounded',
                      args=(1.5, ))

        x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
                                     method='bounded').x
        assert_allclose(x, self.solution, atol=1e-6)
    def test_minimize_scalar_custom(self):
        # This function comes from the documentation example.
        def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
                    maxiter=100, callback=None, **options):
            bestx = (bracket[1] + bracket[0]) / 2.0
            besty = fun(bestx)
            funcalls = 1
            niter = 0
            improved = True
            stop = False

            while improved and not stop and niter < maxiter:
                improved = False
                niter += 1
                for testx in [bestx - stepsize, bestx + stepsize]:
                    testy = fun(testx, *args)
                    funcalls += 1
                    if testy < besty:
                        besty = testy
                        bestx = testx
                        improved = True
                if callback is not None:
                    callback(bestx)
                if maxfev is not None and funcalls >= maxfev:
                    stop = True
                    break

            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
                                           nfev=funcalls,
                                           success=(niter > 1))

        res = optimize.minimize_scalar(self.fun, bracket=(0, 4),
                                       method=custmin,
                                       options=dict(stepsize=0.05))
        assert_allclose(res.x, self.solution, atol=1e-6)
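
    # Illustrative aside: per the `minimize` docs, its `method` may also be a
    # custom callable; it receives (fun, x0, args, solver kwargs, **options)
    # and must return an OptimizeResult, mirroring `custmin` above. A sketch
    # with a naive n-dimensional coordinate search (helper names are
    # hypothetical, not part of the suite):
    def _demo_custom_method_nd(self):
        def custmin_nd(fun, x0, args=(), stepsize=0.1, maxiter=100,
                       callback=None, **options):
            bestx = np.asarray(x0, dtype=float)
            besty = fun(bestx, *args)
            funcalls, niter, improved = 1, 0, True
            while improved and niter < maxiter:
                improved = False
                niter += 1
                # try a +/- step along each coordinate in turn
                for i in range(bestx.size):
                    for step in (-stepsize, stepsize):
                        testx = bestx.copy()
                        testx[i] += step
                        testy = fun(testx, *args)
                        funcalls += 1
                        if testy < besty:
                            besty, bestx, improved = testy, testx, True
            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
                                           nfev=funcalls,
                                           success=(niter > 1))

        return optimize.minimize(np.linalg.norm, [1.3, 0.7],
                                 method=custmin_nd,
                                 options=dict(stepsize=0.05))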
    def test_minimize_scalar_coerce_args_param(self):
        # Regression test for gh-3503
        optimize.minimize_scalar(self.fun, args=1.5)
    @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])
    def test_disp(self, method):
        # test that all minimize_scalar methods accept a disp option.
        for disp in [0, 1, 2, 3]:
            optimize.minimize_scalar(self.fun, options={"disp": disp})

    @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])
    def test_result_attributes(self, method):
        kwargs = {"bounds": [-10, 10]} if method == 'bounded' else {}
        result = optimize.minimize_scalar(self.fun, method=method, **kwargs)
        assert hasattr(result, "x")
        assert hasattr(result, "success")
        assert hasattr(result, "message")
        assert hasattr(result, "fun")
        assert hasattr(result, "nfev")
        assert hasattr(result, "nit")
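
    # Illustrative aside: OptimizeResult subclasses dict, so the attribute
    # access asserted above and plain key access are interchangeable. A
    # minimal sketch (helper name is hypothetical, not part of the suite):
    def _demo_result_access(self):
        res = optimize.minimize_scalar(self.fun)
        assert res.x == res['x']            # attribute and key views agree
        assert set(res) >= {'x', 'fun', 'nfev', 'nit'}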
    @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])
    def test_nan_values(self, method):
        # Check that nan values result in a failed exit status
        np.random.seed(1234)

        count = [0]

        def func(x):
            count[0] += 1
            if count[0] > 4:
                return np.nan
            else:
                return x**2 + 0.1 * np.sin(x)

        bracket = (-1, 0, 1)
        bounds = (-1, 1)

        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.*")
            sup.filter(RuntimeWarning, ".*does not use Hessian.*")
            sup.filter(RuntimeWarning, ".*does not use gradient.*")

            count = [0]

            kwargs = {"bounds": bounds} if method == 'bounded' else {}
            sol = optimize.minimize_scalar(func, bracket=bracket,
                                           **kwargs, method=method,
                                           options=dict(maxiter=20))
            assert_equal(sol.success, False)
    def test_minimize_scalar_defaults_gh10911(self):
        # Previously, bounds were silently ignored unless `method='bounded'`
        # was chosen. See gh-10911. Check that this is no longer the case.
        def f(x):
            return x**2

        res = optimize.minimize_scalar(f)
        assert_allclose(res.x, 0, atol=1e-8)

        res = optimize.minimize_scalar(f, bounds=(1, 100),
                                       options={'xatol': 1e-10})
        assert_allclose(res.x, 1)

    def test_minimize_non_finite_bounds_gh10911(self):
        # Previously, minimize_scalar misbehaved with infinite bounds.
        # See gh-10911. Check that it now raises an error, instead.
        msg = "Optimization bounds must be finite scalars."
        with pytest.raises(ValueError, match=msg):
            optimize.minimize_scalar(np.sin, bounds=(1, np.inf))
        with pytest.raises(ValueError, match=msg):
            optimize.minimize_scalar(np.sin, bounds=(np.nan, 1))

    @pytest.mark.parametrize("method", ['brent', 'golden'])
    def test_minimize_unbounded_method_with_bounds_gh10911(self, method):
        # Previously, `bounds` were silently ignored when `method='brent'` or
        # `method='golden'`. See gh-10911. Check that an error is now raised.
        msg = "Use of `bounds` is incompatible with..."
        with pytest.raises(ValueError, match=msg):
            optimize.minimize_scalar(np.sin, method=method, bounds=(1, 2))
def test_brent_negative_tolerance():
    assert_raises(ValueError, optimize.brent, np.cos, tol=-.01)
class TestNewtonCg:
    def test_rosenbrock(self):
        x0 = np.array([-1.2, 1.0])
        sol = optimize.minimize(optimize.rosen, x0,
                                jac=optimize.rosen_der,
                                hess=optimize.rosen_hess,
                                tol=1e-5,
                                method='Newton-CG')
        assert sol.success, sol.message
        assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)

    def test_himmelblau(self):
        x0 = np.array(himmelblau_x0)
        sol = optimize.minimize(himmelblau,
                                x0,
                                jac=himmelblau_grad,
                                hess=himmelblau_hess,
                                method='Newton-CG',
                                tol=1e-6)
        assert sol.success, sol.message
        assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4)
        assert_allclose(sol.fun, himmelblau_min, atol=1e-4)

    def test_finite_difference(self):
        x0 = np.array([-1.2, 1.0])
        sol = optimize.minimize(optimize.rosen, x0,
                                jac=optimize.rosen_der,
                                hess='2-point',
                                tol=1e-5,
                                method='Newton-CG')
        assert sol.success, sol.message
        assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)

    def test_hessian_update_strategy(self):
        x0 = np.array([-1.2, 1.0])
        sol = optimize.minimize(optimize.rosen, x0,
                                jac=optimize.rosen_der,
                                hess=optimize.BFGS(),
                                tol=1e-5,
                                method='Newton-CG')
        assert sol.success, sol.message
        assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)
def test_line_for_search():
    # _line_for_search is only used in _linesearch_powell, which is also
    # tested below. Thus there are more tests of _line_for_search in the
    # test_linesearch_powell_bounded function.

    line_for_search = optimize._optimize._line_for_search
    # args are x0, alpha, lower_bound, upper_bound
    # returns lmin, lmax

    lower_bound = np.array([-5.3, -1, -1.5, -3])
    upper_bound = np.array([1.9, 1, 2.8, 3])

    # test when starting in the bounds
    x0 = np.array([0., 0, 0, 0])
    # and when starting outside of the bounds
    x1 = np.array([0., 2, -3, 0])

    all_tests = (
        (x0, np.array([1., 0, 0, 0]), -5.3, 1.9),
        (x0, np.array([0., 1, 0, 0]), -1, 1),
        (x0, np.array([0., 0, 1, 0]), -1.5, 2.8),
        (x0, np.array([0., 0, 0, 1]), -3, 3),
        (x0, np.array([1., 1, 0, 0]), -1, 1),
        (x0, np.array([1., 0, -1, 2]), -1.5, 1.5),
        (x0, np.array([2., 0, -1, 2]), -1.5, 0.95),
        (x1, np.array([1., 0, 0, 0]), -5.3, 1.9),
        (x1, np.array([0., 1, 0, 0]), -3, -1),
        (x1, np.array([0., 0, 1, 0]), 1.5, 5.8),
        (x1, np.array([0., 0, 0, 1]), -3, 3),
        (x1, np.array([1., 1, 0, 0]), -3, -1),
        (x1, np.array([1., 0, -1, 0]), -5.3, -1.5),
    )

    for x, alpha, lmin, lmax in all_tests:
        mi, ma = line_for_search(x, alpha, lower_bound, upper_bound)
        assert_allclose(mi, lmin, atol=1e-6)
        assert_allclose(ma, lmax, atol=1e-6)

    # now with infinite bounds
    lower_bound = np.array([-np.inf, -1, -np.inf, -3])
    upper_bound = np.array([np.inf, 1, 2.8, np.inf])

    all_tests = (
        (x0, np.array([1., 0, 0, 0]), -np.inf, np.inf),
        (x0, np.array([0., 1, 0, 0]), -1, 1),
        (x0, np.array([0., 0, 1, 0]), -np.inf, 2.8),
        (x0, np.array([0., 0, 0, 1]), -3, np.inf),
        (x0, np.array([1., 1, 0, 0]), -1, 1),
        (x0, np.array([1., 0, -1, 2]), -1.5, np.inf),
        (x1, np.array([1., 0, 0, 0]), -np.inf, np.inf),
        (x1, np.array([0., 1, 0, 0]), -3, -1),
        (x1, np.array([0., 0, 1, 0]), -np.inf, 5.8),
        (x1, np.array([0., 0, 0, 1]), -3, np.inf),
        (x1, np.array([1., 1, 0, 0]), -3, -1),
        (x1, np.array([1., 0, -1, 0]), -5.8, np.inf),
    )

    for x, alpha, lmin, lmax in all_tests:
        mi, ma = line_for_search(x, alpha, lower_bound, upper_bound)
        assert_allclose(mi, lmin, atol=1e-6)
        assert_allclose(ma, lmax, atol=1e-6)
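

# Illustrative aside: the quantity `_line_for_search` is exercised on above
# can be described as the interval of step sizes t for which
# lower <= x + t*alpha <= upper holds componentwise. A minimal independent
# sketch of that computation (not the private helper itself), assuming
# at least one nonzero component of alpha:
def _bracket_interval_sketch(x, alpha, lower, upper):
    with np.errstate(divide='ignore', invalid='ignore'):
        low = (lower - x) / alpha
        high = (upper - x) / alpha
    # where alpha < 0 the two bounds swap roles; alpha == 0 components
    # impose no constraint on t and are dropped
    nz = alpha != 0
    lmin = np.where(alpha > 0, low, high)[nz]
    lmax = np.where(alpha > 0, high, low)[nz]
    return lmin.max(), lmax.min()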
def test_linesearch_powell():
    # helper function in optimize.py, not a public function.
    linesearch_powell = optimize._optimize._linesearch_powell
    # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3
    # returns new_fval, p + direction, direction
    func = lambda x: np.sum((x - np.array([-1., 2., 1.5, -.4]))**2)
    p0 = np.array([0., 0, 0, 0])
    fval = func(p0)
    lower_bound = np.array([-np.inf] * 4)
    upper_bound = np.array([np.inf] * 4)

    all_tests = (
        (np.array([1., 0, 0, 0]), -1),
        (np.array([0., 1, 0, 0]), 2),
        (np.array([0., 0, 1, 0]), 1.5),
        (np.array([0., 0, 0, 1]), -.4),
        (np.array([-1., 0, 1, 0]), 1.25),
        (np.array([0., 0, 1, 1]), .55),
        (np.array([2., 0, -1, 1]), -.65),
    )

    for xi, l in all_tests:
        f, p, direction = linesearch_powell(func, p0, xi,
                                            fval=fval, tol=1e-5)
        assert_allclose(f, func(l * xi), atol=1e-6)
        assert_allclose(p, l * xi, atol=1e-6)
        assert_allclose(direction, l * xi, atol=1e-6)

        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
                                            lower_bound=lower_bound,
                                            upper_bound=upper_bound,
                                            fval=fval)
        assert_allclose(f, func(l * xi), atol=1e-6)
        assert_allclose(p, l * xi, atol=1e-6)
        assert_allclose(direction, l * xi, atol=1e-6)
def test_linesearch_powell_bounded():
    # helper function in optimize.py, not a public function.
    linesearch_powell = optimize._optimize._linesearch_powell
    # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3
    # returns new_fval, p + direction, direction
    func = lambda x: np.sum((x - np.array([-1., 2., 1.5, -.4]))**2)
    p0 = np.array([0., 0, 0, 0])
    fval = func(p0)

    # first choose bounds such that the same tests from
    # test_linesearch_powell should pass.
    lower_bound = np.array([-2.]*4)
    upper_bound = np.array([2.]*4)

    all_tests = (
        (np.array([1., 0, 0, 0]), -1),
        (np.array([0., 1, 0, 0]), 2),
        (np.array([0., 0, 1, 0]), 1.5),
        (np.array([0., 0, 0, 1]), -.4),
        (np.array([-1., 0, 1, 0]), 1.25),
        (np.array([0., 0, 1, 1]), .55),
        (np.array([2., 0, -1, 1]), -.65),
    )

    for xi, l in all_tests:
        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
                                            lower_bound=lower_bound,
                                            upper_bound=upper_bound,
                                            fval=fval)
        assert_allclose(f, func(l * xi), atol=1e-6)
        assert_allclose(p, l * xi, atol=1e-6)
        assert_allclose(direction, l * xi, atol=1e-6)

    # now choose bounds such that unbounded vs bounded gives different results
    lower_bound = np.array([-.3]*3 + [-1])
    upper_bound = np.array([.45]*3 + [.9])

    all_tests = (
        (np.array([1., 0, 0, 0]), -.3),
        (np.array([0., 1, 0, 0]), .45),
        (np.array([0., 0, 1, 0]), .45),
        (np.array([0., 0, 0, 1]), -.4),
        (np.array([-1., 0, 1, 0]), .3),
        (np.array([0., 0, 1, 1]), .45),
        (np.array([2., 0, -1, 1]), -.15),
    )

    for xi, l in all_tests:
        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
                                            lower_bound=lower_bound,
                                            upper_bound=upper_bound,
                                            fval=fval)
        assert_allclose(f, func(l * xi), atol=1e-6)
        assert_allclose(p, l * xi, atol=1e-6)
        assert_allclose(direction, l * xi, atol=1e-6)

    # now choose as above but start outside the bounds
    p0 = np.array([-1., 0, 0, 2])
    fval = func(p0)

    all_tests = (
        (np.array([1., 0, 0, 0]), .7),
        (np.array([0., 1, 0, 0]), .45),
        (np.array([0., 0, 1, 0]), .45),
        (np.array([0., 0, 0, 1]), -2.4),
    )

    for xi, l in all_tests:
        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
                                            lower_bound=lower_bound,
                                            upper_bound=upper_bound,
                                            fval=fval)
        assert_allclose(f, func(p0 + l * xi), atol=1e-6)
        assert_allclose(p, p0 + l * xi, atol=1e-6)
        assert_allclose(direction, l * xi, atol=1e-6)

    # now mix in inf
    p0 = np.array([0., 0, 0, 0])
    fval = func(p0)

    # now choose bounds that mix inf
    lower_bound = np.array([-.3, -np.inf, -np.inf, -1])
    upper_bound = np.array([np.inf, .45, np.inf, .9])

    all_tests = (
        (np.array([1., 0, 0, 0]), -.3),
        (np.array([0., 1, 0, 0]), .45),
        (np.array([0., 0, 1, 0]), 1.5),
        (np.array([0., 0, 0, 1]), -.4),
        (np.array([-1., 0, 1, 0]), .3),
        (np.array([0., 0, 1, 1]), .55),
        (np.array([2., 0, -1, 1]), -.15),
    )

    for xi, l in all_tests:
        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
                                            lower_bound=lower_bound,
                                            upper_bound=upper_bound,
                                            fval=fval)
        assert_allclose(f, func(l * xi), atol=1e-6)
        assert_allclose(p, l * xi, atol=1e-6)
        assert_allclose(direction, l * xi, atol=1e-6)

    # now choose as above but start outside the bounds
    p0 = np.array([-1., 0, 0, 2])
    fval = func(p0)

    all_tests = (
        (np.array([1., 0, 0, 0]), .7),
        (np.array([0., 1, 0, 0]), .45),
        (np.array([0., 0, 1, 0]), 1.5),
        (np.array([0., 0, 0, 1]), -2.4),
    )

    for xi, l in all_tests:
        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
                                            lower_bound=lower_bound,
                                            upper_bound=upper_bound,
                                            fval=fval)
        assert_allclose(f, func(p0 + l * xi), atol=1e-6)
        assert_allclose(p, p0 + l * xi, atol=1e-6)
        assert_allclose(direction, l * xi, atol=1e-6)
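

# Illustrative aside: the expected step lengths `l` tabulated above follow
# from minimizing the quadratic func(x) = ||x - c||**2 along p0 + l*xi and
# clipping the optimum to the feasible interval [lmin, lmax] implied by the
# bounds. A minimal sketch (helper name is hypothetical):
def _expected_powell_step_sketch(c, p0, xi, lmin, lmax):
    l_star = np.dot(c - p0, xi) / np.dot(xi, xi)  # unconstrained 1-d optimum
    return np.clip(l_star, lmin, lmax)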
def test_powell_limits():
    # gh15342 - powell was going outside bounds for some function evaluations.
    bounds = optimize.Bounds([0, 0], [0.6, 20])

    def fun(x):
        a, b = x
        assert (x >= bounds.lb).all() and (x <= bounds.ub).all()
        return a ** 2 + b ** 2

    optimize.minimize(fun, x0=[0.6, 20], method='Powell', bounds=bounds)

    # Another test from the original report - gh-13411
    bounds = optimize.Bounds(lb=[0,], ub=[1,], keep_feasible=[True,])

    def func(x):
        assert x >= 0 and x <= 1
        return np.exp(x)

    optimize.minimize(fun=func, x0=[0.5], method='powell', bounds=bounds)
class TestRosen:

    def test_hess(self):
        # Compare rosen_hess(x) times p with rosen_hess_prod(x, p). See gh-1775.
        x = np.array([3, 4, 5])
        p = np.array([2, 2, 2])
        hp = optimize.rosen_hess_prod(x, p)
        dothp = np.dot(optimize.rosen_hess(x), p)
        assert_equal(hp, dothp)
def himmelblau(p):
    """
    R^2 -> R^1 test function for optimization. The function has four local
    minima where himmelblau(xopt) == 0.
    """
    x, y = p
    a = x*x + y - 11
    b = x + y*y - 7
    return a*a + b*b


def himmelblau_grad(p):
    x, y = p
    return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14,
                     2*x**2 + 4*x*y + 4*y**3 - 26*y - 22])


def himmelblau_hess(p):
    x, y = p
    return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y],
                     [4*x + 4*y, 4*x + 12*y**2 - 26]])


himmelblau_x0 = [-0.27, -0.9]
himmelblau_xopt = [3, 2]
himmelblau_min = 0.0
def test_minimize_multiple_constraints():
    # Regression test for gh-4240.
    def func(x):
        return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])

    def func1(x):
        return np.array([x[1]])

    def func2(x):
        return np.array([x[2]])

    cons = ({'type': 'ineq', 'fun': func},
            {'type': 'ineq', 'fun': func1},
            {'type': 'ineq', 'fun': func2})

    f = lambda x: -1 * (x[0] + x[1] + x[2])

    res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons)
    assert_allclose(res.x, [125, 0, 0], atol=1e-10)
class TestOptimizeResultAttributes:
    # Test that all minimizers return an OptimizeResult containing
    # all the OptimizeResult attributes
    def setup_method(self):
        self.x0 = [5, 5]
        self.func = optimize.rosen
        self.jac = optimize.rosen_der
        self.hess = optimize.rosen_hess
        self.hessp = optimize.rosen_hess_prod
        self.bounds = [(0., 10.), (0., 10.)]

    def test_attributes_present(self):
        attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun',
                      'message']
        skip = {'cobyla': ['nit']}
        for method in MINIMIZE_METHODS:
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning,
                           ("Method .+ does not use (gradient|Hessian.*)"
                            " information"))
                res = optimize.minimize(self.func, self.x0, method=method,
                                        jac=self.jac, hess=self.hess,
                                        hessp=self.hessp)
            for attribute in attributes:
                if method in skip and attribute in skip[method]:
                    continue

                assert hasattr(res, attribute)
                assert attribute in dir(res)

            # gh13001, OptimizeResult.message should be a str
            assert isinstance(res.message, str)
def f1(z, *params):
    x, y = z
    a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)


def f2(z, *params):
    x, y = z
    a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))


def f3(z, *params):
    x, y = z
    a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))


def brute_func(z, *params):
    return f1(z, *params) + f2(z, *params) + f3(z, *params)
class TestBrute:
    # Test the "brute force" method
    def setup_method(self):
        self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
        self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
        self.solution = np.array([-1.05665192, 1.80834843])

    def brute_func(self, z, *params):
        # an instance method to optimize
        return brute_func(z, *params)

    def test_brute(self):
        # test fmin
        resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
                                  full_output=True, finish=optimize.fmin)
        assert_allclose(resbrute[0], self.solution, atol=1e-3)
        assert_allclose(resbrute[1], brute_func(self.solution, *self.params),
                        atol=1e-3)

        # test minimize
        resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
                                  full_output=True,
                                  finish=optimize.minimize)
        assert_allclose(resbrute[0], self.solution, atol=1e-3)
        assert_allclose(resbrute[1], brute_func(self.solution, *self.params),
                        atol=1e-3)

        # test that brute can optimize an instance method (the other tests
        # use a non-class-based function)
        resbrute = optimize.brute(self.brute_func, self.rranges,
                                  args=self.params, full_output=True,
                                  finish=optimize.minimize)
        assert_allclose(resbrute[0], self.solution, atol=1e-3)

    def test_1D(self):
        # test that for a 1-D problem the test function is passed an array,
        # not a scalar.
        def f(x):
            assert len(x.shape) == 1
            assert x.shape[0] == 1
            return x ** 2

        optimize.brute(f, [(-1, 1)], Ns=3, finish=None)

    def test_workers(self):
        # check that parallel evaluation works
        resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
                                  full_output=True, finish=None)

        resbrute1 = optimize.brute(brute_func, self.rranges, args=self.params,
                                   full_output=True, finish=None, workers=2)

        assert_allclose(resbrute1[-1], resbrute[-1])
        assert_allclose(resbrute1[0], resbrute[0])

    def test_runtime_warning(self):
        rng = np.random.default_rng(1234)

        def func(z, *params):
            return rng.random(1) * 1000  # never-converging problem

        with pytest.warns(RuntimeWarning,
                          match=r'Either final optimization did not succeed'):
            optimize.brute(func, self.rranges, args=self.params, disp=True)

    def test_coerce_args_param(self):
        # optimize.brute should coerce non-iterable args to a tuple.
        def f(x, *args):
            return x ** args[0]

        resbrute = optimize.brute(f, (slice(-4, 4, .25),), args=2)
        assert_allclose(resbrute, 0)
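

# Illustrative aside: `brute` expands the slice ranges into a dense grid (its
# documentation notes it uses numpy.mgrid) and evaluates the objective at
# every node before optionally polishing with `finish`. A rough sketch of
# that grid-search stage, for intuition only (helper name is hypothetical):
def _brute_grid_sketch(func, rranges, args=()):
    grid = np.mgrid[rranges]                  # shape (ndim, n1, n2, ...)
    pts = grid.reshape(len(rranges), -1)
    vals = np.array([func(p, *args) for p in pts.T])
    return pts[:, vals.argmin()]              # best grid node, pre-polish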
def test_cobyla_threadsafe():

    # Verify that cobyla is threadsafe. Will segfault if it is not.

    import concurrent.futures
    import time

    def objective1(x):
        time.sleep(0.1)
        return x[0]**2

    def objective2(x):
        time.sleep(0.1)
        return (x[0]-1)**2

    min_method = "COBYLA"

    def minimizer1():
        return optimize.minimize(objective1,
                                 [0.0],
                                 method=min_method)

    def minimizer2():
        return optimize.minimize(objective2,
                                 [0.0],
                                 method=min_method)

    with concurrent.futures.ThreadPoolExecutor() as pool:
        tasks = []
        tasks.append(pool.submit(minimizer1))
        tasks.append(pool.submit(minimizer2))

        for t in tasks:
            res = t.result()
class TestIterationLimits:
    # Tests that optimisation does not give up before trying the requested
    # number of iterations or evaluations, and that it does not succeed by
    # exceeding the limits.
    def setup_method(self):
        self.funcalls = 0

    def slow_func(self, v):
        self.funcalls += 1
        r, t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0], v[1])
        return np.sin(r*20 + t)+r*0.5

    def test_neldermead_limit(self):
        self.check_limits("Nelder-Mead", 200)

    def test_powell_limit(self):
        self.check_limits("powell", 1000)

    def check_limits(self, method, default_iters):
        for start_v in [[0.1, 0.1], [1, 1], [2, 2]]:
            for mfev in [50, 500, 5000]:
                self.funcalls = 0
                res = optimize.minimize(self.slow_func, start_v,
                                        method=method,
                                        options={"maxfev": mfev})
                assert self.funcalls == res["nfev"]
                if res["success"]:
                    assert res["nfev"] < mfev
                else:
                    assert res["nfev"] >= mfev

            for mit in [50, 500, 5000]:
                res = optimize.minimize(self.slow_func, start_v,
                                        method=method,
                                        options={"maxiter": mit})
                if res["success"]:
                    assert res["nit"] <= mit
                else:
                    assert res["nit"] >= mit

            for mfev, mit in [[50, 50], [5000, 5000], [5000, np.inf]]:
                self.funcalls = 0
                res = optimize.minimize(self.slow_func, start_v,
                                        method=method,
                                        options={"maxiter": mit,
                                                 "maxfev": mfev})
                assert self.funcalls == res["nfev"]
                if res["success"]:
                    assert res["nfev"] < mfev and res["nit"] <= mit
                else:
                    assert res["nfev"] >= mfev or res["nit"] >= mit

            for mfev, mit in [[np.inf, None], [None, np.inf]]:
                self.funcalls = 0
                res = optimize.minimize(self.slow_func, start_v,
                                        method=method,
                                        options={"maxiter": mit,
                                                 "maxfev": mfev})
                assert self.funcalls == res["nfev"]
                if res["success"]:
                    if mfev is None:
                        assert res["nfev"] < default_iters*2
                    else:
                        assert res["nit"] <= default_iters*2
                else:
                    assert (res["nfev"] >= default_iters*2
                            or res["nit"] >= default_iters*2)
def test_result_x_shape_when_len_x_is_one():
    def fun(x):
        return x * x

    def jac(x):
        return 2. * x

    def hess(x):
        return np.array([[2.]])

    methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC',
               'COBYLA', 'SLSQP']
    for method in methods:
        res = optimize.minimize(fun, np.array([0.1]), method=method)
        assert res.x.shape == (1,)

    # use jac + hess
    methods = ['trust-constr', 'dogleg', 'trust-ncg', 'trust-exact',
               'trust-krylov', 'Newton-CG']
    for method in methods:
        res = optimize.minimize(fun, np.array([0.1]), method=method, jac=jac,
                                hess=hess)
        assert res.x.shape == (1,)


class FunctionWithGradient:
    def __init__(self):
        self.number_of_calls = 0

    def __call__(self, x):
        self.number_of_calls += 1
        return np.sum(x**2), 2 * x
@pytest.fixture
def function_with_gradient():
    return FunctionWithGradient()


def test_memoize_jac_function_before_gradient(function_with_gradient):
    memoized_function = MemoizeJac(function_with_gradient)

    x0 = np.array([1.0, 2.0])
    assert_allclose(memoized_function(x0), 5.0)
    assert function_with_gradient.number_of_calls == 1

    assert_allclose(memoized_function.derivative(x0), 2 * x0)
    assert function_with_gradient.number_of_calls == 1, \
        "function is not recomputed " \
        "if gradient is requested after function value"

    assert_allclose(
        memoized_function(2 * x0), 20.0,
        err_msg="different input triggers new computation")
    assert function_with_gradient.number_of_calls == 2, \
        "different input triggers new computation"


def test_memoize_jac_gradient_before_function(function_with_gradient):
    memoized_function = MemoizeJac(function_with_gradient)

    x0 = np.array([1.0, 2.0])
    assert_allclose(memoized_function.derivative(x0), 2 * x0)
    assert function_with_gradient.number_of_calls == 1

    assert_allclose(memoized_function(x0), 5.0)
    assert function_with_gradient.number_of_calls == 1, \
        "function is not recomputed " \
        "if function value is requested after gradient"

    assert_allclose(
        memoized_function.derivative(2 * x0), 4 * x0,
        err_msg="different input triggers new computation")
    assert function_with_gradient.number_of_calls == 2, \
        "different input triggers new computation"
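

# Illustrative aside: the usual reason to reach for MemoizeJac is a callable
# that returns (value, gradient) in one shot, as FunctionWithGradient does;
# wrapping it lets `minimize` consume the two pieces separately without
# recomputing. A minimal usage sketch (helper name is hypothetical):
def _memoize_jac_usage_sketch():
    fun_and_grad = lambda x: (np.sum(x**2), 2 * x)
    wrapped = MemoizeJac(fun_and_grad)
    return optimize.minimize(wrapped, np.array([1.0, 2.0]),
                             jac=wrapped.derivative, method='BFGS')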
def test_memoize_jac_with_bfgs(function_with_gradient):
    """ Tests that using MemoizeJac in combination with ScalarFunction
        and BFGS does not lead to repeated function evaluations.
        Tests changes made in response to GH11868.
    """
    memoized_function = MemoizeJac(function_with_gradient)
    jac = memoized_function.derivative
    hess = optimize.BFGS()

    x0 = np.array([1.0, 0.5])
    scalar_function = ScalarFunction(
        memoized_function, x0, (), jac, hess, None, None)
    assert function_with_gradient.number_of_calls == 1

    scalar_function.fun(x0 + 0.1)
    assert function_with_gradient.number_of_calls == 2

    scalar_function.fun(x0 + 0.2)
    assert function_with_gradient.number_of_calls == 3


def test_gh12696():
    # Test that optimize doesn't raise a warning. See gh-12696.
    with assert_no_warnings():
        optimize.fminbound(
            lambda x: np.array([x**2]), -np.pi, np.pi, disp=False)
# --- Test minimize with equal upper and lower bounds --- #

def setup_test_equal_bounds():

    np.random.seed(0)
    x0 = np.random.rand(4)
    lb = np.array([0, 2, -1, -1.0])
    ub = np.array([3, 2, 2, -1.0])
    i_eb = (lb == ub)

    def check_x(x, check_size=True, check_values=True):
        if check_size:
            assert x.size == 4
        if check_values:
            assert_allclose(x[i_eb], lb[i_eb])

    def func(x):
        check_x(x)
        return optimize.rosen(x)

    def grad(x):
        check_x(x)
        return optimize.rosen_der(x)

    def callback(x, *args):
        check_x(x)

    def constraint1(x):
        check_x(x, check_values=False)
        return x[0:1] - 1

    def jacobian1(x):
        check_x(x, check_values=False)
        dc = np.zeros_like(x)
        dc[0] = 1
        return dc

    def constraint2(x):
        check_x(x, check_values=False)
        return x[2:3] - 0.5

    def jacobian2(x):
        check_x(x, check_values=False)
        dc = np.zeros_like(x)
        dc[2] = 1
        return dc

    c1a = NonlinearConstraint(constraint1, -np.inf, 0)
    c1b = NonlinearConstraint(constraint1, -np.inf, 0, jacobian1)
    c2a = NonlinearConstraint(constraint2, -np.inf, 0)
    c2b = NonlinearConstraint(constraint2, -np.inf, 0, jacobian2)

    # test using the three methods that accept bounds, use derivatives, and
    # have some trouble when bounds fix variables
    methods = ('L-BFGS-B', 'SLSQP', 'TNC')

    # test w/out gradient, w/ gradient, and w/ combined objective/gradient
    kwds = ({"fun": func, "jac": False},
            {"fun": func, "jac": grad},
            {"fun": (lambda x: (func(x), grad(x))),
             "jac": True})

    # test with both old- and new-style bounds
    bound_types = (lambda lb, ub: list(zip(lb, ub)),
                   Bounds)

    # Test for many combinations of constraints w/ and w/out jacobian
    # Pairs in format: (test constraints, reference constraints)
    # (always use analytical jacobian in reference)
    constraints = ((None, None), ([], []),
                   (c1a, c1b), (c2b, c2b),
                   ([c1b], [c1b]), ([c2a], [c2b]),
                   ([c1a, c2a], [c1b, c2b]),
                   ([c1a, c2b], [c1b, c2b]),
                   ([c1b, c2b], [c1b, c2b]))

    # test with and without callback function
    callbacks = (None, callback)

    data = {"methods": methods, "kwds": kwds, "bound_types": bound_types,
            "constraints": constraints, "callbacks": callbacks,
            "lb": lb, "ub": ub, "x0": x0, "i_eb": i_eb}

    return data


eb_data = setup_test_equal_bounds()
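

# Illustrative aside: what "equal bounds fix variables" means in practice.
# A minimal sketch (assumes the module-level imports; helper name is
# hypothetical): the solver should hold components with lb == ub at that
# value and optimize only the remaining free variables.
def _fixed_variable_sketch():
    res = optimize.minimize(optimize.rosen, [0.5, 0.5, 0.5, 0.5],
                            method='L-BFGS-B',
                            bounds=[(0, 3), (2, 2), (-1, 2), (-1, -1)])
    assert_allclose(res.x[1], 2)    # fixed by equal bounds
    assert_allclose(res.x[3], -1)   # fixed by equal bounds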
# This test is about handling fixed variables, not the accuracy of the solvers
@pytest.mark.xfail_on_32bit("Failures due to floating point issues, not logic")
@pytest.mark.parametrize('method', eb_data["methods"])
@pytest.mark.parametrize('kwds', eb_data["kwds"])
@pytest.mark.parametrize('bound_type', eb_data["bound_types"])
@pytest.mark.parametrize('constraints', eb_data["constraints"])
@pytest.mark.parametrize('callback', eb_data["callbacks"])
def test_equal_bounds(method, kwds, bound_type, constraints, callback):
    """
    Tests that minimizers still work if (bounds.lb == bounds.ub).any()
    gh12502 - Divide by zero in Jacobian numerical differentiation when
    equality bounds constraints are used
    """
    # GH-15051; slightly more skips than necessary; hopefully fixed by GH-14882
    if (platform.machine() == 'aarch64' and method == "TNC"
            and kwds["jac"] is False and callback is not None):
        pytest.skip('Tolerance violation on aarch')

    lb, ub = eb_data["lb"], eb_data["ub"]
    x0, i_eb = eb_data["x0"], eb_data["i_eb"]

    test_constraints, reference_constraints = constraints
    if test_constraints and not method == 'SLSQP':
        pytest.skip('Only SLSQP supports nonlinear constraints')

    # reference constraints always have an analytical jacobian;
    # if the test constraints are not the same, we'll need finite differences
    fd_needed = (test_constraints != reference_constraints)

    bounds = bound_type(lb, ub)  # old- or new-style

    kwds.update({"x0": x0, "method": method, "bounds": bounds,
                 "constraints": test_constraints, "callback": callback})
    res = optimize.minimize(**kwds)

    expected = optimize.minimize(optimize.rosen, x0, method=method,
                                 jac=optimize.rosen_der, bounds=bounds,
                                 constraints=reference_constraints)

    # compare the output of a solution with FD vs that of an analytic grad
    assert res.success
    assert_allclose(res.fun, expected.fun, rtol=1e-6)
    assert_allclose(res.x, expected.x, rtol=5e-4)

    if fd_needed or kwds['jac'] is False:
        expected.jac[i_eb] = np.nan
    assert res.jac.shape[0] == 4
    assert_allclose(res.jac[i_eb], expected.jac[i_eb], rtol=1e-6)

    if not (kwds['jac'] or test_constraints or isinstance(bounds, Bounds)):
        # compare the output to an equivalent FD minimization that doesn't
        # need factorization
        def fun(x):
            new_x = np.array([np.nan, 2, np.nan, -1])
            new_x[[0, 2]] = x
            return optimize.rosen(new_x)

        fd_res = optimize.minimize(fun,
                                   x0[[0, 2]],
                                   method=method,
                                   bounds=bounds[::2])
        assert_allclose(res.fun, fd_res.fun)

        # TODO this test should really be equivalent to the factorized
        # version above, down to res.nfev. However, testing found that when
        # TNC is called with or without a callback the output is different.
        # The two should be the same! This indicates that the TNC callback
        # may be mutating something when it shouldn't.
        assert_allclose(res.x[[0, 2]], fd_res.x, rtol=2e-6)
@pytest.mark.parametrize('method', eb_data["methods"])
def test_all_bounds_equal(method):
    # this only tests methods that have parameters factored out when lb==ub
    # it does not test other methods that work with bounds
    def f(x, p1=1):
        return np.linalg.norm(x) + p1

    bounds = [(1, 1), (2, 2)]
    x0 = (1.0, 3.0)
    res = optimize.minimize(f, x0, bounds=bounds, method=method)
    assert res.success
    assert_allclose(res.fun, f([1.0, 2.0]))
    assert res.nfev == 1
    assert res.message == 'All independent variables were fixed by bounds.'

    args = (2,)
    res = optimize.minimize(f, x0, bounds=bounds, method=method, args=args)
    assert res.success
    assert_allclose(res.fun, f([1.0, 2.0], 2))

    if method.upper() == 'SLSQP':
        def con(x):
            return np.sum(x)

        nlc = NonlinearConstraint(con, -np.inf, 0.0)
        res = optimize.minimize(
            f, x0, bounds=bounds, method=method, constraints=[nlc]
        )
        assert res.success is False
        assert_allclose(res.fun, f([1.0, 2.0]))
        assert res.nfev == 1
        message = "All independent variables were fixed by bounds, but"
        assert res.message.startswith(message)

        nlc = NonlinearConstraint(con, -np.inf, 4)
        res = optimize.minimize(
            f, x0, bounds=bounds, method=method, constraints=[nlc]
        )
        assert res.success is True
        assert_allclose(res.fun, f([1.0, 2.0]))
        assert res.nfev == 1
        message = "All independent variables were fixed by bounds at values"
        assert res.message.startswith(message)
def test_eb_constraints():
    # make sure constraint functions aren't overwritten when equal bounds
    # are employed, and a parameter is factored out. GH14859
    def f(x):
        return x[0]**3 + x[1]**2 + x[2]*x[3]

    def cfun(x):
        return x[0] + x[1] + x[2] + x[3] - 40

    constraints = [{'type': 'ineq', 'fun': cfun}]

    bounds = [(0, 20)] * 4
    bounds[1] = (5, 5)
    optimize.minimize(
        f,
        x0=[1, 2, 3, 4],
        method='SLSQP',
        bounds=bounds,
        constraints=constraints,
    )
    assert constraints[0]['fun'] == cfun
def test_show_options():
    solver_methods = {
        'minimize': MINIMIZE_METHODS,
        'minimize_scalar': MINIMIZE_SCALAR_METHODS,
        'root': ROOT_METHODS,
        'root_scalar': ROOT_SCALAR_METHODS,
        'linprog': LINPROG_METHODS,
        'quadratic_assignment': QUADRATIC_ASSIGNMENT_METHODS,
    }
    for solver, methods in solver_methods.items():
        for method in methods:
            # testing that `show_options` works without error
            show_options(solver, method)

    unknown_solver_method = {
        'minimize': "ekki",  # unknown method
        'maximize': "cg",  # unknown solver
        'maximize_scalar': "ekki",  # unknown solver and method
    }
    for solver, method in unknown_solver_method.items():
        # testing that `show_options` raises ValueError
        assert_raises(ValueError, show_options, solver, method)
def test_bounds_with_list():
    # gh13501. Bounds created with lists weren't working for Powell.
    bounds = optimize.Bounds(lb=[5., 5.], ub=[10., 10.])
    optimize.minimize(
        optimize.rosen, x0=np.array([9, 9]), method='Powell', bounds=bounds
    )
def test_x_overwritten_user_function():
    # if the user overwrites the x-array in the user function it's likely
    # that the minimizer stops working properly.
    # gh13740
    def fquad(x):
        a = np.arange(np.size(x))
        x -= a
        x *= x
        return np.sum(x)

    def fquad_jac(x):
        a = np.arange(np.size(x))
        x *= 2
        x -= 2 * a
        return x

    fquad_hess = lambda x: np.eye(np.size(x)) * 2.0

    meth_jac = [
        'newton-cg', 'dogleg', 'trust-ncg', 'trust-exact',
        'trust-krylov', 'trust-constr'
    ]
    meth_hess = [
        'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', 'trust-constr'
    ]

    x0 = np.ones(5) * 1.5

    for meth in MINIMIZE_METHODS:
        jac = None
        hess = None
        if meth in meth_jac:
            jac = fquad_jac
        if meth in meth_hess:
            hess = fquad_hess
        res = optimize.minimize(fquad, x0, method=meth, jac=jac, hess=hess)
        assert_allclose(res.x, np.arange(np.size(x0)), atol=2e-4)
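

# Illustrative aside: a defensive pattern related to the test above. If a
# user function must modify its argument in place, handing it a copy keeps
# the minimizer's internal `x` intact. A minimal sketch (helper name is
# hypothetical):
def _copy_guard_sketch(fun):
    # e.g. optimize.minimize(_copy_guard_sketch(mutating_fun), x0)
    return lambda x, *args: fun(np.array(x, copy=True), *args)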
class TestGlobalOptimization:

    def test_optimize_result_attributes(self):
        def func(x):
            return x ** 2

        # Note that `brute` solver does not return `OptimizeResult`
        results = [optimize.basinhopping(func, x0=1),
                   optimize.differential_evolution(func, [(-4, 4)]),
                   optimize.shgo(func, [(-4, 4)]),
                   optimize.dual_annealing(func, [(-4, 4)]),
                   optimize.direct(func, [(-4, 4)]),
                   ]
        for result in results:
            assert isinstance(result, optimize.OptimizeResult)
            assert hasattr(result, "x")
            assert hasattr(result, "success")
            assert hasattr(result, "message")
            assert hasattr(result, "fun")
            assert hasattr(result, "nfev")
            assert hasattr(result, "nit")
def test_approx_fprime():
    # check that approx_fprime (serviced by approx_derivative) works for
    # jac and hess
    g = optimize.approx_fprime(himmelblau_x0, himmelblau)
    assert_allclose(g, himmelblau_grad(himmelblau_x0), rtol=5e-6)

    h = optimize.approx_fprime(himmelblau_x0, himmelblau_grad)
    assert_allclose(h, himmelblau_hess(himmelblau_x0), rtol=5e-6)
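

# Illustrative aside: applying approx_fprime to the gradient (as above) gives
# the Hessian row by row. The FD step can also be set explicitly via the
# `epsilon` argument; the default is on the order of sqrt(machine epsilon).
# A sketch comparing step sizes, for illustration only (helper name is
# hypothetical):
def _approx_fprime_step_sketch():
    g_default = optimize.approx_fprime(himmelblau_x0, himmelblau)
    g_coarse = optimize.approx_fprime(himmelblau_x0, himmelblau, 1e-3)
    return g_default, g_coarse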
def test_gh12594():
    # gh-12594 reported an error in `_linesearch_powell` and
    # `_line_for_search` when `Bounds` was passed lists instead of arrays.
    # Check that results are the same whether the inputs are lists or arrays.
    def f(x):
        return x[0]**2 + (x[1] - 1)**2

    bounds = Bounds(lb=[-10, -10], ub=[10, 10])
    res = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds)
    bounds = Bounds(lb=np.array([-10, -10]), ub=np.array([10, 10]))
    ref = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds)

    assert_allclose(res.fun, ref.fun)
    assert_allclose(res.x, ref.x)