# test__differential_evolution.py
  1. """
  2. Unit tests for the differential global minimization algorithm.
  3. """
  4. import multiprocessing
  5. import platform
  6. from scipy.optimize._differentialevolution import (DifferentialEvolutionSolver,
  7. _ConstraintWrapper)
  8. from scipy.optimize import differential_evolution
  9. from scipy.optimize._constraints import (Bounds, NonlinearConstraint,
  10. LinearConstraint)
  11. from scipy.optimize import rosen, minimize
  12. from scipy.sparse import csr_matrix
  13. from scipy import stats
  14. from scipy._lib._pep440 import Version
  15. import numpy as np
  16. from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,
  17. assert_string_equal, assert_, suppress_warnings)
  18. from pytest import raises as assert_raises, warns
  19. import pytest
  20. class TestDifferentialEvolutionSolver:
  21. def setup_method(self):
  22. self.old_seterr = np.seterr(invalid='raise')
  23. self.limits = np.array([[0., 0.],
  24. [2., 2.]])
  25. self.bounds = [(0., 2.), (0., 2.)]
  26. self.dummy_solver = DifferentialEvolutionSolver(self.quadratic,
  27. [(0, 100)])
  28. # dummy_solver2 will be used to test mutation strategies
  29. self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic,
  30. [(0, 1)],
  31. popsize=7,
  32. mutation=0.5)
  33. # create a population that's only 7 members long
  34. # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
  35. population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T
  36. self.dummy_solver2.population = population
    def teardown_method(self):
        # restore the error state captured in setup_method so the
        # invalid='raise' setting does not leak into other tests
        np.seterr(**self.old_seterr)
  39. def quadratic(self, x):
  40. return x[0]**2
  41. def test__strategy_resolves(self):
  42. # test that the correct mutation function is resolved by
  43. # different requested strategy arguments
  44. solver = DifferentialEvolutionSolver(rosen,
  45. self.bounds,
  46. strategy='best1exp')
  47. assert_equal(solver.strategy, 'best1exp')
  48. assert_equal(solver.mutation_func.__name__, '_best1')
  49. solver = DifferentialEvolutionSolver(rosen,
  50. self.bounds,
  51. strategy='best1bin')
  52. assert_equal(solver.strategy, 'best1bin')
  53. assert_equal(solver.mutation_func.__name__, '_best1')
  54. solver = DifferentialEvolutionSolver(rosen,
  55. self.bounds,
  56. strategy='rand1bin')
  57. assert_equal(solver.strategy, 'rand1bin')
  58. assert_equal(solver.mutation_func.__name__, '_rand1')
  59. solver = DifferentialEvolutionSolver(rosen,
  60. self.bounds,
  61. strategy='rand1exp')
  62. assert_equal(solver.strategy, 'rand1exp')
  63. assert_equal(solver.mutation_func.__name__, '_rand1')
  64. solver = DifferentialEvolutionSolver(rosen,
  65. self.bounds,
  66. strategy='rand2exp')
  67. assert_equal(solver.strategy, 'rand2exp')
  68. assert_equal(solver.mutation_func.__name__, '_rand2')
  69. solver = DifferentialEvolutionSolver(rosen,
  70. self.bounds,
  71. strategy='best2bin')
  72. assert_equal(solver.strategy, 'best2bin')
  73. assert_equal(solver.mutation_func.__name__, '_best2')
  74. solver = DifferentialEvolutionSolver(rosen,
  75. self.bounds,
  76. strategy='rand2bin')
  77. assert_equal(solver.strategy, 'rand2bin')
  78. assert_equal(solver.mutation_func.__name__, '_rand2')
  79. solver = DifferentialEvolutionSolver(rosen,
  80. self.bounds,
  81. strategy='rand2exp')
  82. assert_equal(solver.strategy, 'rand2exp')
  83. assert_equal(solver.mutation_func.__name__, '_rand2')
  84. solver = DifferentialEvolutionSolver(rosen,
  85. self.bounds,
  86. strategy='randtobest1bin')
  87. assert_equal(solver.strategy, 'randtobest1bin')
  88. assert_equal(solver.mutation_func.__name__, '_randtobest1')
  89. solver = DifferentialEvolutionSolver(rosen,
  90. self.bounds,
  91. strategy='randtobest1exp')
  92. assert_equal(solver.strategy, 'randtobest1exp')
  93. assert_equal(solver.mutation_func.__name__, '_randtobest1')
  94. solver = DifferentialEvolutionSolver(rosen,
  95. self.bounds,
  96. strategy='currenttobest1bin')
  97. assert_equal(solver.strategy, 'currenttobest1bin')
  98. assert_equal(solver.mutation_func.__name__, '_currenttobest1')
  99. solver = DifferentialEvolutionSolver(rosen,
  100. self.bounds,
  101. strategy='currenttobest1exp')
  102. assert_equal(solver.strategy, 'currenttobest1exp')
  103. assert_equal(solver.mutation_func.__name__, '_currenttobest1')
  104. def test__mutate1(self):
  105. # strategies */1/*, i.e. rand/1/bin, best/1/exp, etc.
  106. result = np.array([0.05])
  107. trial = self.dummy_solver2._best1((2, 3, 4, 5, 6))
  108. assert_allclose(trial, result)
  109. result = np.array([0.25])
  110. trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6))
  111. assert_allclose(trial, result)
  112. def test__mutate2(self):
  113. # strategies */2/*, i.e. rand/2/bin, best/2/exp, etc.
  114. # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
  115. result = np.array([-0.1])
  116. trial = self.dummy_solver2._best2((2, 3, 4, 5, 6))
  117. assert_allclose(trial, result)
  118. result = np.array([0.1])
  119. trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6))
  120. assert_allclose(trial, result)
  121. def test__randtobest1(self):
  122. # strategies randtobest/1/*
  123. result = np.array([0.15])
  124. trial = self.dummy_solver2._randtobest1((2, 3, 4, 5, 6))
  125. assert_allclose(trial, result)
  126. def test__currenttobest1(self):
  127. # strategies currenttobest/1/*
  128. result = np.array([0.1])
  129. trial = self.dummy_solver2._currenttobest1(1, (2, 3, 4, 5, 6))
  130. assert_allclose(trial, result)
  131. def test_can_init_with_dithering(self):
  132. mutation = (0.5, 1)
  133. solver = DifferentialEvolutionSolver(self.quadratic,
  134. self.bounds,
  135. mutation=mutation)
  136. assert_equal(solver.dither, list(mutation))
  137. def test_invalid_mutation_values_arent_accepted(self):
  138. func = rosen
  139. mutation = (0.5, 3)
  140. assert_raises(ValueError,
  141. DifferentialEvolutionSolver,
  142. func,
  143. self.bounds,
  144. mutation=mutation)
  145. mutation = (-1, 1)
  146. assert_raises(ValueError,
  147. DifferentialEvolutionSolver,
  148. func,
  149. self.bounds,
  150. mutation=mutation)
  151. mutation = (0.1, np.nan)
  152. assert_raises(ValueError,
  153. DifferentialEvolutionSolver,
  154. func,
  155. self.bounds,
  156. mutation=mutation)
  157. mutation = 0.5
  158. solver = DifferentialEvolutionSolver(func,
  159. self.bounds,
  160. mutation=mutation)
  161. assert_equal(0.5, solver.scale)
  162. assert_equal(None, solver.dither)
  163. def test_invalid_functional(self):
  164. def func(x):
  165. return np.array([np.sum(x ** 2), np.sum(x)])
  166. with assert_raises(
  167. RuntimeError,
  168. match=r"func\(x, \*args\) must return a scalar value"):
  169. differential_evolution(func, [(-2, 2), (-2, 2)])
  170. def test__scale_parameters(self):
  171. trial = np.array([0.3])
  172. assert_equal(30, self.dummy_solver._scale_parameters(trial))
  173. # it should also work with the limits reversed
  174. self.dummy_solver.limits = np.array([[100], [0.]])
  175. assert_equal(30, self.dummy_solver._scale_parameters(trial))
  176. def test__unscale_parameters(self):
  177. trial = np.array([30])
  178. assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
  179. # it should also work with the limits reversed
  180. self.dummy_solver.limits = np.array([[100], [0.]])
  181. assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
  182. def test__ensure_constraint(self):
  183. trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001])
  184. self.dummy_solver._ensure_constraint(trial)
  185. assert_equal(trial[2], 0.9)
  186. assert_(np.logical_and(trial >= 0, trial <= 1).all())
  187. def test_differential_evolution(self):
  188. # test that the Jmin of DifferentialEvolutionSolver
  189. # is the same as the function evaluation
  190. solver = DifferentialEvolutionSolver(
  191. self.quadratic, [(-2, 2)], maxiter=1, polish=False
  192. )
  193. result = solver.solve()
  194. assert_equal(result.fun, self.quadratic(result.x))
  195. solver = DifferentialEvolutionSolver(
  196. self.quadratic, [(-2, 2)], maxiter=1, polish=True
  197. )
  198. result = solver.solve()
  199. assert_equal(result.fun, self.quadratic(result.x))
  200. def test_best_solution_retrieval(self):
  201. # test that the getter property method for the best solution works.
  202. solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])
  203. result = solver.solve()
  204. assert_equal(result.x, solver.x)
  205. def test_callback_terminates(self):
  206. # test that if the callback returns true, then the minimization halts
  207. bounds = [(0, 2), (0, 2)]
  208. expected_msg = 'callback function requested stop early by returning True'
  209. def callback_python_true(param, convergence=0.):
  210. return True
  211. result = differential_evolution(rosen, bounds, callback=callback_python_true)
  212. assert_string_equal(result.message, expected_msg)
  213. def callback_evaluates_true(param, convergence=0.):
  214. # DE should stop if bool(self.callback) is True
  215. return [10]
  216. result = differential_evolution(rosen, bounds, callback=callback_evaluates_true)
  217. assert_string_equal(result.message, expected_msg)
  218. def callback_evaluates_false(param, convergence=0.):
  219. return []
  220. result = differential_evolution(rosen, bounds, callback=callback_evaluates_false)
  221. assert result.success
  222. def test_args_tuple_is_passed(self):
  223. # test that the args tuple is passed to the cost function properly.
  224. bounds = [(-10, 10)]
  225. args = (1., 2., 3.)
  226. def quadratic(x, *args):
  227. if type(args) != tuple:
  228. raise ValueError('args should be a tuple')
  229. return args[0] + args[1] * x + args[2] * x**2.
  230. result = differential_evolution(quadratic,
  231. bounds,
  232. args=args,
  233. polish=True)
  234. assert_almost_equal(result.fun, 2 / 3.)
  235. def test_init_with_invalid_strategy(self):
  236. # test that passing an invalid strategy raises ValueError
  237. func = rosen
  238. bounds = [(-3, 3)]
  239. assert_raises(ValueError,
  240. differential_evolution,
  241. func,
  242. bounds,
  243. strategy='abc')
  244. def test_bounds_checking(self):
  245. # test that the bounds checking works
  246. func = rosen
  247. bounds = [(-3)]
  248. assert_raises(ValueError,
  249. differential_evolution,
  250. func,
  251. bounds)
  252. bounds = [(-3, 3), (3, 4, 5)]
  253. assert_raises(ValueError,
  254. differential_evolution,
  255. func,
  256. bounds)
  257. # test that we can use a new-type Bounds object
  258. result = differential_evolution(rosen, Bounds([0, 0], [2, 2]))
  259. assert_almost_equal(result.x, (1., 1.))
  260. def test_select_samples(self):
  261. # select_samples should return 5 separate random numbers.
  262. limits = np.arange(12., dtype='float64').reshape(2, 6)
  263. bounds = list(zip(limits[0, :], limits[1, :]))
  264. solver = DifferentialEvolutionSolver(None, bounds, popsize=1)
  265. candidate = 0
  266. r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5)
  267. assert_equal(
  268. len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6)
  269. def test_maxiter_stops_solve(self):
  270. # test that if the maximum number of iterations is exceeded
  271. # the solver stops.
  272. solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1)
  273. result = solver.solve()
  274. assert_equal(result.success, False)
  275. assert_equal(result.message,
  276. 'Maximum number of iterations has been exceeded.')
    def test_maxfun_stops_solve(self):
        # test that if the maximum number of function evaluations is exceeded
        # during initialisation the solver stops
        solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1,
                                             polish=False)
        result = solver.solve()

        assert_equal(result.nfev, 2)
        assert_equal(result.success, False)
        assert_equal(result.message,
                     'Maximum number of function evaluations has '
                     'been exceeded.')

        # test that if the maximum number of function evaluations is exceeded
        # during the actual minimisation, then the solver stops.
        # Have to turn polishing off, as this will still occur even if maxfun
        # is reached. For popsize=5 and len(bounds)=2, then there are only 10
        # function evaluations during initialisation.
        solver = DifferentialEvolutionSolver(rosen,
                                             self.bounds,
                                             popsize=5,
                                             polish=False,
                                             maxfun=40)
        result = solver.solve()

        assert_equal(result.nfev, 41)
        assert_equal(result.success, False)
        assert_equal(result.message,
                     'Maximum number of function evaluations has '
                     'been exceeded.')

        # now repeat for the updating='deferred' version.
        # 47 function evaluations is not a multiple of the population size,
        # so maxfun is reached partway through a population evaluation.
        # NOTE(review): the message differs here ('reached' vs 'exceeded')
        # because nfev lands exactly on maxfun — presumably matching the
        # solver's == / > distinction; confirm against scipy source.
        solver = DifferentialEvolutionSolver(rosen,
                                             self.bounds,
                                             popsize=5,
                                             polish=False,
                                             maxfun=47,
                                             updating='deferred')
        result = solver.solve()

        assert_equal(result.nfev, 47)
        assert_equal(result.success, False)
        assert_equal(result.message,
                     'Maximum number of function evaluations has '
                     'been reached.')
  319. def test_quadratic(self):
  320. # test the quadratic function from object
  321. solver = DifferentialEvolutionSolver(self.quadratic,
  322. [(-100, 100)],
  323. tol=0.02)
  324. solver.solve()
  325. assert_equal(np.argmin(solver.population_energies), 0)
  326. def test_quadratic_from_diff_ev(self):
  327. # test the quadratic function from differential_evolution function
  328. differential_evolution(self.quadratic,
  329. [(-100, 100)],
  330. tol=0.02)
  331. def test_seed_gives_repeatability(self):
  332. result = differential_evolution(self.quadratic,
  333. [(-100, 100)],
  334. polish=False,
  335. seed=1,
  336. tol=0.5)
  337. result2 = differential_evolution(self.quadratic,
  338. [(-100, 100)],
  339. polish=False,
  340. seed=1,
  341. tol=0.5)
  342. assert_equal(result.x, result2.x)
  343. assert_equal(result.nfev, result2.nfev)
  344. def test_random_generator(self):
  345. # check that np.random.Generator can be used (numpy >= 1.17)
  346. # obtain a np.random.Generator object
  347. rng = np.random.default_rng()
  348. inits = ['random', 'latinhypercube', 'sobol', 'halton']
  349. for init in inits:
  350. differential_evolution(self.quadratic,
  351. [(-100, 100)],
  352. polish=False,
  353. seed=rng,
  354. tol=0.5,
  355. init=init)
  356. def test_exp_runs(self):
  357. # test whether exponential mutation loop runs
  358. solver = DifferentialEvolutionSolver(rosen,
  359. self.bounds,
  360. strategy='best1exp',
  361. maxiter=1)
  362. solver.solve()
  363. def test_gh_4511_regression(self):
  364. # This modification of the differential evolution docstring example
  365. # uses a custom popsize that had triggered an off-by-one error.
  366. # Because we do not care about solving the optimization problem in
  367. # this test, we use maxiter=1 to reduce the testing time.
  368. bounds = [(-5, 5), (-5, 5)]
  369. # result = differential_evolution(rosen, bounds, popsize=1815,
  370. # maxiter=1)
  371. # the original issue arose because of rounding error in arange, with
  372. # linspace being a much better solution. 1815 is quite a large popsize
  373. # to use and results in a long test time (~13s). I used the original
  374. # issue to figure out the lowest number of samples that would cause
  375. # this rounding error to occur, 49.
  376. differential_evolution(rosen, bounds, popsize=49, maxiter=1)
  377. def test_calculate_population_energies(self):
  378. # if popsize is 3, then the overall generation has size (6,)
  379. solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3)
  380. solver._calculate_population_energies(solver.population)
  381. solver._promote_lowest_energy()
  382. assert_equal(np.argmin(solver.population_energies), 0)
  383. # initial calculation of the energies should require 6 nfev.
  384. assert_equal(solver._nfev, 6)
  385. def test_iteration(self):
  386. # test that DifferentialEvolutionSolver is iterable
  387. # if popsize is 3, then the overall generation has size (6,)
  388. solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3,
  389. maxfun=12)
  390. x, fun = next(solver)
  391. assert_equal(np.size(x, 0), 2)
  392. # 6 nfev are required for initial calculation of energies, 6 nfev are
  393. # required for the evolution of the 6 population members.
  394. assert_equal(solver._nfev, 12)
  395. # the next generation should halt because it exceeds maxfun
  396. assert_raises(StopIteration, next, solver)
  397. # check a proper minimisation can be done by an iterable solver
  398. solver = DifferentialEvolutionSolver(rosen, self.bounds)
  399. _, fun_prev = next(solver)
  400. for i, soln in enumerate(solver):
  401. x_current, fun_current = soln
  402. assert fun_prev >= fun_current
  403. _, fun_prev = x_current, fun_current
  404. # need to have this otherwise the solver would never stop.
  405. if i == 50:
  406. break
  407. def test_convergence(self):
  408. solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2,
  409. polish=False)
  410. solver.solve()
  411. assert_(solver.convergence < 0.2)
  412. def test_maxiter_none_GH5731(self):
  413. # Pre 0.17 the previous default for maxiter and maxfun was None.
  414. # the numerical defaults are now 1000 and np.inf. However, some scripts
  415. # will still supply None for both of those, this will raise a TypeError
  416. # in the solve method.
  417. solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None,
  418. maxfun=None)
  419. solver.solve()
    def test_population_initiation(self):
        # test the different modes of population initiation

        # init must be either 'latinhypercube', 'random', a qmc scheme,
        # or an array; a ValueError is raised if something else is passed in
        assert_raises(ValueError,
                      DifferentialEvolutionSolver,
                      *(rosen, self.bounds),
                      **{'init': 'rubbish'})

        solver = DifferentialEvolutionSolver(rosen, self.bounds)

        # check that population initiation:
        # 1) resets _nfev to 0
        # 2) all population energies are np.inf
        solver.init_population_random()
        assert_equal(solver._nfev, 0)
        assert_(np.all(np.isinf(solver.population_energies)))

        solver.init_population_lhs()
        assert_equal(solver._nfev, 0)
        assert_(np.all(np.isinf(solver.population_energies)))

        solver.init_population_qmc(qmc_engine='halton')
        assert_equal(solver._nfev, 0)
        assert_(np.all(np.isinf(solver.population_energies)))

        solver = DifferentialEvolutionSolver(rosen, self.bounds, init='sobol')
        solver.init_population_qmc(qmc_engine='sobol')
        assert_equal(solver._nfev, 0)
        assert_(np.all(np.isinf(solver.population_energies)))

        # we should be able to initialize with our own array
        population = np.linspace(-1, 3, 10).reshape(5, 2)
        solver = DifferentialEvolutionSolver(rosen, self.bounds,
                                             init=population,
                                             strategy='best2bin',
                                             atol=0.01, seed=1, popsize=5)

        assert_equal(solver._nfev, 0)
        assert_(np.all(np.isinf(solver.population_energies)))
        assert_(solver.num_population_members == 5)
        assert_(solver.population_shape == (5, 2))

        # check that the population was initialized correctly
        unscaled_population = np.clip(solver._unscale_parameters(population),
                                      0, 1)
        assert_almost_equal(solver.population[:5], unscaled_population)

        # population values need to be clipped to bounds
        assert_almost_equal(np.min(solver.population[:5]), 0)
        assert_almost_equal(np.max(solver.population[:5]), 1)

        # shouldn't be able to initialize with an array if it's the wrong shape
        # this would have too many parameters
        population = np.linspace(-1, 3, 15).reshape(5, 3)
        assert_raises(ValueError,
                      DifferentialEvolutionSolver,
                      *(rosen, self.bounds),
                      **{'init': population})

        # provide an initial solution
        # bounds are [(0, 2), (0, 2)]
        x0 = np.random.uniform(low=0.0, high=2.0, size=2)
        solver = DifferentialEvolutionSolver(
            rosen, self.bounds, x0=x0
        )
        # parameters are scaled to unit interval, hence x0 / 2
        assert_allclose(solver.population[0], x0 / 2.0)
  477. def test_x0(self):
  478. # smoke test that checks that x0 is usable.
  479. res = differential_evolution(rosen, self.bounds, x0=[0.2, 0.8])
  480. assert res.success
  481. # check what happens if some of the x0 lay outside the bounds
  482. with assert_raises(ValueError):
  483. differential_evolution(rosen, self.bounds, x0=[0.2, 2.1])
  484. def test_infinite_objective_function(self):
  485. # Test that there are no problems if the objective function
  486. # returns inf on some runs
  487. def sometimes_inf(x):
  488. if x[0] < .5:
  489. return np.inf
  490. return x[1]
  491. bounds = [(0, 1), (0, 1)]
  492. differential_evolution(sometimes_inf, bounds=bounds, disp=False)
  493. def test_deferred_updating(self):
  494. # check setting of deferred updating, with default workers
  495. bounds = [(0., 2.), (0., 2.)]
  496. solver = DifferentialEvolutionSolver(rosen, bounds, updating='deferred')
  497. assert_(solver._updating == 'deferred')
  498. assert_(solver._mapwrapper._mapfunc is map)
  499. solver.solve()
  500. def test_immediate_updating(self):
  501. # check setting of immediate updating, with default workers
  502. bounds = [(0., 2.), (0., 2.)]
  503. solver = DifferentialEvolutionSolver(rosen, bounds)
  504. assert_(solver._updating == 'immediate')
  505. # should raise a UserWarning because the updating='immediate'
  506. # is being overridden by the workers keyword
  507. with warns(UserWarning):
  508. with DifferentialEvolutionSolver(rosen, bounds, workers=2) as solver:
  509. pass
  510. assert_(solver._updating == 'deferred')
  511. def test_parallel(self):
  512. # smoke test for parallelization with deferred updating
  513. bounds = [(0., 2.), (0., 2.)]
  514. with multiprocessing.Pool(2) as p, DifferentialEvolutionSolver(
  515. rosen, bounds, updating='deferred', workers=p.map) as solver:
  516. assert_(solver._mapwrapper.pool is not None)
  517. assert_(solver._updating == 'deferred')
  518. solver.solve()
  519. with DifferentialEvolutionSolver(rosen, bounds, updating='deferred',
  520. workers=2) as solver:
  521. assert_(solver._mapwrapper.pool is not None)
  522. assert_(solver._updating == 'deferred')
  523. solver.solve()
  524. def test_converged(self):
  525. solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)])
  526. solver.solve()
  527. assert_(solver.converged())
    def test_constraint_violation_fn(self):
        # checks the shape contract of _constraint_violation_fn for single
        # and multiple constraints, and for vectorized input
        def constr_f(x):
            return [x[0] + x[1]]

        def constr_f2(x):
            return np.array([x[0]**2 + x[1], x[0] - x[1]])

        nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)

        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                             constraints=(nlc))

        # x = [1, 1] gives constr_f = 2, upper bound 1.9 -> violation 0.1
        cv = solver._constraint_violation_fn(np.array([1.0, 1.0]))
        assert_almost_equal(cv, 0.1)

        nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)
        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                             constraints=(nlc, nlc2))

        # for multiple constraints the constraint violations should
        # be concatenated.
        xs = [(1.2, 1), (2.0, 2.0), (0.5, 0.5)]
        vs = [(0.3, 0.64, 0.0), (2.1, 4.2, 0.0), (0, 0, 0)]

        for x, v in zip(xs, vs):
            cv = solver._constraint_violation_fn(np.array(x))
            assert_allclose(cv, np.atleast_2d(v))

        # vectorized calculation of a series of solutions
        assert_allclose(
            solver._constraint_violation_fn(np.array(xs)), np.array(vs)
        )

        # the following line is used in _calculate_population_feasibilities.
        # _constraint_violation_fn returns an (1, M) array when
        # x.shape == (N,), i.e. a single solution. Therefore this list
        # comprehension should generate (S, 1, M) array.
        constraint_violation = np.array([solver._constraint_violation_fn(x)
                                         for x in np.array(xs)])
        assert constraint_violation.shape == (3, 1, 3)

        # we need reasonable error messages if the constraint function doesn't
        # return the right thing
        def constr_f3(x):
            # returns (S, M), rather than (M, S)
            return constr_f2(x).T

        nlc2 = NonlinearConstraint(constr_f3, -np.inf, 1.8)

        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                             constraints=(nlc, nlc2),
                                             vectorized=False)
        solver.vectorized = True
        with pytest.raises(
                RuntimeError, match="An array returned from a Constraint"
        ):
            solver._constraint_violation_fn(np.array(xs))
    def test_constraint_population_feasibilities(self):
        # checks _calculate_population_feasibilities for single and multiple
        # constraints, in both serial and vectorized modes
        def constr_f(x):
            return [x[0] + x[1]]

        def constr_f2(x):
            return [x[0]**2 + x[1], x[0] - x[1]]

        nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)

        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                             constraints=(nlc))

        # are population feasibilities correct
        # [0.5, 0.5] corresponds to scaled values of [1., 1.]
        feas, cv = solver._calculate_population_feasibilities(
            np.array([[0.5, 0.5], [1., 1.]]))
        assert_equal(feas, [False, False])
        assert_almost_equal(cv, np.array([[0.1], [2.1]]))
        assert cv.shape == (2, 1)

        nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)

        for vectorize in [False, True]:
            solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                                 constraints=(nlc, nlc2),
                                                 vectorized=vectorize,
                                                 updating='deferred')

            feas, cv = solver._calculate_population_feasibilities(
                np.array([[0.5, 0.5], [0.6, 0.5]]))
            assert_equal(feas, [False, False])
            assert_almost_equal(cv, np.array([[0.1, 0.2, 0],
                                              [0.3, 0.64, 0]]))

            feas, cv = solver._calculate_population_feasibilities(
                np.array([[0.5, 0.5], [1., 1.]]))
            assert_equal(feas, [False, False])
            assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [2.1, 4.2, 0]]))
            assert cv.shape == (2, 3)

            feas, cv = solver._calculate_population_feasibilities(
                np.array([[0.25, 0.25], [1., 1.]]))
            assert_equal(feas, [True, False])
            assert_almost_equal(cv, np.array([[0.0, 0.0, 0.], [2.1, 4.2, 0]]))
            assert cv.shape == (2, 3)
  608. def test_constraint_solve(self):
  609. def constr_f(x):
  610. return np.array([x[0] + x[1]])
  611. nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
  612. solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
  613. constraints=(nlc))
  614. # trust-constr warns if the constraint function is linear
  615. with warns(UserWarning):
  616. res = solver.solve()
  617. assert constr_f(res.x) <= 1.9
  618. assert res.success
def test_impossible_constraint(self):
    """An infeasible constraint should produce an unsuccessful result with
    maxcv > 0, and _promote_lowest_energy should still select the least
    infeasible member."""
    def constr_f(x):
        return np.array([x[0] + x[1]])

    # x0 + x1 <= -1 can never hold when both variables are in [0, 2]
    nlc = NonlinearConstraint(constr_f, -np.inf, -1)

    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc), popsize=3,
                                         seed=1)

    # a UserWarning is issued because the 'trust-constr' polishing is
    # attempted on the least infeasible solution found.
    with warns(UserWarning):
        res = solver.solve()

    assert res.maxcv > 0
    assert not res.success

    # test _promote_lowest_energy works when none of the population is
    # feasible. In this case, the solution with the lowest constraint
    # violation should be promoted.
    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc), polish=False)
    next(solver)
    assert not solver.feasible.all()
    # infeasible members have non-finite energies
    assert not np.isfinite(solver.population_energies).all()

    # now swap two of the entries in the population
    l = 20
    cv = solver.constraint_violation[0]

    solver.population_energies[[0, l]] = solver.population_energies[[l, 0]]
    solver.population[[0, l], :] = solver.population[[l, 0], :]
    solver.constraint_violation[[0, l], :] = (
        solver.constraint_violation[[l, 0], :])

    solver._promote_lowest_energy()
    # promotion should restore the lowest-violation member to slot 0
    assert_equal(solver.constraint_violation[0], cv)
  649. def test_accept_trial(self):
  650. # _accept_trial(self, energy_trial, feasible_trial, cv_trial,
  651. # energy_orig, feasible_orig, cv_orig)
  652. def constr_f(x):
  653. return [x[0] + x[1]]
  654. nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
  655. solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
  656. constraints=(nlc))
  657. fn = solver._accept_trial
  658. # both solutions are feasible, select lower energy
  659. assert fn(0.1, True, np.array([0.]), 1.0, True, np.array([0.]))
  660. assert (fn(1.0, True, np.array([0.]), 0.1, True, np.array([0.]))
  661. == False)
  662. assert fn(0.1, True, np.array([0.]), 0.1, True, np.array([0.]))
  663. # trial is feasible, original is not
  664. assert fn(9.9, True, np.array([0.]), 1.0, False, np.array([1.]))
  665. # trial and original are infeasible
  666. # cv_trial have to be <= cv_original to be better
  667. assert (fn(0.1, False, np.array([0.5, 0.5]),
  668. 1.0, False, np.array([1., 1.0])))
  669. assert (fn(0.1, False, np.array([0.5, 0.5]),
  670. 1.0, False, np.array([1., 0.50])))
  671. assert (fn(1.0, False, np.array([0.5, 0.5]),
  672. 1.0, False, np.array([1., 0.4])) == False)
  673. def test_constraint_wrapper(self):
  674. lb = np.array([0, 20, 30])
  675. ub = np.array([0.5, np.inf, 70])
  676. x0 = np.array([1, 2, 3])
  677. pc = _ConstraintWrapper(Bounds(lb, ub), x0)
  678. assert (pc.violation(x0) > 0).any()
  679. assert (pc.violation([0.25, 21, 31]) == 0).all()
  680. # check vectorized Bounds constraint
  681. xs = np.arange(1, 16).reshape(5, 3)
  682. violations = []
  683. for x in xs:
  684. violations.append(pc.violation(x))
  685. np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)
  686. x0 = np.array([1, 2, 3, 4])
  687. A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
  688. pc = _ConstraintWrapper(LinearConstraint(A, -np.inf, 0), x0)
  689. assert (pc.violation(x0) > 0).any()
  690. assert (pc.violation([-10, 2, -10, 4]) == 0).all()
  691. # check vectorized LinearConstraint, for 7 lots of parameter vectors
  692. # with each parameter vector being 4 long, with 3 constraints
  693. # xs is the same shape as stored in the differential evolution
  694. # population, but it's sent to the violation function as (len(x), M)
  695. xs = np.arange(1, 29).reshape(7, 4)
  696. violations = []
  697. for x in xs:
  698. violations.append(pc.violation(x))
  699. np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)
  700. pc = _ConstraintWrapper(LinearConstraint(csr_matrix(A), -np.inf, 0),
  701. x0)
  702. assert (pc.violation(x0) > 0).any()
  703. assert (pc.violation([-10, 2, -10, 4]) == 0).all()
  704. def fun(x):
  705. return A.dot(x)
  706. nonlinear = NonlinearConstraint(fun, -np.inf, 0)
  707. pc = _ConstraintWrapper(nonlinear, [-10, 2, -10, 4])
  708. assert (pc.violation(x0) > 0).any()
  709. assert (pc.violation([-10, 2, -10, 4]) == 0).all()
  710. def test_constraint_wrapper_violation(self):
  711. def cons_f(x):
  712. # written in vectorised form to accept an array of (N, S)
  713. # returning (M, S)
  714. # where N is the number of parameters,
  715. # S is the number of solution vectors to be examined,
  716. # and M is the number of constraint components
  717. return np.array([x[0] ** 2 + x[1],
  718. x[0] ** 2 - x[1]])
  719. nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])
  720. pc = _ConstraintWrapper(nlc, [0.5, 1])
  721. assert np.size(pc.bounds[0]) == 2
  722. xs = [(0.5, 1), (0.5, 1.2), (1.2, 1.2), (0.1, -1.2), (0.1, 2.0)]
  723. vs = [(0, 0), (0, 0.1), (0.64, 0), (0.19, 0), (0.01, 1.14)]
  724. for x, v in zip(xs, vs):
  725. assert_allclose(pc.violation(x), v)
  726. # now check that we can vectorize the constraint wrapper
  727. assert_allclose(pc.violation(np.array(xs).T),
  728. np.array(vs).T)
  729. assert pc.fun(np.array(xs).T).shape == (2, len(xs))
  730. assert pc.violation(np.array(xs).T).shape == (2, len(xs))
  731. assert pc.num_constr == 2
  732. assert pc.parameter_count == 2
def test_L1(self):
    """Lampinen ([5]) test problem 1, solved three ways: dense linear
    constraints, sparse linear constraints, and a mix of linear and
    nonlinear constraints."""
    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:])
        return fun

    # nine linear inequality constraints, assembled 1-indexed to match
    # the reference and then trimmed back to 0-indexed form
    A = np.zeros((10, 14))  # 1-indexed to match reference
    A[1, [1, 2, 10, 11]] = 2, 2, 1, 1
    A[2, [1, 10]] = -8, 1
    A[3, [4, 5, 10]] = -2, -1, 1
    A[4, [1, 3, 10, 11]] = 2, 2, 1, 1
    A[5, [2, 11]] = -8, 1
    A[6, [6, 7, 11]] = -2, -1, 1
    A[7, [2, 3, 11, 12]] = 2, 2, 1, 1
    A[8, [3, 12]] = -8, 1
    A[9, [8, 9, 12]] = -2, -1, 1
    A = A[1:, 1:]
    b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0])

    L = LinearConstraint(A, -np.inf, b)

    bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)]
    # using a lower popsize to speed the test up
    res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,
                                 constraints=(L), popsize=2)

    # reference optimum
    x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1)
    f_opt = -15

    assert_allclose(f(x_opt), f_opt)
    assert res.success
    assert_allclose(res.x, x_opt, atol=5e-4)
    assert_allclose(res.fun, f_opt, atol=5e-3)
    # solution must satisfy the constraints and lie within bounds
    assert_(np.all(A@res.x <= b))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))

    # now repeat the same solve, using the same overall constraints,
    # but using a sparse matrix for the LinearConstraint instead of an
    # array
    L = LinearConstraint(csr_matrix(A), -np.inf, b)

    # using a lower popsize to speed the test up
    res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,
                                 constraints=(L), popsize=2)
    assert_allclose(f(x_opt), f_opt)
    assert res.success
    assert_allclose(res.x, x_opt, atol=5e-4)
    assert_allclose(res.fun, f_opt, atol=5e-3)
    assert_(np.all(A@res.x <= b))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))

    # now repeat the same solve, using the same overall constraints,
    # but specify half the constraints in terms of LinearConstraint,
    # and the other half by NonlinearConstraint
    def c1(x):
        x = np.hstack(([0], x))
        return [2*x[2] + 2*x[3] + x[11] + x[12],
                -8*x[3] + x[12]]

    def c2(x):
        x = np.hstack(([0], x))
        return -2*x[8] - x[9] + x[12]

    L = LinearConstraint(A[:5, :], -np.inf, b[:5])
    L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6])
    N = NonlinearConstraint(c1, -np.inf, b[6:8])
    N2 = NonlinearConstraint(c2, -np.inf, b[8:9])
    constraints = (L, N, L2, N2)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(f, bounds, strategy='rand1bin',
                                     seed=1234, constraints=constraints,
                                     popsize=2)

    assert_allclose(res.x, x_opt, atol=5e-4)
    assert_allclose(res.fun, f_opt, atol=5e-3)
    assert_(np.all(A@res.x <= b))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
def test_L2(self):
    """Lampinen ([5]) test problem 2: polynomial objective with four
    nonlinear inequality constraints."""
    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = ((x[1]-10)**2 + 5*(x[2]-12)**2 + x[3]**4 + 3*(x[4]-11)**2 +
               10*x[5]**6 + 7*x[6]**2 + x[7]**4 - 4*x[6]*x[7] - 10*x[6] -
               8*x[7])
        return fun

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [127 - 2*x[1]**2 - 3*x[2]**4 - x[3] - 4*x[4]**2 - 5*x[5],
                196 - 23*x[1] - x[2]**2 - 6*x[6]**2 + 8*x[7],
                282 - 7*x[1] - 3*x[2] - 10*x[3]**2 - x[4] + x[5],
                -4*x[1]**2 - x[2]**2 + 3*x[1]*x[2] - 2*x[3]**2 -
                5*x[6] + 11*x[7]]

    # all four constraint components required to be non-negative
    N = NonlinearConstraint(c1, 0, np.inf)
    bounds = [(-10, 10)]*7
    constraints = (N)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(f, bounds, strategy='rand1bin',
                                     seed=1234, constraints=constraints)

    # reference optimum
    f_opt = 680.6300599487869
    x_opt = (2.330499, 1.951372, -0.4775414, 4.365726,
             -0.6244870, 1.038131, 1.594227)

    assert_allclose(f(x_opt), f_opt)
    assert_allclose(res.fun, f_opt)
    assert_allclose(res.x, x_opt, atol=1e-5)
    assert res.success
    # solution must satisfy the constraints and lie within bounds
    assert_(np.all(np.array(c1(res.x)) >= 0))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
def test_L3(self):
    """Lampinen ([5]) test problem 3: quadratic objective subject to
    both linear and nonlinear inequality constraints."""
    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = (x[1]**2 + x[2]**2 + x[1]*x[2] - 14*x[1] - 16*x[2] +
               (x[3]-10)**2 + 4*(x[4]-5)**2 + (x[5]-3)**2 + 2*(x[6]-1)**2 +
               5*x[7]**2 + 7*(x[8]-11)**2 + 2*(x[9]-10)**2 +
               (x[10] - 7)**2 + 45
               )
        return fun  # maximize

    # three linear constraints, built 1-indexed then trimmed
    A = np.zeros((4, 11))
    A[1, [1, 2, 7, 8]] = -4, -5, 3, -9
    A[2, [1, 2, 7, 8]] = -10, 8, 17, -2
    A[3, [1, 2, 9, 10]] = 8, -2, -5, 2
    A = A[1:, 1:]
    b = np.array([-105, 0, -12])

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [3*x[1] - 6*x[2] - 12*(x[9]-8)**2 + 7*x[10],
                -3*(x[1]-2)**2 - 4*(x[2]-3)**2 - 2*x[3]**2 + 7*x[4] + 120,
                -x[1]**2 - 2*(x[2]-2)**2 + 2*x[1]*x[2] - 14*x[5] + 6*x[6],
                -5*x[1]**2 - 8*x[2] - (x[3]-6)**2 + 2*x[4] + 40,
                -0.5*(x[1]-8)**2 - 2*(x[2]-4)**2 - 3*x[5]**2 + x[6] + 30]

    L = LinearConstraint(A, b, np.inf)
    N = NonlinearConstraint(c1, 0, np.inf)
    bounds = [(-10, 10)]*10
    constraints = (L, N)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(f, bounds, seed=1234,
                                     constraints=constraints, popsize=3)

    # reference optimum
    x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548,
             1.430574, 1.321644, 9.828726, 8.280092, 8.375927)
    f_opt = 24.3062091

    assert_allclose(f(x_opt), f_opt, atol=1e-5)
    assert_allclose(res.x, x_opt, atol=1e-6)
    assert_allclose(res.fun, f_opt, atol=1e-5)
    assert res.success
    # solution must satisfy the constraints and lie within bounds
    assert_(np.all(A @ res.x >= b))
    assert_(np.all(np.array(c1(res.x)) >= 0))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
def test_L4(self):
    """Lampinen ([5]) test problem 4: linear objective with linear and
    nonlinear constraints."""
    def f(x):
        return np.sum(x[:3])

    # three linear constraints, built 1-indexed then trimmed
    A = np.zeros((4, 9))
    A[1, [4, 6]] = 0.0025, 0.0025
    A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025
    A[3, [8, 5]] = 0.01, -0.01
    A = A[1:, 1:]
    b = np.array([1, 1, 1])

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [x[1]*x[6] - 833.33252*x[4] - 100*x[1] + 83333.333,
                x[2]*x[7] - 1250*x[5] - x[2]*x[4] + 1250*x[4],
                x[3]*x[8] - 1250000 - x[3]*x[5] + 2500*x[5]]

    L = LinearConstraint(A, -np.inf, 1)
    N = NonlinearConstraint(c1, 0, np.inf)

    bounds = [(100, 10000)] + [(1000, 10000)]*2 + [(10, 1000)]*5
    constraints = (L, N)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(f, bounds, strategy='rand1bin',
                                     seed=1234, constraints=constraints,
                                     popsize=3)

    # reference optimum
    f_opt = 7049.248
    x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172,
             217.9823, 286.416528, 395.601172]

    assert_allclose(f(x_opt), f_opt, atol=0.001)
    assert_allclose(res.fun, f_opt, atol=0.001)

    # use higher tol here for 32-bit Windows, see gh-11693
    if (platform.system() == 'Windows' and np.dtype(np.intp).itemsize < 8):
        assert_allclose(res.x, x_opt, rtol=2.4e-6, atol=0.0035)
    else:
        # tolerance determined from macOS + MKL failure, see gh-12701
        assert_allclose(res.x, x_opt, rtol=5e-6, atol=0.0024)

    assert res.success
    # solution must satisfy the constraints and lie within bounds
    assert_(np.all(A @ res.x <= b))
    assert_(np.all(np.array(c1(res.x)) >= 0))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
def test_L5(self):
    """Lampinen ([5]) test problem 5: trigonometric objective (posed as
    maximisation, so negated) with two nonlinear constraints."""
    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) /
               (x[1]**3*(x[1]+x[2])))
        return -fun  # maximize

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [x[1]**2 - x[2] + 1,
                1 - x[1] + (x[2]-4)**2]

    # both constraint components required to be non-positive
    N = NonlinearConstraint(c1, -np.inf, 0)
    bounds = [(0, 10)]*2
    constraints = (N)

    res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
                                 constraints=constraints)

    # reference optimum
    x_opt = (1.22797135, 4.24537337)
    f_opt = -0.095825

    assert_allclose(f(x_opt), f_opt, atol=2e-5)
    assert_allclose(res.fun, f_opt, atol=1e-4)
    assert res.success
    # solution must satisfy the constraints and lie within bounds
    assert_(np.all(np.array(c1(res.x)) <= 0))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
def test_L6(self):
    """Lampinen ([5]) test problem 6: cubic objective with two nonlinear
    inequality constraints."""
    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = (x[1]-10)**3 + (x[2] - 20)**3
        return fun

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [(x[1]-5)**2 + (x[2] - 5)**2 - 100,
                -(x[1]-6)**2 - (x[2] - 5)**2 + 82.81]

    # both constraint components required to be non-negative
    N = NonlinearConstraint(c1, 0, np.inf)
    bounds = [(13, 100), (0, 100)]
    constraints = (N)

    res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
                                 constraints=constraints, tol=1e-7)

    # reference optimum
    x_opt = (14.095, 0.84296)
    f_opt = -6961.814744

    assert_allclose(f(x_opt), f_opt, atol=1e-6)
    assert_allclose(res.fun, f_opt, atol=0.001)
    assert_allclose(res.x, x_opt, atol=1e-4)
    assert res.success
    # solution must satisfy the constraints and lie within bounds
    assert_(np.all(np.array(c1(res.x)) >= 0))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
def test_L7(self):
    """Lampinen ([5]) test problem 7: quadratic objective with a
    three-component nonlinear constraint bounded on both sides."""
    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = (5.3578547*x[3]**2 + 0.8356891*x[1]*x[5] +
               37.293239*x[1] - 40792.141)
        return fun

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [
                85.334407 + 0.0056858*x[2]*x[5] + 0.0006262*x[1]*x[4] -
                0.0022053*x[3]*x[5],

                80.51249 + 0.0071317*x[2]*x[5] + 0.0029955*x[1]*x[2] +
                0.0021813*x[3]**2,

                9.300961 + 0.0047026*x[3]*x[5] + 0.0012547*x[1]*x[3] +
                0.0019085*x[3]*x[4]
                ]

    # two-sided bounds on the constraint components
    N = NonlinearConstraint(c1, [0, 90, 20], [92, 110, 25])

    bounds = [(78, 102), (33, 45)] + [(27, 45)]*3
    constraints = (N)

    res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
                                 constraints=constraints)

    # using our best solution, rather than Lampinen/Koziel. Koziel solution
    # doesn't satisfy constraints, Lampinen f_opt just plain wrong.
    x_opt = [78.00000686, 33.00000362, 29.99526064, 44.99999971,
             36.77579979]

    f_opt = -30665.537578

    assert_allclose(f(x_opt), f_opt)
    assert_allclose(res.x, x_opt, atol=1e-3)
    assert_allclose(res.fun, f_opt, atol=1e-3)

    assert res.success
    # solution must satisfy both sides of the constraints and the bounds
    assert_(np.all(np.array(c1(res.x)) >= np.array([0, 90, 20])))
    assert_(np.all(np.array(c1(res.x)) <= np.array([92, 110, 25])))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
@pytest.mark.slow
@pytest.mark.xfail(platform.machine() == 'ppc64le',
                   reason="fails on ppc64le")
def test_L8(self):
    """Lampinen ([5]) test problem 8: cubic objective with linear
    constraints and near-equality (narrowly bounded) nonlinear
    constraints."""
    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = 3*x[1] + 0.000001*x[1]**3 + 2*x[2] + 0.000002/3*x[2]**3
        return fun

    # two linear constraints, built 1-indexed then trimmed
    A = np.zeros((3, 5))
    A[1, [4, 3]] = 1, -1
    A[2, [3, 4]] = 1, -1
    A = A[1:, 1:]
    b = np.array([-.55, -.55])

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [
                1000*np.sin(-x[3]-0.25) + 1000*np.sin(-x[4]-0.25) +
                894.8 - x[1],
                1000*np.sin(x[3]-0.25) + 1000*np.sin(x[3]-x[4]-0.25) +
                894.8 - x[2],
                1000*np.sin(x[4]-0.25) + 1000*np.sin(x[4]-x[3]-0.25) +
                1294.8
                ]
    L = LinearConstraint(A, b, np.inf)
    # the +/- 0.001 band approximates an equality constraint
    N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001))

    bounds = [(0, 1200)]*2+[(-.55, .55)]*2
    constraints = (L, N)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        # original Lampinen test was with rand1bin, but that takes a
        # huge amount of CPU time. Changing strategy to best1bin speeds
        # things up a lot
        res = differential_evolution(f, bounds, strategy='best1bin',
                                     seed=1234, constraints=constraints,
                                     maxiter=5000)

    # reference optimum
    x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336)
    f_opt = 5126.4981

    assert_allclose(f(x_opt), f_opt, atol=1e-3)
    assert_allclose(res.x[:2], x_opt[:2], atol=2e-3)
    assert_allclose(res.x[2:], x_opt[2:], atol=2e-3)
    assert_allclose(res.fun, f_opt, atol=2e-2)
    assert res.success
    # solution must satisfy the constraints and lie within bounds
    assert_(np.all(A@res.x >= b))
    assert_(np.all(np.array(c1(res.x)) >= -0.001))
    assert_(np.all(np.array(c1(res.x)) <= 0.001))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
def test_L9(self):
    """Lampinen ([5]) test problem 9: two-variable quadratic objective
    with a single narrowly bounded nonlinear constraint."""
    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return x[1]**2 + (x[2]-1)**2

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [x[2] - x[1]**2]

    # the +/- 0.001 band approximates the equality x2 == x1**2
    N = NonlinearConstraint(c1, [-.001], [0.001])
    bounds = [(-1, 1)]*2

    constraints = (N)
    res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
                                 constraints=constraints)

    # reference optimum; the problem is symmetric, hence np.abs below
    x_opt = [np.sqrt(2)/2, 0.5]
    f_opt = 0.75

    assert_allclose(f(x_opt), f_opt)
    assert_allclose(np.abs(res.x), x_opt, atol=1e-3)
    assert_allclose(res.fun, f_opt, atol=1e-3)
    assert res.success
    # solution must satisfy the constraints and lie within bounds
    assert_(np.all(np.array(c1(res.x)) >= -0.001))
    assert_(np.all(np.array(c1(res.x)) <= 0.001))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
def test_integrality(self):
    """Fit a discrete (nbinom) distribution to random variates using an
    integrality constraint on the first shape parameter."""
    # test fitting discrete distribution to data
    rng = np.random.default_rng(6519843218105)
    dist = stats.nbinom
    shapes = (5, 0.5)
    x = dist.rvs(*shapes, size=10000, random_state=rng)

    def func(p, *args):
        dist, x = args
        # negative log-likelihood function
        ll = -np.log(dist.pmf(x, *p)).sum(axis=-1)
        if np.isnan(ll):  # occurs when x is outside of support
            ll = np.inf  # we don't want that
        return ll

    # the first parameter (n) is integral, the second (p) is not
    integrality = [True, False]
    bounds = [(1, 18), (0, 0.95)]

    res = differential_evolution(func, bounds, args=(dist, x),
                                 integrality=integrality, polish=False,
                                 seed=rng)
    # tolerance has to be fairly relaxed for the second parameter
    # because we're fitting a distribution to random variates.
    assert res.x[0] == 5
    assert_allclose(res.x, shapes, rtol=0.02)

    # check that we can still use integrality constraints with polishing
    res2 = differential_evolution(func, bounds, args=(dist, x),
                                  integrality=integrality, polish=True,
                                  seed=rng)

    def func2(p, *args):
        # same objective, but with n fixed and only p free
        n, dist, x = args
        return func(np.array([n, p[0]]), dist, x)

    # compare the DE derived solution to an LBFGSB solution (that doesn't
    # have to find the integral values). Note we're setting x0 to be the
    # output from the first DE result, thereby making the polishing step
    # and this minimisation pretty much equivalent.
    LBFGSB = minimize(func2, res2.x[1], args=(5, dist, x),
                      bounds=[(0, 0.95)])
    assert_allclose(res2.x[1], LBFGSB.x)
    assert res2.fun <= res.fun
  1108. def test_integrality_limits(self):
  1109. def f(x):
  1110. return x
  1111. integrality = [True, False, True]
  1112. bounds = [(0.2, 1.1), (0.9, 2.2), (3.3, 4.9)]
  1113. # no integrality constraints
  1114. solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
  1115. integrality=False)
  1116. assert_allclose(solver.limits[0], [0.2, 0.9, 3.3])
  1117. assert_allclose(solver.limits[1], [1.1, 2.2, 4.9])
  1118. # with integrality constraints
  1119. solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
  1120. integrality=integrality)
  1121. assert_allclose(solver.limits[0], [0.5, 0.9, 3.5])
  1122. assert_allclose(solver.limits[1], [1.5, 2.2, 4.5])
  1123. assert_equal(solver.integrality, [True, False, True])
  1124. assert solver.polish is False
  1125. bounds = [(-1.2, -0.9), (0.9, 2.2), (-10.3, 4.1)]
  1126. solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
  1127. integrality=integrality)
  1128. assert_allclose(solver.limits[0], [-1.5, 0.9, -10.5])
  1129. assert_allclose(solver.limits[1], [-0.5, 2.2, 4.5])
  1130. # A lower bound of -1.2 is converted to
  1131. # np.nextafter(np.ceil(-1.2) - 0.5, np.inf)
  1132. # with a similar process to the upper bound. Check that the
  1133. # conversions work
  1134. assert_allclose(np.round(solver.limits[0]), [-1.0, 1.0, -10.0])
  1135. assert_allclose(np.round(solver.limits[1]), [-1.0, 2.0, 4.0])
  1136. bounds = [(-10.2, -8.1), (0.9, 2.2), (-10.9, -9.9999)]
  1137. solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
  1138. integrality=integrality)
  1139. assert_allclose(solver.limits[0], [-10.5, 0.9, -10.5])
  1140. assert_allclose(solver.limits[1], [-8.5, 2.2, -9.5])
  1141. bounds = [(-10.2, -10.1), (0.9, 2.2), (-10.9, -9.9999)]
  1142. with pytest.raises(ValueError, match='One of the integrality'):
  1143. DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
  1144. integrality=integrality)
def test_vectorized(self):
    """Check validation/warnings for the `vectorized` keyword and that a
    vectorized solve matches the non-vectorized equivalent."""
    def quadratic(x):
        return np.sum(x**2)

    def quadratic_vec(x):
        return np.sum(x**2, axis=0)

    # A vectorized function needs to accept (len(x), S) and return (S,)
    with pytest.raises(RuntimeError, match='The vectorized function'):
        differential_evolution(quadratic, self.bounds,
                               vectorized=True, updating='deferred')

    # vectorized overrides the updating keyword, check for warning
    with warns(UserWarning, match="differential_evolution: the 'vector"):
        differential_evolution(quadratic_vec, self.bounds,
                               vectorized=True)

    # vectorized defers to the workers keyword, check for warning
    with warns(UserWarning, match="differential_evolution: the 'workers"):
        differential_evolution(quadratic_vec, self.bounds,
                               vectorized=True, workers=map,
                               updating='deferred')

    # count how many times the "vectorized" objective is invoked
    ncalls = [0]

    def rosen_vec(x):
        ncalls[0] += 1
        return rosen(x)

    bounds = [(0, 10), (0, 10)]
    res1 = differential_evolution(rosen, bounds, updating='deferred',
                                  seed=1)

    res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
                                  updating='deferred', seed=1)

    # the two minimisation runs should be functionally equivalent
    assert_allclose(res1.x, res2.x)
    assert ncalls[0] == res2.nfev
    assert res1.nit == res2.nit
def test_vectorized_constraints(self):
    """A vectorized constrained solve should match the scalar equivalent
    when run with the same seed."""
    def constr_f(x):
        return np.array([x[0] + x[1]])

    def constr_f2(x):
        return np.array([x[0]**2 + x[1], x[0] - x[1]])

    nlc1 = NonlinearConstraint(constr_f, -np.inf, 1.9)
    nlc2 = NonlinearConstraint(constr_f2, (0.9, 0.5), (2.0, 2.0))

    def rosen_vec(x):
        # accept an (len(x0), S) array, returning a (S,) array
        v = 100 * (x[1:] - x[:-1]**2.0)**2.0
        v += (1 - x[:-1])**2.0
        return np.squeeze(v)

    bounds = [(0, 10), (0, 10)]

    res1 = differential_evolution(rosen, bounds, updating='deferred',
                                  seed=1, constraints=[nlc1, nlc2],
                                  polish=False)

    res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
                                  updating='deferred', seed=1,
                                  constraints=[nlc1, nlc2],
                                  polish=False)

    # the two minimisation runs should be functionally equivalent
    assert_allclose(res1.x, res2.x)
  1198. def test_constraint_violation_error_message(self):
  1199. def func(x):
  1200. return np.cos(x[0]) + np.sin(x[1])
  1201. # Intentionally infeasible constraints.
  1202. c0 = NonlinearConstraint(lambda x: x[1] - (x[0]-1)**2, 0, np.inf)
  1203. c1 = NonlinearConstraint(lambda x: x[1] + x[0]**2, -np.inf, 0)
  1204. result = differential_evolution(func,
  1205. bounds=[(-1, 2), (-1, 1)],
  1206. constraints=[c0, c1],
  1207. maxiter=10,
  1208. polish=False,
  1209. seed=864197532)
  1210. assert result.success is False
  1211. # The numerical value in the error message might be sensitive to
  1212. # changes in the implementation. It can be updated if the code is
  1213. # changed. The essential part of the test is that there is a number
  1214. # after the '=', so if necessary, the text could be reduced to, say,
  1215. # "MAXCV = 0.".
  1216. assert "MAXCV = 0.404" in result.message