diff --git a/.gitignore b/.gitignore index 7909d367..0b468357 100755 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# SPISEA-generated isochrones from test functions +spisea/tests/isochrones/ + # Compiled files *.py[co] *.a @@ -51,4 +54,7 @@ distribute-*.tar.gz # OS Generated Files .DS_Store -._* \ No newline at end of file +._* + +# Test files generated +spisea/tests/isochrones diff --git a/docs/add_filters.rst b/docs/add_filters.rst index 1bbfef12..4fe10c88 100644 --- a/docs/add_filters.rst +++ b/docs/add_filters.rst @@ -12,8 +12,10 @@ If the user wants to add new photometric filters to SPISEA, there are 4 main ste call the new function in ``filters.py`` when the new filter string is called. 4) Edit the ``get_obs_str()`` function in ``synthetic.py`` to - convert between column name and filter string (e.g input string - for get_filter_info) for the new filters. + convert between column name and filter string (e.g input string + for get_filter_info) for the new filters. + 5) Add the filter to the ``filt_list`` in the ``test_filters()`` function in ``tests/test_models.py`` to + ensure the filter can be loaded properly. Additional documentation on this is coming soon. In the meantime, let us know on the Github `issue tracker `_ if you'd like to diff --git a/docs/conf.py b/docs/conf.py index c72f260d..f10e26c1 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -25,9 +25,9 @@ author = 'Matthew Hosek Jr, Jessica R. Lu, Casey Y. 
Lam' # The short X.Y version -version = '2.2' +version = '2.4' # The full version, including alpha/beta/rc tags -release = '2.2' +release = '2.4' diff --git a/docs/contributors.rst b/docs/contributors.rst index 714e4f94..1944147e 100644 --- a/docs/contributors.rst +++ b/docs/contributors.rst @@ -37,8 +37,14 @@ what operating system is used Sage Hironaka Remulla -- added Rubin Observatory filters -Lingfeng Wei -- bugfix to improve creation of iso_dir in IsochronePhot +Lingfeng Wei -- bugfix to improve creation of iso_dir in +IsochronePhot, implemented faster cluster generation and test +functions for primary and companion star mass generation (v2.3), +updated random state generators (v2.4) Macy Huston -- New metallicity bound + isochrone filter checks, imf_mass_lim bugfix, roman filter bugfix, added Euclid filters, Synthpop compatibility -updates (v2.2) +updates (v2.2), improvements for reading in/updated existing isochrone +files, Vega mag to ST mag conversion function (v2.4) + +Anna Pusack -- Added IRTF L-band filter support diff --git a/docs/filters.rst b/docs/filters.rst index ab2e5418..3112d4a1 100644 --- a/docs/filters.rst +++ b/docs/filters.rst @@ -7,16 +7,16 @@ Photometric Filters The user can specify what filters are used for synthetic photometry when defining the :ref:`isochrone_objects`. Each filter is identified by a unique string, and an array of such strings -are passed into the Isochrone call. +are passed into the Isochrone call. For example:: - + # Use the HST WFC3-IR F127M and F153M filters, along with NIRC2 Kp filt_list = ['wfc3,ir,f127m', 'wfc3,ir,f153m', 'nirc2,Kp'] my_iso = synthetic.IsochronePhot(logAge, AKs, dist, metallicity=0, evo_model=evo_model, atm_func=atm_func, red_law=red_law, filters=filt_list) - + These strings follow the format ``,``. Note that there is no space after the comma, and case matters. 
@@ -35,12 +35,13 @@ Available filters: * GAIA * HAWK-I * Hubble Space Telescope +* IRTF * Johnson-Cousins * Johnson-Glass * JWST * Keck NIRC * Keck NIRC2 -* NACO +* NACO * PanStarrs 1 * Roman Space Telescope * UKIRT @@ -48,11 +49,11 @@ Available filters: * VISTA * ZTF - + Filter Sets ------------ - + **2MASS** `Two-Micron Sky Survey `_ @@ -83,9 +84,10 @@ Example: ``'decam,r'`` **Euclid** -`Euclid (NISP) space telescope filters `_ +Euclid space telescope `NISP filters `_ +and `VIS single filter `_ -Filters: Y, J, H +Filters: VIS, Y, J, H Example: ``'euclid,Y'`` @@ -145,10 +147,10 @@ Example: ``'jg,K'`` JWST NIRCam filters, downloaded from `NIRCam website `_. The filter functions in the nircam_throughputs/modAB_mean/nrc_plus_ote folder is used. -Filters: F070W, F090W, F115W, F140M, F150W, F150W2, F162M, F164N, F182M, F187N, F200W, F210M, F212N, F250M, F277W, F300M, F322W2, F323N, F335M, F356W, F360M, F405N, F410M, F430M, F444W, F460M, F466N, F470N, F480M +Filters: F070W, F090W, F115W, F140M, F150W, F150W2, F162M, F164N, F182M, F187N, F200W, F210M, F212N, F250M, F277W, F300M, F322W2, F323N, F335M, F356W, F360M, F405N, F410M, F430M, F444W, F460M, F466N, F470N, F480M Example: ``'jwst,F356W'`` - + **Keck NIRC** @@ -236,3 +238,11 @@ Example: ``'vista,Y'`` Filters: g, r, i Example: ``'ztf,g'`` + +**IRTF** + +`IRTF NSFCam `_ + +Filters: L + +Example: ``'nsfcam,L'`` diff --git a/docs/index.rst b/docs/index.rst index 11e1e44e..048f5c8b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -82,6 +82,27 @@ releases will be co-authors in future SPISEA software papers. Change Log ---------- +2.4 (2026-03-20) + * Added backward compatibility for isochrone file names created + before v2.3 + * When reading isochrone from existing file, only keep user + requested filters in resulting table. + * Changed the global random seed to a random generator within each class, + but still retaining the reproducibility. 
Test cluster files in + test_data also updated accordingly for testing purposes + * Added filter support for IRTF L-band + * Added conversion function between ST mags and Vega mags + +2.3 (2026-02-10) + * Achieves faster cluster generation (factor of about 2x) by + replacing ragged arrays with masked arrays when calculating + multiplicity properties + * Added new test functions (and associated test data files) ensuring + that the primary mass and companion + mass distributions remain the same as generated with SPISEA <= v2.2 + * Added support for Euclid VIS filter + + 2.2 (2026-01-16) * Compatibility updates for SPISEA to work with `SynthPop `_. Updates include: diff --git a/docs/make_isochrone.rst b/docs/make_isochrone.rst index c4f3270e..5f764c93 100644 --- a/docs/make_isochrone.rst +++ b/docs/make_isochrone.rst @@ -9,7 +9,10 @@ total extinction, and metallicity, along with the :ref:`atmo_models`, :ref:`evo_models`, and :ref:`ext_law`. If the IsochronePhot sub-class is used then synthetic photometry -will be produced. The :ref:`filters` are defined as additional inputs. +will be produced. The :ref:`filters` are defined as additional +inputs. The output photometry is in terms of Vega mags, but the user +can also calculate the required conversion to AB mags or ST mags using +the functions in :ref:`Photometry Conversion Functions `. An example of making an IsochronePhot object:: @@ -76,10 +79,9 @@ Tips and Tricks: The IsochronePhot Object * **WARNING**: When IsochronePhot checks to see if the desired isochrone table already exists, it checks all isochrone properties - except for the photometric filters (evolution models, atmosphere - models, and reddening law are encoded in the table meta-data). + (evolution models, atmosphere models, and reddening law are encoded in the table meta-data). If any of these parameters do not match, then the isochrone will - be re-calculated. + be re-calculated. 
However, to keep the isochrone filenames reasonable, only the age, extinction, distance, and metallicity are encoded in the @@ -91,13 +93,6 @@ Tips and Tricks: The IsochronePhot Object that users specify different iso_dir paths when making isochrones with different evolution models, atmosphere models, or reddening laws.* - - * **WARNING**: IsochronePhot does not check existing - isochrone tables to see if the photometric filters match - those specified by the user. *So, if the user wishes to generate an - isochrone with different filters, we recommend either using a - different iso_dir path or setting the keyword recomp=True (see - docs below).* Base Isochrone Class ---------------------------- @@ -112,3 +107,13 @@ Isochrone Sub-classes .. autoclass:: synthetic.IsochronePhot :show-inheritance: :members: make_photometry, plot_CMD, plot_mass_magnitude + + +.. _phot_conversions: + +Photometry Conversion Functions +------------------------------- +.. 
autofunction:: synthetic.calc_st_vega_filter_conversion + diff --git a/filt_func/euclid/VIS.dat b/filt_func/euclid/VIS.dat new file mode 100755 index 00000000..8071c831 --- /dev/null +++ b/filt_func/euclid/VIS.dat @@ -0,0 +1,551 @@ +4369.190 0.00056679011507854 +4379.190 0.00095861003123264 +4389.180 0.0014222192668852 +4399.180 0.0017743627364614 +4409.170 0.0017759292855133 +4419.170 0.0015893083675879 +4429.160 0.0015197711551908 +4439.150 0.0016367665308994 +4449.150 0.0017004749033207 +4459.140 0.001630730073096 +4469.140 0.0021505801024037 +4479.130 0.0038159666691211 +4489.120 0.005469151955157 +4499.120 0.0049075209173836 +4509.110 0.002649285648969 +4519.110 0.0013916987804899 +4529.100 0.0023998297521869 +4539.090 0.0038630219733651 +4549.090 0.0041725310261388 +4559.080 0.0031289801957178 +4569.080 0.0020110738305816 +4579.070 0.0019870963483393 +4589.070 0.002191420037652 +4599.060 0.0018406845500468 +4609.050 0.0011385280750117 +4619.050 0.0007594274011402 +4629.040 0.00087678522847243 +4639.040 0.0011249222580268 +4649.030 0.0011341370502031 +4659.020 0.0011462535246013 +4669.020 0.0015963659028934 +4679.010 0.0024726424098241 +4689.010 0.0030068413053051 +4699.000 0.002603962680456 +4708.990 0.0017987888079545 +4718.990 0.0014040251456908 +4728.980 0.0016755087444703 +4738.980 0.002177489295466 +4748.970 0.0023486920333469 +4758.970 0.0020829558832946 +4768.960 0.0018660000177174 +4778.950 0.002215059040708 +4788.950 0.0031384776820817 +4798.940 0.0041855519026953 +4808.940 0.0047026021430328 +4818.930 0.0042623183842051 +4828.920 0.0033869108483343 +4838.920 0.0024023710308675 +4848.910 0.0015861923787352 +4858.910 0.0014205437245275 +4868.900 0.0022510422288618 +4878.890 0.0041980197268167 +4888.890 0.0061275472601809 +4898.880 0.0066743229446149 +4908.880 0.0054680387927494 +4918.870 0.0036412071136536 +4928.870 0.0026423834721874 +4938.860 0.0032095535171861 +4948.850 0.0051665785288905 +4958.850 0.0080215399077462 +4968.840 0.010915676381017 
+4978.840 0.013184261377986 +4988.830 0.014638163290972 +4998.820 0.015207171139077 +5008.820 0.01488566079731 +5018.810 0.013719511350726 +5028.810 0.012188537989717 +5038.800 0.010463857759635 +5048.790 0.008857850265311 +5058.790 0.0074643941465607 +5068.780 0.0064922333486447 +5078.780 0.0063862841182279 +5088.770 0.0072924135461072 +5098.770 0.0085739047796755 +5108.760 0.0092846913453615 +5118.750 0.0092293286644982 +5128.750 0.0090154575040874 +5138.740 0.0096418264754396 +5148.740 0.010918794268614 +5158.730 0.011769548684928 +5168.720 0.011494670377109 +5178.720 0.010647697788314 +5188.710 0.011119396055422 +5198.710 0.013504491527394 +5208.700 0.016668611773929 +5218.690 0.018062780681459 +5228.690 0.017033208329435 +5238.680 0.015005380433075 +5248.680 0.01447860609999 +5258.670 0.01657785767815 +5268.670 0.01958232809472 +5278.660 0.021642875109229 +5288.650 0.022208101952549 +5298.650 0.022101448625035 +5308.640 0.021265715551366 +5318.640 0.018455490536066 +5328.630 0.013472882331065 +5338.620 0.0093757519838535 +5348.620 0.008158786498743 +5358.610 0.0089445409322939 +5368.610 0.013055939482046 +5378.600 0.029430699622136 +5388.590 0.055072583791663 +5398.590 0.098966162555504 +5408.580 0.26478578413047 +5418.580 0.52152093739391 +5428.570 0.65157221422763 +5438.570 0.68504800885672 +5448.560 0.69348490596313 +5458.550 0.69751282390087 +5468.550 0.69939616531267 +5478.540 0.70129426661459 +5488.540 0.70261323352984 +5498.530 0.70402755910426 +5508.520 0.70552192536322 +5518.520 0.70746272258824 +5528.510 0.70862337773673 +5538.510 0.71042760078267 +5548.500 0.71194072195338 +5558.500 0.71301196344608 +5568.490 0.71401303884674 +5578.480 0.71574991366373 +5588.480 0.71695938760185 +5598.470 0.71800488631136 +5608.470 0.71906084394614 +5618.460 0.7206412789746 +5628.450 0.72168347895724 +5638.450 0.7222594049741 +5648.440 0.72354792212106 +5658.440 0.72498348934643 +5668.430 0.72577626604428 +5678.420 0.72642182621488 +5688.420 0.72741663950544 
+5698.410 0.72814100430195 +5708.410 0.72981989075814 +5718.400 0.73072607649955 +5728.400 0.73186649434245 +5738.390 0.73267253149841 +5748.380 0.73353661853071 +5758.380 0.73456251910471 +5768.370 0.73481520001219 +5778.370 0.73604177629867 +5788.360 0.73697881160407 +5798.350 0.73772872920691 +5808.350 0.73821850752894 +5818.340 0.73949869568861 +5828.340 0.74002529645517 +5838.330 0.74063631239788 +5848.320 0.74223180963898 +5858.320 0.74267049496043 +5868.310 0.74312341983756 +5878.310 0.74404893509719 +5888.300 0.74379323328503 +5898.300 0.74408539218787 +5908.290 0.7453677969365 +5918.280 0.74621855406871 +5928.280 0.74677909372091 +5938.270 0.74790338452716 +5948.270 0.74854737239955 +5958.260 0.74874648042409 +5968.250 0.74946942932513 +5978.250 0.74982619560218 +5988.240 0.75029640748311 +5998.240 0.75078244598891 +6008.230 0.75167738992787 +6018.220 0.75195028577403 +6028.220 0.75209550464419 +6038.210 0.75279616292191 +6048.210 0.7534604638549 +6058.200 0.75336748320712 +6068.200 0.75446717013109 +6078.190 0.75444559209972 +6088.180 0.75383962468349 +6098.180 0.75384594311573 +6108.170 0.75448129495368 +6118.170 0.75471794302129 +6128.160 0.75489300675431 +6138.150 0.75525094503945 +6148.150 0.75542455037631 +6158.140 0.75573453084327 +6168.140 0.75570310648917 +6178.130 0.75564670829285 +6188.120 0.75622134457586 +6198.120 0.75598650906028 +6208.110 0.75594613506249 +6218.110 0.756256647077 +6228.100 0.75669562009595 +6238.100 0.75719862123529 +6248.090 0.75696775032618 +6258.080 0.75767115461061 +6268.080 0.75747051249984 +6278.070 0.75802497185897 +6288.070 0.75823516226824 +6298.060 0.75722442224016 +6308.050 0.75709657180328 +6318.050 0.75741417842801 +6328.040 0.75775965771516 +6338.040 0.75798612056007 +6348.030 0.7575653850704 +6358.020 0.7577846789686 +6368.020 0.75805908533789 +6378.010 0.75820198155069 +6388.010 0.75893427095814 +6398.000 0.75950923206492 +6408.000 0.75979778732508 +6417.990 0.75852932871412 +6427.980 0.75787985015038 
+6437.980 0.75811778193895 +6447.970 0.7577258448963 +6457.970 0.75846837242834 +6467.960 0.75885069685602 +6477.950 0.75824809501231 +6487.950 0.7575648821369 +6497.940 0.75767216305622 +6507.940 0.75882348814418 +6517.930 0.75888951773332 +6527.920 0.75853024397401 +6537.920 0.75775284355406 +6547.910 0.75731717390092 +6557.910 0.75700209169956 +6567.900 0.75795848480387 +6577.900 0.75754399909288 +6587.890 0.75722829692287 +6597.880 0.75747243197232 +6607.880 0.75708854103291 +6617.870 0.75751727971505 +6627.870 0.75680299174188 +6637.860 0.75705834765783 +6647.850 0.75590949797028 +6657.850 0.75556853192996 +6667.840 0.75605248397383 +6677.840 0.75607149004025 +6687.830 0.75652840128407 +6697.820 0.75584335243906 +6707.820 0.75542388541319 +6717.810 0.75619600205319 +6727.810 0.75711659768978 +6737.800 0.75674770449548 +6747.800 0.75702552552465 +6757.790 0.75692031251085 +6767.780 0.75735958138062 +6777.780 0.7575182565327 +6787.770 0.75716700468388 +6797.770 0.75645374034373 +6807.760 0.75824515375142 +6817.750 0.75715939577143 +6827.750 0.75789535459157 +6837.740 0.75735986249054 +6847.740 0.75680089335351 +6857.730 0.75733590098527 +6867.720 0.75788969312547 +6877.720 0.75705557905583 +6887.710 0.75766741756827 +6897.710 0.75752812354956 +6907.700 0.75683216515874 +6917.700 0.75686380340304 +6927.690 0.75830353259964 +6937.680 0.75638839858959 +6947.680 0.75741581074026 +6957.670 0.75623493442731 +6967.670 0.75666254125879 +6977.660 0.75673545796586 +6987.650 0.7565582892044 +6997.650 0.75567848030726 +7007.640 0.75677776974179 +7017.640 0.75627224048648 +7027.630 0.7553807753322 +7037.620 0.75616255841218 +7047.620 0.75648844227034 +7057.610 0.75622967612262 +7067.610 0.75591929304253 +7077.600 0.75590370492396 +7087.600 0.75433122590147 +7097.590 0.75506590345157 +7107.580 0.75519745318298 +7117.580 0.75495535374232 +7127.570 0.75388872863049 +7137.570 0.75412459122268 +7147.560 0.75451647510116 +7157.550 0.75452027205857 +7167.550 0.75418106927016 
+7177.540 0.75317150138935 +7187.540 0.75251775840512 +7197.530 0.75388235301711 +7207.520 0.75337132789133 +7217.520 0.75434311338633 +7227.510 0.75417841223249 +7237.510 0.75362399218033 +7247.500 0.75339777473383 +7257.500 0.75214848920468 +7267.490 0.75334289716519 +7277.480 0.75190899294069 +7287.480 0.75102489262193 +7297.470 0.75178627371052 +7307.470 0.75128284298361 +7317.460 0.7505331801578 +7327.450 0.75026413003566 +7337.450 0.75109533639664 +7347.440 0.75084114431948 +7357.440 0.75042690648153 +7367.430 0.75034274417101 +7377.430 0.74927680597834 +7387.420 0.74950257972589 +7397.410 0.74903597653949 +7407.410 0.74816016362582 +7417.400 0.74694885541503 +7427.400 0.74627142819768 +7437.390 0.74617200383489 +7447.380 0.7468706725726 +7457.380 0.74636660711056 +7467.370 0.74529050764697 +7477.370 0.74422467433808 +7487.360 0.74446252310029 +7497.350 0.7434215702289 +7507.350 0.7414989483712 +7517.340 0.74160470400977 +7527.340 0.74146098717775 +7537.330 0.74025387579879 +7547.330 0.7405986214532 +7557.320 0.73982573054869 +7567.310 0.73967782025902 +7577.310 0.73897117538846 +7587.300 0.73832886734726 +7597.300 0.73698272259756 +7607.290 0.73641068114224 +7617.280 0.73615653828958 +7627.280 0.73503503066034 +7637.270 0.73364433768867 +7647.270 0.73378968647958 +7657.260 0.7333577233541 +7667.250 0.73288964198882 +7677.250 0.73308713262742 +7687.240 0.73247890542474 +7697.240 0.73166173021043 +7707.230 0.73116929329985 +7717.230 0.73016066587466 +7727.220 0.7289491240918 +7737.210 0.72875909338676 +7747.210 0.72747139833289 +7757.200 0.72582881555014 +7767.200 0.72573458329529 +7777.190 0.72482075917079 +7787.180 0.72365271970866 +7797.180 0.7236466436918 +7807.170 0.72293215573379 +7817.170 0.72154114475589 +7827.160 0.72087095899613 +7837.150 0.71804705559577 +7847.150 0.71812810609631 +7857.140 0.71779147019493 +7867.140 0.71520878937939 +7877.130 0.71317311273569 +7887.130 0.712551049217 +7897.120 0.71319305096669 +7907.110 0.71224744037969 +7917.110 
0.71099189644703 +7927.100 0.71002733626669 +7937.100 0.70863721040215 +7947.090 0.70714558557684 +7957.080 0.70506396271971 +7967.080 0.70424931752515 +7977.070 0.70230486212971 +7987.070 0.70126147860348 +7997.060 0.70091705912214 +8007.050 0.69792382125994 +8017.050 0.69709252099253 +8027.040 0.69643469604925 +8037.040 0.69310315958 +8047.030 0.69178717801081 +8057.030 0.69127798422358 +8067.020 0.69002670321429 +8077.010 0.6882244731268 +8087.010 0.6870896762242 +8097.000 0.6839128511515 +8107.000 0.68310659129323 +8116.990 0.68241911629537 +8126.980 0.68079905383961 +8136.980 0.67901690309709 +8146.970 0.67669912895804 +8156.970 0.67533405919467 +8166.960 0.67327867005991 +8176.950 0.66976952659535 +8186.950 0.66902906836705 +8196.940 0.66666055898708 +8206.940 0.66502866460775 +8216.930 0.66263355545988 +8226.930 0.66099337692568 +8236.920 0.65926464899262 +8246.910 0.65644217831596 +8256.910 0.65625805207753 +8266.900 0.65290295765942 +8276.900 0.6520249135266 +8286.890 0.6505936518845 +8296.880 0.64842586169791 +8306.880 0.64580795810202 +8316.870 0.6434841963777 +8326.870 0.63909997455472 +8336.860 0.63681651896476 +8346.850 0.63535792809447 +8356.850 0.63378590628387 +8366.840 0.63167478736561 +8376.840 0.62955393175452 +8386.830 0.62744343020531 +8396.830 0.62396875417645 +8406.820 0.6211783641969 +8416.810 0.61957539889752 +8426.810 0.61688645785512 +8436.800 0.61436837236046 +8446.800 0.61175763749629 +8456.790 0.60778526127727 +8466.780 0.60432424479245 +8476.780 0.60376609775782 +8486.770 0.60142892309807 +8496.770 0.59826167765443 +8506.760 0.59555270867158 +8516.750 0.59198426090626 +8526.750 0.58720181621604 +8536.740 0.5853846753161 +8546.740 0.58316269474943 +8556.730 0.57879959545287 +8566.730 0.57618336780876 +8576.720 0.57494939953259 +8586.710 0.5716794607612 +8596.710 0.56763477538524 +8606.700 0.56508799673329 +8616.700 0.561631312651 +8626.690 0.55856438852838 +8636.680 0.5553352513111 +8646.680 0.55152131774258 +8656.670 0.54783954233821 
+8666.670 0.54469497628019 +8676.660 0.54222105365277 +8686.650 0.538258847283 +8696.650 0.53534984611295 +8706.640 0.53202365037088 +8716.640 0.52865826149872 +8726.630 0.52506654787573 +8736.630 0.52163561373824 +8746.620 0.51820272966638 +8756.610 0.51460413474214 +8766.610 0.51108045171206 +8776.600 0.50616320433653 +8786.600 0.50321086680027 +8796.590 0.4993327922659 +8806.580 0.49593686876588 +8816.580 0.49278241286922 +8826.570 0.48925400721951 +8836.570 0.48518375620363 +8846.560 0.48162773761241 +8856.550 0.47831343522124 +8866.550 0.47482064405227 +8876.540 0.4708890055848 +8886.540 0.46710540497257 +8896.530 0.46279636748755 +8906.530 0.45924203066999 +8916.520 0.45585563313013 +8926.510 0.45216001375879 +8936.510 0.44835063479816 +8946.500 0.44439151179595 +8956.500 0.44053211998481 +8966.490 0.43699475204292 +8976.480 0.43239239580763 +8986.480 0.42877146859883 +8996.470 0.424942079699 +9006.470 0.42139591587241 +9016.460 0.4173577485544 +9026.460 0.41367606825463 +9036.450 0.41016835953673 +9046.440 0.40608342577266 +9056.440 0.40250899901318 +9066.430 0.39858307848528 +9076.430 0.39417758377766 +9086.420 0.3897875284369 +9096.410 0.38618637671885 +9106.410 0.38235294457334 +9116.400 0.37794774381993 +9126.400 0.37341137021638 +9136.390 0.36895202440457 +9146.380 0.36328714818545 +9156.380 0.35754208681169 +9166.370 0.3508166362847 +9176.370 0.34260519968671 +9186.360 0.33253879327 +9196.360 0.31844023715854 +9206.350 0.3006215619 +9216.340 0.27810519532471 +9226.340 0.24885600788643 +9236.330 0.21402132304545 +9246.330 0.17581040981818 +9256.320 0.1363696798 +9266.310 0.098953051090909 +9276.310 0.06665592 +9286.300 0.041176490522727 +9296.300 0.023414895014091 +9306.290 0.012590144566364 +9316.280 0.0064553619204545 +9326.280 0.0034280374725 +9336.270 0.002226009088707 +9346.270 0.0019523646681218 +9356.260 0.0020848321319623 +9366.260 0.0020779097569996 +9376.250 0.0016824318087958 +9386.240 0.0017966062039917 +9396.240 0.0018048528048282 +9406.230 
0.0017333761404298 +9416.230 0.0017535617263281 +9426.220 0.0016186719509396 +9436.210 0.0015486743936481 +9446.210 0.0017731119130382 +9456.200 0.0016531220250123 +9466.200 0.0017179207674836 +9476.190 0.0017453710317067 +9486.180 0.0020272159867925 +9496.180 0.0020784246398906 +9506.170 0.0020680387522659 +9516.170 0.0020882107793435 +9526.160 0.0019371734179243 +9536.160 0.0022359544177331 +9546.150 0.0018944983631254 +9556.140 0.001724830936193 +9566.140 0.0015570323169351 +9576.130 0.0013026837379996 +9586.130 0.0012895514161936 +9596.120 0.00096282755438635 +9606.110 0.0008088432407245 +9616.110 0.00074326667497407 +9626.100 0.00062661809949121 +9636.100 0.00057290349305953 +9646.090 0.00071019040475772 +9656.080 0.00077073183609538 +9666.080 0.00068011344382201 +9676.070 0.00065359859877905 +9686.070 0.00078419422311556 +9696.060 0.0007481979450244 +9706.060 0.00079932257049638 +9716.050 0.00085253228420119 +9726.040 0.00086729775556006 +9736.040 0.00078571303618106 +9746.030 0.00087756426995614 +9756.030 0.00078327224179827 +9766.020 0.00071942994733045 +9776.010 0.00073511791287835 +9786.010 0.00069026505263581 +9796.000 0.00061793427523083 +9806.000 0.00052330574625916 +9815.990 0.0004866337237607 +9825.980 0.00051533510351816 +9835.980 0.0005196795904907 +9845.970 0.00044117052286289 +9855.970 0.00041505572377556 +9865.960 0.00040388144251169 diff --git a/filt_func/nsfcam/L.dat b/filt_func/nsfcam/L.dat new file mode 100755 index 00000000..43589640 --- /dev/null +++ b/filt_func/nsfcam/L.dat @@ -0,0 +1,390 @@ +30156.8 0.000962257 +30175 0.00120592 +30193.2 0.00134921 +30211.5 0.00127125 +30229.7 0.00150251 +30248 0.00161243 +30266.3 0.00188732 +30284.7 0.00170279 +30303 0.00204206 +30321.4 0.00192571 +30339.8 0.00215411 +30358.2 0.0022676 +30376.7 0.00244951 +30395.1 0.00242066 +30413.6 0.0025301 +30432.1 0.00248027 +30450.7 0.0023284 +30469.2 0.00224257 +30487.8 0.00251317 +30506.4 0.0024693 +30525 0.00250125 +30543.7 0.00249219 +30562.3 0.00242495 +30581 
0.00247026 +30599.8 0.00256109 +30618.5 0.00265122 +30637.3 0.00268054 +30656 0.0026195 +30674.8 0.00252914 +30693.7 0.0025599 +30712.5 0.00271273 +30731.4 0.00289178 +30750.3 0.0028379 +30769.2 0.0027976 +30788.2 0.00280762 +30807.1 0.00294924 +30826.1 0.00277042 +30845.2 0.00315022 +30864.2 0.00310421 +30883.3 0.0032618 +30902.3 0.00327802 +30921.5 0.00363994 +30940.6 0.00354338 +30959.8 0.00367117 +30978.9 0.00364733 +30998.1 0.00382757 +31017.4 0.00397182 +31036.6 0.00422025 +31055.9 0.00454044 +31075.2 0.00470376 +31094.5 0.00489855 +31113.9 0.00507116 +31133.3 0.00551271 +31152.6 0.00583959 +31172.1 0.00608921 +31191.5 0.00644302 +31211 0.00705314 +31230.5 0.00754762 +31250 0.00817514 +31269.5 0.0087316 +31289.1 0.00952125 +31308.7 0.0100901 +31328.3 0.0112662 +31348 0.012145 +31367.6 0.0135512 +31387.3 0.0147848 +31407 0.0162675 +31426.8 0.0178838 +31446.5 0.0200837 +31466.3 0.0222387 +31486.1 0.0248327 +31506 0.0277917 +31525.9 0.0313036 +31545.7 0.0353098 +31565.7 0.0398614 +31585.6 0.0450525 +31605.6 0.0513139 +31625.6 0.0586095 +31645.6 0.0669079 +31665.6 0.0764895 +31685.7 0.0874972 +31705.8 0.100346 +31725.9 0.115176 +31746 0.132822 +31766.2 0.152965 +31786.4 0.176419 +31806.6 0.202869 +31826.9 0.232942 +31847.1 0.26663 +31867.4 0.304328 +31887.8 0.346514 +31908.1 0.39176 +31928.5 0.440715 +31948.9 0.49065 +31969.3 0.54304 +31989.8 0.595515 +32010.2 0.646355 +32030.7 0.69369 +32051.3 0.736929 +32071.8 0.776407 +32092.4 0.810945 +32113 0.840741 +32133.7 0.865207 +32154.3 0.882942 +32175 0.895848 +32195.8 0.907165 +32216.5 0.914134 +32237.3 0.918469 +32258.1 0.918946 +32278.9 0.921683 +32299.7 0.921975 +32320.6 0.922349 +32341.5 0.920204 +32362.5 0.919714 +32383.4 0.918919 +32404.4 0.919387 +32425.4 0.917863 +32446.5 0.919708 +32467.5 0.918676 +32488.6 0.920314 +32509.8 0.921267 +32530.9 0.924305 +32552.1 0.924901 +32573.3 0.92679 +32594.5 0.927062 +32615.8 0.92867 +32637.1 0.930581 +32658.4 0.932568 +32679.7 0.933752 +32701.1 0.934407 +32722.5 0.934966 
+32743.9 0.935562 +32765.4 0.935877 +32786.9 0.935577 +32808.4 0.937084 +32829.9 0.935535 +32851.5 0.935269 +32873.1 0.933562 +32894.7 0.934837 +32916.4 0.934186 +32938.1 0.93382 +32959.8 0.930758 +32981.5 0.932153 +33003.3 0.930245 +33025.1 0.931608 +33046.9 0.929196 +33068.8 0.928553 +33090.7 0.927891 +33112.6 0.927366 +33134.5 0.925955 +33156.5 0.925616 +33178.5 0.924155 +33200.5 0.9242 +33222.6 0.924963 +33244.7 0.926667 +33266.8 0.926907 +33288.9 0.927095 +33311.1 0.927768 +33333.3 0.927881 +33355.6 0.927298 +33377.8 0.928207 +33400.1 0.929544 +33422.5 0.92875 +33444.8 0.931329 +33467.2 0.931714 +33489.6 0.933643 +33512.1 0.933482 +33534.5 0.934703 +33557 0.933673 +33579.6 0.936075 +33602.2 0.935964 +33624.7 0.937671 +33647.4 0.937828 +33670 0.93953 +33692.7 0.93949 +33715.4 0.940382 +33738.2 0.939342 +33761 0.939697 +33783.8 0.939215 +33806.6 0.939933 +33829.5 0.939316 +33852.4 0.93844 +33875.3 0.936932 +33898.3 0.937429 +33921.3 0.935991 +33944.3 0.934474 +33967.4 0.93386 +33990.5 0.933919 +34013.6 0.934497 +34036.8 0.932878 +34059.9 0.932388 +34083.2 0.930515 +34106.4 0.930335 +34129.7 0.926779 +34153 0.928042 +34176.3 0.927017 +34199.7 0.927041 +34223.1 0.925245 +34246.6 0.925419 +34270 0.922434 +34293.6 0.922526 +34317.1 0.920628 +34340.7 0.922238 +34364.3 0.922019 +34387.9 0.92312 +34411.6 0.922412 +34435.3 0.922376 +34459 0.922568 +34482.8 0.92339 +34506.6 0.923071 +34530.4 0.922723 +34554.3 0.923598 +34578.1 0.924315 +34602.1 0.92512 +34626 0.925369 +34650 0.926424 +34674.1 0.926304 +34698.1 0.927742 +34722.2 0.927231 +34746.4 0.928697 +34770.5 0.928282 +34794.7 0.928515 +34818.9 0.928225 +34843.2 0.931308 +34867.5 0.93055 +34891.8 0.931965 +34916.2 0.931467 +34940.6 0.933056 +34965 0.931511 +34989.5 0.93317 +35014 0.932156 +35038.5 0.933175 +35063.1 0.93295 +35087.7 0.933402 +35112.4 0.93237 +35137 0.932794 +35161.7 0.933112 +35186.5 0.933091 +35211.3 0.932481 +35236.1 0.931611 +35260.9 0.931829 +35285.8 0.931225 +35310.7 0.931367 +35335.7 0.930309 
+35360.7 0.930468 +35385.7 0.928837 +35410.8 0.929125 +35435.9 0.927485 +35461 0.928169 +35486.2 0.926087 +35511.4 0.926735 +35536.6 0.924653 +35561.9 0.925792 +35587.2 0.923333 +35612.5 0.924337 +35637.9 0.922857 +35663.3 0.923206 +35688.8 0.922176 +35714.3 0.922241 +35739.8 0.920767 +35765.4 0.921537 +35791 0.921272 +35816.6 0.921205 +35842.3 0.920734 +35868 0.921399 +35893.8 0.922207 +35919.5 0.922209 +35945.4 0.923026 +35971.2 0.923312 +35997.1 0.924458 +36023.1 0.923587 +36049 0.925046 +36075 0.925553 +36101.1 0.927519 +36127.2 0.927259 +36153.3 0.929243 +36179.5 0.929186 +36205.6 0.931319 +36231.9 0.931036 +36258.2 0.933156 +36284.5 0.932827 +36310.8 0.935527 +36337.2 0.935068 +36363.6 0.936368 +36390.1 0.93617 +36416.6 0.938059 +36443.1 0.938573 +36469.7 0.939709 +36496.4 0.93988 +36523 0.939998 +36549.7 0.94011 +36576.4 0.940587 +36603.2 0.940885 +36630 0.940585 +36656.9 0.939915 +36683.8 0.938672 +36710.7 0.93903 +36737.7 0.937852 +36764.7 0.937917 +36791.8 0.936105 +36818.9 0.936005 +36846 0.933661 +36873.2 0.933678 +36900.4 0.930873 +36927.6 0.931034 +36954.9 0.927961 +36982.2 0.927815 +37009.6 0.92528 +37037 0.925085 +37064.5 0.922289 +37092 0.92209 +37119.5 0.92012 +37147.1 0.919756 +37174.7 0.917933 +37202.4 0.917613 +37230.1 0.916242 +37257.8 0.915813 +37285.6 0.914763 +37313.4 0.914014 +37341.3 0.913402 +37369.2 0.912757 +37397.2 0.911949 +37425.1 0.910474 +37453.2 0.909624 +37481.3 0.906503 +37509.4 0.903579 +37537.5 0.898389 +37565.7 0.893606 +37594 0.884823 +37622.3 0.876159 +37650.6 0.863052 +37679 0.849638 +37707.4 0.830408 +37735.8 0.810358 +37764.4 0.784773 +37792.9 0.757942 +37821.5 0.7256 +37850.1 0.692032 +37878.8 0.654269 +37907.5 0.615479 +37936.3 0.57444 +37965.1 0.533411 +37993.9 0.491468 +38022.8 0.450241 +38051.8 0.410215 +38080.7 0.371899 +38109.8 0.335394 +38138.8 0.301191 +38167.9 0.269683 +38197.1 0.240773 +38226.3 0.214541 +38255.5 0.190484 +38284.8 0.169015 +38314.2 0.149827 +38343.6 0.132494 +38373 0.117145 +38402.5 0.103844 
+38432 0.0915825 +38461.5 0.081001 +38491.1 0.071492 +38520.8 0.0633302 +38550.5 0.055959 +38580.2 0.0494764 +38610 0.043674 +38639.9 0.0386024 +38669.8 0.0340512 +38699.7 0.0300536 +38729.7 0.0265405 +38759.7 0.0234788 +38789.8 0.0208454 +38819.9 0.0184088 +38850 0.0161228 +38880.2 0.014158 +38910.5 0.0123668 +38940.8 0.0108354 +38971.2 0.00944018 +39001.6 0.00825095 +39032 0.00721598 +39062.5 0.00630403 +39093 0.00532269 +39123.6 0.00468755 +39154.3 0.00382352 +39185 0.00340962 +39215.7 0.00293064 +39246.5 0.00240946 +39277.3 0.00178313 +39308.2 0.00182629 +39339.1 0.00121212 +39370.1 0.00120258 +39401.1 0.000939846 diff --git a/spisea/atmospheres.py b/spisea/atmospheres.py index 71cc5726..9867db81 100755 --- a/spisea/atmospheres.py +++ b/spisea/atmospheres.py @@ -12,13 +12,13 @@ log = logging.getLogger('atmospheres') -def get_atmosphere_bounds(model_dir, metallicity=0, temperature=20000, gravity=4): +def get_atmosphere_bounds(model_dir, metallicity=0, temperature=20000, gravity=4, verbose=False): """ Given atmosphere model, get temperature and gravity bounds """ # Open catalog fits file and break out row indices catalog = Table.read('{0}/grid/{1}/catalog.fits'.format(os.environ['PYSYN_CDBS'], model_dir)) - + teff_arr = [] z_arr = [] logg_arr = [] @@ -36,11 +36,11 @@ def get_atmosphere_bounds(model_dir, metallicity=0, temperature=20000, gravity=4 metal_list = np.unique(np.array(z_arr)) metal_idx = np.argmin(np.abs(metal_list - metallicity)) metallicity_new = metal_list[metal_idx] - + z_filt = np.where(z_arr == metal_list[metal_idx]) teff_arr = teff_arr[z_filt] logg_arr = logg_arr[z_filt] - + # # Now find the closest atmosphere in parameter space to # # the one we want. 
We'll find the match with the lowest # # fractional difference @@ -52,50 +52,51 @@ def get_atmosphere_bounds(model_dir, metallicity=0, temperature=20000, gravity=4 # # temperature_new = teff_arr[idx_f] # gravity_new = logg_arr[idx_f] - + # First check if temperature within bounds temperature_new = temperature if temperature > np.max(teff_arr): temperature_new = np.max(teff_arr) if temperature < np.min(teff_arr): temperature_new = np.min(teff_arr) - + # If temperature within bounds, then check if metallicity within bounds teff_diff = np.abs(teff_arr - temperature) sorted_min_diffs = np.unique(teff_diff) - + ## Find two closest temperatures teff_close_1 = teff_arr[np.where(teff_diff == sorted_min_diffs[0])[0][0]] teff_close_2 = teff_arr[np.where(teff_diff == sorted_min_diffs[1])[0][0]] - + logg_arr_1 = logg_arr[np.where(teff_arr == teff_close_1)] logg_arr_2 = logg_arr[np.where(teff_arr == teff_close_2)] - + ## Switch to most conservative bound of logg out of two closest temps gravity_new = gravity if gravity > np.min([np.max(logg_arr_1), np.max(logg_arr_2)]): gravity_new = np.min([np.max(logg_arr_1), np.max(logg_arr_2)]) if gravity < np.max([np.min(logg_arr_1), np.min(logg_arr_2)]): gravity_new = np.max([np.min(logg_arr_1), np.min(logg_arr_2)]) - - # Print out changes, if any - if temperature_new != temperature: - teff_msg = 'Changing to T={0:6.0f} for met={1:4.2f} T={2:6.0f} logg={3:4.2f}' - print( teff_msg.format(temperature_new, metallicity, temperature, gravity)) - - if gravity_new != gravity: - logg_msg = 'Changing to logg={0:4.2f} for met={1:4.2f} T={2:6.0f} logg={3:4.2f}' - print( logg_msg.format(gravity_new, metallicity, temperature, gravity)) + + if verbose: + # Print out changes, if any + if temperature_new != temperature: + teff_msg = 'Changing to T={0:6.0f} for met={1:4.2f} T={2:6.0f} logg={3:4.2f}' + print( teff_msg.format(temperature_new, metallicity, temperature, gravity)) + + if gravity_new != gravity: + logg_msg = 'Changing to logg={0:4.2f} for 
met={1:4.2f} T={2:6.0f} logg={3:4.2f}' + print( logg_msg.format(gravity_new, metallicity, temperature, gravity)) if metallicity_new != metallicity: logg_msg = 'Changing to met={0:4.2f} for met={1:4.2f} T={2:6.0f} logg={3:4.2f}' print( logg_msg.format(metallicity_new, metallicity, temperature, gravity)) - + return (temperature_new, gravity_new, metallicity_new) def get_kurucz_atmosphere(metallicity=0, temperature=20000, gravity=4, rebin=False): """ - Return atmosphere from the Kurucz pysnphot grid + Return atmosphere from the Kurucz pysnphot grid (`Kurucz 1993 `_). Grid Range: @@ -114,7 +115,7 @@ def get_kurucz_atmosphere(metallicity=0, temperature=20000, gravity=4, rebin=Fal gravity: float The stellar gravity, in cgs units - + rebin: boolean Always false for this particular function """ @@ -126,7 +127,7 @@ def get_kurucz_atmosphere(metallicity=0, temperature=20000, gravity=4, rebin=Fal metallicity=metallicity, temperature=temperature, gravity=gravity) - + sp = pysynphot.Icat('k93models', temperature, metallicity, gravity) # Do some error checking @@ -141,10 +142,10 @@ def get_kurucz_atmosphere(metallicity=0, temperature=20000, gravity=4, rebin=Fal def get_castelli_atmosphere(metallicity=0, temperature=20000, gravity=4, rebin=False): """ - Return atmospheres from the pysynphot ATLAS9 atlas + Return atmospheres from the pysynphot ATLAS9 atlas (`Castelli & Kurucz 2004 `_). - Grid Range: + Grid Range: * Teff: 3500 - 50000 K * gravity: 0 - 5.0 cgs @@ -160,7 +161,7 @@ def get_castelli_atmosphere(metallicity=0, temperature=20000, gravity=4, rebin=F gravity: float The stellar gravity, in cgs units - + rebin: boolean If true, rebins the atmospheres so that they are the same resolution as the Castelli+04 atmospheres. 
Default is False, @@ -177,9 +178,9 @@ def get_castelli_atmosphere(metallicity=0, temperature=20000, gravity=4, rebin=F metallicity=metallicity, temperature=temperature, gravity=gravity) - + sp = pysynphot.Icat('ck04models', temperature, metallicity, gravity) - + # Do some error checking idx = np.where(sp.flux != 0)[0] if len(idx) == 0: @@ -204,7 +205,7 @@ def get_nextgen_atmosphere(metallicity=0, temperature=5000, gravity=4, rebin=Fal metallicity=metallicity, temperature=temperature, gravity=gravity) - + sp = pysynphot.Icat('nextgen', temperature, metallicity, gravity) # Do some error checking @@ -238,7 +239,7 @@ def get_amesdusty_atmosphere(metallicity=0, temperature=5000, gravity=4, rebin=F def get_phoenix_atmosphere(metallicity=0, temperature=5000, gravity=4, rebin=False): """ - Return atmosphere from the pysynphot + Return atmosphere from the pysynphot `PHOENIX atlas `_. Parameters @@ -251,7 +252,7 @@ def get_phoenix_atmosphere(metallicity=0, temperature=5000, gravity=4, gravity: float The stellar gravity, in cgs units - + rebin: boolean If true, rebins the atmospheres so that they are the same resolution as the Castelli+04 atmospheres. 
Default is False, @@ -266,7 +267,7 @@ def get_phoenix_atmosphere(metallicity=0, temperature=5000, gravity=4, metallicity=metallicity, temperature=temperature, gravity=gravity) - + sp = pysynphot.Icat('phoenix', temperature, metallicity, gravity) # Do some error checking @@ -279,7 +280,7 @@ def get_phoenix_atmosphere(metallicity=0, temperature=5000, gravity=4, return sp -def get_cmfgenRot_atmosphere(metallicity=0, temperature=24000, gravity=4.3, rebin=True): +def get_cmfgenRot_atmosphere(metallicity=0, temperature=24000, gravity=4.3, rebin=True, verbose=False): """ metallicity = [M/H] (def = 0) temperature = Kelvin (def = 24000) @@ -290,14 +291,15 @@ def get_cmfgenRot_atmosphere(metallicity=0, temperature=24000, gravity=4.3, rebi # Take care of atmospheres outside the catalog boundaries logg_msg = 'Changing to logg={0:3.1f} for T={1:6.0f} logg={2:4.2f}' if gravity > 4.3: - print( logg_msg.format(4.3, temperature, gravity)) + if verbose: + print( logg_msg.format(4.3, temperature, gravity)) gravity = 4.3 - + if rebin: sp = pysynphot.Icat('cmfgen_rot_rebin', temperature, metallicity, gravity) else: sp = pysynphot.Icat('cmfgen_rot', temperature, metallicity, gravity) - + # Do some error checking idx = np.where(sp.flux != 0)[0] if len(idx) == 0: @@ -311,7 +313,7 @@ def get_cmfgenRot_atmosphere(metallicity=0, temperature=24000, gravity=4.3, rebi def get_cmfgenRot_atmosphere_closest(metallicity=0, temperature=24000, gravity=4.3, rebin=True, verbose=False): """ - For a given stellar atmosphere, get extract the closest possible match in + For a given stellar atmosphere, get extract the closest possible match in Teff/logg space. Note that this is different from the normal routine which interpolates along the input grid to get final spectrum. 
We can't do this here because the Fierro+15 atmosphere grid is so sparse @@ -346,7 +348,7 @@ def get_cmfgenRot_atmosphere_closest(metallicity=0, temperature=24000, gravity=4 # fractional difference teff_diff = (teff_arr - temperature) / temperature logg_diff = (logg_arr - gravity) / gravity - + diff_tot = abs(teff_diff) + abs(logg_diff) idx_f = np.where(diff_tot == min(diff_tot))[0][0] @@ -354,7 +356,7 @@ def get_cmfgenRot_atmosphere_closest(metallicity=0, temperature=24000, gravity=4 # pysynphot object infile = cat[idx_f]['FILENAME'].split('.') spec = Table.read('{0}/{1}.fits'.format(root_dir, infile[0])) - + # Now, the CMFGEN atmospheres assume a distance of 1 kpc, while the the # ATLAS models are in FLAM at the surface. So, we need to multiply the # CMFGEN atmospheres by (1000/R)**2. in order to convert to FLAM on surface. @@ -368,13 +370,13 @@ def get_cmfgenRot_atmosphere_closest(metallicity=0, temperature=24000, gravity=4 radius = np.sqrt( lum / (4.0 * np.pi * teff**4. * sigma) ) # in cm radius /= 3.08*10**18 # in pc - + # Make the pysynphot spectrum w = spec['Wavelength'] f = spec['Flux'] * (1000 / radius)**2. sp = pysynphot.ArraySpectrum(w,f) - + #sp = pysynphot.FileSpectrum('{0}/{1}.fits'.format(root_dir, infile[0])) # Print out parameters of match, if desired @@ -396,7 +398,7 @@ def get_cmfgenNoRot_atmosphere(metallicity=0, temperature=22500, gravity=3.98, r sp = pysynphot.Icat('cmfgen_norot_rebin', temperature, metallicity, gravity) else: sp = pysynphot.Icat('cmfgen_norot', temperature, metallicity, gravity) - + # Do some error checking idx = np.where(sp.flux != 0)[0] if len(idx) == 0: @@ -427,9 +429,9 @@ def get_cmfgenNoRot_atmosphere(metallicity=0, temperature=30000, gravity=4.14): def get_phoenixv16_atmosphere(metallicity=0, temperature=4000, gravity=4, rebin=True): """ - Return PHOENIX v16 atmospheres from - `Husser et al. 2013 `_. - + Return PHOENIX v16 atmospheres from + `Husser et al. 2013 `_. + Models originally downloaded via `ftp `_. 
Solar metallicity and [alpha/Fe] is used. @@ -449,7 +451,7 @@ def get_phoenixv16_atmosphere(metallicity=0, temperature=4000, gravity=4, rebin= gravity: float The stellar gravity, in cgs units - + rebin: boolean If true, rebins the atmospheres so that they are the same resolution as the Castelli+04 atmospheres. Default is False, @@ -470,9 +472,9 @@ def get_phoenixv16_atmosphere(metallicity=0, temperature=4000, gravity=4, rebin= metallicity=metallicity, temperature=temperature, gravity=gravity) - + sp = pysynphot.Icat(atm_model_name, temperature, metallicity, gravity) - + # Do some error checking idx = np.where(sp.flux != 0)[0] if len(idx) == 0: @@ -485,14 +487,14 @@ def get_phoenixv16_atmosphere(metallicity=0, temperature=4000, gravity=4, rebin= def get_BTSettl_2015_atmosphere(metallicity=0, temperature=2500, gravity=4, rebin=True): """ - Return atmosphere from CIFIST2011_2015 grid - (`Allard et al. 2012 `_, + Return atmosphere from CIFIST2011_2015 grid + (`Allard et al. 2012 `_, `Baraffe et al. 2015 `_ ) Grid originally downloaded from `website `_. Grid Range: - + * Teff: 1200 - 7000 K * gravity: 2.5 - 5.5 cgs * [M/H] = 0 @@ -507,11 +509,11 @@ def get_BTSettl_2015_atmosphere(metallicity=0, temperature=2500, gravity=4, rebi gravity: float The stellar gravity, in cgs units - + rebin: boolean If true, rebins the atmospheres so that they are the same resolution as the Castelli+04 atmospheres. Default is False, - which is often sufficient synthetic photometry in most cases. + which is often sufficient synthetic photometry in most cases. 
""" if rebin == True: atm_name = 'BTSettl_2015_rebin' @@ -526,10 +528,10 @@ def get_BTSettl_2015_atmosphere(metallicity=0, temperature=2500, gravity=4, rebi metallicity=metallicity, temperature=temperature, gravity=gravity) - + sp = pysynphot.Icat(atm_name, temperature, metallicity, gravity) - - + + # Do some error checking idx = np.where(sp.flux != 0)[0] if len(idx) == 0: @@ -542,7 +544,7 @@ def get_BTSettl_2015_atmosphere(metallicity=0, temperature=2500, gravity=4, rebi def get_BTSettl_atmosphere(metallicity=0, temperature=2500, gravity=4.5, rebin=True): """ - Return atmosphere from CIFIST2011 grid + Return atmosphere from CIFIST2011 grid (`Allard et al. 2012 `_) Grid originally downloaded `here `_ @@ -550,16 +552,16 @@ def get_BTSettl_atmosphere(metallicity=0, temperature=2500, gravity=4.5, rebin=T Notes ------ Grid Range: - + * [M/H] = -2.5, -2.0, -1.5, -1.0, -0.5, 0, 0.5 - + Teff and gravity ranges depend on metallicity: [M/H] = -2.5 * Teff: 2600 - 4600 K * gravity: 4.5 - 5.5 - + [M/H] = -2.0 * Teff: 2600 - 7000 @@ -573,7 +575,7 @@ def get_BTSettl_atmosphere(metallicity=0, temperature=2500, gravity=4.5, rebin=T [M/H] = -1.0 * Teff: 2600 - 7000 - * gravity: Teff < 3200 --> 4.5 - 5.5; Teff > 3200 --> 2.5 - 5.5 + * gravity: Teff < 3200 --> 4.5 - 5.5; Teff > 3200 --> 2.5 - 5.5 [M/H] = -0.5 @@ -607,7 +609,7 @@ def get_BTSettl_atmosphere(metallicity=0, temperature=2500, gravity=4.5, rebin=T gravity: float The stellar gravity, in cgs units - + rebin: boolean If true, rebins the atmospheres so that they are the same resolution as the Castelli+04 atmospheres. 
Default is False, @@ -626,10 +628,10 @@ def get_BTSettl_atmosphere(metallicity=0, temperature=2500, gravity=4.5, rebin=T metallicity=metallicity, temperature=temperature, gravity=gravity) - + sp = pysynphot.Icat(atm_name, temperature, metallicity, gravity) - - + + # Do some error checking idx = np.where(sp.flux != 0)[0] if len(idx) == 0: @@ -642,7 +644,7 @@ def get_BTSettl_atmosphere(metallicity=0, temperature=2500, gravity=4.5, rebin=T def get_wdKoester_atmosphere(metallicity=0, temperature=20000, gravity=7): """ - Return white dwarf atmospheres from + Return white dwarf atmospheres from `Koester et al. 2010 `_ Parameters @@ -655,7 +657,7 @@ def get_wdKoester_atmosphere(metallicity=0, temperature=20000, gravity=7): gravity: float The stellar gravity, in cgs units - + rebin: boolean If true, rebins the atmospheres so that they are the same resolution as the Castelli+04 atmospheres. Default is False, @@ -670,14 +672,14 @@ def get_wdKoester_atmosphere(metallicity=0, temperature=20000, gravity=7): print( ' temperature = %d' % temperature) print( ' metallicity = %.1f' % metallicity) print( ' log gravity = %.1f' % gravity) - + return sp def get_atlas_phoenix_atmosphere(metallicity=0, temperature=5250, gravity=4): """ Return atmosphere that is a linear merge of atlas ck04 model and phoenixV16. - Only valid for temps between 5000 - 5500K, gravity from 0 = 5.0 + Only valid for temps between 5000 - 5500K, gravity from 0 = 5.0 """ try: sp = pysynphot.Icat('merged_atlas_phoenix', temperature, metallicity, gravity) @@ -687,7 +689,7 @@ def get_atlas_phoenix_atmosphere(metallicity=0, temperature=5250, gravity=4): metallicity=metallicity, temperature=temperature, gravity=gravity) - + sp = pysynphot.Icat('merged_atlas_phoenix', temperature, metallicity, gravity) # Do some error checking @@ -705,7 +707,7 @@ def get_BTSettl_phoenix_atmosphere(metallicity=0, temperature=5250, gravity=4): Return atmosphere that is a linear merge of BTSettl_CITFITS2011_2015 model and phoenixV16. 
- Only valid for temps between 3200 - 3800K, gravity from 2.5 - 5.5 + Only valid for temps between 3200 - 3800K, gravity from 2.5 - 5.5 """ try: sp = pysynphot.Icat('merged_BTSettl_phoenix', temperature, metallicity, gravity) @@ -715,7 +717,7 @@ def get_BTSettl_phoenix_atmosphere(metallicity=0, temperature=5250, gravity=4): metallicity=metallicity, temperature=temperature, gravity=gravity) - + sp = pysynphot.Icat('merged_BTSettl_phoenix', temperature, metallicity, gravity) # Do some error checking @@ -732,7 +734,7 @@ def get_BTSettl_phoenix_atmosphere(metallicity=0, temperature=5250, gravity=4): def get_merged_atmosphere(metallicity=0, temperature=20000, gravity=4.5, verbose=False, rebin=True): """ - Return a stellar atmosphere from a suite of different model grids, + Return a stellar atmosphere from a suite of different model grids, depending on the input temperature, (all values in K). Parameters @@ -745,7 +747,7 @@ def get_merged_atmosphere(metallicity=0, temperature=20000, gravity=4.5, verbose gravity: float The stellar gravity, in cgs units - + rebin: boolean If true, rebins the atmospheres so that they are the same resolution as the Castelli+04 atmospheres. 
Default is False, @@ -756,7 +758,7 @@ def get_merged_atmosphere(metallicity=0, temperature=20000, gravity=4.5, verbose Notes ----- - The underlying stellar model grid used changes as a function of + The underlying stellar model grid used changes as a function of stellar temperature (in K): * T > 20,000: ATLAS @@ -767,14 +769,14 @@ def get_merged_atmosphere(metallicity=0, temperature=20000, gravity=4.5, verbose For T < 3800, there is an additional gravity and metallicity dependence: - If T < 3800 and [M/H] = 0: - + If T < 3800 and [M/H] = 0: + * T < 3800, logg < 2.5: PHOENIX v16 * 3200 <= T < 3800, logg > 2.5: BTSettl_CIFITS2011_2015/PHOENIXV16 merge * 3200 < T <= 1200, logg > 2.5: BTSettl_CIFITS2011_2015 Otherwise, if T < 3800 and [M/H] != 0: - + * T < 3800: PHOENIX v16 References: @@ -783,21 +785,21 @@ def get_merged_atmosphere(metallicity=0, temperature=20000, gravity=4.5, verbose * PHOENIXv16 (`Husser et al. 2013 `_) * BTSettl_CIFITS2011_2015: Baraffee+15, Allard+ (https://phoenix.ens-lyon.fr/Grids/BT-Settl/CIFIST2011_2015/SPECTRA/) - LTE WARNING: + LTE WARNING: The ATLAS atmospheres are calculated with LTE, and so they are less accurate when non-LTE conditions apply (e.g. T > 20,000 K). Ultimately we'd like to add a non-LTE atmosphere grid for the hottest stars in the future. - HOW BOUNDARIES BETWEEN MODELS ARE TREATED: + HOW BOUNDARIES BETWEEN MODELS ARE TREATED: - At the boundary between two models grids a temperature range is defined - where the resulting atmosphere is a weighted average between the two + At the boundary between two models grids a temperature range is defined + where the resulting atmosphere is a weighted average between the two grids. Near one boundary one model - is weighted more heavily, while at the other boundary the other - model is weighted more heavily. 
These are calculated in the - temperature ranges where we switch between model grids, to + is weighted more heavily, while at the other boundary the other + model is weighted more heavily. These are calculated in the + temperature ranges where we switch between model grids, to ensure a smooth transition. """ # For T < 3800, atmosphere depends on metallicity + gravity. @@ -813,7 +815,7 @@ def get_merged_atmosphere(metallicity=0, temperature=20000, gravity=4.5, verbose temperature=temperature, gravity=gravity, rebin=rebin) - + if (temperature >= 3200) & (temperature < 3800) & (gravity > 2.5): if verbose: print( 'BTSettl/Phoenixv16 merged atmosphere') @@ -829,7 +831,7 @@ def get_merged_atmosphere(metallicity=0, temperature=20000, gravity=4.5, verbose temperature=temperature, gravity=gravity, rebin=rebin) - + if (temperature <= 3800) & (metallicity != 0): if verbose: print( 'Phoenixv16 atmosphere') @@ -853,7 +855,7 @@ def get_merged_atmosphere(metallicity=0, temperature=20000, gravity=4.5, verbose return get_atlas_phoenix_atmosphere(metallicity=metallicity, temperature=temperature, gravity=gravity) - + if (temperature >= 5500) & (temperature < 20000): if verbose: print( 'ATLAS merged atmosphere') @@ -873,14 +875,14 @@ def get_merged_atmosphere(metallicity=0, temperature=20000, gravity=4.5, verbose # temperature=temperature, # gravity=gravity) - + def get_wd_atmosphere(metallicity=0, temperature=20000, gravity=4, verbose=False): """ - Return the white dwarf atmosphere from - `Koester et al. 2010 `_. - If desired parameters are + Return the white dwarf atmosphere from + `Koester et al. 2010 `_. + If desired parameters are outside of grid, return a blackbody spectrum instead Parameters @@ -893,7 +895,7 @@ def get_wd_atmosphere(metallicity=0, temperature=20000, gravity=4, verbose=False gravity: float The stellar gravity, in cgs units - + rebin: boolean If true, rebins the atmospheres so that they are the same resolution as the Castelli+04 atmospheres. 
Default is False, @@ -909,7 +911,7 @@ def get_wd_atmosphere(metallicity=0, temperature=20000, gravity=4, verbose=False return get_wdKoester_atmosphere(metallicity=metallicity, temperature=temperature, gravity=gravity) - + except pysynphot.exceptions.ParameterOutOfBounds: # Use a black-body atmosphere. bbspec = get_bb_atmosphere(temperature=temperature, verbose=verbose) @@ -940,29 +942,29 @@ def get_bb_atmosphere(metallicity=None, temperature=20_000, gravity=None, warnings.warn( 'Only `temperature` keyword is used for black-body atmosphere' ) - + if verbose: print('Black-body atmosphere') - + # Modify pysynphot's default waveset to specified bounds pysynphot.refs.set_default_waveset( minwave=wave_min, maxwave=wave_max, num=wave_num ) - + # Get black-body atmosphere for specified temperature from pysynphot bbspec = pysynphot.spectrum.BlackBody(temperature) - + # pysynphot `BlackBody` generates spectrum in `photlam`, need in `flam` bbspec.convert('flam') - + # `BlackBody` spectrum is normalized to solar radius star at 1 kiloparsec. # Need to remove this normalization for SPISEA by multiplying bbspec # by (1000 * 1 parsec / 1 Rsun)**2 = (1000 * 3.08e18 cm / 6.957e10 cm)**2 bbspec *= (1000 * 3.086e18 / 6.957e10)**2 - + return bbspec - + #--------------------------------------# # Atmosphere formatting functions #--------------------------------------# @@ -978,7 +980,7 @@ def download_CMFGEN_atmospheres(Table_rot, Table_norot): Fierro+15 paper Website addresses are hardcoded - + Puts downloaded models in the current working directory. 
""" print( 'WARNING: THIS DOES NOT COMPLETELY WORK') @@ -1047,7 +1049,7 @@ def organize_CMFGEN_atmospheres(path_to_dir): """ # First, record current working directory to return to later start_dir = os.getcwd() - + # Enter atmosphere directory, collect rotating and non-rotating # file names (assumed to all start with "t") os.chdir(path_to_dir) @@ -1072,10 +1074,10 @@ def organize_CMFGEN_atmospheres(path_to_dir): # Also move Tables with model parameters into correct directory os.system('mv Table_rot.txt cmfgenF15_rot') os.system('mv Table_noRot.txt cmfgenF15_noRot') - + # Return to original directory os.chdir(start_dir) - + return def make_CMFGEN_catalog(path_to_dir): @@ -1097,10 +1099,10 @@ def make_CMFGEN_catalog(path_to_dir): """ # Record current working directory for later start_dir = os.getcwd() - + # Enter atmosphere directory os.chdir(path_to_dir) - + # Extract parameters for each atmosphere # Note: can't rely on filename for this because not precise enough!! @@ -1115,7 +1117,7 @@ def make_CMFGEN_catalog(path_to_dir): # lum = float(lumtmp[0][:-5]) * 1000.0 # In L_sun # mass = float(lumtmp[0][5:-1]) # In M_sun - + # Need to calculate log g from T and L (cgs) # lum_sun = 3.846 * 10**33 # erg/s # M_sun = 2 * 10**33 # g @@ -1143,18 +1145,18 @@ def make_CMFGEN_catalog(path_to_dir): #---NOTE: THE FOLLOWING DEPENDS ON FINAL LOCATION OF CATALOG FILE---# #path = path_to_dir + '/' + names[i] path = names[i] + '.fits[Flux]' - + index_str.append(index) name_str.append(path) - + catalog = Table([index_str, name_str], names = ('INDEX', 'FILENAME')) # Create catalog.fits file in directory with the models catalog.write('catalog.fits', format = 'fits') - + # Move back to original directory, create the catalog.fits file os.chdir(start_dir) - + return def cdbs_cmfgen(path_to_dir, path_to_cdbs_dir): @@ -1190,23 +1192,23 @@ def cdbs_cmfgen(path_to_dir, path_to_cdbs_dir): unique = np.unique(wave, return_index=True) wave = wave[unique[1]] flux = flux[unique[1]] - - # Make fits table 
from individual columns. + + # Make fits table from individual columns. c0 = fits.Column(name='Wavelength', format='D', array=wave) c1 = fits.Column(name='Flux', format='E', array=flux) cols = fits.ColDefs([c0, c1]) tbhdu = fits.BinTableHDU.from_columns(cols) - #Adding unit keywords + #Adding unit keywords tbhdu.header['TUNIT1'] = 'ANGSTROM' tbhdu.header['TUNIT2'] = 'FLAM' prihdu = fits.PrimaryHDU() - + finalhdu = fits.HDUList([prihdu, tbhdu]) finalhdu.writeto(i[:-4]+'.fits', overwrite=True) - + print( 'Done {0:2.0f} of {1:2.0f}'.format(counter, len(files))) # Return to original directory, copy over new .fits files to cdbs directory @@ -1223,7 +1225,7 @@ def rebin_cmfgen(cdbs_path, rot=True): cdbs_path: path to cdbs directory rot=True for rotating models (cmfgen_rot), False for non-rotating models - + makes new directory in cdbs/grid: cmfgen_rot_rebin or cmfgen_norot_rebin """ # Get an atlas ck04 model, we will use this to set wavelength grid @@ -1239,7 +1241,7 @@ def rebin_cmfgen(cdbs_path, rot=True): tmp = cdbs_path+'/grid/cmfgen_norot/t0200l0007m009n.fits' path = cdbs_path+'/grid/cmfgen_norot_rebin/' orig_path = cdbs_path+'/grid/cmfgen_norot/' - + cmfgen_hdu = fits.open(tmp) header0 = cmfgen_hdu[0].header # Create rebin directories if they don't already exist. Copy over @@ -1254,7 +1256,7 @@ def rebin_cmfgen(cdbs_path, rot=True): files_all = [cat[ii][1].split('[')[0] for ii in range(len(cat))] # First column in new files will be for [atlas] wavelength - c0 = fits.Column(name='Wavelength', format='D', array=sp_atlas.wave) + c0 = fits.Column(name='Wavelength', format='D', array=sp_atlas.wave) # For each catalog.fits entry, read the unbinned spectrum and rebin to # the atlas resolution. 
Make a new fits file in rebin directory @@ -1266,16 +1268,16 @@ def rebin_cmfgen(cdbs_path, rot=True): temp = float(vals[0]) metal = float(vals[1]) grav = float(vals[2]) - + # Fetch the spectrum - if rot == True: + if rot == True: sp = pysynphot.Icat('cmfgen_rot', temp, metal, grav) else: sp = pysynphot.Icat('cmfgen_norot', temp, metal, grav) # Rebin flux_rebin = rebin_spec(sp.wave, sp.flux, sp_atlas.wave) - c1 = fits.Column(name='Flux', format='E', array=flux_rebin) + c1 = fits.Column(name='Flux', format='E', array=flux_rebin) # Make the FITS file from the columns with header cols = fits.ColDefs([c0,c1]) @@ -1300,7 +1302,7 @@ def organize_PHOENIXv16_atmospheres(path_to_dir, met_str='m00'): path_to_dir is the path to the directory containing all of the downloaded files - + met_str is the name of the current metallicity Creates new fits files for each atmosphere: phoenix_.fits, @@ -1317,7 +1319,7 @@ def organize_PHOENIXv16_atmospheres(path_to_dir, met_str='m00'): pass else: os.mkdir(sub_dir) - + # Extract wavelength array, make column for later wavefile = fits.open('WAVE_PHOENIX-ACES-AGSS-COND-2011.fits') wave = wavefile[0].data @@ -1340,7 +1342,7 @@ def organize_PHOENIXv16_atmospheres(path_to_dir, met_str='m00'): for f in files: # Extract the logg out of filename logg = f[9:13] - + # Extract fluxes from file spectrum = fits.open(f) flux = spectrum[0].data @@ -1349,11 +1351,11 @@ def organize_PHOENIXv16_atmospheres(path_to_dir, met_str='m00'): # Make Column object with fluxes, add to table col = Column(flux, name = 'g{0:2.1f}'.format(float(logg))) t.add_column(col) - + # Now, construct final fits file for the given temp outname = 'phoenix{0}_{1:05d}.fits'.format(met_str, temp) - t.write('{0}/{1}'.format(sub_dir, outname), format = 'fits', overwrite = True) - + t.write('{0}/{1}'.format(sub_dir, outname), format = 'fits', overwrite = True) + # Progress counter for user i += 1 print( 'Done {0:d} of {1:d}'.format(i, len(temp_arr))) @@ -1370,18 +1372,18 @@ def 
make_PHOENIXv16_catalog(path_to_dir, met_str='m00'): path_to_directory is the path to the directory with the reformatted models (i.e. the output from construct_atmospheres, phoenix[met_str]) - + Puts catalog.fits file in directory the user starts in """ # Save starting directory for later, move into working directory start_dir = os.getcwd() os.chdir(path_to_dir) - + # Extract metallicity from metallicity string met = float(met_str[1]) + (float(met_str[2]) * 0.1) if 'm' in met_str: met *= -1. - + # Collect the filenames. Each is a unique temp with many different log g's files = glob.glob('phoenix*.fits') files.sort() @@ -1394,7 +1396,7 @@ def make_PHOENIXv16_catalog(path_to_dir, met_str='m00'): t = Table.read(i, format='fits') keys = t.keys() logg_vals = keys[1:] - + # Extract temp from filename name = i.split('_') temp = float(name[1][:-5]) @@ -1407,20 +1409,20 @@ def make_PHOENIXv16_catalog(path_to_dir, met_str='m00'): filename_arr.append(filename) catalog = Table([index_arr, filename_arr], names=('INDEX', 'FILENAME')) - + # Return to starting directory, write catalog os.chdir(start_dir) - + if os.path.exists('catalog.fits'): from astropy.table import vstack - + prev_catalog = Table.read('catalog.fits', format='fits') joined_catalog = vstack([prev_catalog, catalog]) - + joined_catalog.write('catalog.fits', format='fits', overwrite=True) else: catalog.write('catalog.fits', format='fits', overwrite=True) - + return def cdbs_PHOENIXv16(path_to_cdbs_dir): @@ -1441,7 +1443,7 @@ def cdbs_PHOENIXv16(path_to_cdbs_dir): # Collect the filenames, make necessary changes to each one files = glob.glob('phoenix*.fits') - + ## Need to sort filenames; glob doesn't always give them in order files.sort() @@ -1449,28 +1451,28 @@ def cdbs_PHOENIXv16(path_to_cdbs_dir): counter = 0 for i in files: counter += 1 - + # Read in current FITS table cur_table = Table.read(i, format='fits') - + cur_table.columns[0].name = 'Wavelength' - + num_cols = len(cur_table.colnames) - - # Multiplying 
each flux column by 10^-8 for conversion + + # Multiplying each flux column by 10^-8 for conversion for cur_col_index in range(1, num_cols, 1): cur_col_name = cur_table.colnames[cur_col_index] cur_table[cur_col_name] = cur_table[cur_col_name] * 10.**-8 - - + + # Construct new FITS file based on old one hdu = fits.open(i) header_0 = hdu[0].header header_1 = hdu[1].header sci = hdu[1].data - + tbhdu = fits.table_to_hdu(cur_table) - + # Copying over the older headers, adding unit keywords prihdu = fits.PrimaryHDU(header=header_0) tbhdu.header['TUNIT1'] = 'ANGSTROM' @@ -1487,17 +1489,17 @@ def cdbs_PHOENIXv16(path_to_cdbs_dir): tbhdu.header['TUNIT12'] = 'FLAM' tbhdu.header['TUNIT13'] = 'FLAM' tbhdu.header['TUNIT14'] = 'FLAM' - + # Construct and write out final FITS file finalhdu = fits.HDUList([prihdu, tbhdu]) finalhdu.writeto(i, overwrite=True) - + hdu.close() print( 'Done {0:2.0f} of {1:2.0f}'.format(counter, len(files))) - + # Change back to starting directory os.chdir(start_dir) - + return def rebin_phoenixV16(cdbs_path): @@ -1522,7 +1524,7 @@ def rebin_phoenixV16(cdbs_path): path = cdbs_path+'/grid/phoenix_v16_rebin/' if not os.path.exists(path): os.mkdir(path) - + # Read in the existing catalog.fits file and rebin every spectrum. 
cat = fits.getdata(cdbs_path + '/grid/phoenix_v16/catalog.fits') @@ -1537,51 +1539,51 @@ def rebin_phoenixV16(cdbs_path): temp_arr[ff] = float(vals[0]) metal_arr[ff] = float(vals[1]) logg_arr[ff] = float(vals[2]) - + metal_uniq = np.unique(metal_arr) temp_uniq = np.unique(temp_arr) - + for mm in range(len(metal_uniq)): metal = metal_uniq[mm] # metallicity - + # Construct str for metallicity (for appropriate directory name) met_str = str(int(np.abs(metal))) + str(int((metal % 1.0)*10)) if metal > 0: met_str = 'p' + met_str else: met_str = 'm' + met_str - + # Make directory for current metallicity if it does not exist yet if not os.path.exists(path + 'phoenix' + met_str): os.mkdir(path + 'phoenix' + met_str) - + for tt in range(len(temp_uniq)): temp = temp_uniq[tt] # temperature - # Pick out the list of gravities for this T, Z combo + # Pick out the list of gravities for this T, Z combo idx = np.where((metal_arr == metal) & (temp_arr == temp))[0] logg_exist = logg_arr[idx] - + # All gravities will go in one file. Here is the output # file name. outfile = path + files_all[idx[0]].split('[')[0] - + ## If the rebinned file already exists, continue if os.path.exists(outfile): continue - + # Build a columns array. One column for each gravity. cols_arr = [] # Make the wavelength column, which is first in the cols array. c0 = fits.Column(name='Wavelength', format='D', array=sp_atlas.wave) cols_arr.append(c0) - + for gg in range(len(logg_exist)): grav = logg_exist[gg] # gravity - # Fetch the spectrum + # Fetch the spectrum sp = pysynphot.Icat('phoenix_v16', temp, metal, grav) flux_rebin = rebin_spec(sp.wave, sp.flux, sp_atlas.wave) @@ -1589,7 +1591,7 @@ def rebin_phoenixV16(cdbs_path): name = 'g{0:3.1f}'.format(grav) col = fits.Column(name=name, format='E', array=flux_rebin) cols_arr.append(col) - + # Make the FITS file from the columns with header. 
cols = fits.ColDefs(cols_arr) @@ -1605,7 +1607,7 @@ def rebin_phoenixV16(cdbs_path): finalhdu.writeto(outfile) print( 'Finished file ' + outfile + ' with gravities: ', logg_exist) - + return @@ -1620,7 +1622,7 @@ def rebin_spec(wave, specin, wavnew): f = np.ones(len(wave)) filt = pysynphot.spectrum.ArraySpectralElement(wave, f, waveunits='angstrom') obs = pysynphot.observation.Observation(spec, filt, binset=wavnew, force='taper') - + return obs.binflux def organize_BTSettl_2015_atmospheres(path_to_dir): @@ -1651,7 +1653,7 @@ def organize_BTSettl_2015_atmospheres(path_to_dir): spec = hdu[1].data header_0 = hdu[0].header header_1 = hdu[1].header - + wave = spec.field(0) flux = spec.field(1) @@ -1672,13 +1674,13 @@ def organize_BTSettl_2015_atmospheres(path_to_dir): tbhdu.header['TUNIT1'] = 'ANGSTROM' tbhdu.header['TUNIT2'] = 'FLAM' hdu_new = fits.HDUList([prihdu, tbhdu]) - + # Write new fits table in cdbs directory hdu_new.writeto(os.environ['PYSYN_CDBS']+'grid/BTSettl_2015/'+i, overwrite=True) hdu.close() hdu_new.close() - + # Return to original directory os.chdir(start_dir) return @@ -1695,10 +1697,10 @@ def make_BTSettl_2015_catalog(path_to_dir): """ # Record current working directory for later start_dir = os.getcwd() - + # Enter atmosphere directory os.chdir(path_to_dir) - + # Extract parameters for each atmosphere from the filename, # construct columns for catalog file files = glob.glob("*spec.fits") @@ -1717,10 +1719,10 @@ def make_BTSettl_2015_catalog(path_to_dir): # Create catalog.fits file in directory with the models catalog.write('catalog.fits', format = 'fits', overwrite=True) - + # Move back to original directory, create the catalog.fits file os.chdir(start_dir) - + return def rebin_BTSettl_2015(cdbs_path=os.environ['PYSYN_CDBS']): @@ -1763,8 +1765,8 @@ def rebin_BTSettl_2015(cdbs_path=os.environ['PYSYN_CDBS']): # Make new output c0 = fits.Column(name='Wavelength', format='D', array=sp_atlas.wave) - c1 = fits.Column(name='Flux', format='E', 
array=flux_rebin) - + c1 = fits.Column(name='Flux', format='E', array=flux_rebin) + cols = fits.ColDefs([c0, c1]) tbhdu = fits.BinTableHDU.from_columns(cols) prihdu = fits.PrimaryHDU(header=header0) @@ -1773,7 +1775,7 @@ def rebin_BTSettl_2015(cdbs_path=os.environ['PYSYN_CDBS']): outfile = path + files_all[ff].split('[')[0] finalhdu = fits.HDUList([prihdu, tbhdu]) - finalhdu.writeto(outfile, overwrite=True) + finalhdu.writeto(outfile, overwrite=True) return @@ -1792,7 +1794,7 @@ def make_wavelength_unique(files, dirname): if len(t) != len(test[0]): t = t[test[1]] - + c0 = fits.Column(name='Wavelength', format='D', array=t['Wavelength']) c1 = fits.Column(name='Flux', format='E', array=t['Flux']) cols = fits.ColDefs([c0, c1]) @@ -1830,14 +1832,14 @@ def organize_BTSettl_atmospheres(): """ Construct cdbs-ready atmospheres for the BTSettl grid (CIFITS2011). The code expects tp be run in cdbs/grid/BTSettl, and expects that the - individual model files have been downloaded from online + individual model files have been downloaded from online (https://phoenix.ens-lyon.fr/Grids/BT-Settl/CIFIST2011/SPECTRA/) - and processed into python-readable ascii files. + and processed into python-readable ascii files. """ orig_dir = os.getcwd() dirs = ['btm25', 'btm20', 'btm15', 'btm10', 'btm05', 'btp00', 'btp05'] #dirs = ['btm10', 'btm05', 'btp00', 'btp05'] - + # Go through each directory, turning each spectrum into a cdbs-ready file. 
# Will convert flux into Ergs/sec/cm**2/A (FLAM) units and save as a fits file, @@ -1869,13 +1871,13 @@ def organize_BTSettl_atmospheres(): tbhdu.header['TUNIT1'] = 'ANGSTROM' tbhdu.header['TUNIT2'] = 'FLAM' hdu_new = fits.HDUList([prihdu, tbhdu]) - + # Write new fits table in cdbs directory hdu_new.writeto('{0}.fits'.format(jj[:-4]), overwrite=True) hdu_new.close() count += 1 print('Done {0} of {1}'.format(count, len(files))) - + # Now, clean up all the files made when unzipping the spectra cmd1 = 'rm *.bz2' cmd2 = 'rm *.tmp' @@ -1886,7 +1888,7 @@ def organize_BTSettl_atmospheres(): print('==============================') print('Done {0}'.format(ii)) print('==============================') - + # Go back to original directory, move to next metallicity directory os.chdir(orig_dir) @@ -1922,7 +1924,7 @@ def make_BTSettl_catalog(): metal_flag = -1 * float(ii[3:])*0.1 else: metal_flag = float(ii[3:])*0.1 - + # Now collect the info from the files for jj in files: tmp = jj.split('-') @@ -1936,7 +1938,7 @@ def make_BTSettl_catalog(): else: temp = float(tmp[0][3:]) * 100.0 # In kelvin logg = float(tmp[1]) - + index_str.append('{0},{1},{2:3.2f}'.format(int(temp), metal_flag, logg)) name_str.append('{0}/{1}[Flux]'.format(ii, jj)) @@ -1949,10 +1951,10 @@ def make_BTSettl_catalog(): # Create catalog.fits file in directory with the models catalog.write('catalog.fits', format = 'fits', overwrite=True) - + # Move back to original directory, create the catalog.fits file os.chdir(start_dir) - + return def rebin_BTSettl(make_unique=False): @@ -1983,7 +1985,7 @@ def rebin_BTSettl(make_unique=False): # tmp.append(ii) #files_all = tmp #=============================# - + print( 'Rebinning BTSettl spectra') if make_unique: print('Making unique') @@ -2003,14 +2005,14 @@ def rebin_BTSettl(make_unique=False): # Make new output c0 = fits.Column(name='Wavelength', format='D', array=sp_atlas.wave) - c1 = fits.Column(name='Flux', format='E', array=flux_rebin) - + c1 = fits.Column(name='Flux', 
format='E', array=flux_rebin) + cols = fits.ColDefs([c0, c1]) tbhdu = fits.BinTableHDU.from_columns(cols) prihdu = fits.PrimaryHDU() tbhdu.header['TUNIT1'] = 'ANGSTROM' tbhdu.header['TUNIT2'] = 'FLAM' - + outfile = path + files_all[ff].split('[')[0] finalhdu = fits.HDUList([prihdu, tbhdu]) finalhdu.writeto(outfile, overwrite=True) @@ -2020,9 +2022,9 @@ def rebin_BTSettl(make_unique=False): outfile = path + files_all[ff].split('[')[0] cmd = 'cp {0} {1}'.format(orig_file, outfile) os.system(cmd) - + print('Done {0} of {1}'.format(ff, len(files_all))) - + return def organize_WDKoester_atmospheres(path_to_dir): @@ -2046,7 +2048,7 @@ def organize_WDKoester_atmospheres(path_to_dir): for i in files: data = Table.read(i, format='ascii') - + wave = data['col1'] # angstrom flux = data['col2'] # erg/s/cm^2/A @@ -2062,12 +2064,12 @@ def organize_WDKoester_atmospheres(path_to_dir): tbhdu.header['TUNIT1'] = 'ANGSTROM' tbhdu.header['TUNIT2'] = 'FLAM' hdu_new = fits.HDUList([prihdu, tbhdu]) - + # Write new fits table in cdbs directory hdu_new.writeto(os.environ['PYSYN_CDBS']+'/grid/wdKoester/'+i.replace('.txt', '.fits'), overwrite=True) hdu_new.close() - + # Return to original directory os.chdir(start_dir) return @@ -2084,10 +2086,10 @@ def make_WDKoester_catalog(path_to_dir): """ # Record current working directory for later start_dir = os.getcwd() - + # Enter atmosphere directory os.chdir(path_to_dir) - + # Extract parameters for each atmosphere from the filename, # construct columns for catalog file files = glob.glob("*dk.dat.fits") @@ -2107,10 +2109,10 @@ def make_WDKoester_catalog(path_to_dir): # Create catalog.fits file in directory with the models catalog.write('catalog.fits', format = 'fits', overwrite=True) - + # Move back to original directory, create the catalog.fits file os.chdir(start_dir) - + return def rebin_WDKoester(cdbs_path=os.environ['PYSYN_CDBS']): @@ -2153,8 +2155,8 @@ def rebin_WDKoester(cdbs_path=os.environ['PYSYN_CDBS']): # Make new output c0 = 
fits.Column(name='Wavelength', format='D', array=sp_atlas.wave) - c1 = fits.Column(name='Flux', format='E', array=flux_rebin) - + c1 = fits.Column(name='Flux', format='E', array=flux_rebin) + cols = fits.ColDefs([c0, c1]) tbhdu = fits.BinTableHDU.from_columns(cols) prihdu = fits.PrimaryHDU(header=header0) @@ -2163,8 +2165,8 @@ def rebin_WDKoester(cdbs_path=os.environ['PYSYN_CDBS']): outfile = path + files_all[ff].split('[')[0] finalhdu = fits.HDUList([prihdu, tbhdu]) - finalhdu.writeto(outfile, overwrite=True) + finalhdu.writeto(outfile, overwrite=True) return - - + + diff --git a/spisea/conftest.py b/spisea/conftest.py index 142ae511..fab9a899 100755 --- a/spisea/conftest.py +++ b/spisea/conftest.py @@ -18,10 +18,10 @@ ASTROPY_HEADER = True except ImportError: ASTROPY_HEADER = False - + else: # As of Astropy 5.1, the pytest plugins provided by Astropy have been removed - # and are instead provided by pytest-astropy-header + # and are instead provided by pytest-astropy-header # (https://github.com/astropy/pytest-astropy-header) from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS ASTROPY_HEADER = True diff --git a/spisea/evolution.py b/spisea/evolution.py index 21215964..2f9dd760 100755 --- a/spisea/evolution.py +++ b/spisea/evolution.py @@ -30,7 +30,7 @@ def get_installed_grid_num(input_models_dir): """ # Define the installed model grid number file_name = input_models_dir + '/grid_version.txt' - + # Read in the file. In the case where it doesn't # exist, then grid version is assumed to be 1.0 # (since this didn't always exist) @@ -51,15 +51,15 @@ def check_evo_grid_number(required_num, input_models_dir): grid version number. Installed grid number must be greater than or equal to this number """ - + # Get installed gridnumber grid_num = get_installed_grid_num(input_models_dir) - + # Check: is installed grid number < required_num? 
# If not, raise mismatch error if grid_num < required_num: raise exceptions.ModelMismatch(required_num, grid_num, 'evolution') - + return grid_num class StellarEvolution(object): @@ -90,9 +90,9 @@ def __init__(self, model_dir, age_list, mass_list, z_list): self.mass_list = mass_list self.age_list = age_list self.model_version_name = "None" - + return - + class Geneva(StellarEvolution): def __init__(self): r""" @@ -101,20 +101,20 @@ def __init__(self): self.model_version_name = "Geneva" # populate list of model masses (in solar masses) mass_list = [(0.1 + i*0.005) for i in range(181)] - + # define metallicity parameters for Geneva models z_list = [0.01, 0.02, 0.03] - + # populate list of isochrone ages (log scale) age_list = [round(5.5 + 0.01*i, 2) for i in range(190)] age_list += [round(7.4 + 0.05*i, 2) for i in range(12)] age_list += [round(math.log10(1.e8*x), 2) for x in range(1, 10)] age_list += [round(math.log10(1.e9*x), 2) for x in range(1, 10)] age_list = age_list - - # specify location of model files - model_dir = models_dir + 'geneva/' + # specify location of model files + self.model_dir = models_dir + 'geneva/' + StellarEvolution.__init__(self, model_dir, age_list, mass_list, z_list) self.z_solar = 0.02 @@ -122,7 +122,7 @@ def __init__(self): # Define required evo_grid number self.evo_grid_min = 1.0 - + def isochrone(self, age=1.e8, metallicity=0.0): r""" Extract an individual isochrone from the Geneva collection. @@ -131,31 +131,31 @@ def isochrone(self, age=1.e8, metallicity=0.0): # grid is compatible with code version. 
Also return # current grid num self.evo_grid_num = check_evo_grid_number(self.evo_grid_min, models_dir) - + # convert metallicity to mass fraction z_defined = self.z_solar*10.**metallicity - + # check age and metallicity are within bounds if ((log_age < np.min(self.age_list)) or (log_age > np.max(self.age_list))): - logger.error('Requested age {0} is out of bounds.'.format(log_age)) - + raise ValueError(f'Requested age {log_age} is out of bounds between {np.min(self.age_list)} and {np.max(self.age_list)}.') + if ((z_defined < np.min(self.z_list)) or (z_defined > np.max(self.z_list))): - logger.error('Requested metallicity {0} is out of bounds.'.format(z_defined)) - + raise ValueError(f'Requested metallicity {z_defined} is out of bounds between {np.min(self.z_list)} and {np.max(self.z_list)}.') + # convert age (in yrs) to log scale and find nearest value in grid log_age = np.log10(age) age_idx = np.where(abs(np.array(self.age_list) - log_age) == min(abs(np.array(self.age_list) - log_age)) )[0][0] iso_file = 'iso_' + str(self.age_list[age_idx]) + '.fits' - + # find closest metallicity value z_idx = np.where(abs(np.array(self.z_list) - z_defined) == min(abs(np.array(self.z_list) - z_defined)) )[0][0] z_dir = self.z_file_map[self.z_list[z_idx]] - + # generate isochrone file string full_iso_file = self.model_dir + 'iso/' + z_dir + iso_file - + # return isochrone data return genfromtxt(full_iso_file, comments='#') @@ -166,7 +166,7 @@ def isochrone(self, age=1.e8, metallicity=0.0): class Ekstrom12(StellarEvolution): """ - Evolution models from + Evolution models from `Ekstrom et al. 2012 `_. Downloaded from `website `_. 
@@ -183,10 +183,10 @@ def __init__(self, rot=True): self.model_version_name = "Ekstrom12-norot" # define metallicity parameters for Ekstrom+12 models self.z_list = [0.014] - + # populate list of isochrone ages (log scale) self.age_list = np.arange(6.0, 8.0+0.005, 0.01) - + # Specify location of model files self.model_dir = models_dir+'Ekstrom2012/' @@ -199,7 +199,7 @@ def __init__(self, rot=True): # Define required evo_grid number self.evo_grid_min = 1.0 - + def isochrone(self, age=1.e8, metallicity=0.0): r""" Extract an individual isochrone from the Ekstrom+12 Geneva collection. @@ -208,34 +208,34 @@ def isochrone(self, age=1.e8, metallicity=0.0): # grid is compatible with code version. Also return # current grid num self.evo_grid_num = check_evo_grid_number(self.evo_grid_min, models_dir) - + # convert metallicity to mass fraction z_defined = self.z_solar*10.**metallicity log_age = math.log10(age) - + # check age and metallicity are within bounds if ((log_age < np.min(self.age_list)) or (log_age > np.max(self.age_list))): - logger.error('Requested age {0} is out of bounds.'.format(log_age)) - + raise ValueError(f'Requested age {log_age} is out of bounds between {np.min(self.age_list)} and {np.max(self.age_list)}.') + if ((z_defined < np.min(self.z_list)) or (z_defined > np.max(self.z_list))): - logger.error('Requested metallicity {0} is out of bounds.'.format(z_defined)) - + raise ValueError(f'Requested metallicity z_solar * 10^{metallicity} = {z_defined} is out of bounds between {np.min(self.z_list)} and {np.max(self.z_list)}.') + # Find nearest age in grid to input grid age_idx = np.where(abs(np.array(self.age_list) - log_age) == min(abs(np.array(self.age_list) - log_age)) )[0][0] iso_file = 'iso_{0:.2f}.fits'.format(self.age_list[age_idx]) - + # find closest metallicity value z_idx = np.where(abs(np.array(self.z_list) - z_defined) == min(abs(np.array(self.z_list) - z_defined)) )[0][0] z_dir = self.z_file_map[self.z_list[z_idx]] - + # generate isochrone file 
string - if self.rot: + if self.rot: full_iso_file = self.model_dir + 'iso/' + z_dir + 'rot/' + iso_file else: full_iso_file = self.model_dir + 'iso/' + z_dir + 'norot/' + iso_file - + # Return isochrone data iso = Table.read(full_iso_file, format='fits') iso.rename_column('col4', 'Z') @@ -267,11 +267,11 @@ def format_isochrones(input_iso_dir): Parse iso.fits (filename hardcoded) file downloaded from Ekstrom+12 models, create individual isochrone files for the different ages. - input_iso_directory should lead to - Ekstrom2012/iso/ + input_iso_directory should lead to + Ekstrom2012/iso/ directory, where iso.fits file should be located. - Creates two new directories, rot and norot, which contain their + Creates two new directories, rot and norot, which contain their respective isochrones. """ # Store current directory for later @@ -279,13 +279,13 @@ def format_isochrones(input_iso_dir): # Move into metallicity direcotry, read iso.fits file os.chdir(input_iso_dir) - + print( 'Read Input: this is slow') iso = Table.read('iso.fits') print( 'Done' ) - + ages_all = iso['col1'] - + # Extract the unique ages age_arr = np.unique(ages_all) @@ -299,7 +299,7 @@ def format_isochrones(input_iso_dir): else: os.mkdir('rot') os.mkdir('norot') - + print( 'Making individual isochrone files') for age in age_arr: good = np.where(ages_all == age) @@ -310,7 +310,7 @@ def format_isochrones(input_iso_dir): tmp_r = iso[good][idx_r] tmp_n = iso[good][idx_n] - + # Write tables tmp_r.write('rot/iso_{0:4.2f}.fits'.format(age)) tmp_n.write('norot/iso_{0:4.2f}.fits'.format(age)) @@ -327,14 +327,14 @@ def create_iso(fileList, ageList, rot=True): iso.fits format for parse_iso code. fileList: list of downloaded isochrone files (could be one) - + ageList: list of lists of ages associated with each file in filelist. MUST BE IN SAME ORDER AS ISOCHRONES IN FILE! 
Also needs to be in logAge - + rot = TRUE: assumes that models are rotating, will add appropriate column - + This code writes the individual files, which is then easiest to combine by hand - in aquamacs + in aquamacs """ # Read each file in fileList individually, add necessary columns for i in range(len(fileList)): @@ -345,14 +345,14 @@ def create_iso(fileList, ageList, rot=True): start = np.where(t['M_ini'] == 0.8) # Now, each identified start is assumed to be associated with the - # corresponding age in ages + # corresponding age in ages if len(start[0]) != len(ages): print( 'Ages mismatched in file! Quitting...') return age_arr = np.zeros(len(t)) - + for j in range(len(start[0])): low_ind = start[0][j] # Deal with case at end of file @@ -371,9 +371,9 @@ def create_iso(fileList, ageList, rot=True): rot_val[:] = 'r' if not rot: rot_val[:] = 'n' - + col_rot = Column(rot_val, name='Rot') - + t.add_column(col_rot, index=0) t.add_column(col_age, index=0) @@ -387,7 +387,7 @@ def create_iso(fileList, ageList, rot=True): class Parsec(StellarEvolution): """ - Evolution models from + Evolution models from `Bressan et al. 2012 `_, version 1.2s. 
@@ -413,14 +413,14 @@ def __init__(self): # populate list of model masses (in solar masses) self.model_version_name = "Parsec1.2s" #mass_list = [(0.1 + i*0.005) for i in range(181)] - + # define metallicity parameters for Parsec models self.z_list = [0.005, 0.015, 0.04] - + # populate list of isochrone ages (log scale) self.age_list = np.arange(6.6, 10.12+0.005, 0.01) self.age_list = np.append(6.40, self.age_list) - + # Specify location of model files self.model_dir = models_dir+'ParsecV1.2s/' @@ -429,8 +429,8 @@ def __init__(self): self.z_file_map = {0.005: 'z005/', 0.015: 'z015/', 0.04: 'z04/'} # Define required evo_grid number - self.evo_grid_min = 1.0 - + self.evo_grid_min = 1.0 + def isochrone(self, age=1.e8, metallicity=0.0): r""" Extract an individual isochrone from the Parsec version 1.2s @@ -440,31 +440,31 @@ def isochrone(self, age=1.e8, metallicity=0.0): # grid is compatible with code version. Also return # current grid num self.evo_grid_num = check_evo_grid_number(self.evo_grid_min, models_dir) - + # convert metallicity to mass fraction z_defined = self.z_solar*10.**metallicity log_age = math.log10(age) - + # check age and metallicity are within bounds if ((log_age < np.min(self.age_list)) or (log_age > np.max(self.age_list))): - logger.error('Requested age {0} is out of bounds.'.format(log_age)) - + raise ValueError(f'Requested age {log_age} is out of bounds between {np.min(self.age_list)} and {np.max(self.age_list)}.') + if ((z_defined < np.min(self.z_list)) or (z_defined > np.max(self.z_list))): - logger.error('Requested metallicity {0} is out of bounds.'.format(z_defined)) - + raise ValueError(f'Requested metallicity {z_defined} is out of bounds between {np.min(self.z_list)} and {np.max(self.z_list)}.') + # Find nearest age in grid to input grid age_idx = np.where(abs(np.array(self.age_list) - log_age) == min(abs(np.array(self.age_list) - log_age)) )[0][0] iso_file = 'iso_{0:.2f}.fits'.format(self.age_list[age_idx]) - + # find closest metallicity 
value z_idx = np.where(abs(np.array(self.z_list) - z_defined) == min(abs(np.array(self.z_list) - z_defined)) )[0][0] z_dir = self.z_file_map[self.z_list[z_idx]] - + # generate isochrone file string full_iso_file = self.model_dir + 'iso/' + z_dir + iso_file - + # return isochrone data iso = Table.read(full_iso_file, format='fits') iso.rename_column('col1', 'Z') @@ -480,19 +480,19 @@ def isochrone(self, age=1.e8, metallicity=0.0): # Parsec doesn't identify WR stars, so identify all as "False" isWR = Column([False] * len(iso), name='isWR') iso.add_column(isWR) - + iso.meta['log_age'] = log_age iso.meta['metallicity_in'] = metallicity iso.meta['metallicity_act'] = np.log10(self.z_list[z_idx] / self.z_solar) return iso - + def format_isochrones(input_iso_dir, metallicity_list): r""" Parse isochrone file downloaded from Parsec version 1.2 for different metallicities, create individual isochrone files for the different ages. - + input_iso_dir: points to ParsecV1.2s/iso directory. Assumes metallicity subdirectories already exist with isochrone files downloaded in them (isochrones files expected to start with "output*") @@ -505,7 +505,7 @@ def format_isochrones(input_iso_dir, metallicity_list): # Move into isochrone directory os.chdir(input_iso_dir) - + # Work on each metallicity isochrones individually for metal in metallicity_list: # More into metallicity directory, read isochrone file @@ -515,7 +515,7 @@ def format_isochrones(input_iso_dir, metallicity_list): print( 'Read Input: this is slow') iso = Table.read(isoFile[0], format='fits') print( 'Done') - + ages_all = iso['col2'] # Extract the unique ages @@ -544,9 +544,9 @@ def format_isochrones(input_iso_dir, metallicity_list): class Pisa(StellarEvolution): """ - Evolution models from + Evolution models from `Tognelli et al. 2011 `_. 
- + Downloaded `online `_ Notes @@ -565,10 +565,10 @@ def __init__(self): self.model_version_name = "Pisa" # define metallicity parameters for Pisa models self.z_list = [0.015] - + # populate list of isochrone ages (log scale) self.age_list = np.arange(6.0, 8.01+0.005, 0.01) - + # Specify location of model files self.model_dir = models_dir+'Pisa2011/' @@ -578,7 +578,7 @@ def __init__(self): # Define required evo_grid number self.evo_grid_min = 1.0 - + def isochrone(self, age=1.e8, metallicity=0.0): r""" Extract an individual isochrone from the Pisa (Tognelli+11) @@ -588,31 +588,31 @@ def isochrone(self, age=1.e8, metallicity=0.0): # grid is compatible with code version. Also return # current grid num self.evo_grid_num = check_evo_grid_number(self.evo_grid_min, models_dir) - + # convert metallicity to mass fraction z_defined = self.z_solar*10.**metallicity log_age = math.log10(age) - + # check age and metallicity are within bounds if ((log_age < np.min(self.age_list)) or (log_age > np.max(self.age_list))): - logger.error('Requested age {0} is out of bounds.'.format(log_age)) - + raise ValueError(f'Requested age {log_age} is out of bounds between {np.min(self.age_list)} and {np.max(self.age_list)}.') + return if ((z_defined < np.min(self.z_list)) or (z_defined > np.max(self.z_list))): - logger.error('Requested metallicity {0} is out of bounds for evolution model. Available z-vals: {1}.'.format(z_defined, self.z_list)) - + raise ValueError(f'Requested metallicity {z_defined} is out of bounds for evolution model. 
Available z-vals: {self.z_list}.') + # Find nearest age in grid to input grid age_idx = np.where(abs(np.array(self.age_list) - log_age) == min(abs(np.array(self.age_list) - log_age)) )[0][0] iso_file = 'iso_{0:.2f}.fits'.format(self.age_list[age_idx]) - + # find closest metallicity value z_idx = np.where(abs(np.array(self.z_list) - z_defined) == min(abs(np.array(self.z_list) - z_defined)) )[0][0] z_dir = self.z_file_map[self.z_list[z_idx]] - + # generate isochrone file string full_iso_file = self.model_dir + 'iso/' + z_dir + iso_file - + # return isochrone data iso = Table.read(full_iso_file, format='fits') iso.rename_column('col1', 'logL') @@ -625,7 +625,7 @@ def isochrone(self, age=1.e8, metallicity=0.0): isWR = Column([False] * len(iso), name='isWR') iso.add_column(isWR) - # Add columns for current mass and phase. + # Add columns for current mass and phase. iso.add_column( Column(np.zeros(len(iso)), name = 'phase')) iso.add_column( Column(iso['mass'], name = 'mass_current')) @@ -643,7 +643,7 @@ def format_isochrones(input_iso_dir, metallicity_list): input_iso_dir: points to Pisa2011/iso directory. Individual metallicity directories with the downloaded isochrones are expected to already exist there - + metallicity_list is the list of metallicities on which function is to be run. @@ -665,7 +665,7 @@ def format_isochrones(input_iso_dir, metallicity_list): else: # Create a ReadMe with the original file names to preserve the # model details - + cmd = "ls *.FITS > ReadMe" os.system(cmd) @@ -699,14 +699,14 @@ def make_isochrone_grid(metallicity=0.015): while 0.0150 would not) """ logAge_arr = np.arange(6.0, 8.0+0.005, 0.01) - + count = 0 for logAge in logAge_arr: # Could interpolate using evolutionary tracks, but less accurate. 
make_isochrone_pisa_interp(logAge, metallicity=metallicity) count += 1 - + print( 'Done {0} of {1} models'.format(count, (len(logAge_arr)))) return @@ -716,7 +716,7 @@ def make_isochrone_grid(metallicity=0.015): #==============================# class Baraffe15(StellarEvolution): """ - Evolution models published in + Evolution models published in `Baraffe et al. 2015 `_. Downloaded from `BHAC15 site `_. @@ -725,10 +725,10 @@ def __init__(self): self.model_version_name = "Baraffe15" # define metallicity parameters for Baraffe models self.z_list = [0.015] - + # populate list of isochrone ages (log scale) self.age_list = np.arange(6.0, 8.0+0.005, 0.01) - + # Specify location of model files self.model_dir = models_dir+'Baraffe15/' @@ -738,7 +738,7 @@ def __init__(self): # Define required evo_grid number self.evo_grid_min = 1.0 - + def isochrone(self, age=5.e7, metallicity=0.0): r""" Extract an individual isochrone from the Baraffe+15 @@ -748,43 +748,43 @@ def isochrone(self, age=5.e7, metallicity=0.0): # grid is compatible with code version. 
Also return # current grid num self.evo_grid_num = check_evo_grid_number(self.evo_grid_min, models_dir) - + # convert metallicity to mass fraction z_defined = self.z_solar*10.**metallicity log_age = math.log10(age) - + # check age and metallicity are within bounds if ((log_age < np.min(self.age_list)) or (log_age > np.max(self.age_list))): - logger.error('Requested age {0} is out of bounds.'.format(log_age)) - + raise ValueError(f'Requested age {log_age} is out of bounds between {np.min(self.age_list)} and {np.max(self.age_list)}.') + if ((z_defined < np.min(self.z_list)) or (z_defined > np.max(self.z_list))): - logger.error('Requested metallicity {0} is out of bounds.'.format(z_defined)) - + raise ValueError(f'Requested metallicity {z_defined} is out of bounds between {np.min(self.z_list)} and {np.max(self.z_list)}.') + # Find nearest age in grid to input grid age_idx = np.where(abs(np.array(self.age_list) - log_age) == min(abs(np.array(self.age_list) - log_age)) )[0][0] iso_file = 'iso_{0:.2f}.fits'.format(self.age_list[age_idx]) - + # find closest metallicity value z_idx = np.where(abs(np.array(self.z_list) - z_defined) == min(abs(np.array(self.z_list) - z_defined)) )[0][0] z_dir = self.z_file_map[self.z_list[z_idx]] - + # generate isochrone file string full_iso_file = self.model_dir + 'iso/' + z_dir + iso_file - + # Read isochrone, get in proper format iso = Table.read(full_iso_file, format='fits') iso.rename_column('Mass', 'mass') iso.rename_column('logG', 'logg') iso['logT'] = np.log10(iso['Teff']) - + # Pisa models are too low for WR phase, add WR column with all False iso['logT_WR'] = iso['logT'] isWR = Column([False] * len(iso), name='isWR') iso.add_column(isWR) - # Add columns for current mass and phase. + # Add columns for current mass and phase. 
iso.add_column( Column(np.zeros(len(iso)), name = 'phase')) iso.add_column( Column(iso['mass'], name = 'mass_current')) @@ -798,11 +798,11 @@ def tracks_to_isochrones(self, tracksFile): r""" Create isochrones at desired age sampling (6.0 < logAge < 8.0, steps of 0.01; hardcoded) from the Baraffe+15 tracks downloaded - online. + online. tracksFile: tracks.dat file downloaded from Baraffe+15, with format modified to be read in python - + Writes isochrones in iso/ subdirectory off of work directory. Will create this subdirectory if it doesn't already exist """ @@ -810,7 +810,7 @@ def tracks_to_isochrones(self, tracksFile): age_arr = np.arange(6.0, 8.0+0.005, 0.01) #age_arr = [6.28] - + # Loop through the masses, interpolating track over time at each. # Resample track properties at hardcoded ages masses = np.unique(tracks['col1']) @@ -835,13 +835,13 @@ def tracks_to_isochrones(self, tracksFile): # Interpolate Teff, logL, and logG using linear interpolator tck_Teff = interpolate.interp1d(tmp['col2'], tmp['col3']) tck_logL = interpolate.interp1d(tmp['col2'], tmp['col4']) - tck_logG = interpolate.interp1d(tmp['col2'], tmp['col5']) + tck_logG = interpolate.interp1d(tmp['col2'], tmp['col5']) Teff = tck_Teff(age_arr) logL = tck_logL(age_arr) logG = tck_logG(age_arr) - + # Test interpolation if desired test=False if test: @@ -868,9 +868,9 @@ def tracks_to_isochrones(self, tracksFile): py.xlabel('logAge') py.ylabel('logG') py.savefig('test_logG.png') - + pdb.set_trace() - + # Build upon arrays of interpolated values mass_interp = np.concatenate((mass_interp, np.ones(len(Teff)) * mass)) age_interp = np.concatenate((age_interp, age_arr)) @@ -905,11 +905,11 @@ def tracks_to_isochrones(self, tracksFile): def test_age_interp(self, onlineIso, interpIso): r""" Compare one of our interpolated ischrones with one - of the isochrones provided online by Baraffe+15. + of the isochrones provided online by Baraffe+15. 
""" true_iso = Table.read(onlineIso, format='ascii') our_iso = Table.read(interpIso, format='fits') - + # Compare the two isochrones using plots. Look at mass vs. Teff, # mass vs. logG, mass vs. logL. Ideally these isochrones should # be identical @@ -941,7 +941,7 @@ def test_age_interp(self, onlineIso, interpIso): Teff_diff = np.mean(abs(true_iso['col2'][7:] - our_iso['Teff'])) logL_diff = np.mean(abs(true_iso['col3'][7:] - our_iso['logL'])) logG_diff = np.mean(abs(true_iso['col4'][7:] - our_iso['logG'])) - + print( 'Average abs difference in Teff: {0}'.format(Teff_diff)) print( 'Average abs difference in logL: {0}'.format(logL_diff)) print( 'Average abs difference in logg: {0}'.format(logG_diff)) @@ -958,7 +958,7 @@ def compare_Baraffe_Pisa(BaraffeIso, PisaIso): name = BaraffeIso.split('_') age = name[1][:4] - + # Extract paramters we need b_mass = b['Mass'] b_logT = np.log10(b['Teff']) @@ -972,7 +972,7 @@ def compare_Baraffe_Pisa(BaraffeIso, PisaIso): m05_b = np.where( abs(b_mass - 0.5) == min(abs(b_mass - 0.5)) ) m05_p = np.where( abs(p_mass - 0.5) == min(abs(p_mass - 0.5)) ) - + # Comparison plots py.figure(1, figsize=(10,10)) py.clf() @@ -1000,7 +1000,7 @@ def compare_Baraffe_Pisa(BaraffeIso, PisaIso): #py.axis([4.4, 3.4, -3, 4]) #py.gca().invert_xaxis() py.legend() - py.savefig('BaraffePisa_comp_mass_{0}.png'.format(age)) + py.savefig('BaraffePisa_comp_mass_{0}.png'.format(age)) return @@ -1010,7 +1010,7 @@ def compare_Baraffe_Pisa(BaraffeIso, PisaIso): class MISTv1(StellarEvolution): """ Define intrinsic properties for the MIST v1 stellar - models. + models. Models originally downloaded from `online server `_. @@ -1021,11 +1021,11 @@ class MISTv1(StellarEvolution): was downloaded from MIST website on 2/2017, while Version 1.2 was downloaded on 8/2018 (solar metallicity) and 4/2019 (other metallicities). Default is 1.2. 
- + synthpop_extension: boolean (default False) If True, the isochrones are extended down to a minimum initial mass of 0.1Msun using grids interpolated via SynthPop. If False, - the web-downloaded MIST isochrones are used with their varying + the web-downloaded MIST isochrones are used with their varying lower mass limits. True option is only valid for version=1.2. """ def __init__(self, version=1.2, synthpop_extension=False): @@ -1045,7 +1045,7 @@ def __init__(self, version=1.2, synthpop_extension=False): 0.014, # [Fe/H] = 0.00 0.025, # [Fe/H] = 0.25 0.045] # [Fe/H] = 0.50 - + # populate list of isochrone ages (log scale) self.age_list = np.arange(5.01, 10.30+0.005, 0.01) @@ -1062,7 +1062,7 @@ def __init__(self, version=1.2, synthpop_extension=False): version_dir = 'v1.2/' else: raise ValueError('Version {0} not supported for MIST isochrones'.format(version)) - + # Specify location of model files self.model_dir = models_dir+'MISTv1/' + version_dir if self.synthpop_extension: @@ -1094,7 +1094,7 @@ def __init__(self, version=1.2, synthpop_extension=False): # Define required evo_grid number (now 1.2 for synthpop extension) self.evo_grid_min = 1.2 - + def isochrone(self, age=1.e8, metallicity=0.0): r""" Extract an individual isochrone from the MISTv1 @@ -1104,7 +1104,7 @@ def isochrone(self, age=1.e8, metallicity=0.0): # grid is compatible with code version. 
Also return # current grid num self.evo_grid_num = check_evo_grid_number(self.evo_grid_min, models_dir) - + # convert metallicity to mass fraction z_defined = self.z_solar * (10.**metallicity) @@ -1112,25 +1112,25 @@ def isochrone(self, age=1.e8, metallicity=0.0): # check age and metallicity are within bounds if ((log_age < np.min(self.age_list)) or (log_age > np.max(self.age_list))): - logger.error('Requested age {0} is out of bounds.'.format(log_age)) - + raise ValueError(f'Requested age {log_age} is out of bounds between {np.min(self.age_list)} and {np.max(self.age_list)}.') + if ((z_defined < np.min(self.z_list)) or (z_defined > np.max(self.z_list))): - logger.error('Requested metallicity {0} is out of bounds.'.format(z_defined)) + raise ValueError(f'Requested metallicity {z_defined} is out of bounds between {np.min(self.z_list)} and {np.max(self.z_list)}.') # Find nearest age in grid to input grid age_idx = np.where(abs(np.array(self.age_list) - log_age) == min(abs(np.array(self.age_list) - log_age)) )[0][0] iso_file = 'iso_{0:.2f}.fits'.format(self.age_list[age_idx]) - + # find closest metallicity value z_idx = np.where(abs(np.array(self.z_list) - z_defined) == min(abs(np.array(self.z_list) - z_defined)) )[0][0] z_dir = self.z_file_map[self.z_list[z_idx]] - + # generate isochrone file string full_iso_file = self.model_dir + 'iso/' + z_dir + iso_file if self.synthpop_extension: addl_iso_file = self.model_extension_dir + 'iso/' + z_dir + iso_file - + # return isochrone data. 
Column locations depend on # version iso = Table.read(full_iso_file, format='fits') @@ -1174,7 +1174,7 @@ def isochrone(self, age=1.e8, metallicity=0.0): iso.meta['metallicity_act'] = np.log10(self.z_list[z_idx] / self.z_solar) return iso - + def format_isochrones(self): r""" Parse isochrone file downloaded from MIST web server, @@ -1199,7 +1199,7 @@ def format_isochrones(self): # Move into isochrone directory os.chdir(input_iso_dir) - + # Work on each metallicity isochrones individually for metal in metallicity_list: # More into metallicity directory, read isochrone file @@ -1253,19 +1253,19 @@ class MergedBaraffePisaEkstromParsec(StellarEvolution): The model used depends on the age of the population and what stellar masses are being modeled: - + For logAge < 7.4: * Baraffe: 0.08 - 0.4 M_sun - * Baraffe/Pisa transition: 0.4 - 0.5 M_sun + * Baraffe/Pisa transition: 0.4 - 0.5 M_sun * Pisa: 0.5 M_sun to the highest mass in Pisa isochrone (typically 5 - 7 Msun) * Geneva: Highest mass of Pisa models to 120 M_sun For logAge > 7.4: * Parsec v1.2s: full mass range - + Parameters ---------- rot: boolean, optional @@ -1278,18 +1278,18 @@ def __init__(self, rot=True): self.model_version_name = "MergedBaraffePisaEkstromParsec-norot" # populate list of model masses (in solar masses) mass_list = [(0.1 + i*0.005) for i in range(181)] - + # define metallicity parameters for Geneva models z_list = [0.015] - + # populate list of isochrone ages (log scale) age_list = np.arange(6.0, 10.091, 0.01).tolist() - + # specify location of model files model_dir = models_dir + 'merged/baraffe_pisa_ekstrom_parsec/' StellarEvolution.__init__(self, model_dir, age_list, mass_list, z_list) self.z_solar = 0.015 - + # Switch to specify rotating/non-rotating models if rot: self.z_file_map = {0.015: 'z015_rot/'} @@ -1298,36 +1298,36 @@ def __init__(self, rot=True): # Define required evo_grid number self.evo_grid_min = 1.0 - - + + def isochrone(self, age=1.e8, metallicity=0.0): r""" - Extract an 
individual isochrone from the Baraffe-Pisa-Ekstrom-Parsec + Extract an individual isochrone from the Baraffe-Pisa-Ekstrom-Parsec collection """ # Error check to see if installed evolution model # grid is compatible with code version. Also return # current grid num self.evo_grid_num = check_evo_grid_number(self.evo_grid_min, models_dir) - + # convert metallicity to mass fraction z_defined = self.z_solar*10.**metallicity log_age = math.log10(age) - + # check age and metallicity are within bounds if ((log_age < np.min(self.age_list)) or (log_age > np.max(self.age_list))): - logger.error('Requested age {0} is out of bounds.'.format(log_age)) - + raise ValueError(f'Requested age {log_age} is out of bounds between {np.min(self.age_list)} and {np.max(self.age_list)}.') + if ((z_defined < np.min(self.z_list)) or (z_defined > np.max(self.z_list))): - logger.error('Requested metallicity {0} is out of bounds.'.format(z_defined)) + raise ValueError(f'Requested metallicity {z_defined} is out of bounds between {np.min(self.z_list)} and {np.max(self.z_list)}.') # Find nearest age in grid to input grid age_idx = np.where(abs(np.array(self.age_list) - log_age) == min(abs(np.array(self.age_list) - log_age)) )[0][0] iso_file = 'iso_{0:.2f}.fits'.format(self.age_list[age_idx]) - - + + # find closest metallicity value z_idx = np.where(abs(np.array(self.z_list) - z_defined) == min(abs(np.array(self.z_list) - z_defined)) )[0][0] z_dir = self.z_file_map[self.z_list[z_idx]] @@ -1343,9 +1343,9 @@ def isochrone(self, age=1.e8, metallicity=0.0): # ASCII version of files (newer model evo grids iso_file = 'iso_{0:.2f}.dat'.format(self.age_list[age_idx]) full_iso_file = self.model_dir + z_dir + iso_file - + iso = Table.read(full_iso_file, format='ascii') - + iso.rename_column('col1', 'mass') iso.rename_column('col2', 'logT') iso.rename_column('col3', 'logL') @@ -1360,18 +1360,18 @@ def isochrone(self, age=1.e8, metallicity=0.0): idx_WR = np.where(iso['logT'] != iso['logT_WR']) isWR[idx_WR] = 
True iso.add_column(isWR) - + iso.meta['log_age'] = log_age iso.meta['metallicity_in'] = metallicity iso.meta['metallicity_act'] = np.log10(self.z_list[z_idx] / self.z_solar) - + return iso class MergedPisaEkstromParsec(StellarEvolution): """ Same as MergedBaraffePisaEkstromParsec, but without - the Baraffe models. + the Baraffe models. Parameters ---------- @@ -1385,13 +1385,13 @@ def __init__(self, rot=True): self.model_version_name = "MergedPisaEkstromParsec-norot" # populate list of model masses (in solar masses) mass_list = [(0.1 + i*0.005) for i in range(181)] - + # define metallicity parameters for Geneva models z_list = [0.015] - + # populate list of isochrone ages (log scale) age_list = np.arange(6.0, 8.001, 0.01).tolist() - + # specify location of model files model_dir = models_dir + 'merged/pisa_ekstrom_parsec/' StellarEvolution.__init__(self, model_dir, age_list, mass_list, z_list) @@ -1405,13 +1405,13 @@ def __init__(self, rot=True): # Define required evo_grid number self.evo_grid_min = 1.0 - + # Error check to see if installed evolution model # grid is compatible with code version. Also return # current grid num self.evo_grid_num = check_evo_grid_number(self.evo_grid_min, models_dir) - - + + def isochrone(self, age=1.e8, metallicity=0.0): r""" Extract an individual isochrone from the Pisa-Ekstrom-Parsec collection. 
@@ -1420,18 +1420,18 @@ def isochrone(self, age=1.e8, metallicity=0.0): z_defined = self.z_solar*10.**metallicity log_age = math.log10(age) - + # check age and metallicity are within bounds if (log_age < self.age_list[0]) or (log_age > self.age_list[-1]): - logger.error('Requested age {0} is out of bounds.'.format(log_age)) - + raise ValueError(f'Requested age {log_age} is out of bounds between {np.min(self.age_list)} and {np.max(self.age_list)}.') + if not z_defined in self.z_list: - logger.error('Requested metallicity {0} is out of bounds.'.format(z_defined)) - + raise ValueError(f'Requested metallicity {z_defined} is out of bounds between {np.min(self.z_list)} and {np.max(self.z_list)}.') + # Find nearest age in grid to input grid age_idx = np.where(abs(np.array(self.age_list) - log_age) == min(abs(np.array(self.age_list) - log_age)) )[0][0] iso_file = 'iso_{0:.2f}.fits'.format(self.age_list[age_idx]) - + # find closest metallicity value z_idx = np.where(abs(np.array(self.z_list) - z_defined) == min(abs(np.array(self.z_list) - z_defined)) )[0][0] z_dir = self.z_file_map[self.z_list[z_idx]] @@ -1451,7 +1451,7 @@ def isochrone(self, age=1.e8, metallicity=0.0): iso.meta['log_age'] = log_age iso.meta['metallicity_in'] = metallicity iso.meta['metallicity_act'] = np.log10(self.z_list[z_idx] / self.z_solar) - + return iso class MergedSiessGenevaPadova(StellarEvolution): @@ -1466,27 +1466,27 @@ class MergedSiessGenevaPadova(StellarEvolution): * Padova (`Marigo et al. 2008 `_) For logAge < 7.4: - + * Siess: 0.1 - 7 M_sun * Siess/Geneva transition: 7 - 9 M_sun * Geneva: > 9 M_sun For logAge > 7.4: - + * Padova: full mass range """ def __init__(self): """ - Define intrinsic properties for merged Siess-meynetMaeder-Padova + Define intrinsic properties for merged Siess-meynetMaeder-Padova stellar models. 
""" self.model_version_name = "MergedSiessGenevaPadova" # populate list of model masses (in solar masses) mass_list = [(0.1 + i*0.005) for i in range(181)] - + # define metallicity parameters for Geneva models z_list = [0.02] - + # populate list of isochrone ages (log scale) age_list = np.arange(5.5, 7.41, 0.01).tolist() age_list.append(7.48) @@ -1506,24 +1506,24 @@ def __init__(self): age_list.append(9.60) age_list.append(9.70) age_list.append(9.78) - + # specify location of model files model_dir = models_dir + 'merged/siess_meynetMaeder_padova/' StellarEvolution.__init__(self, model_dir, age_list, mass_list, z_list) self.z_solar = 0.02 - + # Metallicity map self.z_file_map = {0.02: 'z02/'} # Define required evo_grid number self.evo_grid_min = 1.0 - + # Error check to see if installed evolution model # grid is compatible with code version. Also return # current grid num self.evo_grid_num = check_evo_grid_number(self.evo_grid_min, models_dir) - - + + def isochrone(self, age=1.e8, metallicity=0.0): r""" Extract an individual isochrone from the Siess-Geneva-Padova collection. 
@@ -1532,18 +1532,18 @@ def isochrone(self, age=1.e8, metallicity=0.0): z_defined = self.z_solar*10.**metallicity log_age = math.log10(age) - + # check age and metallicity are within bounds if (log_age < self.age_list[0]) or (log_age > self.age_list[-1]): - logger.error('Requested age {0} is out of bounds.'.format(log_age)) - + raise ValueError(f'Requested age {log_age} is out of bounds between {np.min(self.age_list)} and {np.max(self.age_list)}.') + if not z_defined in self.z_list: - logger.error('Requested metallicity {0} is out of bounds.'.format(z_defined)) - + raise ValueError(f'Requested metallicity {z_defined} is out of bounds between {np.min(self.z_list)} and {np.max(self.z_list)}.') + # Find nearest age in grid to input grid age_idx = np.where(abs(np.array(self.age_list) - log_age) == min(abs(np.array(self.age_list) - log_age)) )[0][0] iso_file = 'iso_{0:.2f}.fits'.format(self.age_list[age_idx]) - + # find closest metallicity value z_idx = np.where(abs(np.array(self.z_list) - z_defined) == min(abs(np.array(self.z_list) - z_defined)) )[0][0] z_dir = self.z_file_map[self.z_list[z_idx]] @@ -1559,16 +1559,16 @@ def isochrone(self, age=1.e8, metallicity=0.0): iso.rename_column('col4', 'logg') iso.rename_column('col5', 'logT_WR') iso.rename_column('col6', 'model_ref') - + iso.meta['log_age'] = log_age iso.meta['metallicity_in'] = metallicity iso.meta['metallicity_act'] = np.log10(self.z_list[z_idx] / self.z_solar) - + return iso #================================================# - -def make_isochrone_pisa_interp(log_age, metallicity=0.015, + +def make_isochrone_pisa_interp(log_age, metallicity=0.015, tracks=None, test=False): """ Read in a set of isochrones and generate an isochrone at log_age @@ -1595,14 +1595,14 @@ def make_isochrone_pisa_interp(log_age, metallicity=0.015, if os.path.exists(rootDir+'iso_{0:3.2f}.fits'.format(log_age)): print( 'Isochrone at logAge = {0:3.2f} already exists'.format(log_age)) return - + # Name/directory for interpolated isochrone 
isoFile = rootDir+'iso_%3.2f.fits' % log_age outSuffix = '_%.2f' % (log_age) print( '*** Generating Pisa isochrone for log t = %3.2f and Z = %.3f' % \ (log_age, metallicity)) - + import time print( time.asctime(), 'Getting original Pisa isochrones.') iso = get_orig_pisa_isochrones(metallicity=metallicity) @@ -1616,7 +1616,7 @@ def make_isochrone_pisa_interp(log_age, metallicity=0.015, good = np.where(tmp == log_age) young_model_logage = tmp[good[0]-1] old_model_logage = tmp[good[0]+1] - + # Isolate younger/older isochrones young_ind = np.where(iso.log_ages == young_model_logage) old_ind = np.where(iso.log_ages == old_model_logage) @@ -1629,7 +1629,7 @@ def make_isochrone_pisa_interp(log_age, metallicity=0.015, if abs(young_model_logage - log_age) <= abs(old_model_logage - log_age): # Use young model mass grid young_iso, old_iso = interpolate_iso_tempgrid(young_iso, old_iso) - + else: # Use old model mass grid old_iso, young_iso = interpolate_iso_tempgrid(old_iso, young_iso) @@ -1637,25 +1637,25 @@ def make_isochrone_pisa_interp(log_age, metallicity=0.015, # Now, can interpolate in time over the two models. Do this star by star. # Work in linear time here!! 
numStars = len(young_iso.M) - + interp_iso = Isochrone(log_age) interp_iso.log_Teff = np.zeros(numStars, dtype=float) interp_iso.log_L = np.zeros(numStars, dtype=float) interp_iso.log_g = np.zeros(numStars, dtype=float) interp_iso.M = young_iso.M # Since mass grids should already be matched - + for i in range(numStars): # Do interpolations in linear space model_ages = [10**young_model_logage[0], 10**old_model_logage[0]] target_age = 10**log_age #model_ages = [young_model_logage[0], old_model_logage[0]] #target_age = log_age - + # Build interpolation functions Teff_arr = [10**young_iso.log_Teff[i], 10**old_iso.log_Teff[i]] logL_arr = [10**young_iso.log_L[i], 10**old_iso.log_L[i]] logg_arr = [10**young_iso.log_g[i], 10**old_iso.log_g[i]] - + f_log_Teff = interpolate.interp1d(model_ages, Teff_arr, kind='linear') f_log_L = interpolate.interp1d(model_ages, logL_arr, kind='linear') f_log_g = interpolate.interp1d(model_ages, logg_arr, kind='linear') @@ -1681,15 +1681,15 @@ def make_isochrone_pisa_interp(log_age, metallicity=0.015, py.legend() py.title('Pisa 2011 Isochrone at log t = %.2f' % log_age) py.savefig(rootDir + 'plots/interp_isochrone_at' + outSuffix + '.png') - + print( time.asctime(), 'Finished.') # Write output to file, MUST BE IN SAME ORDER AS ORIG FILES _out = open(isoFile, 'w') - - _out.write('%10s %10s %10s %10s\n' % + + _out.write('%10s %10s %10s %10s\n' % ('# log L', 'log Teff', 'Mass', 'log g')) - _out.write('%10s %10s %10s %10s\n' % + _out.write('%10s %10s %10s %10s\n' % ('# (Lsun)', '(Kelvin)', '(Msun)', '(cgs)')) for ii in range(len(interp_iso.M)): @@ -1713,7 +1713,7 @@ def get_orig_pisa_isochrones(metallicity=0.015): if not os.path.exists(pms_dir): print( 'Failed to find Siess PMS isochrones for metallicity = ' + metSuffix) return - + # Collect the isochrones files = glob.glob(pms_dir + '*.dat') count = len(files) @@ -1722,7 +1722,7 @@ def get_orig_pisa_isochrones(metallicity=0.015): data.isochrones = [] data.log_ages = [] - + # Extract useful 
params from isochrones for ff in range(len(files)): d = Table.read(files[ff], format='ascii') @@ -1730,7 +1730,7 @@ def get_orig_pisa_isochrones(metallicity=0.015): # Extract logAge from filename log_age = float(files[ff].split('_')[2][:-4]) - # Create an isochrone object + # Create an isochrone object iso = Isochrone(log_age) iso.M = d['col3'] iso.log_Teff = d['col2'] @@ -1739,13 +1739,13 @@ def get_orig_pisa_isochrones(metallicity=0.015): # If a log g column exist, extract it. Otherwise, calculate # log g from T and L and add column at end if len(d.keys()) == 3: - + # Calculate log g from T and L L_sun = 3.8 * 10**33 #cgs SB_sig = 5.67 * 10**-5 #cgs M_sun = 2. * 10**33 #cgs G_const = 6.67 * 10**-8 #cgs - + radius = np.sqrt( (10**d['col1'] * L_sun) / (4 * np.pi * SB_sig * (10**d['col2'])**4) ) g = (G_const * d['col3'] * M_sun) / radius**2 @@ -1754,7 +1754,7 @@ def get_orig_pisa_isochrones(metallicity=0.015): iso.log_g = np.log10(g.astype(np.float)) else: iso.log_g = d['col4'] - + data.isochrones.append(iso) data.log_ages.append(log_age) diff --git a/spisea/exceptions.py b/spisea/exceptions.py index 5b45baa1..e0587d90 100644 --- a/spisea/exceptions.py +++ b/spisea/exceptions.py @@ -8,12 +8,12 @@ class ModelMismatch(Exception): """ def __init__(self, required_num, grid_num, model_type): assert (model_type == 'evolution') | (model_type == 'atmosphere') - + if model_type == 'evolution': model_file = 'spisea_models.tar.gz' elif model_type == 'atmosphere': model_file = 'spisea_cdbs.tar.gz' - + # Compose error message str1 = 'WARNING: Desired {0} model requires model grid version >= {1},'.format(model_type, required_num) str2 = 'but model grid version {0} is installed.'.format(grid_num) diff --git a/spisea/filters.py b/spisea/filters.py index 23c029b3..e2a2aee9 100755 --- a/spisea/filters.py +++ b/spisea/filters.py @@ -29,7 +29,7 @@ def get_nirc2_filt(name): while len(idx) != 0: wavelength[idx+1] += 1.0e-8 - + diff = np.diff(wavelength) idx = np.where(diff <= 0)[0] 
#print( 'Duplicate entry loop' ) @@ -68,7 +68,7 @@ def get_2mass_filt(name): name='2MASS_{0}'.format(name)) return spectrum - + def get_vista_filt(name): """ @@ -79,21 +79,21 @@ def get_vista_filt(name): t = Table.read('{0}/vista/VISTA_Filters_at80K_forETC_{1}.dat'.format(filters_dir, name), format='ascii') except: - raise ValueError('Could not find VISTA filter file {0}/vista/VISTA_Filters_at80K_forETC_{1}.dat'.format(filters_dir, name)) + raise ValueError('Could not find VISTA filter file {0}/vista/VISTA_Filters_at80K_forETC_{1}.dat'.format(filters_dir, name)) # Wavelength must be in angstroms, transmission in fraction wave = t['col1'] * 10 trans = t['col2'] * 0.01 - + # Change any negative numbers to 0, as well as anything shortward # of 0.4 microns or longward of 2.9 microns # (no VISTA filter transmissions beyond these boundaries) bad = np.where( (trans < 0) | (wave < 4000) | (wave > 29000) ) trans[bad] = 0 - + # Now we can define the VISTA filter bandpass objects spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='VISTA_{0}'.format(name)) - + return spectrum def get_decam_filt(name): @@ -104,18 +104,18 @@ def get_decam_filt(name): try: t = Table.read('{0}/decam/DECam_filters.txt'.format(filters_dir), format='ascii') t.rename_column('Y', 'y') - + cols = np.array(t.keys()) idx = np.where(cols == name)[0][0] trans = t[cols[idx]] except: - raise ValueError('Could not find DECAM filter {0} in {1}/decam/DECam_filters.txt'.format(name, filters_dir)) + raise ValueError('Could not find DECAM filter {0} in {1}/decam/DECam_filters.txt'.format(name, filters_dir)) # Limit to unmasked regions only mask = np.ma.getmask(trans) good = np.where(mask == False) - + # Convert wavelengths from nm to angstroms, while eliminating masked regions wave = t['wavelength'][good] * 10. 
trans = trans[good] @@ -126,7 +126,7 @@ def get_decam_filt(name): return spectrum -def get_PS1_filt(name): +def get_PS1_filt(name): """ Define PS1 filter as pysynphot object """ @@ -145,11 +145,11 @@ def get_PS1_filt(name): trans = t[cols[idx]] except: - raise ValueError('Could not find PS1 filter {0} in {1}/ps1'.format(name, filters_dir)) + raise ValueError('Could not find PS1 filter {0} in {1}/ps1'.format(name, filters_dir)) # Convert wavelengths from nm to angstroms wave = t['wave'] * 10. - + spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='ps1_{0}'.format(name)) return spectrum @@ -161,7 +161,7 @@ def get_jwst_filt(name): try: t = Table.read('{0}/jwst/{1}.txt'.format(filters_dir, name), format='ascii') except: - raise ValueError('Could not find JWST filter {0} in {1}/jwst'.format(name, filters_dir)) + raise ValueError('Could not find JWST filter {0} in {1}/jwst'.format(name, filters_dir)) # Convert wavelengths to angstroms wave = t['microns'] * 10**4. @@ -170,10 +170,10 @@ def get_jwst_filt(name): # Change any negative numbers to 0 bad = np.where(trans < 0) trans[bad] = 0 - + spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='jwst_{0}'.format(name)) - return spectrum + return spectrum def get_Johnson_Glass_filt(name): """ @@ -182,7 +182,7 @@ def get_Johnson_Glass_filt(name): try: t = Table.read('{0}/Johnson_Glass/{1}.txt'.format(filters_dir, name), format='ascii') except: - raise ValueError('Could not find Johnson-Glass filter {0} in {1}/Johnson_Glass'.format(name, filters_dir)) + raise ValueError('Could not find Johnson-Glass filter {0} in {1}/Johnson_Glass'.format(name, filters_dir)) # Convert wavelengths to angstroms wave = t['col1'] * 10. 
@@ -191,10 +191,10 @@ def get_Johnson_Glass_filt(name): # Change any negative numbers to 0 bad = np.where(trans < 0) trans[bad] = 0 - + spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='jg_{0}'.format(name)) - return spectrum + return spectrum def get_nirc1_filt(name): """ @@ -203,12 +203,12 @@ def get_nirc1_filt(name): try: t = Table.read('{0}/nirc1/{1}.txt'.format(filters_dir, name), format='ascii') except: - raise ValueError('Could not find NIRC1 filter {0} in {1}/nirc1'.format(name, filters_dir)) + raise ValueError('Could not find NIRC1 filter {0} in {1}/nirc1'.format(name, filters_dir)) # Convert wavelengths to angstroms wave = t['col1'] * 10**4 trans = t['col2'] - + # Lets fix wavelength array for duplicate values or negative vals; # delete these entries diff = np.diff(wave) @@ -222,14 +222,14 @@ def get_nirc1_filt(name): diff = np.diff(wave) idx = np.where(diff <= 0)[0] - + # Change any negative transmission vals to 0 bad = np.where(trans < 0) trans[bad] = 0 spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='nirc1_{0}'.format(name)) - return spectrum + return spectrum def get_ctio_osiris_filt(name): """ @@ -238,7 +238,7 @@ def get_ctio_osiris_filt(name): try: t = Table.read('{0}/CTIO_OSIRIS/{1}.txt'.format(filters_dir, name), format='ascii') except: - raise ValueError('Could not find CTIO/OSIRIS filter {0} in {1}/CTIO_OSIRIS'.format(name, filters_dir)) + raise ValueError('Could not find CTIO/OSIRIS filter {0} in {1}/CTIO_OSIRIS'.format(name, filters_dir)) # Convert wavelengths to angstroms wave = t['col1'] * 10**4 @@ -247,7 +247,7 @@ def get_ctio_osiris_filt(name): # Change any negative numbers to 0 bad = np.where(trans < 0) trans[bad] = 0 - + spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='ctio_osiris_{0}'.format(name)) return spectrum @@ -259,7 +259,7 @@ def get_naco_filt(name): try: t = Table.read('{0}/naco/{1}.dat'.format(filters_dir, name), format='ascii') except: - raise 
ValueError('Could not find NACO filter {0} in {1}/naco'.format(name, filters_dir)) + raise ValueError('Could not find NACO filter {0} in {1}/naco'.format(name, filters_dir)) # Convert wavelengths to angstroms wave = t['col1'] * 10**4 @@ -268,7 +268,7 @@ def get_naco_filt(name): # Change any negative numbers to 0 bad = np.where(trans < 0) trans[bad] = 0 - + spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='naco_{0}'.format(name)) return spectrum @@ -282,7 +282,7 @@ def get_ubv_filt(name): except: raise ValueError('Could not find ubv filter {0} in {1}/ubv'.format(name, filters_dir)) - # Convert wavelength from nm to angstroms + # Convert wavelength from nm to angstroms wave = t[t.keys()[0]] * 10. # Convert transmission to ratio (from percent) trans = t[t.keys()[1]] / 100. @@ -336,8 +336,8 @@ def get_keck_osiris_filt(name): def get_gaia_filt(version, name): """ Define Gaia filters as pysynphot object. - To avoid confusion, we will only support - the revised DR2 zeropoints from + To avoid confusion, we will only support + the revised DR2 zeropoints from Evans+18. version: specify dr1, dr2, or dr2_rev @@ -357,7 +357,7 @@ def get_gaia_filt(version, name): path = '{0}/gaia/dr2_rev/'.format(filters_dir) else: raise ValueError('GAIA filter version {0} not understood. 
Please use dr1, dr2, or dr2_rev'.format(version)) - + # Get the filter info try: t = Table.read('{0}/Gaia_passbands.txt'.format(path), format='ascii') @@ -380,7 +380,7 @@ def get_gaia_filt(version, name): bad = np.where(trans > 90) trans[bad] = 0 except: - raise ValueError('Could not find Gaia filter {0}'.format(name)) + raise ValueError('Could not find Gaia filter {0}'.format(name)) # Convert wavelengths to angstroms (from nm) wave = t['LAMBDA'] * 10 @@ -472,3 +472,21 @@ def get_euclid_filt(name): name='euclid_{0}'.format(name)) return spectrum + +def get_nsfcam_filt(name): + + """ + Define irtf nsfcam filters as pysynphot object + """ + try: + t = Table.read('{0}/nsfcam/{1}.dat'.format(filters_dir, name), format='ascii') + except: + raise ValueError('Could not find nsfcam filter {0} in {1}/nsfcam'.format(name, filters_dir)) + + # Wavelength already in angstrom and and transmission in fraction + wave = t['col1'] + trans = t['col2'] + + spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='nsfcam_{0}'.format(name)) + + return spectrum diff --git a/spisea/ifmr.py b/spisea/ifmr.py index f88afdbe..b7cc7235 100755 --- a/spisea/ifmr.py +++ b/spisea/ifmr.py @@ -12,7 +12,7 @@ #https://ui.adsabs.harvard.edu/abs/2014ApJ...783...10S/abstract #BH/NS IFMR based on Sukhbold et al. 2016 for solar-Z models:: #https://ui.adsabs.harvard.edu/abs/2016ApJ...821...38S/abstract -#PPISN based on Woosley 2017: +#PPISN based on Woosley 2017: #https://ui.adsabs.harvard.edu/abs/2017ApJ...836..244W/abstract #PPSIN based on Woosley et al. 
2020: #https://ui.adsabs.harvard.edu/abs/2020ApJ...896...56W/abstract @@ -25,8 +25,9 @@ import numpy as np class IFMR(object): - def __init__(self): - pass + def __init__(self, seed=None): + self.seed = seed + self.rng = np.random.default_rng(seed) def get_Z(self, Fe_H): """ @@ -37,7 +38,7 @@ def get_Z(self, Fe_H): return 10**(Fe_H - 1.85387) def Kalirai_mass(self, MZAMS): - """ + """ From Kalirai+08 https://ui.adsabs.harvard.edu/abs/2008ApJ...676..594K/abstract 1.16 < MZAMS < 6.5 But we use this function for anything between 0.5 and 9 depending on the IFMR. @@ -47,36 +48,34 @@ def Kalirai_mass(self, MZAMS): result = 0.109*MZAMS + 0.394 final = np.zeros(len(MZAMS)) - - bad_idx = np.where(MZAMS < 0.5) - final[bad_idx] = -99 - - good_idx = np.where(MZAMS >= 0.5) + + good_idx = MZAMS >= 0.5 final[good_idx] = result[good_idx] + final[~good_idx] = -99 return final - - + + class IFMR_Spera15(IFMR): """ The BH/NS IFMR (used for MZAMS>= 7 M_sun) comes from `Spera et. al. (2015) Appendix C `_. - The WD IFMR (used for MZAMS< 7_Msun) comes from + The WD IFMR (used for MZAMS< 7_Msun) comes from `Kalirai et al. (2008) `_. See Rose et al. (submitted) for more details. 
- + """ - + #The get_Mco functions come from C11 of Spera def get_Mco_low_metal(self, Z, MZAMS): """ C15 of Spera, valid for Z < 1.0e-3 - + """ - + B1 = 67.07 K1 = 46.89 K2 = 1.138e2 @@ -95,7 +94,7 @@ def get_Mco_med_metal(self, Z, MZAMS): C14 of Spera, valid for Z >= 1.0e-3 and Z <= 4.0e-3 """ - + B1 = 40.98 + 3.415e4*Z - 8.064e6*Z**2 K1 = 35.17 + 1.548e4*Z - 3.759e6*Z**2 K2 = 20.36 + 1.162e5*Z - 2.276e7*Z**2 @@ -114,7 +113,7 @@ def get_Mco_high_metal(self, Z, MZAMS): C13 of Spera, valid for Z > 4.0e-3 """ - + B1 = 59.63 - 2.969e3*Z + 4.988e4*Z**2 K1 = 45.04 - 2.176e3*Z + 3.806e4*Z**2 K2 = 1.389e2 - 4.664e3*Z + 5.106e4*Z**2 @@ -124,7 +123,7 @@ def get_Mco_high_metal(self, Z, MZAMS): g1 = 0.5/(1+10**((K1-MZAMS)*(d1))) #C12 of Spera g2 = 0.5/(1+10**((K2-MZAMS)*(d2))) #C12 of Spera - + return -2.0 + (B1 + 2.0)*(g1 + g2) #C11 of Spera @@ -133,8 +132,8 @@ def get_Mco(self, Z, MZAMS): This function uses Spera C11-C15 in order to reurn an array of core masses from an array of metallicities and ZAMS masses. It will be the same length as these two arrays with -99 entries where invalid (ie MZAMS< 7 M_sun) - Parameters: - + Parameters: + Z: an array with metallicities reported as Z where Z is metal_mass/total_mass MZAMS: an array of ZAMS masses in solar masses. 
The Spera functions are valid for MZAMS> 7 M_sun @@ -159,28 +158,28 @@ def get_Mco(self, Z, MZAMS): core_masses[high_metal_idx] = self.get_Mco_high_metal(Z[high_metal_idx], MZAMS[high_metal_idx]) return core_masses - + def M_rem_very_low_metal_low_mass(self, Z, Mco): """ C1 of Spera, valid for Z <= 5.0e-4 and Mco <= 5.0 - + Parameters: Z: an array of metallicities reported as metal_mass/total_mass Mco: an arrray of core masses in M_sun """ - + p = -2.333 + 0.1559*Mco + 0.2700*Mco**2 #C2 of Spera #need to return p or 1.27, whichever is greater final = np.zeros(len(p)) - + p_max_idx = np.where(p >= 1.27) final[p_max_idx] = p[p_max_idx] - + p_min_idx = np.where(p < 1.27) final[p_min_idx] = 1.27 @@ -218,7 +217,7 @@ def M_rem_very_low_metal_high_mass(self, Z, Mco): m = -6.476e2*Z + 1.911 #C3 of Spera q = 2.300e3*Z + 11.67 #C3 of Spera - + f = m*Mco + q #C2 of Spera #need to return either p or f, whichever is less @@ -229,7 +228,7 @@ def M_rem_very_low_metal_high_mass(self, Z, Mco): f_min_idx = np.where(f < p) final[f_min_idx] = f[f_min_idx] - + return final def M_rem_low_metal_low_mass(self, Z, Mco): @@ -253,15 +252,15 @@ def M_rem_low_metal_low_mass(self, Z, Mco): #need to return h or 1.27, whichever is greater final = np.zeros(len(h)) - + h_max_idx = np.where(h >= 1.27) final[h_max_idx] = h[h_max_idx] - + h_min_idx = np.where(h < 1.27) final[h_min_idx] = 1.27 return final - + def M_rem_low_metal_med_mass(self, Z, Mco): """ @@ -317,9 +316,9 @@ def M_rem_low_metal_high_mass(self, Z, Mco): f_max_idx = np.where(f > h) final[f_max_idx] = f[f_max_idx] - + return final - + def M_rem_med_metal_low_mass(self, Z, Mco): """ @@ -342,10 +341,10 @@ def M_rem_med_metal_low_mass(self, Z, Mco): #need to return h or 1.27, whichever is greater final = np.zeros(len(h)) - + h_max_idx = np.where(h >= 1.27) final[h_max_idx] = h[h_max_idx] - + h_min_idx = np.where(h < 1.27) final[h_min_idx] = 1.27 @@ -396,7 +395,7 @@ def M_rem_med_metal_high_mass_1(self, Z, Mco): q = -1.296e4*Z + 26.98 f = 
m*Mco + q #C5 of Spera - + #need to return either h or f, whichever is greater final = np.zeros(len(h)) @@ -405,7 +404,7 @@ def M_rem_med_metal_high_mass_1(self, Z, Mco): f_max_idx = np.where(f > h) final[f_max_idx] = f[f_max_idx] - + return final def M_rem_med_metal_high_mass_2(self, Z, Mco): @@ -432,7 +431,7 @@ def M_rem_med_metal_high_mass_2(self, Z, Mco): q = 1.061 f = m*Mco + q #C5 of Spera - + #need to return either h or f, whichever is greater final = np.zeros(len(h)) @@ -441,7 +440,7 @@ def M_rem_med_metal_high_mass_2(self, Z, Mco): f_max_idx = np.where(f > h) final[f_max_idx] = f[f_max_idx] - + return final @@ -466,10 +465,10 @@ def M_rem_high_metal_low_mass(self, Z, Mco): #need to return h or 1.27, whichever is greater final = np.zeros(len(h)) - + h_max_idx = np.where(h >= 1.27) final[h_max_idx] = h[h_max_idx] - + h_min_idx = np.where(h < 1.27) final[h_min_idx] = 1.27 @@ -520,7 +519,7 @@ def M_rem_high_metal_high_mass(self, Z, Mco): q = 1.061 f = m*Mco + q #C5 of Spera - + #need to return either h or f, whichever is greater final = np.zeros(len(h)) @@ -529,15 +528,15 @@ def M_rem_high_metal_high_mass(self, Z, Mco): f_max_idx = np.where(f > h) final[f_max_idx] = f[f_max_idx] - + return final def generate_death_mass(self, mass_array, metallicity_array): """ - The top-level function that assigns the remnant type - and mass based on the stellar initial mass. - + The top-level function that assigns the remnant type + and mass based on the stellar initial mass. + Parameters ---------- mass_array: array of floats @@ -548,20 +547,20 @@ def generate_death_mass(self, mass_array, metallicity_array): Notes ------ The output typecode tells what compact object formed: - + * WD: typecode = 101 * NS: typecode = 102 * BH: typecode = 103 - A typecode of value -1 means you're outside the range of - validity for applying the ifmr formula. 
- A remnant mass of -99 means you're outside the range of + A typecode of value -1 means you're outside the range of + validity for applying the ifmr formula. + A remnant mass of -99 means you're outside the range of validity for applying the ifmr formula. - Range of validity: MZAMS > 0.5 - + Range of validity: MZAMS > 0.5 + Returns ------- output_arr: 2-element array - output_array[0] contains the remnant mass, and + output_array[0] contains the remnant mass, and output_array[1] contains the typecode """ @@ -586,10 +585,10 @@ def generate_death_mass(self, mass_array, metallicity_array): Kal_idx = np.where(core_mass < 0) rem_mass_array[Kal_idx] = self.Kalirai_mass(mass_array[Kal_idx]) - + ##### very low metallicity Z < 5.0e-4 - + #remnant masses of stars with Z < 5.0e-4 and Mco < 5.0 very_low_metal_low_mass_idx = np.where((Z_array < 5.0e-4) & (core_mass < 5.0) & (core_mass >= 0)) rem_mass_array[very_low_metal_low_mass_idx] = self.M_rem_very_low_metal_low_mass(Z_array[very_low_metal_low_mass_idx], core_mass[very_low_metal_low_mass_idx]) @@ -603,7 +602,7 @@ def generate_death_mass(self, mass_array, metallicity_array): rem_mass_array[very_low_metal_high_mass_idx] = self.M_rem_very_low_metal_high_mass(Z_array[very_low_metal_high_mass_idx], core_mass[very_low_metal_high_mass_idx]) #### low metallicity 5.0e-4 <= Z < 1.0e-3 - + #remnant masses of stars with 5.0e-4 <= Z < 1.0e-3 and Mco < 5.0 low_metal_low_mass_idx = np.where((Z_array >= 5.0e-4) & (Z_array < 1.0e-3) & (core_mass < 5.0) & (core_mass >= 0)) rem_mass_array[low_metal_low_mass_idx] = self.M_rem_low_metal_low_mass(Z_array[low_metal_low_mass_idx], core_mass[low_metal_low_mass_idx]) @@ -615,9 +614,9 @@ def generate_death_mass(self, mass_array, metallicity_array): #remnant masses of stars with 5.0e-4 <= Z < 1.0e-3 and Mco > 10.0 low_metal_high_mass_idx = np.where((Z_array >= 5.0e-4) & (Z_array < 1.0e-3) & (core_mass > 10.0)) rem_mass_array[low_metal_high_mass_idx] = 
self.M_rem_low_metal_high_mass(Z_array[low_metal_high_mass_idx], core_mass[low_metal_high_mass_idx]) - + #### medium metallicity 1.0e-3 <= Z <= 4.0e-3 - + #remnant masses of stars with 1.0e-3 <= Z <= 4.0e-3 and Mco < 5.0 med_metal_low_mass_idx = np.where((Z_array >= 1.0e-3) & (Z_array <= 4.0e-3) & (core_mass < 5.0) & (core_mass >= 0)) rem_mass_array[med_metal_low_mass_idx] = self.M_rem_med_metal_low_mass(Z_array[med_metal_low_mass_idx],core_mass[med_metal_low_mass_idx]) @@ -633,9 +632,9 @@ def generate_death_mass(self, mass_array, metallicity_array): #remnant masses of stars with 2.0e-3 <= Z <= 4.0e-3 and Mco > 10.0 med_metal_high_mass_idx_2 = np.where((Z_array >= 2.0e-3) & (Z_array <= 4.0e-3) & (core_mass > 10.0)) rem_mass_array[med_metal_high_mass_idx_2] = self.M_rem_med_metal_high_mass_2(Z_array[med_metal_high_mass_idx_2], core_mass[med_metal_high_mass_idx_2]) - + #### high metallicity Z > 4.0e-3 - + #remnant masses of stars with Z > 4.0e-3 and Mco < 5.0 high_metal_low_mass_idx = np.where((Z_array > 4.0e-3) & (core_mass < 5.0) & (core_mass >= 0)) rem_mass_array[high_metal_low_mass_idx] = self.M_rem_high_metal_low_mass(Z_array[high_metal_low_mass_idx], core_mass[high_metal_low_mass_idx]) @@ -647,16 +646,16 @@ def generate_death_mass(self, mass_array, metallicity_array): #remnant masses of stars with Z > 4.0e-3 and MZAMS > 10.0 high_metal_high_mass_idx = np.where((Z_array > 4.0e-3) & (core_mass > 10.0)) rem_mass_array[high_metal_high_mass_idx] = self.M_rem_high_metal_high_mass(Z_array[high_metal_high_mass_idx], core_mass[high_metal_high_mass_idx]) - + #assign object types based on remnant mass bad_idx = np.where(rem_mass_array < 0) #outside the range of validity for the ifmr WD_idx = np.where((rem_mass_array <= 1.4) & (rem_mass_array >= 0 )) #based on the Chandresekhar limit - NS_idx = np.where((rem_mass_array > 1.4) & (rem_mass_array <= 3.0)) #based on figures 15-17 of Spera + NS_idx = np.where((rem_mass_array > 1.4) & (rem_mass_array <= 3.0)) #based on figures 
15-17 of Spera BH_idx = np.where(rem_mass_array > 3.0) #based on figures 15-17 of Spera output_array[0][bad_idx] = rem_mass_array[bad_idx] output_array[1][bad_idx] = -1 - + output_array[0][WD_idx] = rem_mass_array[WD_idx] output_array[1][WD_idx] = codes['WD'] @@ -667,76 +666,76 @@ def generate_death_mass(self, mass_array, metallicity_array): output_array[1][BH_idx] = codes['BH'] return output_array - + class IFMR_Raithel18(IFMR): """ - The IFMR is a combination of the WD IFMR from + The IFMR is a combination of the WD IFMR from `Kalirai et al. (2008) `_ and the NS/BH IFMR from `Raithel et al. (2018) `_. - Note that the NS masses are NOT assigned based on the above results. + Note that the NS masses are NOT assigned based on the above results. We do take the NS/BH formation ratio and the BH masses. - NS masses are assigned based on random draws from a Gaussian (see NS_mass function). + NS masses are assigned based on random draws from a Gaussian (see NS_mass function). - See + See `Lam et al. (2020) `_ and Rose et al. (submitted) for more details. 
""" def BH_mass_core_low(self, MZAMS): - """ - Eqn (1) - Paper: 15 < MZAMS < 40 - Us extending: 15 < MZAMS < 42.22 + """ + Eqn (1) + Paper: 15 < MZAMS < 40 + Us extending: 15 < MZAMS < 42.22 """ return -2.024 + 0.4130*MZAMS def BH_mass_all_low(self, MZAMS): - """ - Eqn (2) - Paper: 15 < MZAMS < 40 - Us extending: 15 < MZAMS < 42.22 + """ + Eqn (2) + Paper: 15 < MZAMS < 40 + Us extending: 15 < MZAMS < 42.22 """ return 16.28 + 0.00694 * (MZAMS - 21.872) - 0.05973 * (MZAMS - 21.872)**2 + 0.003112 * (MZAMS - 21.872)**3 def BH_mass_high(self, MZAMS): - """ - Eqn (3) - Paper: 45 < MZAMS < 120 - Us extending: 42.22 < MZAMS < 120 + """ + Eqn (3) + Paper: 45 < MZAMS < 120 + Us extending: 42.22 < MZAMS < 120 """ return 5.795 + 1.007 * 10**9 * MZAMS**-4.926 def BH_mass_low(self, MZAMS, f_ej): - """ - Eqn (4) - Paper: 15 < MZAMS < 40 - Us extending: 15 < MZAMS < 42.22 + """ + Eqn (4) + Paper: 15 < MZAMS < 40 + Us extending: 15 < MZAMS < 42.22 """ return f_ej * self.BH_mass_core_low(MZAMS) + (1 - f_ej) * self.BH_mass_all_low(MZAMS) def NS_mass(self, MZAMS): - """ + """ Drawing the NS mass from a Gaussian distrobuton based on observational data. Gaussian fit by Emily Ramey and Sergiy Vasylyev of University of Caifornia, Berkeley using a - sample of NSs from Ozel & Freire (2016) — J1811+2405 Ng et al. (2020), - J2302+4442 Kirichenko et al. (2018), J2215+5135 Linares et al. (2018), - J1913+1102 Ferdman & Collaboration (2017), J1411+2551 Martinez et al. (2017), + sample of NSs from Ozel & Freire (2016) — J1811+2405 Ng et al. (2020), + J2302+4442 Kirichenko et al. (2018), J2215+5135 Linares et al. (2018), + J1913+1102 Ferdman & Collaboration (2017), J1411+2551 Martinez et al. (2017), J1757+1854 Cameron et al. (2018), J0030+0451 Riley et al. (2019), J1301+0833 Romani et al. (2016) The Gaussian distribution was fit using this data and a Bayesian MCMC method adapted from Kiziltan et al. (2010). 
- + """ - return np.random.normal(loc=1.36, scale=0.09, size=len(MZAMS)) + return self.rng.normal(loc=1.36, scale=0.09, size=len(MZAMS)) def generate_death_mass(self, mass_array): """ - The top-level function that assigns the remnant type - and mass based on the stellar initial mass. - + The top-level function that assigns the remnant type + and mass based on the stellar initial mass. + Parameters ---------- mass_array: array of floats @@ -747,15 +746,15 @@ def generate_death_mass(self, mass_array): Notes ------ The output typecode tells what compact object formed: - + * WD: typecode = 101 * NS: typecode = 102 * BH: typecode = 103 - A typecode of value -1 means you're outside the range of - validity for applying the ifmr formula. + A typecode of value -1 means you're outside the range of + validity for applying the ifmr formula. - A remnant mass of -99 means you're outside the range of + A remnant mass of -99 means you're outside the range of validity for applying the ifmr formula. Range of validity: 0.5 < MZAMS < 120 @@ -763,7 +762,7 @@ def generate_death_mass(self, mass_array): Returns ------- output_arr: 2-element array - output_array[0] contains the remnant mass, and + output_array[0] contains the remnant mass, and output_array[1] contains the typecode """ @@ -772,10 +771,10 @@ def generate_death_mass(self, mass_array): output_array = np.zeros((2, len(mass_array))) #Random array to get probabilities for what type of object will form - random_array = np.random.randint(1, 1001, size = len(mass_array)) + random_array = self.rng.integers(1, 1001, size = len(mass_array)) codes = {'WD': 101, 'NS': 102, 'BH': 103} - + """ The id_arrays are to separate all the different formation regimes """ @@ -802,7 +801,7 @@ def generate_death_mass(self, mass_array): id_array4_BH = np.where((mass_array >= 17.8) & (mass_array < 18.5) & (random_array > 833)) output_array[0][id_array4_BH]= self.BH_mass_low(mass_array[id_array4_BH], 0.9) output_array[1][id_array4_BH] = codes['BH'] - + 
id_array4_NS = np.where((mass_array >= 17.8) & (mass_array < 18.5) & (random_array <= 833)) output_array[0][id_array4_NS] = self.NS_mass(mass_array[id_array4_NS]) output_array[1][id_array4_NS] = codes['NS'] @@ -810,7 +809,7 @@ def generate_death_mass(self, mass_array): id_array5_BH = np.where((mass_array >= 18.5) & (mass_array < 21.7) & (random_array > 500)) output_array[0][id_array5_BH] = self.BH_mass_low(mass_array[id_array5_BH], 0.9) output_array[1][id_array5_BH] = codes['BH'] - + id_array5_NS = np.where((mass_array >= 18.5) & (mass_array < 21.7) & (random_array <= 500)) output_array[0][id_array5_NS] = self.NS_mass(mass_array[id_array5_NS]) output_array[1][id_array5_NS] = codes['NS'] @@ -822,7 +821,7 @@ def generate_death_mass(self, mass_array): id_array7_BH = np.where((mass_array >= 25.2) & (mass_array < 27.5) & (random_array > 652)) output_array[0][id_array7_BH] = self.BH_mass_low(mass_array[id_array7_BH], 0.9) output_array[1][id_array7_BH] = codes['BH'] - + id_array7_NS = np.where((mass_array >= 25.2) & (mass_array < 27.5) & (random_array <= 652)) output_array[0][id_array7_NS] = self.NS_mass(mass_array[id_array7_NS]) output_array[1][id_array7_NS] = codes['NS'] @@ -838,7 +837,7 @@ def generate_death_mass(self, mass_array): id_array10_BH = np.where((mass_array >= 60) & (mass_array < 120) & (random_array > 400)) output_array[0][id_array10_BH] = self.BH_mass_high(mass_array[id_array10_BH]) output_array[1][id_array10_BH] = codes['BH'] - + id_array10_NS = np.where((mass_array >= 60) & (mass_array < 120) & (random_array <= 400)) output_array[0][id_array10_NS] = self.NS_mass(mass_array[id_array10_NS]) output_array[1][id_array10_NS] = codes['NS'] @@ -851,11 +850,11 @@ class IFMR_N20_Sukhbold(IFMR): `Sukhbold & Woosley (2014) `_. The BH/NS IFMR for solar metallicity progenitors comes from `Sukhbold et al. (2016) `_. - The PPISN models are from + The PPISN models are from `Woosley (2017) `_ and `Woosley et al. (2020) `_. 
- The WD IFMR is from + The WD IFMR is from `Kalirai et al. (2008) `_. Note that the NS masses are NOT assigned based on the above results. We do take the NS/BH formation ratio and the BH masses. @@ -870,7 +869,7 @@ def zero_BH_mass(self, MZAMS): #func = np.poly1d(zero_coeff) #result = func(MZAMS) return 0.46522639*MZAMS + -3.29170817 - + def solar_BH_mass(self, MZAMS): #solar_coeff = [-0.27079245, 24.74320755] #func = np.poly1d(solar_coeff) @@ -880,24 +879,24 @@ def solar_BH_mass(self, MZAMS): Zsun = 0.014 def NS_mass(self, MZAMS): - """ + """ Drawing the NS mass from a Gaussian distrobuton based on observational data. Gaussian fit by Emily Ramey and Sergiy Vasylyev of University of Caifornia, Berkeley using a - sample of NSs from Ozel & Freire (2016) — J1811+2405 Ng et al. (2020), - J2302+4442 Kirichenko et al. (2018), J2215+5135 Linares et al. (2018), - J1913+1102 Ferdman & Collaboration (2017), J1411+2551 Martinez et al. (2017), + sample of NSs from Ozel & Freire (2016) — J1811+2405 Ng et al. (2020), + J2302+4442 Kirichenko et al. (2018), J2215+5135 Linares et al. (2018), + J1913+1102 Ferdman & Collaboration (2017), J1411+2551 Martinez et al. (2017), J1757+1854 Cameron et al. (2018), J0030+0451 Riley et al. (2019), J1301+0833 Romani et al. (2016) The Gaussian distribution was fit using this data and a Bayesian MCMC method adapted from Kiziltan et al. (2010). 
- + """ if isinstance(MZAMS, np.ndarray): - return np.random.normal(loc=1.36, scale=0.09, size=len(MZAMS)) + return self.rng.normal(loc=1.36, scale=0.09, size=len(MZAMS)) else: - return np.random.normal(loc=1.36, scale=0.09, size=1)[0] - - + return self.rng.normal(loc=1.36, scale=0.09, size=1)[0] + + def BH_mass_low(self, MZAMS): """ 9 < MZAMS < 40 Msun @@ -913,9 +912,8 @@ def BH_mass_high(self, MZAMS, Z): """ # Solar metallicity (what Sam is using) Zsun = 0.014 - - zfrac = Z/Zsun + zfrac = np.atleast_1d(Z/Zsun) # super-solar Z gives identical results as solar Z above_idx = np.where(zfrac > 1) if len(above_idx) > 1: @@ -937,15 +935,14 @@ def prob_BH_high(self, Z): """ # Solar metallicity (what Sam is using) Zsun = 0.014 - - zfrac = Z/Zsun - - # super-solar Z gives identical results as solar Z - if zfrac > 1: - zfrac = 1.0 - if zfrac < 0: - raise ValueError('Z must be non-negative') + Z = np.atleast_1d(Z) + # Convert from [Fe/H] to Z + zfrac = Z / Zsun + + # super-solar Z gives identical results as solar Z + zfrac[zfrac > 1] = 1.0 + zfrac[zfrac < 0] = np.nan pBH = 1 - 0.8*zfrac @@ -954,9 +951,9 @@ def prob_BH_high(self, Z): def generate_death_mass(self, mass_array, metallicity_array): """ - The top-level function that assigns the remnant type - and mass based on the stellar initial mass. - + The top-level function that assigns the remnant type + and mass based on the stellar initial mass. + Parameters ---------- mass_array: array of floats @@ -967,93 +964,101 @@ def generate_death_mass(self, mass_array, metallicity_array): Notes ------ The output typecode tells what compact object formed: - + * WD: typecode = 101 * NS: typecode = 102 * BH: typecode = 103 - A typecode of value -1 means you're outside the range of - validity for applying the ifmr formula. - A remnant mass of -99 means you're outside the range of + A typecode of value -1 means you're outside the range of + validity for applying the ifmr formula. 
+ A remnant mass of -99 means you're outside the range of validity for applying the ifmr formula. - Range of validity: MZAMS > 0.5 - + Range of validity: MZAMS > 0.5 + Returns ------- output_arr: 2-element array - output_array[0] contains the remnant mass, and + output_array[0] contains the remnant mass, and output_array[1] contains the typecode """ #output_array[0] holds the remnant mass #output_array[1] holds the remnant type + mass_array = np.atleast_1d(mass_array) + metallicity_array = np.atleast_1d(metallicity_array) + output_array = np.zeros((2, len(mass_array))) codes = {'WD': 101, 'NS': 102, 'BH': 103} + # Array to store the remnant masses - rem_mass_array = np.zeros(len(mass_array)) + # rem_mass_array = np.zeros(len(mass_array)) # Convert from [Fe/H] to Z # FIXME: if have Fe/H = nan that makes Z = 0. Is that the behavior we want? Z_array = np.zeros((len(metallicity_array))) - metal_idx = np.where(metallicity_array != np.nan) + metal_idx = ~np.isnan(metallicity_array) Z_array[metal_idx] = self.get_Z(metallicity_array[metal_idx]) # Random array to get probabilities for what type of object will form - random_array = np.random.randint(1, 101, size = len(mass_array)) + random_array = self.rng.integers(1, 101, size=len(mass_array)) - id_array0 = np.where((mass_array < 0.5) | (mass_array >= 120)) - output_array[0][id_array0] = -99 * np.ones(len(id_array0)) - output_array[1][id_array0] = -1 * np.ones(len(id_array0)) + id_array0 = (mass_array < 0.5) | (mass_array >= 120) + output_array[0][id_array0] = -99 + output_array[1][id_array0] = -1 - id_array1 = np.where((mass_array >= 0.5) & (mass_array < 9)) + id_array1 = (mass_array >= 0.5) & (mass_array < 9) output_array[0][id_array1] = self.Kalirai_mass(mass_array[id_array1]) output_array[1][id_array1]= codes['WD'] - id_array2 = np.where((mass_array >= 9) & (mass_array < 15)) + id_array2 = (mass_array >= 9) & (mass_array < 15) output_array[0][id_array2] = self.NS_mass(mass_array[id_array2]) output_array[1][id_array2] = 
codes['NS'] - id_array3_BH = np.where((mass_array >= 15) & (mass_array < 21.8) & (random_array > 75)) + id_array3_BH = (mass_array >= 15) & (mass_array < 21.8) & (random_array > 75) output_array[0][id_array3_BH] = self.BH_mass_low(mass_array[id_array3_BH]) output_array[1][id_array3_BH] = codes['BH'] - id_array3_NS = np.where((mass_array >= 15) & (mass_array < 21.8) & (random_array <= 75)) + id_array3_NS = (mass_array >= 15) & (mass_array < 21.8) & (random_array <= 75) output_array[0][id_array3_NS] = self.NS_mass(mass_array[id_array3_NS]) output_array[1][id_array3_NS] = codes['NS'] - id_array4 = np.where((mass_array >= 21.8) & (mass_array < 25.2)) + id_array4 = (mass_array >= 21.8) & (mass_array < 25.2) output_array[0][id_array4] = self.BH_mass_low(mass_array[id_array4]) output_array[1][id_array4] = codes['BH'] - id_array5 = np.where((mass_array >= 25.2) & (mass_array < 27.4)) + id_array5 = (mass_array >= 25.2) & (mass_array < 27.4) output_array[0][id_array5] = self.NS_mass(mass_array[id_array5]) output_array[1][id_array5] = codes['NS'] - id_array6 = np.where((mass_array >= 27.4) & (mass_array < 39.6)) + id_array6 = (mass_array >= 27.4) & (mass_array < 39.6) output_array[0][id_array6] = self.BH_mass_low(mass_array[id_array6]) output_array[1][id_array6] = codes['BH'] - id_array7 = np.where((mass_array >= 39.6) & (mass_array < 60)) + id_array7 = (mass_array >= 39.6) & (mass_array < 60) output_array[0][id_array7] = self.BH_mass_high(mass_array[id_array7], Z_array[id_array7]) output_array[1][id_array7] = codes['BH'] - id_array8 = np.where((mass_array >= 60) & (mass_array < 120)) - for i in range(0, len(id_array8[0])): - pBH = self.prob_BH_high(Z_array[id_array8][i]) - if random_array[id_array8][i] > 100*pBH: - output_array[0][id_array8[0][i]] = self.BH_mass_high(mass_array[id_array8][i], - Z_array[id_array8][i]) - output_array[1][id_array8[0][i]] = codes['BH'] - - else: - output_array[0][id_array8[0][i]] = self.NS_mass(mass_array[id_array8][i]) - 
output_array[1][id_array8[0][i]] = codes['NS'] + BH_or_NS = np.where((mass_array >= 60) & (mass_array < 120))[0] + pBH = self.prob_BH_high(Z_array[BH_or_NS]) + is_BH = random_array[BH_or_NS] > 100 * pBH + + id_array8 = BH_or_NS[is_BH] + id_array9 = BH_or_NS[~is_BH] + + # Assign BH masses and types for BH-forming indices + output_array[0][id_array8] = self.BH_mass_high(mass_array[id_array8], Z_array[id_array8]) + output_array[1][id_array8] = codes['BH'] + + # Assign NS masses and types for NS-forming indices + output_array[0][id_array9] = self.NS_mass(mass_array[id_array9]) + output_array[1][id_array9] = codes['NS'] + #this is where sam's janky fix for unphysical BH massses goes #any BH with mass less then 3 M_sun is reassigned as a NS #and given a mass from the NS mass dist instead - id_array9 = np.where((output_array[1] == codes['BH']) & (output_array[0] < 3.0)) - output_array[0][id_array9] = self.NS_mass(mass_array[id_array9]) - output_array[1][id_array9] = codes['NS'] + id_array10 = (output_array[1] == codes['BH']) & (output_array[0] < 3.0) + output_array[0][id_array10] = self.NS_mass(mass_array[id_array10]) + output_array[1][id_array10] = codes['NS'] - return(output_array) + return output_array diff --git a/spisea/imf/imf.py b/spisea/imf/imf.py index 14ea7fb3..df6a8887 100755 --- a/spisea/imf/imf.py +++ b/spisea/imf/imf.py @@ -4,7 +4,7 @@ # Original code was taken from libimf package written by Jan Pflamm-Altenburg # and has been modified only marginally. The libimf code was licensed under # a GNU General Public License. -# +# # When I use this code, I should cite Pflamm-Altenburg & Kroupa 2006 # # Unfortunately, the code was almost completely un-commented, so all @@ -22,47 +22,82 @@ class IMF(object): """ - The IMF base class. The mass sampling and multiplicity - implementation is here. - - Notes - ----- - Code author: J. Lu. 
- - Original code was taken from libimf package written by Jan Pflamm-Altenburg - (`Pflamm-Altenburg & Kroupa 2006 `_) - and has been modified only marginally, though more convinient and general purpose - functions have been added. The libimf code was licensed under - a GNU General Public License. + The IMF base class. The mass sampling and multiplicity + implementation is here. Parameters ---------- massLimits : 2 element array; optional - Define the minimum and maximum stellar masses in the IMF, in - solar masses. First element is taken as the min, second element + Define the minimum and maximum stellar masses in the IMF, in + solar masses. First element is taken as the min, second element the max (e.g. `massLimits` = [min_mass, max_mass]). multiplicity : Multiplicity object or None - If None, no multiplicity is assumed. Otherwise, use + If None, no multiplicity is assumed. Otherwise, use multiplicity object to create multiple star systems. + + seed : int, optional + Seed for the random generator numpy.random.default_rng(seed). + All random functions in the class will use this generator, by default None. + Behavior: + :: + + imf = IMF(..., seed=42) + result1 = imf.generate_cluster() + result2 = imf.generate_cluster() + imf = IMF(..., seed=42) + result3 = imf.generate_cluster() + result4 = imf.generate_cluster() + + result1==result3, result2==result4, but result1≠result2, result3≠result4. + This is the same behavior as + :: + + rng = np.random.default_rng(seed=42) + result1 = rng.random(1) + result2 = rng.random(1) + rng = np.random.default_rng(seed=42) + result3 = rng.random(1) + result4 = rng.random(1) + + If identical output is desired over each run, the random state can be reset before running the function, e.g. + :: + + imf.rng = np.random.default_rng(seed=42) + result1 = imf.generate_cluster() + imf.rng = np.random.default_rng(seed=42) + result2 = imf.generate_cluster() + + In this case, result1==result2 + + Notes + ----- + Code author: J. Lu. 
+ + Original code was taken from libimf package written by Jan Pflamm-Altenburg + (`Pflamm-Altenburg & Kroupa 2006 `_) + and has been modified only marginally, though more convinient and general purpose + functions have been added. The libimf code was licensed under + a GNU General Public License. + """ - def __init__(self, massLimits=np.array([0.1,150]), multiplicity=None): + def __init__(self, massLimits=np.array([0.1,150]), multiplicity=None, seed=None): self._multi_props = multiplicity - self._mass_limits = massLimits + self._mass_limits = np.atleast_1d(massLimits) + self.rng = np.random.default_rng(seed) - if multiplicity == None: - self.make_multiples = False - else: + if multiplicity: self.make_multiples = True - + else: + self.make_multiples = False + return - - def generate_cluster(self, totalMass, seed=None): + def generate_cluster(self, totalMass): """ Generate a cluster of stellar systems with the specified IMF. - + Randomly sample from an IMF with specified mass limits until the desired total mass is reached. The maximum stellar mass is not allowed to exceed the total cluster mass. @@ -79,23 +114,21 @@ def generate_cluster(self, totalMass, seed=None): totalMass : float The total mass of the cluster (including companions) in solar masses. - seed: int - If set to non-None, all random sampling will be seeded with the - specified seed, forcing identical output. - Default None - Returns ------- masses : numpy float array - List of primary star masses. + Array of primary star masses. isMultiple : numpy boolean array - List of booleans with True for each primary star that is in a multiple + Array of booleans with True for each primary star that is in a multiple system and False for each single star. - companionMasses : numpy float array - List of - + companionMasses : numpy masked array + Masked array of companion masses. Each row corresponds to a primary star, and each column corresponds to a companion. 
The mask is True for entries that are not valid companions (e.g. for single stars or for companions that are below the minimum mass limit). + + systemMasses : numpy float array + Array of total system masses (primary + companions) for each primary star. + """ initial_mass_limit = self._mass_limits[-1] @@ -114,7 +147,8 @@ def generate_cluster(self, totalMass, seed=None): # Generate output arrays. masses = np.array([], dtype=float) isMultiple = np.array([], dtype=bool) - compMasses = np.array([], dtype=object) + # compMasses = {} # Hashmap for index -> compMasses for faster lookup + compMasses = [] systemMasses = np.array([], dtype=float) # Loop through and add stars to the cluster until we get to @@ -122,51 +156,45 @@ def generate_cluster(self, totalMass, seed=None): totalMassTally = 0 loopCnt = 0 - # Set the random seed, if desired - if seed: - np.random.seed(seed=seed) - + # start_while = time.time() while totalMassTally < totalMass: # Generate a random number array. - uniX = np.random.rand(int(newStarCount)) - + uniX = self.rng.random(int(newStarCount)) # Convert into the IMF from the inverted CDF newMasses = self.dice_star_cl(uniX) - + # Testing for Nans produced in masses test = np.isnan(newMasses) if np.sum(test) > 0: raise ValueError('Nan detected in cluster mass') - + # Dealing with multiplicity - if self._multi_props != None: - newCompMasses = np.empty((len(newMasses),), dtype=object) - newCompMasses.fill([]) - + if self._multi_props: + # newCompMasses = np.empty((len(newMasses),), dtype=object) + # newCompMasses.fill([]) # Determine the multiplicity of every star MF = self._multi_props.multiplicity_fraction(newMasses) CSF = self._multi_props.companion_star_fraction(newMasses) - - newIsMultiple = np.random.rand(int(newStarCount)) < MF - # Copy over the primary masses. Eventually add the companions. 
- newSystemMasses = newMasses.copy() + newIsMultiple = self.rng.random(int(newStarCount)) < MF # Function to calculate multiple systems more efficiently - newCompMasses, newSystemMasses, newIsMultiple = self.calc_multi(newMasses, newCompMasses, - newSystemMasses, newIsMultiple, - CSF, MF) - + # start_calc = time.time() + newCompMasses, newSystemMasses, newIsMultiple = self.calc_multi(newMasses, newIsMultiple, CSF, MF) + # end_calc = time.time() + # print('Time taken for calc_multi: ', end_calc - start_calc) newTotalMassTally = newSystemMasses.sum() isMultiple = np.append(isMultiple, newIsMultiple) systemMasses = np.append(systemMasses, newSystemMasses) - compMasses = np.append(compMasses, newCompMasses) + compMasses.append(newCompMasses) + else: newTotalMassTally = newMasses.sum() - + # end_while = time.time() + # print('Time taken for while loop: ', end_while - start_while) # Append to our primary masses array masses = np.append(masses, newMasses) - + if (loopCnt >= 0): log.info('sample_imf: Loop %d added %.2e Msun to previous total of %.2e Msun' % (loopCnt, newTotalMassTally, totalMassTally)) @@ -174,9 +202,28 @@ def generate_cluster(self, totalMass, seed=None): totalMassTally += newTotalMassTally newStarCount = mean_number * 0.1 # increase by 20% each pass loopCnt += 1 - + # Make a running sum of the system masses if self._multi_props: + # Concatenate the companion masses + if len(compMasses) > 1: + max_cols = max(compMass.shape[1] for compMass in compMasses) + + # Pad each array to have the same number of columns + padded_arrays = [ + np.ma.masked_all((compMass.shape[0], max_cols)) for compMass in compMasses + ] + + for i, compMass in enumerate(compMasses): + padded_arrays[i][:, :compMass.shape[1]] = compMass + + # Vertically stack the padded arrays + compMasses = np.ma.vstack(padded_arrays) + + else: + compMasses = compMasses[0] + + # Make a running sum of the system masses massCumSum = systemMasses.cumsum() else: massCumSum = masses.cumsum() @@ -198,77 
+245,55 @@ def generate_cluster(self, totalMass, seed=None): self._mass_limits[-1] = initial_mass_limit return (masses, isMultiple, compMasses, systemMasses) - - def calc_multi(self, newMasses, compMasses, newSystemMasses, newIsMultiple, CSF, MF): + + def calc_multi(self, newMasses, newIsMultiple, CSF, MF): """ Helper function to calculate multiples more efficiently. We will use array operations as much as possible """ - # Identify multiple systems, calculate number of companions for - # each - idx = np.where(newIsMultiple == True)[0] - n_comp_arr = 1 + np.random.poisson((CSF[idx] / MF[idx]) - 1) - if self._multi_props.companion_max == True: - too_many = np.where(n_comp_arr > self._multi_props.CSF_max)[0] - n_comp_arr[too_many] = self._multi_props.CSF_max - primary = newMasses[idx] + # Copy over the primary masses. Eventually add the companions. + newSystemMasses = newMasses.copy() + + # Identify multiple systems, calculate number of companions for each + multiple_idx = np.where(newIsMultiple)[0] + comp_nums = 1 + self.rng.poisson((CSF[multiple_idx] / MF[multiple_idx]) - 1) + if self._multi_props.companion_max: + too_many = np.where(comp_nums > self._multi_props.CSF_max)[0] + comp_nums[too_many] = self._multi_props.CSF_max + primary = newMasses[multiple_idx] # We will deal with each number of multiple system independently. This is # so we can put in uniform arrays in _multi_props.random_q. - num = np.unique(n_comp_arr) - for ii in num: - tmp = np.where(n_comp_arr == ii)[0] - - if ii == 1: - # Single companion case - q_values = self._multi_props.random_q(np.random.rand(len(tmp))) - - # Calculate mass of companion - m_comp = q_values * primary[tmp] - - # Only keep companions that are more than the minimum mass. 
Update - # compMasses, newSystemMasses, and newIsMultiple appropriately - good = np.where(m_comp >= self._mass_limits[0])[0] - for jj in good: - compMasses[idx[tmp[jj]]] = np.transpose([m_comp[jj]]) - newSystemMasses[idx[tmp[jj]]] += compMasses[idx[tmp[jj]]] - - bad = np.where(m_comp < self._mass_limits[0])[0] - newIsMultiple[idx[tmp[bad]]] = False - else: - # Multple companion case - q_values = self._multi_props.random_q(np.random.rand(len(tmp), ii)) + comp_unique = np.unique(comp_nums) + comp_indices = [np.where(comp_nums == i)[0] for i in comp_unique] + compMasses = np.zeros((len(newMasses), max(comp_unique))) - # Calculate masses of companions - m_comp = np.multiply(q_values, np.transpose([primary[tmp]])) + for comp_num, comp_index in zip(comp_unique, comp_indices): + # Calculate masses of companions + q_values = self._multi_props.random_q(self.rng.random((len(comp_index), comp_num))) + m_comp = np.multiply(q_values, np.transpose([primary[comp_index]])) + compMasses[multiple_idx[comp_index], :comp_num] = m_comp - # Update compMasses, newSystemMasses, and newIsMultiple appropriately - for jj in range(len(tmp)): - m_comp_tmp = m_comp[jj] - compMasses[idx[tmp[jj]]] = m_comp_tmp[m_comp_tmp >= self._mass_limits[0]] - newSystemMasses[idx[tmp[jj]]] += compMasses[idx[tmp[jj]]].sum() - - # Double check for the case when we drop all companions. - # This happens a lot near the minimum allowed mass. - if len(compMasses[idx[tmp[jj]]]) == 0: - newIsMultiple[idx[tmp[jj]]] = False + # Mask out companions that are less than the minimum mass + compMasses = np.ma.MaskedArray(compMasses, mask=compMasses < self._mass_limits[0]) + newSystemMasses[multiple_idx] += compMasses[multiple_idx].sum(axis=1) + newIsMultiple = np.any(~compMasses.mask, axis=1) return compMasses, newSystemMasses, newIsMultiple - - + class IMF_broken_powerlaw(IMF): """ Initialize a multi-part power-law with N parts. 
Each part of the power-law is described with a probability density function: - P(m) \propto m ** power[n] + P(m) ∝ m ** power[n] for mass_limits[n] < m <= mass_limits[n+1]. Parameters ---------- mass_limits : numpy array - Array of length (N + 1) with lower and upper limits of + Array of length (N + 1) with lower and upper limits of the power-law segments. powers : numpy array @@ -276,28 +301,23 @@ class IMF_broken_powerlaw(IMF): power-law segment. multiplicity : Multiplicity object or None - If None, no multiplicity is assumed. Otherwise, use + If None, no multiplicity is assumed. Otherwise, use multiplicity object to create multiple star systems. """ - def __init__(self, mass_limits, powers, multiplicity=None): + def __init__(self, mass_limits, powers, multiplicity=None, seed=None): + super().__init__(massLimits=mass_limits, multiplicity=multiplicity, seed=seed) + powers = np.atleast_1d(powers) if len(mass_limits) != len(powers) + 1: msg = 'Incorrect specification of multi-part powerlaw.\n' msg += ' len(massLimts) != len(powers)+1\n' - msg += ' len(massLimits) = \n' + len(massLimits) - msg += ' len(powers) = \n' + len(powers) + msg += ' len(massLimits) = \n' + str(len(mass_limits)) + msg += ' len(powers) = \n' + str(len(powers)) - raise RuntimeException(msg) - - self._mass_limits = np.atleast_1d(mass_limits) + raise RuntimeError(msg) + mass_limits = np.atleast_1d(mass_limits) self._m_limits_low = mass_limits[0:-1] self._m_limits_high = mass_limits[1:] - self._powers = powers - self._multi_props = multiplicity - - if multiplicity == None: - self.make_multiples = False - else: - self.make_multiples = True + self._powers = np.atleast_1d(powers) # Calculate the coeffs to make the function continuous nterms = len(self._powers) @@ -326,7 +346,7 @@ def xi(self, m): xi - probability of measuring that mass. """ returnFloat = type(m) == float - + m = np.atleast_1d(m) # Temporary arrays @@ -343,7 +363,7 @@ def xi(self, m): # Maybe we are all done? 
if len(idx) == 0: break - + m_tmp = m[idx] aux_tmp = aux[idx] @@ -356,7 +376,7 @@ def xi(self, m): z *= delta(m - self._m_limits_high[i]) xi = self.k * z * y - + if returnFloat: return xi[0] else: @@ -384,7 +404,7 @@ def m_xi(self, m): # Maybe we are all done? if len(idx) == 0: break - + m_tmp = m[idx] aux_tmp = aux[idx] @@ -397,7 +417,7 @@ def m_xi(self, m): z *= delta(m - self._m_limits_high[i]) mxi = self.k * z * y - + if returnFloat: return mxi[0] else: @@ -405,23 +425,23 @@ def m_xi(self, m): def getProbabilityBetween(self, massLo, massHi): - """Return the integrated probability between some low and high + """Return the integrated probability between some low and high mass value. """ return self.int_xi(massLo, massHi) def int_xi(self, massLo, massHi): - """Return the integrated probability between some low and high + """Return the integrated probability between some low and high mass value. """ return self.prim_xi(massHi) - self.prim_xi(massLo) - + def getMassBetween(self, massLo, massHi): - """Return the integrated mass between some low and high + """Return the integrated mass between some low and high mass value. """ return self.int_mxi(massLo, massHi) - + def int_mxi(self, massLo, massHi): """Return the integrated total mass between some low and high stellar mass value. Be sure to normalize the IMF instance beforehand. 
@@ -443,7 +463,7 @@ def prim_xi(self, a): t3 = prim_power(self._m_limits_low, self._powers) y1 = (t1 * (t2 - t3)).sum() - t1 = gamma_closed(a[i], self._m_limits_low, self._m_limits_high) + t1 = gamma_closed(a[i], self._m_limits_low, self._m_limits_high) t1 *= self.coeffs t2 = prim_power(a[i], self._powers) t3 = prim_power(self._m_limits_low, self._powers) @@ -461,7 +481,7 @@ def prim_mxi(self, a): Helper function """ returnFloat = type(a) == float - + a = np.atleast_1d(a) val = np.zeros(len(a), dtype=float) @@ -470,8 +490,8 @@ def prim_mxi(self, a): t2 = prim_power(self._m_limits_high, self._powers+1) t3 = prim_power(self._m_limits_low, self._powers+1) y1 = (t1 * (t2 - t3)).sum() - - t1 = gamma_closed(a[i], self._m_limits_low, self._m_limits_high) + + t1 = gamma_closed(a[i], self._m_limits_low, self._m_limits_high) t1 *= self.coeffs t2 = prim_power(a[i], self._powers+1) t3 = prim_power(self._m_limits_low, self._powers+1) @@ -491,7 +511,7 @@ def normalize(self, Mcl, Mmin=None, Mmax=None): """ self.k = 1.0 self.Mcl = Mcl - + if Mmax == None: Mmax = self._m_limits_high[-1] @@ -501,7 +521,7 @@ def normalize(self, Mcl, Mmin=None, Mmax=None): if Mmax > Mcl: Mmax = Mcl - + if Mmax > self._m_limits_high[-1]: Mmax = self._m_limits_high[-1] @@ -510,7 +530,7 @@ def normalize(self, Mcl, Mmin=None, Mmax=None): self.norm_Mmin = Mmin self.norm_Mmax = Mmax - + self.k = Mcl / self.int_mxi(self.norm_Mmin, self.norm_Mmax) self.lamda = self.int_xi_cl(self._m_limits_low[0], self._mass_limits) @@ -529,7 +549,7 @@ def norm_cl_wk04(self, Mcl, Mmax=None, Mmin=None): if Mmax > Mcl: Mmax = Mcl - + if Mmax > self._m_limits_high[-1]: Mmax = self._m_limits_high[-1] @@ -597,21 +617,21 @@ def dice_star_cl(self, r): returnFloat = type(r) == float r = np.atleast_1d(r) # Make sure it is an array - x = r * self.lamda[-1] - y = np.zeros(len(r), dtype=float) - z = np.ones(len(r), dtype=float) + x = r * self.lamda[-1] + y = np.zeros_like(r) + z = np.ones_like(r) # Loop through the different parts of the 
power law. for i in range(self.nterms): #-----For i = 0 --> n, where n is the number of intervals aux = x - self.lamda[i] #---Should this be i - 1? - + # Only continue for those entries that are in later segments - idx = np.where(aux >= 0)[0] + idx = aux >= 0 # Maybe we are all done? - if len(idx) == 0: + if sum(idx) == 0: break - + x_tmp = x[idx] aux_tmp = aux[idx] @@ -694,7 +714,7 @@ def __init__(self, multiplicity=None): multiplicity=multiplicity) ################################################## -# +# # Generic functions -- see if we can move these up. # ################################################## @@ -714,11 +734,11 @@ def prim_power(m, power): power = np.repeat(power, len(m)) z = 1.0 + power - val = (m**z) / z - - val[power == -1] = np.log(m[power == -1]) + val = np.empty_like(m) + valid_idx = power != -1 + val[valid_idx] = (m[valid_idx]**z[valid_idx]) / z[valid_idx] + val[~valid_idx] = np.log(m[~valid_idx]) - if returnFloat: return val[0] else: @@ -726,7 +746,7 @@ def prim_power(m, power): def inv_prim_power(x, power): """ - returns ((1+power) * x)**(1.0 / (1 + power)) and handles the case + returns ((1+power) * x)**(1.0 / (1 + power)) and handles the case when power == -1. 
""" returnFloat = (type(x) == float) and (type(power) == float) @@ -740,22 +760,19 @@ def inv_prim_power(x, power): power = np.repeat(power, len(x)) if x.shape != power.shape: - pdb.set_trace() - + raise ValueError('spisea.imf.inv_prim_power: Dimension mismatch, x and power must have the same shape') + z = 1.0 + power - val = (z * x)**(1.0 / z) - - #--------------BUG CHECK---------------------# - # This line doesn't make sense if x is an N-element array and - # power is just a 1-element array, which it appears to be for - # imf.generate_cluster - val[power == -1] = np.exp(x[power == -1]) - #-----------------------------------------------# + val = np.empty_like(x) + valid_idx = power != -1 + val[valid_idx] = (z[valid_idx] * x[valid_idx])**(1.0 / z[valid_idx]) + val[~valid_idx] = np.exp(x[~valid_idx]) + if returnFloat: return val[0] else: return val - + def log_normal(m, mean_logm, sigma_logm): returnFloat = (type(m) == float) and (type(mean_logm) == float) and \ @@ -767,7 +784,7 @@ def log_normal(m, mean_logm, sigma_logm): z = np.log10(m) - mean_logm val = np.exp(-z**2 / (2.0 * sigma_logm**2)) / m - + if returnFloat: return val[0] else: @@ -783,7 +800,7 @@ def prim_log_normal(m, mean_logm, sigma_logm): mu = (np.log10(m) - mean_logm) / (1.4142135623731 * sigma_logm) val = 2.88586244942136 * sigma_logm * error(mu) - + if returnFloat: return val[0] else: @@ -796,10 +813,10 @@ def inv_prim_log_normal(x, mean_logm, sigma_logm): m = np.atleast_1d(m) mean_logm = np.atleat_1d(mean_logm) sigma_logm = np.atleat_1d(sigma_logm) - + mu = inv_error(0.346516861952484 * x / sigma_logm) val = 10.0**(1.4142135623731 * sigma_logm * mu + mean_logm) - + if returnFloat: return val[0] else: @@ -815,7 +832,7 @@ def mlog_normal(x, mean_logm, sigma_logm): z = np.log10(m) - mean_logm val = np.exp(-z**2 / (2.0 * sigma_logm**2)) - + if returnFloat: return val[0] else: @@ -836,12 +853,12 @@ def prim_mlog_normal(x, mean_logm, sigma_logm): val = error(eta) val *= 2.88586244942136 * sigma_logm * 
np.exp(2.30258509299405 * t1) - + if returnFloat: return val[0] else: return val - + def theta_closed(x): """ @@ -891,7 +908,7 @@ def delta(x): def gamma_closed(m, left, right): """ - + """ return theta_closed(m - left) * theta_closed(right - m) @@ -899,7 +916,7 @@ def gamma_closed(m, left, right): def error(x): x2 = x**2 ax2 = 0.140012288686666 * x2 - + val = np.sqrt(1.0 - np.exp(-x2*(1.27323954473516+ax2)/(1+ax2))) if x >=0: @@ -912,11 +929,10 @@ def inv_error(x): lnx2 = np.log(1.0 - x2) aux = 4.54688497944829 + (lnx2 / 2.0) y = -aux + np.sqrt(aux**2 - (lnx2 / 0.140012288686666)) - + val = np.sqrt(y) if x>=0: return y else: return -y - diff --git a/spisea/imf/multiplicity.py b/spisea/imf/multiplicity.py index 0d28ced6..685d5917 100755 --- a/spisea/imf/multiplicity.py +++ b/spisea/imf/multiplicity.py @@ -1,6 +1,7 @@ import numpy as np import astropy.modeling from random import choice +from scipy.stats import truncnorm defaultMF_amp = 0.44 defaultMF_power = 0.51 @@ -249,29 +250,34 @@ def log_semimajoraxis(self, mass): Parameters ---------- - mass : float - Mass of primary star + mass : array-like + Mass array of primary star Returns ------- - log_semimajoraxis : float + log_semimajoraxis : array-like Log of the semimajor axis/separation between the stars in units of AU """ a_mean_func = astropy.modeling.powerlaws.BrokenPowerLaw1D(amplitude=self.a_amp, x_break=self.a_break, alpha_1=self.a_slope1, alpha_2=self.a_slope2) log_a_mean = np.log10(a_mean_func(mass)) #mean log(a) log_a_std_func = astropy.modeling.models.Linear1D(slope=self.a_std_slope, intercept=self.a_std_intercept) log_a_std = log_a_std_func(np.log10(mass)) #sigma_log(a) - if mass >= 2.9: - log_a_std = log_a_std_func(np.log10(2.9)) #sigma_log(a) - if log_a_std < 0.1: - log_a_std = 0.1 - - log_semimajoraxis = np.random.normal(log_a_mean, log_a_std) - while 10**log_semimajoraxis > 2000 or log_semimajoraxis < -2: #AU - log_semimajoraxis = np.random.normal(log_a_mean, log_a_std) - + + large_mass_idx = mass >= 
2.9 + log_a_std[large_mass_idx] = log_a_std_func(np.log10(2.9)) #sigma_log(a) + log_a_std = np.clip(log_a_std, 0.1, None) + + # Trunc normal distribution between -2 and 2000 AU + log_a_lower = np.log10(0.01) + log_a_upper = np.log10(2000) + + # Convert bounds to standard normal space + a_lower_std = (log_a_lower - log_a_mean) / log_a_std + a_upper_std = (log_a_upper - log_a_mean) / log_a_std + + log_semimajoraxis = truncnorm.rvs(a_lower_std, a_upper_std, loc=log_a_mean, scale=log_a_std) return log_semimajoraxis - + def random_e(self, x): """ Generate random eccentricity from the inverse of the CDF where the PDF is f(e) = 2e from Duchene and Kraus 2013 diff --git a/spisea/imf/tests/__init__.py b/spisea/imf/tests/__init__.py deleted file mode 100755 index 8b137891..00000000 --- a/spisea/imf/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/spisea/reddening.py b/spisea/reddening.py index 2754719b..0d25e5ae 100755 --- a/spisea/reddening.py +++ b/spisea/reddening.py @@ -24,12 +24,12 @@ def get_red_law(str): ---------- str: str Reddening law name and additional params (comma-separated). - Name must match + Name must match """ # Parse the string, extracting redlaw name and other params tmp = str.split(',') name = tmp[0] - + # How we split this up changes for the broken power law EL # versus the other ELs (since we have arrays for broken power law EL) if name == 'broken_pl': @@ -86,12 +86,12 @@ def get_red_law(str): class RedLawNishiyama09(pysynphot.reddening.CustomRedLaw): """ - The extinction law towards the Galactic Center - from `Nishiyama et al. 2009 + The extinction law towards the Galactic Center + from `Nishiyama et al. 2009 `_, - combined with the Av / AKs value from `Nishiyama et al. 2008 - `_. - This law is defined between 0.5 - 8.0 microns. + combined with the Av / AKs value from `Nishiyama et al. 2008 + `_. + This law is defined between 0.5 - 8.0 microns. 
This law is constructed in 3 segments: @@ -107,7 +107,7 @@ class RedLawNishiyama09(pysynphot.reddening.CustomRedLaw): def __init__(self): # Fetch the extinction curve, pre-interpolate across 3-8 microns wave = np.arange(0.5, 8.0, 0.001) - + # This will eventually be scaled by AKs when you # call reddening(). Right now, calc for AKs=1 wave_vals, Alambda_scaled = RedLawNishiyama09._derive_nishiyama09(wave) @@ -115,7 +115,7 @@ def __init__(self): # Convert wavelength to angstrom wave_vals *= 10 ** 4 - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave_vals, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave_vals, waveunits='angstrom', Avscaled=Alambda_scaled, name='Nishiyama09', @@ -128,10 +128,10 @@ def __init__(self): # Other info self.scale_lambda = 2.14 self.name = 'N09' - + @staticmethod def _derive_nishiyama09(wavelength): - """ + """ Calculate the N09 extinction law as defined in the paper: a A_lambda/AKs = power law of exponent -2.0 between JHK. Then use a *linear* interpolation in 1/lambda space to go from J to the V-band observation, @@ -148,7 +148,7 @@ def _derive_nishiyama09(wavelength): #-----Define power law extinction law between JHK----# jhk_idx = np.where( (wavelength >= 1.25) & (wavelength <= 2.14) ) #jhk_idx = np.where( (wavelength >= 1.25) & (wavelength <= 2.3) ) - + alpha = 2.0 wave_jhk = wavelength[jhk_idx] idx_scale = np.where(abs(wave_jhk - 2.14) == min(abs(wave_jhk - 2.14)) ) @@ -177,10 +177,10 @@ def _derive_nishiyama09(wavelength): #wave = np.array([0.551, 1.25, 1.63, 2.14, wave_jhk[-1], 3.545, 4.442, 5.675, 7.760]) #A_AKs = np.array([16.13, 3.02, 1.73, 1.00, A_Ks_jhk[-1], 0.500, 0.390, 0.360, 0.430]) #interp_idx = np.where(wave > 2.2) - + spline_interp = interpolate.splrep(wave[interp_idx], A_AKs[interp_idx], k=3, s=0) A_AKs_long = interpolate.splev(wavelength[long_idx], spline_interp) - + # Stitch together sections for the final law wave_vals = np.concatenate((wavelength[jv_idx[0]], wavelength[jhk_idx[0]])) A_AKs_vjhk = 
np.concatenate((A_Ks_jv, A_Ks_jhk)) @@ -192,8 +192,8 @@ def _derive_nishiyama09(wavelength): return wave_vals, A_AKs_final def Nishiyama09(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and an overall `AKs` value. Parameters @@ -213,7 +213,7 @@ def Nishiyama09(self, wavelength, AKs): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -253,7 +253,7 @@ def plot_Nishiyama09(self): py.errorbar(wave_obs, A_AKs, yerr=A_AKs_err, fmt='k.', ms=10, label='Measured') py.xlabel('Wavelength (microns)') - py.ylabel('Extinction (A$_{\lambda}$)') + py.ylabel(r'Extinction (A$_{\lambda}$)') py.title('Nishiyama+09 EL') py.gca().set_xscale('log') py.gca().set_yscale('log') @@ -261,33 +261,33 @@ def plot_Nishiyama09(self): py.savefig('nishiyama09_el.png') return - + class RedLawCardelli(pysynphot.reddening.CustomRedLaw): - """ - Defines the extinction law from - `Cardelli et al. 1989 `_. + r""" + Defines the extinction law from + `Cardelli et al. 1989 `_. The law is defined from 0.3 - 3 microns, and in terms of :math:`A_{\lambda} / A_{Ks}`, where Ks is 2.174 microns. Parameters ---------- Rv : float - Ratio of absolute to selective extinction, :math:`A(V) / E(B-V)`. + Ratio of absolute to selective extinction, :math:`A(V) / E(B-V)`. The standard value for the diffuse ISM is 3.1. """ def __init__(self, Rv): # Fetch the extinction curve, pre-interpolate across 0.3-3 microns wave = np.arange(0.3, 3.0, 0.001) - + # This will eventually be scaled by AKs when you - # call reddening(). Produces A_lambda for AKs = 1, which will be + # call reddening(). Produces A_lambda for AKs = 1, which will be # scaled later. 
Expects wavelength in microns Alambda_scaled = RedLawCardelli._derive_cardelli(wave, Rv) # Convert wavelength to angstrom wave *= 10 ** 4 - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, waveunits='angstrom', Avscaled=Alambda_scaled, name='Cardelli89', @@ -298,7 +298,7 @@ def __init__(self, Rv): self.high_lim = max(wave) # other info - self.scale_lambda = 2.174 + self.scale_lambda = 2.174 self.name = 'C89,{0}'.format(Rv) @staticmethod @@ -317,7 +317,7 @@ def _derive_cardelli(wavelength, Rv): if (np.max(x) > 8.0): print( 'wavelength is shorter than applicable range for Cardelli law') return None - + # Set up some arrays for coefficients that we will need a = np.zeros(len(x), dtype=float) b = np.zeros(len(x), dtype=float) @@ -366,14 +366,14 @@ def _derive_cardelli(wavelength, Rv): k_ind = np.where(abs(x-0.46) == min(abs(x-0.46))) Aks_Av = a[k_ind] + b[k_ind]/Rv # Aks / Av Av_Aks = 1.0 / Aks_Av # Av / Aks - + output = extinction * Av_Aks # (A(lamb) / Av) * (Av / Aks) = (A(lamb) / Aks) return output def Cardelli89(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and an overall `AKs` value. Parameters @@ -393,7 +393,7 @@ def Cardelli89(self, wavelength, AKs): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -409,12 +409,12 @@ def Cardelli89(self, wavelength, AKs): A_at_wave = np.array(A_AKs_at_wave) * AKs return A_at_wave - + class RedLawRomanZuniga07(pysynphot.reddening.CustomRedLaw): """ Defines extinction law from `Roman-Zuniga et al. 2007 `_ - for the dense cloud core Barnard 59. 
The law is a cubic spline fit + for the dense cloud core Barnard 59. The law is a cubic spline fit to the values of A_lambda / A_Ks derived using the color-color diagrams slopes in their Table 1. It is defined between 1.0 - 8.0 microns. @@ -424,7 +424,7 @@ class RedLawRomanZuniga07(pysynphot.reddening.CustomRedLaw): def __init__(self): # Fetch the extinction curve, pre-interpolate across 1-8 microns wave = np.arange(1.0, 8.0, 0.01) - + # This will eventually be scaled by AKs when you # call reddening(). Right now, calc for AKs=1 Alambda_scaled = RedLawRomanZuniga07._derive_romanzuniga07(wave) @@ -432,7 +432,7 @@ def __init__(self): # Convert wavelength to angstrom wave *= 10**4 - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, waveunits='angstrom', Avscaled=Alambda_scaled, name='RomanZuniga07', @@ -452,7 +452,7 @@ def _derive_romanzuniga07(wavelength): wave = np.array([1.240, 1.664, 2.164, 3.545, 4.442, 5.675, 7.760]) A_AKs = np.array([2.299, 1.550, 1.000, 0.618, 0.525, 0.462, 0.455]) A_AKs_err = np.array([0.530, 0.080, 0.000, 0.077, 0.063, 0.055, 0.059]) - + # Interpolate over the curve spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0) A_AKs_at_wave = interpolate.splev(wavelength, spline_interp) @@ -460,8 +460,8 @@ def _derive_romanzuniga07(wavelength): return A_AKs_at_wave def RomanZuniga07(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and an overall `AKs` value. 
Parameters @@ -481,7 +481,7 @@ def RomanZuniga07(self, wavelength, AKs): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -519,7 +519,7 @@ def plot_RomanZuniga07(self): py.errorbar(wave_obs, A_AKs, yerr=A_AKs_err, fmt='k.', ms=10, label='Measured') py.xlabel('Wavelength (microns)') - py.ylabel('Extinction (A$_{\lambda}$)') + py.ylabel(r'Extinction (A$_{\lambda}$)') py.title('Roman-Zuniga+07 EL') py.gca().set_xscale('log') py.gca().set_yscale('log') @@ -538,7 +538,7 @@ class RedLawRiekeLebofsky(pysynphot.reddening.CustomRedLaw): def __init__(self): # Define the wavelength range of the extinction law wave = np.arange(1.0, 5.0, 0.001) - + # This will eventually be scaled by AKs when you # call reddening(). Right now, calc for AKs=1 Alambda_scaled = RedLawRiekeLebofsky._derive_RiekeLebofsky(wave) @@ -546,7 +546,7 @@ def __init__(self): # Convert wavelength to angstrom wave *= 10 ** 4 - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, waveunits='angstrom', Avscaled=Alambda_scaled, name='RiekeLebofsky', @@ -569,17 +569,17 @@ def _derive_RiekeLebofsky(wavelength): Data pulled from Rieke+Lebofsky 1985, Table 3. Note that their Table contains values from 0.3 - 13 microns, but only 1 - 5 microns - is measured directly. + is measured directly. Wavelengths for filters I - M are from Rieke+89, table 4. 
""" # Arrays with the values from the paper - filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'L', 'M', - '[8.0]', '[8.5]', '[9.0]', '[9.5]', '[10.0]', '[10.5]', + filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'L', 'M', + '[8.0]', '[8.5]', '[9.0]', '[9.5]', '[10.0]', '[10.5]', '[11.0]', '[11.5]', '[12.0]', '[12.5]', '[13.0]'] - wave = np.array([0.365, 0.445, 0.551, 0.658, 0.9, 1.25, 1.60, 2.2, + wave = np.array([0.365, 0.445, 0.551, 0.658, 0.9, 1.25, 1.60, 2.2, 3.50, 4.8, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, - 11.5, 12.0, 12.5, 13.0]) + 11.5, 12.0, 12.5, 13.0]) A_Av = np.array([1.531, 1.324, 1.00, 0.748, 0.482, 0.282, 0.175, 0.112, 0.058, 0.023, 0.02, 0.043, 0.074, 0.087, 0.083, 0.074, 0.060, 0.047, 0.037, 0.030, 0.027]) @@ -597,7 +597,7 @@ def _derive_RiekeLebofsky(wavelength): wave_interp = wave[idx1:idx2+1] A_Ak_interp = A_Ak[idx1:idx2+1] assert len(wave_interp) == 6 - + # Interpolate over the curve over desired wavelength range spline_interp = interpolate.splrep(wave_interp, A_Ak_interp, k=3, s=0) A_Ak_at_wave = interpolate.splev(wavelength, spline_interp) @@ -605,8 +605,8 @@ def _derive_RiekeLebofsky(wavelength): return A_Ak_at_wave def RiekeLebofsky85(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and an overall `AKs` value. Parameters @@ -626,7 +626,7 @@ def RiekeLebofsky85(self, wavelength, AKs): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -657,12 +657,12 @@ def plot_RiekeLebofsky85(self): # Get the observed values from their Table 3. Note only JHKLM is # measured directly by RL85, other values come from elsewhere. 
# Wavelengths are from Rieke+89, Table 4 - filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'L', 'M', - '[8.0]', '[8.5]', '[9.0]', '[9.5]', '[10.0]', '[10.5]', + filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'L', 'M', + '[8.0]', '[8.5]', '[9.0]', '[9.5]', '[10.0]', '[10.5]', '[11.0]', '[11.5]', '[12.0]', '[12.5]', '[13.0]'] - wave_obs = np.array([0.365, 0.445, 0.551, 0.658, 0.9, 1.25, 1.60, 2.2, + wave_obs = np.array([0.365, 0.445, 0.551, 0.658, 0.9, 1.25, 1.60, 2.2, 3.50, 4.8, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, - 11.5, 12.0, 12.5, 13.0]) + 11.5, 12.0, 12.5, 13.0]) A_Av = np.array([1.531, 1.324, 1.00, 0.748, 0.482, 0.282, 0.175, 0.112, 0.058, 0.023, 0.02, 0.043, 0.074, 0.087, 0.083, 0.074, 0.060, 0.047, 0.037, 0.030, 0.027]) @@ -687,7 +687,7 @@ def plot_RiekeLebofsky85(self): py.plot(wave_obs_f, A_Ak_f, 'k.', ms=10, label='Measured') py.xlabel('Wavelength (microns)') - py.ylabel('Extinction (A$_{\lambda}$)') + py.ylabel(r'Extinction (A$_{\lambda}$)') py.title('Rieke+Lebofsky+85 EL') py.gca().set_xscale('log') py.gca().set_yscale('log') @@ -708,7 +708,7 @@ class RedLawDamineli16(pysynphot.reddening.CustomRedLaw): def __init__(self): # Fetch the extinction curve, pre-interpolate across 0.4-4.8 microns wave = np.arange(0.4, 4.8, 0.001) - + # This will eventually be scaled by AKs when you # call reddening(). Right now, calc for AKs=1 Alambda_scaled = RedLawDamineli16._derive_Damineli16(wave) @@ -716,7 +716,7 @@ def __init__(self): # Convert wavelength to angstrom wave *= 10 ** 4 - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, waveunits='angstrom', Avscaled=Alambda_scaled, name='Damineli16', @@ -730,7 +730,7 @@ def __init__(self): self.name = 'D16' return - + @staticmethod def _derive_Damineli16(wavelength): """ @@ -748,13 +748,13 @@ def _derive_Damineli16(wavelength): log_A_AKs = -0.015 + 2.33*x + 0.522*x**2. - 3.001*x**3. + 2.034*x**4. 
# Now to convert this back to linear space - A_AKs_at_wave = 10**log_A_AKs + A_AKs_at_wave = 10**log_A_AKs return A_AKs_at_wave def Damineli16(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and an overall `AKs` value. Parameters @@ -774,7 +774,7 @@ def Damineli16(self, wavelength, AKs): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -814,14 +814,14 @@ def plot_Damineli16(self): py.errorbar(wave_obs, A_AKs, fmt='k.', ms=10, label='Measured') py.xlabel('Wavelength (microns)') - py.ylabel('Extinction (A$_{\lambda}$)') + py.ylabel(r'Extinction (A$_{\lambda}$)') py.title('Damineli+16 EL') py.gca().set_xscale('log') py.gca().set_yscale('log') py.legend() py.savefig('damineli16_el.png') return - + class RedLawDeMarchi16(pysynphot.reddening.CustomRedLaw): """ Defines extinction law from `De Marchi et al. 2016 @@ -831,7 +831,7 @@ class RedLawDeMarchi16(pysynphot.reddening.CustomRedLaw): def __init__(self): # Fetch the extinction curve, pre-interpolate across 1-8 microns wave = np.arange(0.3, 8.0, 0.001) - + # This will eventually be scaled by AK when you # call reddening(). 
Right now, calc for AKs=1 Alambda_scaled = RedLawDeMarchi16._derive_DeMarchi16(wave) @@ -839,7 +839,7 @@ def __init__(self): # Convert wavelength to angstrom wave *= 10 ** 4 - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, waveunits='angstrom', Avscaled=Alambda_scaled, name='DeMarchi16', @@ -891,8 +891,8 @@ def _derive_DeMarchi16(wavelength): return A_AK_at_wave def DeMarchi16(self, wavelength, AK): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and an overall `AKs` value. Parameters @@ -912,7 +912,7 @@ def DeMarchi16(self, wavelength, AK): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -928,32 +928,32 @@ def DeMarchi16(self, wavelength, AK): A_at_wave = np.array(A_AKs_at_wave) * AK return A_at_wave - + class RedLawFitzpatrick09(pysynphot.reddening.CustomRedLaw): """ - Defines the extinction law from + Defines the extinction law from `Fitzpatrick et al. 2009 `_. The law is defined between 0.5 -- 3 microns. The extinction law is as defined in their equation 5, and has two free parameters: :math:`\alpha` and R(V). Averaged over 14 sight-lines, - the authors generally find either :math:`alpha` ~ 2.5, R(V) ~ 3, or - :math:`alpha` ~ 1.8, R(V) ~ 5 (their Figure 6). + the authors generally find either :math:`alpha` ~ 2.5, R(V) ~ 3, or + :math:`alpha` ~ 1.8, R(V) ~ 5 (their Figure 6). A_lambda / A_K = 1 at lambda = 2.18 Parameters ---------- alpha : float - alpha parameter for extinction law. + alpha parameter for extinction law. RV : float - R(V) parameter for extinction law. + R(V) parameter for extinction law. 
""" def __init__(self, alpha, RV): # Fetch the extinction curve, pre-interpolate across 1-8 microns wave = np.arange(0.5, 3.0, 0.001) - + # This will eventually be scaled by AK when you # call reddening(). Right now, calc for AKs=1 Alambda_scaled = RedLawFitzpatrick09._derive_Fitzpatrick09(wave, alpha, RV) @@ -961,7 +961,7 @@ def __init__(self, alpha, RV): # Convert wavelength to angstrom wave *= 10 ** 4 - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, waveunits='angstrom', Avscaled=Alambda_scaled, name='Fitzpatrick09', @@ -995,14 +995,14 @@ def _derive_Fitzpatrick09(wavelength, alpha, RV): """ alpha = float(alpha) RV = float(RV) - + # First we'll calculate k(lambda - V) = E(lambda - V) / E(B - V), # directly from equation 5 k = (0.349 + 2.087*RV) * (1.0 / (1.0 + (wavelength / 0.507)**alpha)) - RV # We'll calculate Alam/Av from K + Rv - Alam_Av = (k / RV) + 1. - + Alam_Av = (k / RV) + 1. + # Finally, to get A_lambda/Aks we need to divide Alam_Av by AKs_Av. # We'll assume a wavelength of 2.18 for Ks, since it is the wavelength # they report for K-band @@ -1013,8 +1013,8 @@ def _derive_Fitzpatrick09(wavelength, alpha, RV): return A_AKs_at_wave def Fitzpatrick09(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and an overall `AKs` value. 
Parameters @@ -1034,7 +1034,7 @@ def Fitzpatrick09(self, wavelength, AKs): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -1053,7 +1053,7 @@ def Fitzpatrick09(self, wavelength, AKs): class RedLawSchlafly16(pysynphot.reddening.CustomRedLaw): """ - Defines the extinction law from `Schlafly et al. 2016 + Defines the extinction law from `Schlafly et al. 2016 `_. The law is defined between 0.5 - 4.8 microns. @@ -1069,7 +1069,7 @@ class RedLawSchlafly16(pysynphot.reddening.CustomRedLaw): def __init__(self, AH_AKs, x): # Fetch the extinction curve, pre-interpolate across 0.5-4.8 microns wave = np.arange(0.5, 4.8, 0.001) - + # This will eventually be scaled by AK when you # call reddening(). Right now, calc for AKs=1 Alambda_scaled = RedLawSchlafly16._derive_Schlafly16(wave, AH_AKs, x) @@ -1077,7 +1077,7 @@ def __init__(self, AH_AKs, x): # Convert wavelength to angstrom wave *= 10 ** 4 - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, waveunits='angstrom', Avscaled=Alambda_scaled, name='Schlafly16', @@ -1091,7 +1091,7 @@ def __init__(self, AH_AKs, x): @staticmethod def _derive_Schlafly16(wavelength, AH_AKs, x): """ - Calculate Schalfly+16 extinction law according to + Calculate Schalfly+16 extinction law according to code provided in appendix of the paper. 
AH_AKs sets the gray component while x sets the shape of the law in an Rv-like way @@ -1102,20 +1102,20 @@ def _derive_Schlafly16(wavelength, AH_AKs, x): # Evaluate function for desired wavelengths (in angstroms) law = law_func(wavelength*10**4) - + # Now normalize to A_lambda/AKs, rather than A_lambda/A(5420) idx = np.where( abs(wavelength - 2.151) == min(abs(wavelength - 2.151)) ) law_out = law / law[idx] - + return law_out @staticmethod def _Schlafly_appendix(x, rhk): - """ + """ Schlafly+16 extinction law as defined in paper appendix. We've modified - the wrapper slightly so that the user has control of rhk and x. Here is + the wrapper slightly so that the user has control of rhk and x. Here is the comments from that code: - + Returns the extinction curve, A(lambda)/A(5420 A), according to Schlafly+2016, for the parameter "x," which controls the overall shape of the extinction curve in an R(V)-like way. The extinction curve returned @@ -1137,7 +1137,7 @@ def _Schlafly_appendix(x, rhk): lam: anchor wavelengths (angstroms), default to Schlafly+2016 Returns: the extinction curve E, so the extinction alam = A(lam)/A(5420 A) - is given by: + is given by: A = extcurve(x) alam = A(lam) """ @@ -1162,8 +1162,8 @@ def _Schlafly_appendix(x, rhk): return CubicSpline(lam, anchors/cs0(5420.), yp='3d=0') def Schlafly16(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and an overall `AKs` value. 
Parameters @@ -1183,7 +1183,7 @@ def Schlafly16(self, wavelength, AKs): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -1202,7 +1202,7 @@ def Schlafly16(self, wavelength, AKs): class RedLawIndebetouw05(pysynphot.reddening.CustomRedLaw): """ - Defines the extinction law from `Indebetouw et al. 2005 + Defines the extinction law from `Indebetouw et al. 2005 `_. The law is defined between 1.25 - 8 microns using Equation 4 in their paper. @@ -1217,7 +1217,7 @@ def __init__(self): # Convert wavelength to angstrom wave *= 10 ** 4 - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, waveunits='angstrom', Avscaled=Alambda_scaled, name='Indebetouw05', @@ -1243,9 +1243,9 @@ def _derive_Indebetouw05(wave): Alambda_AK = 10**log_Alambda_AK return Alambda_AK - def Indebetouw05(self, wavelength, AK): - """ - Return the extinction at a given wavelength assuming the + def Indebetouw05(self, wavelength, AKs): + """ + Return the extinction at a given wavelength assuming the extinction law and an overall extinction at AK (2.164 microns) Parameters @@ -1265,7 +1265,7 @@ def Indebetouw05(self, wavelength, AK): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -1284,7 +1284,7 @@ def Indebetouw05(self, wavelength, AK): def plot_Indebetouw05(self): """ - Plot Indebetouw+05 extinciton curve versus their + Plot Indebetouw+05 extinciton curve versus their actual measured values (their Table 1). 
This is similar to their Figure 6. @@ -1296,20 +1296,20 @@ def plot_Indebetouw05(self): # Convert wave to microns for plot wave *= 10**-4 - + # Their average measurements across sight lines # from Table 1 wave_arr = [1.240, 1.664, 2.164, 3.545, 4.442, 5.675, 7.760] law_obs_arr = [2.50, 1.55, 1.0, 0.56, 0.43, 0.43, 0.43] law_obs_err_arr = [0.15, 0.08, 0.0, 0.06, 0.08, 0.10, 0.10] - + # Make plot py.figure(figsize=(10,10)) py.plot(wave, law, 'r-', label='EL Function') py.errorbar(wave_arr, law_obs_arr, yerr=law_obs_err_arr, fmt='k.', ms=10, label='Measured') py.xlabel('Wavelength (microns)') - py.ylabel('Extinction (A$_{\lambda}$)') + py.ylabel(r'Extinction (A$_{\lambda}$)') py.title('Indebetouw+05 EL') py.gca().set_xscale('log') py.gca().set_yscale('log') @@ -1317,14 +1317,14 @@ def plot_Indebetouw05(self): py.savefig('indebetouw05_el.png') return - + class RedLawPowerLaw(pysynphot.reddening.CustomRedLaw): - """ - Extinction object that is a power-law extinction law: + r""" + Extinction object that is a power-law extinction law: :math:`A_{\lambda} \propto \lambda^{\alpha}`. - For example, to create an extinction law between - 0.8 and 3 microns where :math:`\alpha = 2.21`, + For example, to create an extinction law between + 0.8 and 3 microns where :math:`\alpha = 2.21`, where :math:`A_{\lambda} / A_{Ks} = 1` at 2.12 microns: >>> from spisea import reddening @@ -1349,7 +1349,7 @@ class RedLawPowerLaw(pysynphot.reddening.CustomRedLaw): def __init__(self, alpha, K_wave, wave_min=0.5, wave_max=5.0): # Fetch the extinction curve, pre-interpolate across wave_min to wave_max wave = np.arange(wave_min, wave_max, 0.001) - + # This will eventually be scaled by AK when you # call reddening(). 
Right now, calc for AKs=1 Alambda_scaled = RedLawPowerLaw._derive_powerlaw(wave, alpha, K_wave) @@ -1357,7 +1357,7 @@ def __init__(self, alpha, K_wave, wave_min=0.5, wave_max=5.0): # Convert wavelength to angstrom wave *= 10 ** 4 - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, waveunits='angstrom', Avscaled=Alambda_scaled, name='Power law') @@ -1379,8 +1379,8 @@ def _derive_powerlaw(wavelength, alpha, K_wave): in microns alpha: float - -1.0 * (power law exponent) - + -1.0 * (power law exponent) + K_wave: float Desired K-band wavelength, in microns """ @@ -1394,8 +1394,8 @@ def _derive_powerlaw(wavelength, alpha, K_wave): return A_AKs_at_wave def powerlaw(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and an overall `AKs` value. Parameters @@ -1415,7 +1415,7 @@ def powerlaw(self, wavelength, AKs): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -1433,23 +1433,23 @@ def powerlaw(self, wavelength, AKs): return A_at_wave class RedLawBrokenPowerLaw(pysynphot.reddening.CustomRedLaw): - """ - Extinction object that is a broken power-law extinction law: + r""" + Extinction object that is a broken power-law extinction law: :math:`A_{\lambda} \propto \lambda^{\alpha[n]}` - for: + for: :math: `\lambda_{limits}[n] < \lambda <= \lambda_{limits}[n+1]` - Note: lambda_limits must be continuous in wavelength and K_wave must be - within one of the section defined by the lambda_limits array. + Note: lambda_limits must be continuous in wavelength and K_wave must be + within one of the section defined by the lambda_limits array. 
Extinction law is only defined over lambda_limits - + Units of lambda_limits array is microns. Parameters ---------- lambda_limits : numpy array - Array of length (N + 1) with lower and upper wavelength limits of + Array of length (N + 1) with lower and upper wavelength limits of the power-law segments. Units are microns. alpha_vals : numpy array @@ -1480,7 +1480,7 @@ def __init__(self, lambda_limits, alpha_vals, K_wave): # Convert wavelength to angstrom wave *= 10 ** 4 - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, waveunits='angstrom', Avscaled=Alambda_scaled, name='Broken Power law') @@ -1502,8 +1502,8 @@ def _derive_broken_powerlaw(wave, lambda_limits, alpha_vals, K_wave): in microns alpha: float - -1.0 * (power law exponent) - + -1.0 * (power law exponent) + K_wave: float Desired K-band wavelength, in microns """ @@ -1529,14 +1529,14 @@ def _derive_broken_powerlaw(wave, lambda_limits, alpha_vals, K_wave): #print('wave_connect = {0}'.format(wave_connect)) #print('alph_num = {0}'.format(alpha_vals[jj])) #print('alpha_den = {0}'.format(alpha_vals[jj+1])) - + coeff *= val - + law[idx] = coeff * (wave[idx]**(-1.0 * alpha)) # Let's make sure we didn't miss updating any parts of the law assert np.sum(np.isnan(law)) == 0 - + # We'll identify K-band as 2.14 microns idx = np.where(abs(wave - K_wave) == min(abs(wave - K_wave))) A_AKs_at_wave = law / law[idx] @@ -1544,8 +1544,8 @@ def _derive_broken_powerlaw(wave, lambda_limits, alpha_vals, K_wave): return A_AKs_at_wave def broken_powerlaw(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and an overall `AKs` value. 
Parameters @@ -1565,7 +1565,7 @@ def broken_powerlaw(self, wavelength, AKs): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -1584,7 +1584,7 @@ def broken_powerlaw(self, wavelength, AKs): class RedLawFritz11(pysynphot.reddening.CustomRedLaw): """ - Defines extinction law from `Fritz et al. 2011 + Defines extinction law from `Fritz et al. 2011 `_ for the Galactic Center. The law is defined from 1.0 -- 26 microns. @@ -1613,12 +1613,12 @@ def __init__(self, scale_lambda=2.166): ext_scale = ext / ext[idx] # Make custom reddening law - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, waveunits='angstrom', Avscaled=ext_scale, name='Fritz11', litref='Fritz+2011') - + # Set the upper/lower wavelength limits of law (in angstroms) self.low_lim = min(wave) self.high_lim = max(wave) @@ -1628,13 +1628,13 @@ def __init__(self, scale_lambda=2.166): self.name = 'F11' return - + @staticmethod def _read_Fritz11(): """ - Return the interpolated extinction curve from Fritz+11, - as defined in their Table 8. - + Return the interpolated extinction curve from Fritz+11, + as defined in their Table 8. 
+ Output: ------ wave: array @@ -1649,7 +1649,7 @@ def _read_Fritz11(): # Read in file with Table 8 info (published with Fritz+11 paper) inpath = os.path.dirname(os.path.abspath(__file__)) infile = os.path.join(inpath, 'el_files', 'fritz11_EL_table8.fits') - + t = Table.read(infile, format='fits') wave = t['lambda'] ext = t['A'] @@ -1661,7 +1661,7 @@ def _read_Fritz11(): def _read_Fritz11_obs(): """ Return the Fritz+11 observed values, from their Table 2 - + Output: ------- wave: array @@ -1703,7 +1703,7 @@ def plot_Fritz11(self): # extinction at 2.166 microns. Remember that this produces # throughput = 10^-0.4*Alambda ext_scaled = self.reddening(2.62) - + # Make plot py.figure(figsize=(10,10)) py.plot(wave, ext, 'r-', label='Interpolated EL') @@ -1713,7 +1713,7 @@ def plot_Fritz11(self): label='Measured') py.plot(ext_scaled.wave*10**-4, np.log10(ext_scaled.throughput)/-0.4, 'b-', label='Scaled EL') py.xlabel('Wavelength (microns)') - py.ylabel('Extinction (A$_{\lambda}$)') + py.ylabel(r'Extinction (A$_{\lambda}$)') py.title('Fritz+11 EL') py.gca().set_xscale('log') py.gca().set_yscale('log') @@ -1723,8 +1723,8 @@ def plot_Fritz11(self): return def Fritz11(self, wavelength, A_scale_lambda): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and a total extinction at the scale_lambda (the wavelength where the extinction law = 1) @@ -1745,7 +1745,7 @@ def Fritz11(self, wavelength, A_scale_lambda): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/A_scale_lambda from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -1768,18 +1768,18 @@ def Fritz11(self, wavelength, A_scale_lambda): #==============================================# #class 
RedLawHosek18(pysynphot.reddening.CustomRedLaw): # """ -# Defines extinction law from `Hosek et al. 2018 +# Defines extinction law from `Hosek et al. 2018 # `_ -# for the Arches Cluster and Wd1. The law is defined between +# for the Arches Cluster and Wd1. The law is defined between # 0.7 - 3.54 microns. # -# WARNING: DEPRECATED! This law has revised to RedLawHosek18b, which +# WARNING: DEPRECATED! This law has revised to RedLawHosek18b, which # should be used instead # """ # def __init__(self): # # Fetch the extinction curve, pre-interpolate across 3-8 microns # wave = np.arange(0.7, 3.545, 0.001) -# +# # # This will eventually be scaled by AKs when you # # call reddening(). Right now, calc for AKs=1 # Alambda_scaled = RedLawHosek18._derive_Hosek18(wave) @@ -1787,7 +1787,7 @@ def Fritz11(self, wavelength, A_scale_lambda): # # Convert wavelength to angstrom # wave *= 10 ** 4 # -# pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, +# pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, # waveunits='angstrom', # Avscaled=Alambda_scaled, # name='Hosek+18', @@ -1797,12 +1797,12 @@ def Fritz11(self, wavelength, A_scale_lambda): # self.low_lim = min(wave) # self.high_lim = max(wave) # self.name = 'H18' -# +# # @staticmethod # def _derive_Hosek18(wavelength): -# """ -# Derive the Hosek+18 extinction law, using the data from Table 4. -# +# """ +# Derive the Hosek+18 extinction law, using the data from Table 4. +# # Calculate the resulting extinction for an array of wavelengths. # The extinction is normalized with A_Ks. 
# @@ -1816,19 +1816,19 @@ def Fritz11(self, wavelength, A_scale_lambda): # # Extinction law definition # wave = np.array([0.8059, 0.962, 1.25, 1.53, 2.14, 3.545]) # A_AKs = np.array([9.66, 6.29, 3.56, 2.33, 1.0, 0.50]) -# +# # # # Following Hosek+18, Interpolate over the curve with cubic spline interpolation # spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0) # A_AKs_at_wave = interpolate.splev(wavelength, spline_interp) # # # This curve already assumes A_Ks = 1.0, so we can go straight to -# # output +# # output # return A_AKs_at_wave # # def Hosek18(self, wavelength, AKs): -# """ -# Return the extinction at a given wavelength assuming the +# """ +# Return the extinction at a given wavelength assuming the # extinction law and an overall `AKs` value. # # Parameters @@ -1848,7 +1848,7 @@ def Fritz11(self, wavelength, A_scale_lambda): # # extinction law # if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): # return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) -# +# # # Extract wave and A/AKs from law, turning wave into micron units # wave = self.wave * (10**-4) # law = self.obscuration @@ -1868,7 +1868,7 @@ def Fritz11(self, wavelength, A_scale_lambda): class RedLawHosek18b(pysynphot.reddening.CustomRedLaw): """ - Defines extinction law from `Hosek et al. 2019 + Defines extinction law from `Hosek et al. 2019 `_ for the Arches cluster and Wd1. The law is derived between 0.7 - 3.54 microns @@ -1876,7 +1876,7 @@ class RedLawHosek18b(pysynphot.reddening.CustomRedLaw): def __init__(self): # Fetch the extinction curve, pre-interpolate across 3-8 microns wave = np.arange(0.7, 3.545, 0.001) - + # This will eventually be scaled by AKs when you # call reddening(). 
Right now, calc for AKs=1 Alambda_scaled = RedLawHosek18b._derive_Hosek18b(wave) @@ -1884,7 +1884,7 @@ def __init__(self): # Convert wavelength to angstrom wave *= 10 ** 4 - pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, + pysynphot.reddening.CustomRedLaw.__init__(self, wave=wave, waveunits='angstrom', Avscaled=Alambda_scaled, name='Hosek+18b', @@ -1894,12 +1894,12 @@ def __init__(self): self.low_lim = min(wave) self.high_lim = max(wave) self.name = 'H18b' - + @staticmethod def _derive_Hosek18b(wavelength): - """ - Derive the Hosek+18 extinction law, using the data from Table 4. - + """ + Derive the Hosek+18 extinction law, using the data from Table 4. + Calculate the resulting extinction for an array of wavelengths. The extinction is normalized with A_Ks. @@ -1913,18 +1913,18 @@ def _derive_Hosek18b(wavelength): # Extinction law definition wave = np.array([0.8059, 0.962, 1.25, 1.53, 2.14, 3.545]) A_AKs = np.array([7.943, 5.715, 3.142, 2.04, 1.0, 0.50]) - + # Following Hosek+18, Interpolate over the curve with cubic spline interpolation spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0) A_AKs_at_wave = interpolate.splev(wavelength, spline_interp) # This curve already assumes A_Ks = 1.0, so we can go straight to - # output + # output return A_AKs_at_wave def Hosek18b(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and an overall `AKs` value. Parameters @@ -1944,7 +1944,7 @@ def Hosek18b(self, wavelength, AKs): # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) - + # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) law = self.obscuration @@ -1967,15 +1967,15 @@ class RedLawSchoedel10(RedLawBrokenPowerLaw): `_ for the Galactic Center. 
It is defined between 1.5 - 3.8 microns. - Power law indices: + Power law indices: * 1.677 - 2.168 microns ---> alpha = 2.21 +/- 0.24 * 2.168 - 3.636 microns ---> alpha = 1.34 +/- 0.29 - Wavelengths come from effective wavelengths of observations (some buffer + Wavelengths come from effective wavelengths of observations (some buffer is added to either side of these values). - - Reddening law is scaled such that A_lambda / A_Ks = 1 at + + Reddening law is scaled such that A_lambda / A_Ks = 1 at lambda = 2.168 microns. """ def __init__(self): @@ -1983,7 +1983,7 @@ def __init__(self): alpha_vals = [1.34, 2.21] K_wave = 2.168 RedLawBrokenPowerLaw.__init__(self, lambda_limits, alpha_vals, K_wave) - + # Set the upper/lower wavelength limits of law (in angstroms) self.low_lim = np.min(lambda_limits)*10**4 self.high_lim = np.max(lambda_limits)*10**4 @@ -1995,8 +1995,8 @@ def __init__(self): return def Schoedel10(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and a total extinction at scale_lambda (the wavelength where the extinction law = 1) @@ -2016,7 +2016,7 @@ def Schoedel10(self, wavelength, AKs): # Return error if any wavelength is beyond interpolation range of # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): - return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) + return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) @@ -2032,21 +2032,21 @@ def Schoedel10(self, wavelength, AKs): # Now multiply by AKs (since law assumes AKs = 1) A_at_wave = np.array(A_AKs_at_wave) * AKs - return A_at_wave + return A_at_wave + - class RedLawNoguerasLara18(RedLawPowerLaw): """ - Defines extinction law from `Nogueras-Lara et al. 
2018 + Defines extinction law from `Nogueras-Lara et al. 2018 `_ for the Galactic Center. It is defined between 1.0 - 3.0 microns. - Measurements were made in JHK, with effective wavelengths + Measurements were made in JHK, with effective wavelengths of 1.2685, 1.6506, and 2.1629 microns, respectively. - This extinction law is a single power law with exponent + This extinction law is a single power law with exponent of alpha = 2.3 +/- 0.08. - Reddening law is scaled such that A_lambda / A_Ks = 1 at + Reddening law is scaled such that A_lambda / A_Ks = 1 at lambda = 2.163 microns (the observed K-band) """ def __init__(self): @@ -2054,7 +2054,7 @@ def __init__(self): wave_max = 3.0 K_wave = 2.163 RedLawPowerLaw.__init__(self, 2.30, K_wave, wave_min=wave_min, wave_max=wave_max) - + # Set the upper/lower wavelength limits of law (in angstroms) self.low_lim = wave_min*10**4 self.high_lim = wave_max*10**4 @@ -2063,8 +2063,8 @@ def __init__(self): self.name = 'NL18' def NoguerasLara18(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and an overall `AKs` value. Parameters @@ -2083,7 +2083,7 @@ def NoguerasLara18(self, wavelength, AKs): # Return error if any wavelength is beyond interpolation range of # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): - return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) + return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) @@ -2106,18 +2106,18 @@ class RedLawNoguerasLara20(RedLawBrokenPowerLaw): Defines extinction law from `Nogueras-Lara et al. 2020 `_ for the Galactic Center. It is defined between 1.0 -- 3 microns. 
- Measurements were made in JHK, with effective wavelengths + Measurements were made in JHK, with effective wavelengths of 1.2685, 1.6506, and 2.1629 microns, respectively - Measured power law indices: - + Measured power law indices: + * 1.2685 - 1.6505 microns ---> alpha = 2.44 +/- 0.05 * 1.6505 - 2.1629 microns ---> alpha = 2.23 +/- 0.05 - Wavelengths come from effective wavelengths of observations (some buffer + Wavelengths come from effective wavelengths of observations (some buffer is added to either side of these values). - - Reddening law is scaled such that A_lambda / A_Ks = 1 at + + Reddening law is scaled such that A_lambda / A_Ks = 1 at lambda = 2.163 microns (the observed K-band) """ def __init__(self): @@ -2125,7 +2125,7 @@ def __init__(self): alpha_vals = [2.44, 2.23] K_wave = 2.163 RedLawBrokenPowerLaw.__init__(self, lambda_limits, alpha_vals, K_wave) - + # Set the upper/lower wavelength limits of law (in angstroms) self.low_lim = np.min(lambda_limits)*10**4 self.high_lim = np.max(lambda_limits)*10**4 @@ -2137,8 +2137,8 @@ def __init__(self): return def NoguerasLara20(self, wavelength, AKs): - """ - Return the extinction at a given wavelength assuming the + """ + Return the extinction at a given wavelength assuming the extinction law and a total extinction at scale_lambda (the wavelength where the extinction law = 1) @@ -2158,7 +2158,7 @@ def NoguerasLara20(self, wavelength, AKs): # Return error if any wavelength is beyond interpolation range of # extinction law if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))): - return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) + return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) # Extract wave and A/AKs from law, turning wave into micron units wave = self.wave * (10**-4) @@ -2174,7 +2174,7 @@ def NoguerasLara20(self, wavelength, AKs): # Now multiply by AKs (since law assumes AKs = 1) A_at_wave = 
np.array(A_AKs_at_wave) * AKs - return A_at_wave + return A_at_wave #---------------------------# # Cubic spline function from Schalfly+16 appendix diff --git a/spisea/synthetic.py b/spisea/synthetic.py index d8f17d87..7cbca17c 100755 --- a/spisea/synthetic.py +++ b/spisea/synthetic.py @@ -1,33 +1,21 @@ +import os +import time +import math +import scipy +import inspect +import warnings import numpy as np import pylab as plt -from spisea import reddening -from spisea import evolution +import matplotlib.pyplot as plt +from spisea import reddening, evolution, filters from spisea import atmospheres as atm -from spisea import filters -from spisea.imf import imf, multiplicity -from scipy import interpolate -from scipy import stats -from scipy.special import erf +from scipy.spatial import cKDTree as KDTree +from spisea.imf import multiplicity from pysynphot import spectrum from pysynphot import ObsBandpass from pysynphot import observation as obs -import pysynphot from astropy import constants, units -from astropy.table import Table, Column, MaskedColumn -import pickle -import time, datetime -import math -import os, glob -import tempfile -import scipy -import matplotlib -import matplotlib.pyplot as plt -import time -import warnings -import pdb -from scipy.spatial import cKDTree as KDTree -import inspect -import astropy.modeling +from astropy.table import Table, Column default_evo_model = evolution.MISTv1() default_red_law = reddening.RedLawNishiyama09() @@ -37,7 +25,7 @@ def Vega(): # Use Vega as our zeropoint... assume V=0.03 mag and all colors = 0.0 # These parameters are defined in Girardi+02 - vega = atm.get_kurucz_atmosphere(temperature=9550, + vega = atm.get_kurucz_atmosphere(temperature=9550, gravity=3.95, metallicity=-0.5) @@ -47,22 +35,39 @@ def Vega(): # This is (R/d)**2 as reported by Girardi et al. 2002, page 198, col 1. # and is used to convert to flux observed at Earth. 
- vega *= 6.247e-17 - + vega *= 6.247e-17 + return vega vega = Vega() +class Interpolator(object): + def __init__(self, xp, yp): + """Wrapper for np.interp to allow for pickling in multiprocessing. + + Parameters + ---------- + xp: array-like + x data points for interpolation + yp: array-like + y data points for interpolation + """ + self.xp = xp + self.yp = yp + + def __call__(self, x): + return np.interp(x, self.xp, self.yp, left=np.nan, right=np.nan) + class Cluster(object): """ Base class to create a cluster with user-specified isochrone, - imf, ifmr, and total mass. + imf, ifmr, and total mass. Parameters ----------- iso: isochrone object SPISEA isochrone object - + imf: imf object SPISEA IMF object @@ -75,9 +80,9 @@ class Cluster(object): no compact remnants are produced. seed: int - If set to non-None, all random sampling will be seeded with the - specified seed, forcing identical output. - Default None + Seed for the random number generator numpy.random.default_rng(seed). + All random functions in the class will use this generator, + unless a different generator is passed in as an argument to the function, by default None. vebose: boolean True for verbose output. @@ -90,17 +95,18 @@ def __init__(self, iso, imf, cluster_mass, ifmr=None, verbose=False, self.ifmr = ifmr self.cluster_mass = cluster_mass self.seed = seed - + self.rng = np.random.default_rng(self.seed) + return - + class ResolvedCluster(Cluster): """ Cluster sub-class that produces a *resolved* stellar cluster. - A table is output with the synthetic photometry and intrinsic - properties of the individual stars (or stellar systems, if + A table is output with the synthetic photometry and intrinsic + properties of the individual stars (or stellar systems, if mutliplicity is used in the IMF object). 
- If multiplicity is used, than a second table is produced that + If multiplicity is used, than a second table is produced that contains the properties of the companion stars independent of their primary stars. @@ -108,31 +114,63 @@ class ResolvedCluster(Cluster): ----------- iso: isochrone object SPISEA isochrone object - + imf: imf object SPISEA IMF object cluster_mass: float Total initial mass of the cluster, in M_sun - ifmr: ifmr object or None + ifmr: ifmr object, optional If ifmr object is defined, will create compact remnants produced by the cluster at the given isochrone age. Otherwise, no compact remnants are produced. + By default None. - keep_low_mass_stars: boolean (default False) - If True, the cluster will not cut out stars below the isochrone grid - on initial mass. They are assigned a current mass equal to their initial + keep_low_mass_stars: boolean, optional + If True, the cluster will not cut out stars below the isochrone grid + on initial mass. They are assigned a current mass equal to their initial mass, a phase of 98, and no other evolutionary properties or photometry. If False, stars below the isochrone initial mass limit are cut out. + By default False. - seed: int - If set to non-None, all random sampling will be seeded with the - specified seed, forcing identical output. - Default None + seed: int, optional + Seed for the random number generator numpy.random.default_rng(seed). + All random functions in the class will use this generator, + unless a different generator is passed in as an argument to the function, by default None. - vebose: boolean + vebose: boolean, optional True for verbose output. + + Attributes + ---------- + star_systems: astropy.table.Table + Table containing the properties of the primary stars (or stellar systems, if multiplicity is used). 
The columns include: + mass: primary mass + isMultiple: boolean for whether the star is in a multiple system + systemMass: total initial mass of the stellar system (primary + companions) + Teff: effective temperature of the star + L: luminosity of the star in L_sun + logg: surface gravity of the star in cgs + isWR: boolean for whether the star is a Wolf-Rayet star + mass_current: current mass of the star + phase: evolutionary phase of the star, as defined by the isochrone model + metallicity: metallicity of the star + filter columns: magnitude of the star in each filter defined by the isochrone model + + companions: astropy.table.Table (only if multiplicity is used in the IMF object) + Table containing the properties of the companion stars. The columns include: + system_idx: index of the stellar system this companion belongs to, which can be used to match to the star_systems table + mass: initial mass of the companion star + Teff: effective temperature of the companion star + L: luminosity of the companion star in L_sun + logg: surface gravity of the companion star in cgs + isWR: boolean for whether the companion star is a Wolf-Rayet star + mass_current: current mass of the companion star + phase: evolutionary phase of the companion star, as defined by the isochrone model + metallicity: metallicity of the companion star + filter columns: magnitude of the companion star in each filter defined by the isochrone model + If multiplicity properties are defined in the IMF object, additional columns for those properties (e.g., log_a, e, i, Omega, omega) are included. 
""" def __init__(self, iso, imf, cluster_mass, ifmr=None, verbose=True, seed=None, keep_low_mass_stars=False): @@ -141,13 +179,15 @@ def __init__(self, iso, imf, cluster_mass, ifmr=None, verbose=True, # Provide a user warning is random seed is set if seed is not None and verbose: print('WARNING: random seed set to %i' % seed) + imf.rng = self.rng - t1 = time.time() - ##### + ##### # Sample the IMF to build up our cluster mass. ##### - mass, isMulti, compMass, sysMass = imf.generate_cluster(cluster_mass, - seed=seed) + # start0 = time.time() + mass, isMulti, compMass, sysMass = imf.generate_cluster(cluster_mass) + # end0 = time.time() + # print('IMF sampling took {0:f} s.'.format(end0 - start0)) # Figure out the filters we will make. self.filt_names = self.set_filter_names() @@ -159,31 +199,46 @@ def __init__(self, iso, imf, cluster_mass, ifmr=None, verbose=True, interp_keys = ['Teff', 'L', 'logg', 'isWR', 'mass_current', 'phase'] + self.filt_names self.iso_interps = {} for ikey in interp_keys: - self.iso_interps[ikey] = interpolate.interp1d(self.iso.points['mass'], self.iso.points[ikey], - kind='linear', bounds_error=False, fill_value=np.nan) - - ##### + # self.iso_interps[ikey] = interpolate.interp1d(self.iso.points['mass'], self.iso.points[ikey], + # kind='linear', bounds_error=False, fill_value=np.nan) + self.iso_interps[ikey] = Interpolator(self.iso.points['mass'], self.iso.points[ikey]) + + ##### # Make a table to contain all the information about each stellar system. ##### + # start1 = time.time() star_systems = self._make_star_systems_table(mass, isMulti, sysMass) - + # end1 = time.time() + # print('Star systems table took {0:f} s.'.format(end1 - start1)) + # Trim out bad systems; specifically, stars with masses outside those provided # by the model isochrone (except for compact objects). 
star_systems, compMass = self._remove_bad_systems(star_systems, compMass, keep_low_mass_stars) - ##### + ##### # Make a table to contain all the information about companions. ##### if self.imf.make_multiples: - companions = self._make_companions_table(star_systems, compMass) - + # start3 = time.time() + star_systems, companions = self._make_companions_table_new(star_systems, compMass) + # end3 = time.time() + # print('Companion table new took {0:f} s.'.format(end3 - start3)) + self.companions = companions + + # compMass = [ + # [value for value, mask in zip(row, row_mask) if not mask] + # for row, row_mask in zip(compMass.data, compMass.mask) + # ] + # start3 = time.time() + # star_systems, companions = self._make_companions_table(star_systems, compMass) + # end3 = time.time() + # print('Companion table took {0:f} s.'.format(end3-start3)) + # self.companions = companions + ##### # Save our arrays to the object ##### self.star_systems = star_systems - - if self.imf.make_multiples: - self.companions = companions return @@ -192,13 +247,13 @@ def set_filter_names(self): Set filter column names """ filt_names = [] - + for col_name in self.iso.points.colnames: if 'm_' in col_name: filt_names.append(col_name) return filt_names - + def _make_star_systems_table(self, mass, isMulti, sysMass): """ Make a star_systems table and get synthetic photometry for each primary star. @@ -207,28 +262,20 @@ def _make_star_systems_table(self, mass, isMulti, sysMass): names=['mass', 'isMultiple', 'systemMass']) N_systems = len(star_systems) - # Add columns for the Teff, L, logg, isWR, mass_current, phase for the primary stars. 
- star_systems.add_column( Column(np.zeros(N_systems, dtype=float), name='Teff') ) - star_systems.add_column( Column(np.empty(N_systems, dtype=float), name='L') ) - star_systems.add_column( Column(np.empty(N_systems, dtype=float), name='logg') ) - star_systems.add_column( Column(np.empty(N_systems, dtype=float), name='isWR') ) - star_systems.add_column( Column(np.empty(N_systems, dtype=float), name='mass_current') ) - star_systems.add_column( Column(np.empty(N_systems, dtype=float), name='phase') ) - star_systems.add_column( Column(np.empty(N_systems, dtype=float), name='metallicity') ) + # Use our pre-built interpolators to fetch values from the isochrone for each star. + for key in ['Teff', 'L', 'logg', 'mass_current']: + star_systems.add_column(Column(self.iso_interps[key](star_systems['mass']), name=key)) + + # Treat out-of-range mass as isWR=True + star_systems.add_column(Column(~(self.iso_interps['isWR'](star_systems['mass']) < 0.5), name='isWR')) + star_systems.add_column(Column(np.round(self.iso_interps['phase'](star_systems['mass'])), name='phase')) + + star_systems['metallicity'] = np.ones(N_systems) * self.iso.metallicity # Add the filter columns to the table. They are empty so far. # Keep track of the filter names in : filt_names for filt in self.filt_names: - star_systems.add_column( Column(np.empty(N_systems, dtype=float), name=filt) ) - - # Use our pre-built interpolators to fetch values from the isochrone for each star. 
- star_systems['Teff'] = self.iso_interps['Teff'](star_systems['mass']) - star_systems['L'] = self.iso_interps['L'](star_systems['mass']) - star_systems['logg'] = self.iso_interps['logg'](star_systems['mass']) - star_systems['isWR'] = np.round(self.iso_interps['isWR'](star_systems['mass'])) - star_systems['mass_current'] = self.iso_interps['mass_current'](star_systems['mass']) - star_systems['phase'] = np.round(self.iso_interps['phase'](star_systems['mass'])) - star_systems['metallicity'] = np.ones(N_systems)*self.iso.metallicity + star_systems.add_column(Column(self.iso_interps[filt](star_systems['mass']), name=filt)) # For a very small fraction of stars, the star phase falls on integers in-between # the ones we have definition for, as a result of the interpolation. For these @@ -238,29 +285,22 @@ def _make_star_systems_table(self, mass, isMulti, sysMass): # effect is so small # Convert nan_to_num to avoid errors on greater than, less than comparisons star_systems_phase_non_nan = np.nan_to_num(star_systems['phase'], nan=-99) - bad = np.where( (star_systems_phase_non_nan > 5) & (star_systems_phase_non_nan < 101) & (star_systems_phase_non_nan != 9) & (star_systems_phase_non_nan != -99)) - # Print warning, if desired - verbose=False - if verbose: - for ii in range(len(bad[0])): - print('WARNING: changing phase {0} to 5'.format(star_systems['phase'][bad[0][ii]])) + bad = np.where( (star_systems_phase_non_nan > 5) & (star_systems_phase_non_nan < 101) & + (star_systems_phase_non_nan != 9) & (star_systems_phase_non_nan != -99)) star_systems['phase'][bad] = 5 - - for filt in self.filt_names: - star_systems[filt] = self.iso_interps[filt](star_systems['mass']) ##### # Make Remnants # Note: Some models already have WDs in them. If they do, then they shouldn't # be handled by this code here (because their Teff > 0). - # + # # Remnants have flux = 0 in all bands if they are generated here. 
- ##### + ##### if self.ifmr != None: # Identify compact objects as those with Teff = 0 or with phase > 100. highest_mass_iso = self.iso.points['mass'].max() idx_rem = np.where((np.isnan(star_systems['Teff'])) & (star_systems['mass'] > highest_mass_iso))[0] - + # Calculate remnant mass and ID for compact objects; update remnant_id and # remnant_mass arrays accordingly if 'metallicity_array' in inspect.getfullargspec(self.ifmr.generate_death_mass).args: @@ -271,7 +311,7 @@ def _make_star_systems_table(self, mass, isMulti, sysMass): # Drop remnants where it is not relevant (e.g. not a compact object or # outside mass range IFMR is defined for) - good = np.where(r_id_tmp > 0) + good = r_id_tmp > 0 idx_rem_good = idx_rem[good] star_systems['mass_current'][idx_rem_good] = r_mass_tmp[good] @@ -282,13 +322,111 @@ def _make_star_systems_table(self, mass, isMulti, sysMass): star_systems[filt][idx_rem_good] = np.full(len(idx_rem_good), np.nan) return star_systems - + + + def _make_companions_table_new(self, star_systems, compMass): + """Make companions table for resolved clusters with multiplicity. + + Parameters + ---------- + star_systems : astropy.table.Table + Table containing the properties of the primary stars. + compMass : numpy.ma.MaskedArray + Masked array containing the masses of the companions. 
+ + Returns + ------- + companions : astropy.table.Table + """ + N_systems = len(star_systems) + N_companions = np.sum(~compMass.mask, axis=1) + N_comp_tot = np.sum(N_companions) + star_systems.add_column(Column(N_companions, name='N_companions')) + system_index = np.repeat(np.arange(N_systems), N_companions) + companions = Table([system_index], names=['system_idx']) + companions.add_column(np.zeros(N_comp_tot, dtype=float), name='mass') + + if isinstance(self.imf._multi_props, multiplicity.MultiplicityResolvedDK): + companions.add_column(Column(self.imf._multi_props.log_semimajoraxis(star_systems['mass'][companions['system_idx']]), name='log_a')) + companions.add_column(Column(self.imf._multi_props.random_e(self.rng.random(N_comp_tot)), name='e')) + companions['i'], companions['Omega'], companions['omega'] = self.imf._multi_props.random_keplarian_parameters( + self.rng.random(N_comp_tot), + self.rng.random(N_comp_tot), + self.rng.random(N_comp_tot) + ) + + companions['mass'] = compMass.compressed() + for key in ['Teff', 'L', 'logg', 'mass_current']: + companions[key] = self.iso_interps[key](companions['mass']) + + for key in ['isWR', 'phase']: + companions[key] = np.round(self.iso_interps[key](companions['mass'])) + + companions['metallicity'] = np.ones(N_comp_tot) * self.iso.metallicity + + # For a very small fraction of stars, the star phase falls on integers in-between + # the ones we have definition for, as a result of the interpolation. For these + # stars, round phase down to nearest defined phase (e.g., if phase is 71, + # then round it down to 5, rather than up to 101). + # Convert nan_to_num to avoid errors on greater than, less than comparisons + companions_phase_non_nan = np.nan_to_num(companions['phase'], nan=-99) + companions['phase'][ + (companions_phase_non_nan > 5) & + (companions_phase_non_nan < 101) & + (companions_phase_non_nan != 9) & + (companions_phase_non_nan != -99) + ] = 5 + + # Update primary fluxes to include the flux of companions. 
+ for filt in self.filt_names: + companions[filt] = self.iso_interps[filt](companions['mass']) + primary_flux = 10**(-star_systems[filt] / 2.5) + # Sum the flux of all companions in each system + companions_flux = np.bincount(companions['system_idx'], weights=10**(-companions[filt] / 2.5), minlength=N_systems) + combined_flux = np.nansum(np.vstack((primary_flux, companions_flux)), axis=0) + combined_flux[combined_flux == 0] = np.nan + star_systems[filt] = -2.5 * np.log10(combined_flux) + + ##### + # Make Remnants with flux = 0 in all bands. + ##### + if self.ifmr: + # Identify compact objects as those with Teff = 0 or with masses above the max iso mass + highest_mass_iso = self.iso.points['mass'].max() + remnant_idx = np.where(np.isnan(companions['Teff']) & (companions['mass'] > highest_mass_iso))[0] + self.remnant_idx_new = remnant_idx + # Calculate remnant mass and ID for compact objects; update remnant_id and remnant_mass arrays accordingly + if 'metallicity_array' in inspect.getfullargspec(self.ifmr.generate_death_mass).args: + remnant_mass, remnant_code = self.ifmr.generate_death_mass(mass_array=companions['mass'][remnant_idx], metallicity_array=companions['metallicity'][remnant_idx]) + else: + remnant_mass, remnant_code = self.ifmr.generate_death_mass(mass_array=companions['mass'][remnant_idx]) + + # Drop remnants where it is not relevant (e.g. not a compact object or outside mass range IFMR is defined for) + remnant_valid = remnant_code > 0 + remnant_valid_idx = remnant_idx[remnant_valid] + self.remnant_mass_new = remnant_mass + self.remnant_valid_idx_new = remnant_valid_idx + companions['mass_current'][remnant_valid_idx] = remnant_mass[remnant_valid] + companions['phase'][remnant_valid_idx] = remnant_code[remnant_valid] + # Give remnants a magnitude of nan, so they can be filtered out later when calculating flux. 
+ for filt in self.filt_names: + companions[filt][remnant_valid_idx] = np.full(len(remnant_idx[remnant_valid]), np.nan) + + companions_teff_non_nan = np.nan_to_num(companions['Teff'], nan=-99) + if self.verbose and sum(companions_teff_non_nan > 0) != N_comp_tot: + print(f'Found {N_comp_tot - sum(companions_teff_non_nan > 0):d} companions out of stellar mass range') + + assert companions['mass'][companions_teff_non_nan > 0].min() > 0, "Companion mass is not positive" + + return star_systems, companions + + def _make_companions_table(self, star_systems, compMass): N_systems = len(star_systems) - + ##### - # MULTIPLICITY + # MULTIPLICITY # Make a second table containing all the companion-star masses. # This table will be much longer... here are the arrays: # sysIndex - the index of the system this star belongs too @@ -312,25 +450,29 @@ def _make_companions_table(self, star_systems, compMass): companions.add_column( Column(np.empty(N_comp_tot, dtype=float), name='metallicity') ) for filt in self.filt_names: companions.add_column( Column(np.empty(N_comp_tot, dtype=float), name=filt) ) - + if isinstance(self.imf._multi_props, multiplicity.MultiplicityResolvedDK): companions.add_column( Column(np.zeros(N_comp_tot, dtype=float), name='log_a') ) companions.add_column( Column(np.zeros(N_comp_tot, dtype=float), name='e') ) companions.add_column( Column(np.zeros(N_comp_tot, dtype=float), name='i', description = 'degrees') ) companions.add_column( Column(np.zeros(N_comp_tot, dtype=float), name='Omega') ) companions.add_column( Column(np.zeros(N_comp_tot, dtype=float), name='omega') ) - + for ii in range(len(companions)): companions['log_a'][ii] = self.imf._multi_props.log_semimajoraxis(star_systems['mass'][companions['system_idx'][ii]]) - - companions['e'] = self.imf._multi_props.random_e(np.random.rand(N_comp_tot)) - companions['i'], companions['Omega'], companions['omega'] = 
self.imf._multi_props.random_keplarian_parameters(np.random.rand(N_comp_tot),np.random.rand(N_comp_tot),np.random.rand(N_comp_tot)) + + companions['e'] = self.imf._multi_props.random_e(self.rng.random(N_comp_tot)) + companions['i'], companions['Omega'], companions['omega'] = self.imf._multi_props.random_keplarian_parameters( + self.rng.random(N_comp_tot), + self.rng.random(N_comp_tot), + self.rng.random(N_comp_tot) + ) # Make an array that maps system index (ii), companion index (cc) to # the place in the 1D companions array. N_comp_max = N_companions.max() - + comp_index = np.zeros((N_systems, N_comp_max), dtype=int) kk = 0 for ii in range(N_systems): @@ -346,9 +488,10 @@ def _make_companions_table(self, star_systems, compMass): idx = np.where(N_companions >= cc)[0] # Get the location in the companions array for each system and - # the cc'th companion. + # the cc'th companion. cdx = comp_index[idx, cc-1] - + + # companions['mass'][cdx] = compMass[idx, cc-1] companions['mass'][cdx] = [compMass[ii][cc-1] for ii in idx] comp_mass = companions['mass'][cdx] @@ -370,12 +513,12 @@ def _make_companions_table(self, star_systems, compMass): bad = np.where( (companions_phase_non_nan > 5) & (companions_phase_non_nan < 101) & (companions_phase_non_nan != 9) & - (companions_phase_non_nan != -99)) + (companions_phase_non_nan != -99))[0] # Print warning, if desired verbose=False if verbose: - for ii in range(len(bad[0])): - print('WARNING: changing phase {0} to 5'.format(companions['phase'][bad[0][ii]])) + for ii in range(len(bad)): + print('WARNING: changing phase {0} to 5'.format(companions['phase'][bad[ii]])) companions['phase'][bad] = 5 for filt in self.filt_names: @@ -395,15 +538,15 @@ def _make_companions_table(self, star_systems, compMass): # If *both* objects are dark, then keep the magnitude # as np.nan. 
Otherwise, add fluxes together - good = np.where( (f1 != 0) | (f2 != 0) ) - bad = np.where( (f1 == 0) & (f2 == 0) ) - + good = np.where( (f1 != 0) | (f2 != 0) )[0] + bad = np.where( (f1 == 0) & (f2 == 0) )[0] + star_systems[filt][idx[good]] = -2.5 * np.log10(f1[good] + f2[good]) star_systems[filt][idx[bad]] = np.nan ##### # Make Remnants with flux = 0 in all bands. - ##### + ##### if self.ifmr != None: # Identify compact objects as those with Teff = 0 or with masses above the max iso mass highest_mass_iso = self.iso.points['mass'].max() @@ -417,16 +560,16 @@ def _make_companions_table(self, star_systems, compMass): metallicity_array=companions['metallicity'][cdx_rem]) else: r_mass_tmp, r_id_tmp = self.ifmr.generate_death_mass(mass_array=companions['mass'][cdx_rem]) - + # Drop remnants where it is not relevant (e.g. not a compact object or # outside mass range IFMR is defined for) - good = np.where(r_id_tmp > 0) + good = np.where(r_id_tmp > 0)[0] cdx_rem_good = cdx_rem[good] companions['mass_current'][cdx_rem_good] = r_mass_tmp[good] companions['phase'][cdx_rem_good] = r_id_tmp[good] - + # Give remnants a magnitude of nan, so they can be filtered out later when calculating flux. for filt in self.filt_names: companions[filt][cdx_rem_good] = np.full(len(cdx_rem_good), np.nan) @@ -445,19 +588,19 @@ def _make_companions_table(self, star_systems, compMass): companions['phase'][low_mass_idxs] = 98 # Double check that everything behaved properly. - if len(idx)>0: - assert companions['mass'][idx].min() > 0 + if len(idx) > 0: + assert companions['mass'][companions_teff_non_nan > 0].min() > 0, "Companion mass is not positive" + + return star_systems, companions - return companions - def _remove_bad_systems(self, star_systems, compMass, keep_low_mass_stars): """ Helper function to remove stars with masses outside the isochrone - mass range from the cluster. These stars are identified by having + mass range from the cluster. 
These stars are identified by having a Teff = 0, as set up by _make_star_systems_table_interp. - If self.ifmr == None, then both high and low-mass bad systems are - removed. If self.ifmr != None, then we will save the high mass systems + If self.ifmr == None, then both high and low-mass bad systems are + removed. If self.ifmr != None, then we will save the high mass systems since they will be plugged into an ifmr later. """ N_systems = len(star_systems) @@ -469,26 +612,35 @@ def _remove_bad_systems(self, star_systems, compMass, keep_low_mass_stars): if (self.ifmr == None) and (not keep_low_mass_stars): print('Remove low mass stars below grid and compact objects') # Keep only those stars with Teff assigned. - idx = np.where(star_systems_teff_non_nan > 0)[0] + idx = star_systems_teff_non_nan > 0 elif not keep_low_mass_stars: print('Remove low mass stars, keep compact objects') - # Keep stars (with Teff) and any other compact objects (with phase info). - idx = np.where( (star_systems_teff_non_nan > 0) | (star_systems_phase_non_nan >= 0) )[0] + # Keep stars (with Teff) and any other compact objects (with phase info). 
+ idx = (star_systems_teff_non_nan > 0) | (star_systems_phase_non_nan >= 0) elif self.ifmr == None: print('Remove compact objects, keep low mass stars below grid') # Keep stars (with Teff) and objects below mass grid - idx = np.where( (star_systems_teff_non_nan > 0) | ((star_systems['mass'] 0) | (star_systems['mass'] < np.min(self.iso.points['mass'])) else: print('Keep low mass stars below grid and compact objects') # Keep all - idx = np.where( (star_systems_teff_non_nan > 0) | (star_systems_phase_non_nan >= 0) | - ((star_systems['mass'] 0) | \ + (star_systems_phase_non_nan >= 0) | \ + (star_systems['mass'] < np.min(self.iso.points['mass'])) - if len(idx) != N_systems and self.verbose: - print( 'Found {0:d} stars out of mass range'.format(N_systems - len(idx))) + n_out_of_range = N_systems - sum(idx) + if self.verbose and n_out_of_range > 0: + print( 'Found {0:d} stars out of mass range'.format(n_out_of_range)) if keep_low_mass_stars: - lm_idx = np.where(star_systems['mass'] 0) evol = evol[idx] @@ -1313,8 +1502,8 @@ def __init__(self, logAge, distance, evo_model=default_evo_model, evol = evol[idx] if max_mass != None: idx = np.where(evol['mass'] <= max_mass) - evol = evol[idx] - + evol = evol[idx] + # Trim down the table by selecting every Nth point where # N = mass sampling factor. evol = evol[::mass_sampling] @@ -1349,18 +1538,18 @@ def __init__(self, logAge, distance, evo_model=default_evo_model, # Get the atmosphere model now. Wavelength is in Angstroms # This is the time-intensive call... everything else is negligable. star = atm_func(temperature=T, gravity=gravity) - + # Trim wavelength range down to JHKL range (0.5 - 5.2 microns) star = spectrum.trimSpectrum(star, wave_range[0], wave_range[1]) # Convert into flux observed at Earth (unreddened) star *= (R / distance)**2 # in erg s^-1 cm^-2 A^-1 - - # Save the final spectrum to our spec_list for later use. + + # Save the final spectrum to our spec_list for later use. 
self.spec_list.append(star) # Append all the meta data to the summary table. - + tab.meta['ATMFUNC'] = atm_func.__name__ tab.meta['EVOMODEL'] = type(evo_model).__name__ tab.meta['EVOMODELVERSION'] = evo_model.model_version_name @@ -1370,10 +1559,10 @@ def __init__(self, logAge, distance, evo_model=default_evo_model, tab.meta['WAVEMAX'] = wave_range[1] self.points = tab - + t2 = time.time() print('Isochrone generation took {0:f} s.'.format(t2-t1)) - + return def apply_reddening(self, AKs, extinction_law, dAKs=0, dist='uniform', dAKs_max=None): @@ -1385,7 +1574,7 @@ def apply_reddening(self, AKs, extinction_law, dAKs=0, dist='uniform', dAKs_max= ---------- AKs: float Total extinction in AKs - + extinction_law: SPISEA extinction object Extinction law to be used on the spectra @@ -1395,13 +1584,13 @@ def apply_reddening(self, AKs, extinction_law, dAKs=0, dist='uniform', dAKs_max= dAKs_max: float or None If not none, defines the maximum |dAKs| a star can - have in gaussian distribution case + have in gaussian distribution case dist: string, 'uniform' or 'gaussian' Distribution to draw differential reddening from. If uniform, dAKs will cut off at Aks +/- dAKs. Otherwise, will draw from Gaussian of width AKs +/- dAks - + """ self.AKs = np.ones(len(self.spec_list)) # Apply reddening to each object in the spec list @@ -1412,25 +1601,25 @@ def apply_reddening(self, AKs, extinction_law, dAKs=0, dist='uniform', dAKs_max= # extinction law if dAKs != 0: if dist == 'gaussian': - AKs_act = np.random.normal(loc=AKs, scale=dAKs) + AKs_act = self.rng.normal(loc=AKs, scale=dAKs) # Apply dAKs_max if desired. 
Redo if diff > dAKs_max if dAKs_max != None: diff = abs(AKs_act - AKs) while diff > dAKs_max: print('While loop active') - AKs_act = np.random.normal(loc=AKs, scale=dAKs) + AKs_act = self.rng.normal(loc=AKs, scale=dAKs) diff = abs(AKs_act - AKs) elif dist == 'uniform': low = AKs - dAKs high = AKs + dAKs - AKs_act = np.random.uniform(low=low, high=high) + AKs_act = self.rng.uniform(low=low, high=high) else: print('dist {0} undefined'.format(dist)) return else: AKs_act = AKs - red = extinction_law.reddening(AKs_act).resample(star.wave) + red = extinction_law.reddening(AKs_act).resample(star.wave) star *= red # Update the spectrum in spec list @@ -1443,7 +1632,7 @@ def apply_reddening(self, AKs, extinction_law, dAKs=0, dist='uniform', dAKs_max= return def make_photometry(self, filters, rebin=True): - """ + """ Make synthetic photometry for the specified filters. This function udpates the self.points table to include new columns with the photometry. @@ -1453,11 +1642,11 @@ def make_photometry(self, filters, rebin=True): filters : dictionary A dictionary containing the filter name (for the output columns) and the filter specification string that can be processed by pysynphot. - + rebin: boolean - True to rebin filter function (only used if non-zero transmission points are + True to rebin filter function (only used if non-zero transmission points are larger than 1500 points) - + """ npoints = len(self.points) @@ -1474,35 +1663,78 @@ def make_photometry(self, filters, rebin=True): col_name = 'mag_' + filt_name mag_col = Column(np.zeros(npoints, dtype=float), name=col_name) self.points.add_column(mag_col) - + # Loop through each star in the isochrone and do the filter integration for ss in range(npoints): star = self.spec_list[ss] # These are already extincted, observed spectra. 
star_mag = mag_in_filter(star, filt) - + self.points[col_name][ss] = star_mag - - + + endTime = time.time() print( ' Time taken: {0:.2f} seconds'.format(endTime - ts)) return +def check_save_file(save_file_path, evo_model, atm_func, red_law, verbose=False): + """ + Check to see if save_file exists, as saved by the save_file + and save_file_legacy objects. If the filename exists, check the + meta-data as well. + + returns a boolean: True is file exists, false otherwise + """ + out_bool = False + + if not os.path.exists(save_file_path): + if verbose: print(f'Isochrone file {save_file_path} does not exist.') + return out_bool + + tmp = Table.read(save_file_path) + + # See if the meta-data matches: evo model, atm_func, redlaw + if ( (tmp.meta['EVOMODEL'] == type(evo_model).__name__) & + (tmp.meta['ATMFUNC'] == atm_func.__name__) & + (tmp.meta['REDLAW'] == red_law.name) ): + out_bool = True + else: + # If out_bool is false, print out what doesn't match + if verbose: + print(f'Isochrone file {save_file_path} exists, but meta-data does not match.') + if tmp.meta['EVOMODEL'] != type(evo_model).__name__: + print(f' EVOMODEL: {tmp.meta["EVOMODEL"]} != {type(evo_model).__name__}') + if tmp.meta['ATMFUNC'] != atm_func.__name__: + print(f' ATMFUNC: {tmp.meta["ATMFUNC"]} != {atm_func.__name__}') + if tmp.meta['REDLAW'] != red_law.name: + print(f' REDLAW: {tmp.meta["REDLAW"]} != {red_law.name}') + + # Check model version if it was logged + if 'EVOMODELVERSION' in tmp.meta: + if tmp.meta['EVOMODELVERSION'] != evo_model.model_version_name: + out_bool=False + if verbose: + print(f'EVOMODELVERSION does not match: The recorded {tmp.meta["EVOMODELVERSION"]} does not matched the existing version {evo_model.model_version_name}') + + + return out_bool + + def get_filter_info(name, vega=vega, rebin=True): - """ + """ Define filter functions, setting ZP according to Vega spectrum. 
Input name is the SPISEA obs_string """ tmp = name.split(',') filterName = tmp[-1] - + if name.startswith('nirc2'): filt = filters.get_nirc2_filt(filterName) elif name.startswith('2mass'): filt = filters.get_2mass_filt(filterName) - + elif name.startswith('vista'): filt = filters.get_vista_filt(filterName) @@ -1517,10 +1749,10 @@ def get_filter_info(name, vega=vega, rebin=True): elif name.startswith('jg'): filt = filters.get_Johnson_Glass_filt(filterName) - + elif name.startswith('nirc1'): filt = filters.get_nirc1_filt(filterName) - + elif name.startswith('ctio_osiris'): filt = filters.get_ctio_osiris_filt(filterName) @@ -1532,7 +1764,7 @@ def get_filter_info(name, vega=vega, rebin=True): elif name.startswith('ukirt'): filt = filters.get_ukirt_filt(filterName) - + elif name.startswith('keck_osiris'): filt = filters.get_keck_osiris_filt(filterName) @@ -1545,20 +1777,23 @@ def get_filter_info(name, vega=vega, rebin=True): elif name.startswith('hawki'): filt = filters.get_hawki_filt(filterName) - + elif name.startswith('rubin'): filt = filters.get_rubin_filt(filterName) elif name.startswith('euclid'): filt = filters.get_euclid_filt(filterName) - + + elif name.startswith('nsfcam'): + filt = filters.get_nsfcam_filt(filterName) + else: # Otherwise, look for the filter info in the cdbs/mtab and cdbs/comp files try: filt = ObsBandpass(name) except: raise Exception('Filter {0} not understood. Check spelling and make sure cdbs/mtab and cdbs/comp files are up to date'.format(name)) - + # Convert to ArraySpectralElement for resampling. 
filt = spectrum.ArraySpectralElement(filt.wave, filt.throughput, waveunits=filt.waveunits, @@ -1576,14 +1811,14 @@ def get_filter_info(name, vega=vega, rebin=True): # Otherwise, throw an error idx = np.where(filt.throughput > 0.001)[0] if (min(filt.wave[idx]) < min(vega.wave)) | (max(filt.wave[idx]) > max(vega.wave)): - raise ValueError('Vega spectrum doesnt cover filter wavelength range!') + raise ValueError('Vega spectrum doesnt cover filter wavelength range!') vega_obs = obs.Observation(vega, filt, binset=filt.wave, force='taper') #vega_flux = vega_obs.binflux.sum() diff = np.diff(vega_obs.binwave) diff = np.append(diff, diff[-1]) vega_flux = np.sum(vega_obs.binflux * diff) - + vega_mag = 0.03 filt.flux0 = vega_flux @@ -1594,7 +1829,7 @@ def get_filter_info(name, vega=vega, rebin=True): def get_filter_col_name(obs_str): """ - Get standard column name for synthetic photometry based on + Get standard column name for synthetic photometry based on the input string. The input string is expected to be an appropriate SPISEA obs_string """ @@ -1613,7 +1848,7 @@ def get_filter_col_name(obs_str): filt_name = 'hst_{0}'.format(tmp[-1]) else: filt_name = '{0}_{1}'.format(tmp[0], tmp[1]) - + return filt_name def get_obs_str(col): @@ -1623,7 +1858,7 @@ def get_obs_str(col): """ # Remove the trailing m_ name = col[2:] - + # Define dictionary for filters filt_list = {'hst_f127m': 'wfc3,ir,f127m', 'hst_f139m': 'wfc3,ir,f139m', 'hst_f153m': 'wfc3,ir,f153m', 'hst_f814w': 'acs,wfc1,f814w', 'hst_f125w': 'wfc3,ir,f125w', 'hst_f160w': 'wfc3,ir,f160w', @@ -1645,7 +1880,7 @@ def get_obs_str(col): 'jwst_F187N': 'jwst,F187N', 'jwst_F200W': 'jwst,F200W', 'jwst_F210M': 'jwst,F210M', - 'jwst_F250M': 'jwst,F250M', + 'jwst_F250M': 'jwst,F250M', 'jwst_F277W': 'jwst,F277W', 'jwst_F300M': 'jwst,F300M', 'jwst_F322W2': 'jwst,F322W2', @@ -1665,7 +1900,7 @@ def get_obs_str(col): 'nirc2_FeII': 'nirc2,FeII', 'nirc2_Brgamma': 'nirc2,Brgamma', '2mass_J': '2mass,J', '2mass_H': '2mass,H', '2mass_Ks': 
'2mass,Ks', 'ubv_U':'ubv,U', 'ubv_B':'ubv,B', 'ubv_V':'ubv,V', 'ubv_R':'ubv,R', - 'ubv_I':'ubv,I', + 'ubv_I':'ubv,I', 'jg_J': 'jg,J', 'jg_H': 'jg,H', 'jg_K': 'jg,K', 'nirc1_K':'nirc1,K', 'nirc1_H':'nirc1,H', 'naco_J':'naco,J', 'naco_H':'naco,H', 'naco_Ks':'naco,Ks', @@ -1695,12 +1930,14 @@ def get_obs_str(col): 'rubin_u':'rubin,u', 'rubin_z':'rubin,z', 'rubin_y':'rubin,y', + 'euclid_VIS':'euclid,VIS', 'euclid_Y':'euclid,Y', 'euclid_J':'euclid,J', - 'euclid_H':'euclid,H'} + 'euclid_H':'euclid,H', + 'nsfcam_L':'nsfcam,L'} obs_str = filt_list[name] - + return obs_str def rebin_spec(wave, specin, wavnew): @@ -1713,7 +1950,7 @@ def rebin_spec(wave, specin, wavnew): f = np.ones(len(wave)) filt = spectrum.ArraySpectralElement(wave, f, waveunits='angstrom') obs_f = obs.Observation(spec, filt, binset=wavnew, force='taper') - + return obs_f.binflux def make_isochrone_grid(age_arr, AKs_arr, dist_arr, evo_model=default_evo_model, @@ -1724,7 +1961,7 @@ def make_isochrone_grid(age_arr, AKs_arr, dist_arr, evo_model=default_evo_model, 'wfc3,ir,f153m']): """ Wrapper routine to generate a grid of isochrones of different ages, - extinctions, and distances. + extinctions, and distances. 
Parameters: ---------- @@ -1736,7 +1973,7 @@ def make_isochrone_grid(age_arr, AKs_arr, dist_arr, evo_model=default_evo_model, dist_arr: array Array of distances to loop over (pc) - + evo_models: SPISEA evolution object Which evolution models to use @@ -1753,7 +1990,7 @@ def make_isochrone_grid(age_arr, AKs_arr, dist_arr, evo_model=default_evo_model, Mass sampling of isochrone, relative to original mass sampling filters: dictionary - Which filters to do the synthetic photometry on + Which filters to do the synthetic photometry on """ print( '**************************************') print( 'Start generating isochrones') @@ -1798,7 +2035,7 @@ def mag_in_filter(star, filt): diff = np.diff(star_in_filter.binwave) diff = np.append(diff, diff[-1]) star_flux = np.sum(star_in_filter.binflux * diff) - + star_mag = -2.5 * math.log10(star_flux / filt.flux0) + filt.mag0 return star_mag @@ -1821,10 +2058,10 @@ def match_model_masses(isoMasses, starMasses): idx = np.where(dm_frac > 0.1)[0] indices[idx] = -1 - + return indices - + def get_evo_model_by_string(evo_model_string): return getattr(evolution, evo_model_string) @@ -1835,13 +2072,13 @@ def calc_ab_vega_filter_conversion(filt_str): AB and Vega magnitudes for a given filter: m_AB - m_vega - Note: this conversion is just the vega magnitude in + Note: this conversion is just the vega magnitude in AB system - Parameters: - ----------- + Parameters + ---------- filt_str: string - Filter identification string + SPISEA filter identification string (see Photometric Filters doc page) """ # Get filter info filt = get_filter_info(filt_str) @@ -1855,14 +2092,14 @@ def calc_ab_vega_filter_conversion(filt_str): filt_wave = filt.wave filt_mu = c / filt_wave s_filt = filt.throughput - + # Interpolate the filter function, determine what the # filter function is at the exact sampling of the # vega spectrum (in freq space) filt_interp = scipy.interpolate.interp1d(filt_mu, s_filt, kind='linear', bounds_error=False, fill_value=0) s_interp = 
filt_interp(vega_mu) - + # Now for the m_ab calculation mu_diff = np.diff(vega_mu) numerator = np.sum(vega_flux_mu[:-1] * s_interp[:-1] * mu_diff) @@ -1880,10 +2117,10 @@ def calc_ab_vega_filter_conversion(filt_str): # fill_value=0) #s_interp = filt_interp(vega.wave) - # Calculate the numerator + # Calculate the numerator #diff = np.diff(vega.wave) #numerator2 = np.sum((vega.wave[:-1]**2. / c) * vega.flux[:-1] * s_interp[:-1] * diff) - + # Now we need to intergrate the filter response for the denominator #denominator2 = np.sum(s_interp[:-1] * diff) @@ -1892,3 +2129,42 @@ def calc_ab_vega_filter_conversion(filt_str): return vega_mag_ab +def calc_st_vega_filter_conversion(filt_str): + """ + Function to calculate the conversion between + ST and Vega magnitudes for a given filter: + m_ST - m_vega + + Note: this conversion is just the vega magnitude in + ST system + + Parameters + ---------- + filt_str: string + SPISEA filter identification string (see Photometric Filters doc page) + """ + # Get filter info + filt = get_filter_info(filt_str) + + # Interpolate the filter function to be the exact same sampling as the + # vega spectrum + c = 2.997*10**18 # A / s + filt_interp = scipy.interpolate.interp1d(filt.wave, filt.throughput, kind='linear', bounds_error=False, + fill_value=0) + s_interp = filt_interp(vega.wave) + + # Calculate the numerator + diff = np.diff(vega.wave) + numerator = np.sum(vega.flux[:-1] * s_interp[:-1] * diff) + + # Now we need to intergrate the filter response for the denominator + denominator = np.sum(s_interp[:-1] * diff) + # Fλ must be in erg cm–2 sec–1 Å–1 + + # Calculate vega AB magnitude. 
This is the conversion + vega_mag_st = -2.5 * np.log10(numerator / denominator) - 21.1 + + print('For {0}, m_st - m_vega = {1}'.format(filt_str, vega_mag_st)) + + return vega_mag_st + diff --git a/spisea/tests/test_data/companions.pkl b/spisea/tests/test_data/companions.pkl new file mode 100644 index 00000000..0395d75d Binary files /dev/null and b/spisea/tests/test_data/companions.pkl differ diff --git a/spisea/tests/test_data/star_systems.pkl b/spisea/tests/test_data/star_systems.pkl new file mode 100644 index 00000000..0cbdd246 Binary files /dev/null and b/spisea/tests/test_data/star_systems.pkl differ diff --git a/spisea/tests/test_exceptions.py b/spisea/tests/test_exceptions.py index 4996bb3c..b20a132d 100644 --- a/spisea/tests/test_exceptions.py +++ b/spisea/tests/test_exceptions.py @@ -32,12 +32,6 @@ def test_grid_number_exception(): # Case 3: installed model grid is higher than required grid (no error) required_grid = installed_grid - 1.0 - evolution.check_evo_grid_number(required_grid, models_dir) - - return - - - - + evolution.check_evo_grid_number(required_grid, models_dir) - + return \ No newline at end of file diff --git a/spisea/imf/tests/test_imf.py b/spisea/tests/test_imf.py similarity index 92% rename from spisea/imf/tests/test_imf.py rename to spisea/tests/test_imf.py index 571f6edc..5f465747 100755 --- a/spisea/imf/tests/test_imf.py +++ b/spisea/tests/test_imf.py @@ -1,11 +1,10 @@ import numpy as np import time import pdb +import cProfile +from spisea.imf import imf, multiplicity def test_generate_cluster(): - from .. import imf - from .. import multiplicity - # Make multiplicity object imf_multi = multiplicity.MultiplicityUnresolved() @@ -32,8 +31,6 @@ def test_generate_cluster(): return def test_prim_power(): - from .. import imf - #mass_limits = np.array([0.1, 1.0, 100.0]) #powers = np.array([-2.0, -1.8]) mass_limits = np.array([1.0, 100.0]) @@ -60,9 +57,6 @@ def test_prim_power(): return def test_xi(): - from .. 
import imf - - import cProfile, pstats, io #from pstats import SortKey pr = cProfile.Profile() @@ -72,14 +66,14 @@ def test_xi(): ########## # - # Test validity of returned values. - # + # Test validity of returned values. + # ########## N_size = 10 m = np.linspace(0.2, 20, N_size) val_good = np.array([1.6206566 , 0.26895718, 0.10135922, 0.05639448, 0.03703704, 0.0243668 , 0.01613091, 0.01137139, 0.00839526, 0.00642142]) - + val_test = np.zeros(len(m), dtype=float) for ii in range(len(m)): val_test[ii] = imf_tmp.xi(m[ii]) @@ -90,26 +84,26 @@ def test_xi(): ########## # # Performance testing - # + # ########## t1 = time.time() # pr.enable() - + # Run a time test N_size = int(1e4) m = np.random.uniform(1.1, 99.0, size=N_size) foo1 = imf_tmp.xi(m) - + # pr.disable() t2 = time.time() print('test_xi() runtime = {0:.3f} s for {1:d} masses'.format(t2 - t1, N_size)) - + # s = io.StringIO() # sortby = SortKey.CUMULATIVE # ps = pstats.Stats(pr, stream=s).sort_stats(sortby) # ps.print_stats() - # print(s.getvalue()) + # print(s.getvalue()) return @@ -118,9 +112,6 @@ def test_xi2(): Test that xi() produces the correct probability for a given slope. """ - from .. import imf - - import cProfile, pstats, io #from pstats import SortKey pr = cProfile.Profile() @@ -173,13 +164,10 @@ def test_xi2(): return - -def test_mxi(): - from .. import imf - import cProfile, pstats, io +def test_mxi(): #from pstats import SortKey pr = cProfile.Profile() @@ -189,8 +177,8 @@ def test_mxi(): ########## # - # Test validity of returned values. - # + # Test validity of returned values. 
+ # ########## N_size = 10 m = np.linspace(0.2, 20, N_size) @@ -207,11 +195,11 @@ def test_mxi(): ########## # # Performance testing - # + # ########## t1 = time.time() # pr.enable() - + # Run a time test N_size = int(1e4) m = np.random.uniform(1.1, 99.0, size=N_size) @@ -222,29 +210,26 @@ def test_mxi(): t2 = time.time() print('test_mxi() runtime = {0:.3f} s for {1:d} masses'.format(t2 - t1, N_size)) - + # s = io.StringIO() # sortby = SortKey.CUMULATIVE # ps = pstats.Stats(pr, stream=s).sort_stats(sortby) # ps.print_stats() - # print(s.getvalue()) + # print(s.getvalue()) return def test_theta_closed(): - from .. import imf - - import cProfile, pstats, io #from pstats import SortKey - + mass_limits = np.array([0.1, 1.0, 10.0, 100.0]) powers = np.array([-0.3, -1.5, -2.3]) imf_tmp = imf.IMF_broken_powerlaw(mass_limits, powers) ########## # - # Test validity of returned values. - # + # Test validity of returned values. + # ########## N_size = 10 m = np.linspace(0.2, 20, N_size) @@ -265,22 +250,22 @@ def test_theta_closed(): np.testing.assert_equal(val_test, val_good) - + ########## # # Speed tests and performance profiling. 
- # + # ########## N_size = 10000 m = np.linspace(1.1, 99, N_size) - + tmp = np.zeros((len(m), len(powers)), dtype=float) t1 = time.time() # pr = cProfile.Profile() # pr.enable() - + for ii in range(len(m)): tmp[ii] = imf.theta_closed(m[ii] - imf_tmp._m_limits_low) @@ -288,7 +273,7 @@ def test_theta_closed(): t2 = time.time() print('Runtime = {0:.3f} s for {1:d} masses'.format(t2 - t1, N_size)) - + # s = io.StringIO() # sortby = SortKey.CUMULATIVE # ps = pstats.Stats(pr, stream=s).sort_stats(sortby) @@ -297,4 +282,4 @@ def test_theta_closed(): return - + diff --git a/spisea/tests/test_models.py b/spisea/tests/test_models.py index 78d8e9e1..4ba0e683 100644 --- a/spisea/tests/test_models.py +++ b/spisea/tests/test_models.py @@ -1,21 +1,19 @@ # Test functions for the different stellar evolution and atmosphere models -from spisea import evolution +from spisea import evolution, atmospheres, synthetic import numpy as np import pdb def test_evo_model_grid_num(): """ - Make sure evolution models have both evo_grid_num + Make sure evolution models have both evo_grid_num and evo_grid_min (e.g., make sure these functions are working). Try it on one evolution model here; we'll test on all evo models in another function. 
""" - from spisea import evolution - # Make MIST evolution model, check evo grid variables evo = evolution.MISTv1() assert isinstance(evo.evo_grid_min, float) - + return def test_evolution_models(): @@ -32,10 +30,10 @@ def test_evolution_models(): metal_solar = [0] # Array of evolution models to test - evo_models = [evolution.MISTv1(version=1.2), evolution.MergedBaraffePisaEkstromParsec(), + evo_models = [evolution.MISTv1(version=1.2), evolution.MergedBaraffePisaEkstromParsec(), evolution.Parsec(), evolution.Baraffe15(), evolution.Ekstrom12(), evolution.Pisa()] - + # Array of age_ranges for the specific evolution models to test age_vals = [age_all_MIST_arr, age_all_arr, age_all_arr, age_young_arr, age_young_arr, age_young_arr] @@ -68,12 +66,12 @@ def test_evolution_models(): raise Exception('EVO TEST FAILED: {0}, age = {1}, metal = {2}'.format(evo, kk, jj)) print('Done {0}'.format(evo)) - + return def test_synthpop_MIST_extension(): """ - Testing the synthpop MIST extension to consistently lower masses + Testing the synthpop MIST extension to consistently lower masses """ evo1_grid = evolution.MISTv1(version=1.2, synthpop_extension=False) evo2_grid = evolution.MISTv1(version=1.2, synthpop_extension=True) @@ -93,11 +91,17 @@ def test_atmosphere_models(): """ Test the rebinned atmosphere models used for synthetic photometry """ - from spisea import atmospheres as atm - # Array of atmospheres - atm_arr = [atm.get_merged_atmosphere, atm.get_castelli_atmosphere, atm.get_phoenixv16_atmosphere, atm.get_BTSettl_2015_atmosphere, - atm.get_BTSettl_atmosphere, atm.get_kurucz_atmosphere, atm.get_phoenix_atmosphere, atm.get_wdKoester_atmosphere] + atm_arr = [ + atmospheres.get_merged_atmosphere, + atmospheres.get_castelli_atmosphere, + atmospheres.get_phoenixv16_atmosphere, + atmospheres.get_BTSettl_2015_atmosphere, + atmospheres.get_BTSettl_atmosphere, + atmospheres.get_kurucz_atmosphere, + atmospheres.get_phoenix_atmosphere, + atmospheres.get_wdKoester_atmosphere + ] # Array 
of metallicities metals_range = [-2.0, 0, 0.15] @@ -116,12 +120,12 @@ def test_atmosphere_models(): test = atm_func(metallicity=jj) except: raise Exception('ATM TEST FAILED: {0}, metal = {1}'.format(atm_func, jj)) - + print('Done {0}'.format(atm_func)) - + # Test get_merged_atmospheres at different temps temp_range = [2000, 3500, 4000, 5250, 6000, 12000] - atm_func = atm.get_merged_atmosphere + atm_func = atmospheres.get_merged_atmosphere for ii in metals_range: for jj in temp_range: try: @@ -131,30 +135,28 @@ def test_atmosphere_models(): print('get_merged_atmosphere: all temps/metallicities passed') - + # Test get_bb_atmosphere at different temps # This func only requests temp temp_range = [2000, 3500, 4000, 5250, 6000, 12000] - atm_func = atm.get_bb_atmosphere + atm_func = atmospheres.get_bb_atmosphere for jj in temp_range: try: test = atm_func(temperature=jj, verbose=True) except: raise Exception('ATM TEST FAILED: {0}, temp = {2}'.format(atm_func, jj)) - + print('get_bb_atmosphere: all temps passed') - + return def test_filters(): """ Test to make sure all of the filters work as expected """ - from spisea import synthetic - # Define vega spectrum vega = synthetic.Vega() - + # Filter list to test filt_list = ['wfc3,ir,f127m','acs,wfc1,f814w', '2mass,J', '2mass,H','2mass,Ks', @@ -186,28 +188,23 @@ def test_filters(): 'roman,wfi,f158', 'roman,wfi,f146', 'roman,wfi,f213', 'roman,wfi,f184', 'rubin,g', 'rubin,i', 'rubin,r', 'rubin,u', 'rubin,z', 'rubin,y', - 'euclid,Y', 'euclid,J', 'euclid,H'] + 'euclid,VIS', 'euclid,Y', 'euclid,J', 'euclid,H', + 'nsfcam,L'] # Loop through filters to test that they work: get_filter_info for ii in filt_list: - try: - filt = synthetic.get_filter_info(ii, rebin=True, vega=vega) - except: - raise Exception('get_filter_info TEST FAILED for {0}'.format(ii)) + filt = synthetic.get_filter_info(ii, rebin=True, vega=vega) print('get_filter_info pass') - + # Loop through filters to test that they work: get_obs_str for ii in filt_list: - try: - 
# Test going from col_name to obs_str - col_name = synthetic.get_filter_col_name(ii) - obs_str = synthetic.get_obs_str('m_{0}'.format(col_name)) - # Does the obs_str work? - filt_info = synthetic.get_filter_info(obs_str) - except: - raise Exception('get_obs_str TEST FAILED for {0}'.format(ii)) - + # Test going from col_name to obs_str + col_name = synthetic.get_filter_col_name(ii) + obs_str = synthetic.get_obs_str('m_{0}'.format(col_name)) + # Does the obs_str work? + filt_info = synthetic.get_filter_info(obs_str) + print('get_obs_str pass') print('Filters done') diff --git a/spisea/imf/tests/test_multiplicity.py b/spisea/tests/test_multiplicity.py similarity index 85% rename from spisea/imf/tests/test_multiplicity.py rename to spisea/tests/test_multiplicity.py index 3a9dc9b7..9df0776d 100755 --- a/spisea/imf/tests/test_multiplicity.py +++ b/spisea/tests/test_multiplicity.py @@ -1,12 +1,12 @@ import numpy as np import time - +import spisea +from spisea.imf import imf, multiplicity + def test_create_MultiplicityUnresolved(): """ Tests creating and accessing a MultiplicityUnresolved object. """ - from .. 
import multiplicity - # All default parameters -- check their values mu1 = multiplicity.MultiplicityUnresolved() assert mu1.MF_amp == 0.44 @@ -18,28 +18,26 @@ def test_create_MultiplicityUnresolved(): assert mu1.q_min == 0.01 # Test setting different parameters - mu2 = multiplicity.MultiplicityUnresolved(MF_amp=0.4, + mu2 = multiplicity.MultiplicityUnresolved(MF_amp=0.4, MF_power=0.4, - CSF_amp=0.4, - CSF_power=0.4, + CSF_amp=0.4, + CSF_power=0.4, CSF_max=4, - q_power=0.4, + q_power=0.4, q_min=0.04) - assert mu2.MF_amp == 0.4 + assert mu2.MF_amp == 0.4 assert mu2.MF_pow == 0.4 - assert mu2.CSF_amp == 0.4 - assert mu2.CSF_pow == 0.4 + assert mu2.CSF_amp == 0.4 + assert mu2.CSF_pow == 0.4 assert mu2.CSF_max == 4 - assert mu2.q_pow == 0.4 + assert mu2.q_pow == 0.4 assert mu2.q_min == 0.04 def test_multiplicity_fraction(): """ Test creating a MultiplicityUnresolved object and getting the multiplicity fraction out. - """ - from spisea.imf import multiplicity - + """ # First set of multiplicity parameters mu1 = multiplicity.MultiplicityUnresolved() @@ -57,14 +55,14 @@ def test_multiplicity_fraction(): CSF_amp=0.4, CSF_power=0.4, CSF_max=4, q_power=0.4, q_min=0.04) - mf2_1 = mu1.multiplicity_fraction(1.0) - np.testing.assert_almost_equal(mf2_1, 0.44, decimal=2) + mf2_1 = mu2.multiplicity_fraction(1.0) + np.testing.assert_almost_equal(mf2_1, 0.4, decimal=2) - mf2_2 = mu1.multiplicity_fraction(10.0) + mf2_2 = mu2.multiplicity_fraction(10.0) np.testing.assert_almost_equal(mf2_2, 1.0, decimal=2) - mf2_3 = mu1.multiplicity_fraction(0.1) - np.testing.assert_almost_equal(mf2_3, 0.136, decimal=2) + mf2_3 = mu2.multiplicity_fraction(0.1) + np.testing.assert_almost_equal(mf2_3, 0.159, decimal=2) def test_multiplicity_fraction_array(): @@ -72,8 +70,6 @@ def test_multiplicity_fraction_array(): Test multiplicity_fraction() on the MultiplicityUnresolved object where the inputs and outputs are in array form. 
""" - from spisea.imf import multiplicity - # First set of multiplicity parameters mu1 = multiplicity.MultiplicityUnresolved() @@ -83,14 +79,12 @@ def test_multiplicity_fraction_array(): np.testing.assert_almost_equal(mf_array[0], 0.44, decimal=2) np.testing.assert_almost_equal(mf_array[1], 1.0, decimal=2) np.testing.assert_almost_equal(mf_array[2], 0.136, decimal=2) - - + + def test_companion_star_fraction(): """ Test the companion_star fraction on the MultiplicityUnresolved object. """ - from spisea.imf import multiplicity - # First set of multiplicity parameters mu1 = multiplicity.MultiplicityUnresolved() @@ -120,12 +114,10 @@ def test_companion_star_fraction(): def test_resolvedmult(): """ - Test creating a MultiplicityResolvedDK object + Test creating a MultiplicityResolvedDK object and that the parameters it's populated with are correct. """ from spisea import synthetic, evolution, atmospheres, reddening, ifmr - from spisea.imf import imf, multiplicity - # Fetch isochrone logAge = 6.70 # Age in log(years) AKs = 1.0 # Ks filter extinction in mags @@ -135,56 +127,54 @@ def test_resolvedmult(): evo_merged = evolution.MISTv1() redlaw = reddening.RedLawCardelli(3.1) # Rv = 3.1 filt_list = ['nirc2,J', 'nirc2,Kp'] - + startTime = time.time() iso_merged = synthetic.IsochronePhot(logAge, AKs, dist, metallicity=metallicity, evo_model=evo_merged, atm_func=atm_func, filters=filt_list, red_law=redlaw, - mass_sampling=3) + mass_sampling=3, iso_dir=f'{spisea.__path__[0]}/tests/isochrones') print('Constructed isochrone: %d seconds' % (time.time() - startTime)) - - # Now we can make the cluster. + + # Now we can make the cluster. clust_mtot = 10**4. 
clust_multiplicity = multiplicity.MultiplicityResolvedDK() # Multiplicity is defined in the IMF object clust_imf_Mult = imf.Kroupa_2001(multiplicity=clust_multiplicity) - + # Make clusters clust_Mult = synthetic.ResolvedCluster(iso_merged, clust_imf_Mult, clust_mtot) clust_Mult_ss = clust_Mult.star_systems - + print('Constructed cluster: %d seconds' % (time.time() - startTime)) - + #check if columns were created assert 'log_a' in clust_Mult.companions.colnames assert 'e' in clust_Mult.companions.colnames assert 'i' in clust_Mult.companions.colnames assert 'Omega' in clust_Mult.companions.colnames assert 'omega' in clust_Mult.companions.colnames - + #check values are in correct range assert all(10**i<= 2000 and 10**i>= 0 for i in clust_Mult.companions['log_a']) #max separation is 2000 AU assert all(i<= 1 and i>= 0 for i in clust_Mult.companions['e']) assert all(i<= 180 and i>= 0 for i in clust_Mult.companions['i']) assert all(i<= 360 and i>= 0 for i in clust_Mult.companions['omega']) assert all(i<= 360 and i>= 0 for i in clust_Mult.companions['Omega']) - + #checks sign for inclination is being randomly genarated assert any(i > 90 for i in clust_Mult.companions['i']) and any(i < 90 for i in clust_Mult.companions['i']) - + #checks eccentricity follows f(e) = 2e pdf n, bins = np.histogram(clust_Mult.companions['e'], density = True) bin_centers = 0.5*(bins[1:] + bins[:-1]) assert all(np.abs(i) < 0.3 for i in 2*bin_centers - n) - + #checks shape of inclination histogram is sin(i) n, bins = np.histogram(clust_Mult.companions['i']) bin_centers = 0.5*(bins[1:] + bins[:-1]) assert all(np.abs(i) < 0.15 for i in n/max(n) - np.sin(np.pi*bin_centers/180)) - - return - + return diff --git a/spisea/tests/test_reddening.py b/spisea/tests/test_reddening.py index d4b99d88..99bbf78b 100644 --- a/spisea/tests/test_reddening.py +++ b/spisea/tests/test_reddening.py @@ -39,7 +39,7 @@ def test_RedLawBrokenPowerLaw(plots=False): # Compare law_test and the output from the redlaw object 
law_output = red_law.broken_powerlaw(wave_test, 1) - + assert len(law_test) == len(law_output) assert np.sum(np.isnan(law_output)) == 0 @@ -62,7 +62,7 @@ def test_RedLawBrokenPowerLaw(plots=False): idx2 = np.where(abs(wave_test-2.1) == np.min(abs(wave_test-2.1))) slope = (log_output[idx1] - log_output[idx2]) / (log_wave[idx1] - log_wave[idx2]) assert abs(slope - (-1.0 * alpha1)) < 10**-4 - + # If desired (debug only), make plot to see what law looks like if plots: # Test plot: these should match nearly exactly @@ -109,7 +109,7 @@ def test_RedLawBrokenPowerLaw(plots=False): idx = np.where( (wave_test >= 1.27) & (wave_test < 1.63)) coeff = (1.63 ** (-1*alpha1)) / (1.63 ** (-1*alpha2)) law_test[idx] = coeff * wave_test[idx] ** (-1*alpha2) - + # 1.27 - 0.8 idx = np.where( (wave_test >= 0.8) & (wave_test < 1.27)) coeff1 = (1.63 ** (-1*alpha1)) / (1.63 ** (-1*alpha2)) @@ -124,7 +124,7 @@ def test_RedLawBrokenPowerLaw(plots=False): coeff3 = (0.8 ** (-1*alpha3)) / (0.8 ** (-1*alpha4)) coeff_f = coeff1 * coeff2 * coeff3 law_test[idx] = coeff_f * wave_test[idx] ** (-1*alpha4) - + assert np.sum(np.isnan(law_test)) == 0 # Put in terms of A_lambda / A_Ks, like the reddening object @@ -133,7 +133,7 @@ def test_RedLawBrokenPowerLaw(plots=False): # Compare law_test and the output from the redlaw object law_output = red_law.broken_powerlaw(wave_test, 1) - + assert len(law_test) == len(law_output) assert np.sum(np.isnan(law_output)) == 0 @@ -166,7 +166,7 @@ def test_RedLawBrokenPowerLaw(plots=False): idx2 = np.where(abs(wave_test-0.7) == np.min(abs(wave_test-0.7))) slope = (log_output[idx1] - log_output[idx2]) / (log_wave[idx1] - log_wave[idx2]) assert abs(slope - (-1.0 * alpha4)) < 10**-4 - + # If desired (debug only), make plot to see what law looks like if plots: # Test plot: these should match nearly exactly @@ -195,9 +195,9 @@ def test_red_law_IsochronePhot(): metallicity = 0 # Metallicity in [M/H] # Define evolution/atmosphere models and extinction law - evo_model = 
evolution.MISTv1() + evo_model = evolution.MISTv1() atm_func = atmospheres.get_merged_atmosphere - + # Also specify filters for synthetic photometry. filt_list = ['wfc3,ir,f127m', 'wfc3,ir,f153m', 'nirc2,H', 'nirc2,Kp'] @@ -206,7 +206,7 @@ def test_red_law_IsochronePhot(): 'RL85', 'D16', 'F09,2.5,3', 'S16,1.55,0', 'DM16', 'H18b', 'NL18', 'C89,3.1', 'pl,2.12,0.9,2.4', 'broken_pl,[2.3,1.63,0.9],[2.23, 3.0],2.12'] - + aks_arr = [2.62, 2.46, 1.67, 2.3, 2.3, 2.3, 2.3, 2.3, 2.3, 2.3, 2.3, 2.3, 2.3, 2.3, 2.3, 2.3] for ii in range(len(redlaw_arr)): redlaw = reddening.get_red_law(redlaw_arr[ii]) @@ -214,15 +214,22 @@ def test_red_law_IsochronePhot(): aks = aks_arr[ii] # Try to run isochrone phot - iso_test = synthetic.IsochronePhot(logAge, aks, dist, metallicity=0, - evo_model=evo_model, atm_func=atm_func, - red_law=redlaw, filters=filt_list, - min_mass=0.95, max_mass=1.05) - # Now remove the iso file to make sure we recalc each time - cmd = 'rm iso_6.70_*_08000_p00.fits' - os.system(cmd) + iso_test = synthetic.IsochronePhot( + logAge, + aks, + dist, + metallicity=0, + evo_model=evo_model, + atm_func=atm_func, + red_law=redlaw, + filters=filt_list, + min_mass=0.95, + max_mass=1.05, + iso_dir='isochrones/', + recomp=True + ) print('----EL {0} works OK!-----'.format(redlaw_arr[ii])) - + return def test_all_EL(): diff --git a/spisea/tests/test_synthetic.py b/spisea/tests/test_synthetic.py index ec2628a2..7694ebfd 100755 --- a/spisea/tests/test_synthetic.py +++ b/spisea/tests/test_synthetic.py @@ -2,6 +2,8 @@ import numpy as np import pylab as plt import numpy as np +import pickle +import spisea from spisea import reddening, evolution, atmospheres, ifmr from spisea import synthetic as syn from spisea.imf import imf @@ -11,6 +13,8 @@ import pdb from scipy.spatial import cKDTree as KDTree +spisea_path = os.path.dirname(spisea.__file__) + def test_isochrone(plot=False): logAge = 6.7 AKs = 2.7 @@ -26,11 +30,11 @@ def test_isochrone(plot=False): assert iso.points.meta['AKS'] == AKs 
assert iso.points.meta['DISTANCE'] == distance assert len(iso.points) > 100 - + if plot: - plt.figure(1) + plt.figure(1) iso.plot_HR_diagram() - + plt.figure(2) iso.plot_mass_luminosity() @@ -38,21 +42,23 @@ def test_isochrone(plot=False): def test_iso_wave(): """ - Test to make sure isochrones generated have spectra with the proper + Test to make sure isochrones generated have spectra with the proper wavelength range, and that the user has control over that wavelength range (propagated through IsochronePhot) """ # Define isochrone parameters - logAge = np.log10(5*10**6.) # Age in log(years) - AKs = 0.8 # extinction in mags + logAge = 6.7 # Age in log(years) + AKs = 2.7 # extinction in mags dist = 4000 # distance in parsec + metal = 0.0 # metallicity + iso_dir = f'{spisea_path}/tests/isochrones' # Define evolution/atmosphere models and extinction law (optional) - evo_model = evolution.MergedBaraffePisaEkstromParsec() + evo_model = evolution.MergedBaraffePisaEkstromParsec() atm_func = atmospheres.get_merged_atmosphere red_law = reddening.RedLawHosek18b() - # Also specify filters for synthetic photometry (optional). Here we use + # Also specify filters for synthetic photometry (optional). Here we use # the HST WFC3-IR F127M, F139M, and F153M filters filt_list = ['wfc3,ir,f127m'] @@ -65,11 +71,18 @@ def test_iso_wave(): # Make Isochrone object. Will use wave_range = [3000,52000]. # Make sure range matches to resolution of atmosphere. 
wave_range1 = [3000, 52000] - my_iso = syn.IsochronePhot(logAge, AKs, dist, - evo_model=evo_model, atm_func=atm_func, - red_law=red_law, filters=filt_list, - mass_sampling=10, wave_range=wave_range1, - recomp=True) + my_iso = syn.IsochronePhot( + logAge, AKs, dist, + metallicity=metal, + evo_model=evo_model, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + mass_sampling=10, + wave_range=wave_range1, + recomp=True, + iso_dir=iso_dir + ) test = my_iso.spec_list[0] @@ -79,11 +92,19 @@ def test_iso_wave(): # Now let's try changing the wave range. Is it carried through # properly? wave_range2 = [1200, 90000] - my_iso = syn.IsochronePhot(logAge, AKs, dist, - evo_model=evo_model, atm_func=atm_func, - red_law=red_law, filters=filt_list, - mass_sampling=10, wave_range=wave_range2, - recomp=True) + my_iso = syn.IsochronePhot( + logAge, + AKs, + dist, + evo_model=evo_model, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + mass_sampling=10, + wave_range=wave_range2, + recomp=True, + iso_dir=iso_dir + ) test2 = my_iso.spec_list[0] @@ -93,13 +114,21 @@ def test_iso_wave(): # Does the error exception catch the bad wave_range? wave_range3 = [1200, 1000000] try: - my_iso = syn.IsochronePhot(logAge, AKs, dist, - evo_model=evo_model, atm_func=atm_func, - red_law=red_law, filters=filt_list, - mass_sampling=10, wave_range=wave_range3, - recomp=True) + my_iso = syn.IsochronePhot( + logAge, + AKs, + dist, + evo_model=evo_model, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + mass_sampling=10, + wave_range=wave_range3, + recomp=True, + iso_dir=iso_dir + ) print('WAVE TEST FAILED!!! Should have crashed here, wavelength range out of bounds') - raise ValueError() + raise ValueError() except: print('Wavelength out of bound condition passed. 
Test is good') pass @@ -111,20 +140,28 @@ def test_IsochronePhot(plot=False): distance = 4000 filt_list = ['wfc3,ir,f127m', 'nirc2,J'] mass_sampling=1 - iso_dir = 'iso/' + iso_dir = f'{spisea_path}/tests/isochrones' evo_model = evolution.MISTv1() atm_func = atmospheres.get_merged_atmosphere redlaw = reddening.RedLawNishiyama09() startTime = time.time() - iso = syn.IsochronePhot(logAge, AKs, distance, evo_model=evo_model, - atm_func=atm_func, red_law=redlaw, - filters=filt_list, - mass_sampling=mass_sampling, iso_dir=iso_dir) + iso = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo_model, + atm_func=atm_func, + red_law=redlaw, + filters=filt_list, + mass_sampling=mass_sampling, + iso_dir=iso_dir, + recomp=True + ) endTime = time.time() print('IsochronePhot generated in: %d seconds' % (endTime - startTime)) - # Typically takes 120 seconds if file is regenerated. + # Typically takes 40 seconds if file is regenerated. # Limited by pysynphot.Icat call in atmospheres.py assert iso.points.meta['LOGAGE'] == logAge @@ -135,42 +172,64 @@ def test_IsochronePhot(plot=False): assert 'm_nirc2_J' in iso.points.colnames if plot: - plt.figure(1) + plt.figure(1) iso.plot_CMD('mag814w', 'mag160w') - + plt.figure(2) iso.plot_mass_magnitude('mag160w') # Finally, let's test the isochronePhot file generation - assert os.path.exists('{0}/iso_{1:.2f}_{2:4.2f}_{3:4s}_p00.fits'.format(iso_dir, logAge, - AKs, str(distance).zfill(5))) - + metal_value = 0. 
+ metal_sign = 'm' if metal_value < 0 else 'p' + assert os.path.exists(f'{iso_dir}/iso_{logAge:.2f}_{AKs:4.2f}_{str(distance).zfill(5)}_{metal_sign}{metal_value:.2f}.fits') + # Check 1: If we try to remake the isochrone, does it read the file rather than # making a new one - iso_new = syn.IsochronePhot(logAge, AKs, distance, evo_model=evo_model, - atm_func=atm_func, red_law=redlaw, - filters=filt_list, - mass_sampling=mass_sampling, iso_dir=iso_dir) + iso_new = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo_model, + atm_func=atm_func, + red_law=redlaw, + filters=filt_list, + mass_sampling=mass_sampling, + iso_dir=iso_dir + ) assert iso_new.recalc == False - + # Check 2: Confirm that adding a new column to an existing isochrone works properly. # Does the new filter get added to the isochrone? And the old ones still there? # Does the computed data for the new filter match the same result if you fully regenerate the isochrone? - iso_new_addfilt = syn.IsochronePhot(logAge, AKs, distance, evo_model=evo_model, - atm_func=atm_func, red_law=redlaw, - filters=filt_list+['2mass,Ks'], - mass_sampling=mass_sampling, iso_dir=iso_dir) + iso_new_addfilt = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo_model, + atm_func=atm_func, + red_law=redlaw, + filters=filt_list+['2mass,Ks'], + mass_sampling=mass_sampling, + iso_dir=iso_dir + ) assert iso_new_addfilt.recalc == False assert 'm_2mass_Ks' in iso_new_addfilt.points.colnames assert 'm_nirc2_J' in iso_new_addfilt.points.colnames - - iso_new_3filt = syn.IsochronePhot(logAge, AKs, distance, evo_model=evo_model, - atm_func=atm_func, red_law=redlaw, - filters=filt_list+['2mass,Ks'], - mass_sampling=mass_sampling, iso_dir=iso_dir, - recomp=True) + + iso_new_3filt = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo_model, + atm_func=atm_func, + red_law=redlaw, + filters=filt_list+['2mass,Ks'], + mass_sampling=mass_sampling, + recomp=True, + iso_dir=iso_dir + ) 
np.testing.assert_almost_equal(iso_new_addfilt.points['m_2mass_Ks'], iso_new_3filt.points['m_2mass_Ks']) assert iso_new_3filt.recalc==True @@ -179,26 +238,47 @@ def test_IsochronePhot(plot=False): evo2 = evolution.MergedBaraffePisaEkstromParsec() mass_sampling=20 - iso_new = syn.IsochronePhot(logAge, AKs, distance, evo_model=evo2, - atm_func=atm_func, red_law=redlaw, - filters=filt_list, - mass_sampling=mass_sampling, iso_dir=iso_dir) + iso_new = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo2, + atm_func=atm_func, + red_law=redlaw, + filters=filt_list, + mass_sampling=mass_sampling, + iso_dir=iso_dir + ) assert iso_new.recalc == True redlaw2 = reddening.RedLawHosek18b() - iso_new = syn.IsochronePhot(logAge, AKs, distance, evo_model=evo2, - atm_func=atm_func, red_law=redlaw2, - filters=filt_list, - mass_sampling=mass_sampling, iso_dir=iso_dir) + iso_new = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo2, + atm_func=atm_func, + red_law=redlaw2, + filters=filt_list, + mass_sampling=mass_sampling, + iso_dir=iso_dir + ) assert iso_new.recalc == True atm2 = atmospheres.get_castelli_atmosphere - iso_new = syn.IsochronePhot(logAge, AKs, distance, evo_model=evo2, - atm_func=atm2, red_law=redlaw2, - filters=filt_list, - mass_sampling=mass_sampling, iso_dir=iso_dir) + iso_new = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo2, + atm_func=atm2, + red_law=redlaw2, + filters=filt_list, + mass_sampling=mass_sampling, + iso_dir=iso_dir + ) assert iso_new.recalc == True @@ -211,21 +291,30 @@ def test_ResolvedCluster(): distance = 4000 cluster_mass = 10**5. 
mass_sampling=5 + iso_dir = f'{spisea_path}/tests/isochrones' # Test filters filt_list = ['nirc2,J', 'nirc2,Kp'] startTime = time.time() - + evo = evolution.MergedBaraffePisaEkstromParsec() atm_func = atmospheres.get_merged_atmosphere red_law = reddening.RedLawNishiyama09() - - iso = syn.IsochronePhot(logAge, AKs, distance, - evo_model=evo, atm_func=atm_func, - red_law=red_law, filters=filt_list, - mass_sampling=mass_sampling) + + iso = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + mass_sampling=mass_sampling, + iso_dir=iso_dir, + recomp=True + ) print('Constructed isochrone: %d seconds' % (time.time() - startTime)) @@ -239,7 +328,7 @@ def test_ResolvedCluster(): my_imf1 = imf.IMF_broken_powerlaw(imf_mass_limits, imf_powers, multiplicity=None) print('Constructed IMF: %d seconds' % (time.time() - startTime)) - + cluster1 = syn.ResolvedCluster(iso, my_imf1, cluster_mass) clust1 = cluster1.star_systems print('Constructed cluster: %d seconds' % (time.time() - startTime)) @@ -267,7 +356,7 @@ def test_ResolvedCluster(): my_imf2 = imf.IMF_broken_powerlaw(imf_mass_limits, imf_powers, multiplicity=multi) print('Constructed IMF with multiples: %d seconds' % (time.time() - startTime)) - + cluster2 = syn.ResolvedCluster(iso, my_imf2, cluster_mass) clust2 = cluster2.star_systems print('Constructed cluster with multiples: %d seconds' % (time.time() - startTime)) @@ -277,7 +366,7 @@ def test_ResolvedCluster(): assert np.sum(clust2['N_companions']) == len(cluster2.companions) ########## - # Plots + # Plots ########## # Plot an IR CMD and compare cluster members to isochrone. plt.figure(1) @@ -287,7 +376,7 @@ def test_ResolvedCluster(): plt.plot(iso.points['m_nirc2_J'] - iso.points['m_nirc2_Kp'], iso.points['m_nirc2_J'], 'c-') plt.gca().invert_yaxis() plt.xlabel('J - Kp (mag)') - plt.ylabel('J (mag') + plt.ylabel('J (mag)') # Plot a mass-magnitude relationship. 
plt.figure(2) @@ -297,7 +386,7 @@ def test_ResolvedCluster(): plt.gca().invert_yaxis() plt.xlabel('Mass (Msun)') plt.ylabel('J (mag)') - + # # Plot the spectrum of the most massive star # idx = cluster.mass.argmax() # plt.clf() @@ -317,21 +406,29 @@ def test_ResolvedClusterDiffRedden(): cluster_mass = 10**5. deltaAKs = 0.05 mass_sampling=5 + iso_dir = f'{spisea_path}/tests/isochrones' # Test filters filt_list = ['nirc2,J', 'nirc2,Kp'] - + startTime = time.time() - + evo = evolution.MergedBaraffePisaEkstromParsec() atm_func = atmospheres.get_merged_atmosphere red_law = reddening.RedLawNishiyama09() - - iso = syn.IsochronePhot(logAge, AKs, distance, - evo_model=evo, atm_func=atm_func, - red_law=red_law, filters=filt_list, - mass_sampling=mass_sampling) + + iso = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + mass_sampling=mass_sampling, + iso_dir=iso_dir + ) print('Constructed isochrone: %d seconds' % (time.time() - startTime)) @@ -344,13 +441,13 @@ def test_ResolvedClusterDiffRedden(): my_imf1 = imf.IMF_broken_powerlaw(imf_mass_limits, imf_powers, multiplicity=None) print('Constructed IMF: %d seconds' % (time.time() - startTime)) - + cluster1 = syn.ResolvedClusterDiffRedden(iso, my_imf1, cluster_mass, deltaAKs) clust1 = cluster1.star_systems print('Constructed cluster: %d seconds' % (time.time() - startTime)) assert len(clust1) > 0 - + plt.figure(3) plt.clf() plt.plot(clust1['m_nirc2_J'] - clust1['m_nirc2_Kp'], clust1['m_nirc2_J'], 'r.') @@ -367,7 +464,7 @@ def test_ResolvedClusterDiffRedden(): my_imf2 = imf.IMF_broken_powerlaw(imf_mass_limits, imf_powers, multiplicity=multi) print('Constructed IMF with multiples: %d seconds' % (time.time() - startTime)) - + cluster2 = syn.ResolvedClusterDiffRedden(iso, my_imf2, cluster_mass, deltaAKs) clust2 = cluster2.star_systems print('Constructed cluster with multiples: %d seconds' % (time.time() - startTime)) @@ -377,7 +474,7 @@ def 
test_ResolvedClusterDiffRedden(): assert np.sum(clust2['N_companions']) == len(cluster2.companions) ########## - # Plots + # Plots ########## # Plot an IR CMD and compare cluster members to isochrone. plt.figure(1) @@ -399,7 +496,7 @@ def test_ResolvedClusterDiffRedden(): plt.ylabel('J (mag)') return - + def test_UnresolvedCluster(): log_age = 6.7 AKs = 0.0 @@ -407,7 +504,7 @@ def test_UnresolvedCluster(): metallicity=0 cluster_mass = 10**4. - startTime = time.time() + startTime = time.time() multi = multiplicity.MultiplicityUnresolved() imf_in = imf.Kroupa_2001(multiplicity=multi) evo = evolution.MergedBaraffePisaEkstromParsec() @@ -434,22 +531,30 @@ def test_ifmr_multiplicity(): distance = 1000 cluster_mass = 1e6 mass_sampling = 5 + iso_dir = f'{spisea_path}/tests/isochrones' # Test all filters filt_list = ['nirc2,Kp', 'nirc2,H', 'nirc2,J'] startTime = time.time() - + evo = evolution.MISTv1() atm_func = atmospheres.get_merged_atmosphere ifmr_obj = ifmr.IFMR_Raithel18() red_law = reddening.RedLawNishiyama09() - - iso = syn.IsochronePhot(logAge, AKs, distance, - evo_model=evo, atm_func=atm_func, - red_law=red_law, filters=filt_list, - mass_sampling=mass_sampling) + + iso = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + mass_sampling=mass_sampling, + iso_dir=iso_dir + ) print('Constructed isochrone: %d seconds' % (time.time() - startTime)) @@ -462,13 +567,13 @@ def test_ifmr_multiplicity(): ########## my_imf1 = imf.IMF_broken_powerlaw(imf_mass_limits, imf_powers, multiplicity=None) - print('Constructed IMF: %d seconds' % (time.time() - startTime)) - + print('Constructed IMF: %d seconds' % (time.time() - startTime)) + cluster1 = syn.ResolvedCluster(iso, my_imf1, cluster_mass, ifmr=ifmr_obj) clust1 = cluster1.star_systems print('Constructed cluster: %d seconds' % (time.time() - startTime)) - + ########## # Test with multiplicity and IFMR ########## @@ -476,7 +581,7 @@ def 
test_ifmr_multiplicity(): my_imf2 = imf.IMF_broken_powerlaw(imf_mass_limits, imf_powers, multiplicity=multi) print('Constructed IMF with multiples: %d seconds' % (time.time() - startTime)) - + cluster2 = syn.ResolvedCluster(iso, my_imf2, cluster_mass, ifmr=ifmr_obj) clust2 = cluster2.star_systems comps2 = cluster2.companions @@ -512,35 +617,53 @@ def test_metallicity(): Test isochrone generation at different metallicities """ # Define isochrone parameters - logAge = np.log10(5*10**6.) - AKs = 0.8 - dist = 4000 + logAge = np.log10(5*10**6.) + AKs = 0.8 + dist = 4000 evo_model = evolution.MISTv1() atm_func = atmospheres.get_phoenixv16_atmosphere red_law = reddening.RedLawHosek18b() filt_list = ['wfc3,ir,f127m', 'wfc3,ir,f139m', 'wfc3,ir,f153m'] + iso_dir = f'{spisea_path}/tests/isochrones' - # Start with a solar metallicity isochrone - metallicity= 0.0 + # Start with a solar metallicity isochrone + metallicity= 0. + metal_sign = 'm' if metallicity < 0 else 'p' # Make Isochrone object, with high mass_sampling to decrease compute time - my_iso = syn.IsochronePhot(logAge, AKs, dist, metallicity=metallicity, - evo_model=evo_model, atm_func=atm_func, - red_law=red_law, filters=filt_list, - mass_sampling=10) + my_iso = syn.IsochronePhot( + logAge, + AKs, + dist, + metallicity=metallicity, + evo_model=evo_model, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + mass_sampling=10, + iso_dir=iso_dir + ) # Test isochrone properties assert my_iso.points.meta['METAL_IN'] == 0.0 - assert os.path.exists('iso_6.70_0.80_04000_p00.fits') + assert os.path.exists(f'{iso_dir}/iso_6.70_0.80_04000_p0.00.fits') # Now for non-solar metallicity metallicity= -1.5 # Make Isochrone object, with high mass_sampling to decrease compute time - my_iso = syn.IsochronePhot(logAge, AKs, dist, metallicity=metallicity, - evo_model=evo_model, atm_func=atm_func, - red_law=red_law, filters=filt_list, - mass_sampling=10) + my_iso = syn.IsochronePhot( + logAge, + AKs, + dist, + 
metallicity=metallicity, + evo_model=evo_model, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + mass_sampling=10, + iso_dir=iso_dir + ) # MIST model sub-directory names changed in SPISEA v2.1.4 update; # changing what "metal_act" value was. version 1 of MIST grid @@ -555,12 +678,12 @@ def test_metallicity(): metal_act = np.log10(0.00047 / 0.0142) # For Mist isochrones else: metal_act = np.log10(0.00045 / 0.0142) # For Mist isochrones - + # Test isochrone properties assert my_iso.points.meta['METAL_IN'] == -1.5 assert np.isclose(my_iso.points.meta['METAL_ACT'], metal_act) - assert os.path.exists('iso_6.70_0.80_04000_m15.fits') - + assert os.path.exists(f'{iso_dir}/iso_6.70_0.80_04000_m1.50.fits') + return def test_cluster_mass(): @@ -570,21 +693,29 @@ def test_cluster_mass(): distance = 4000 cluster_mass = 10**5. mass_sampling = 5 + iso_dir = f'{spisea_path}/tests/isochrones' # Test filters filt_list = ['nirc2,J', 'nirc2,Kp'] startTime = time.time() - + # Define evolution/atmosphere models and extinction law - evo = evolution.MISTv1() + evo = evolution.MISTv1() atm_func = atmospheres.get_merged_atmosphere red_law = reddening.RedLawHosek18b() - - iso = syn.IsochronePhot(logAge, AKs, distance, - evo_model=evo, atm_func=atm_func, - red_law=red_law, filters=filt_list, - mass_sampling=mass_sampling) + + iso = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + mass_sampling=mass_sampling, + iso_dir=iso_dir + ) print('Constructed isochrone: %d seconds' % (time.time() - startTime)) @@ -594,7 +725,7 @@ def test_cluster_mass(): # IFMR my_ifmr = ifmr.IFMR_Raithel18() - + ########## # Start without multiplicity @@ -602,7 +733,7 @@ def test_cluster_mass(): my_imf1 = imf.IMF_broken_powerlaw(imf_mass_limits, imf_powers, multiplicity=None) print('Constructed IMF: %d seconds' % (time.time() - startTime)) - + cluster1 = syn.ResolvedCluster(iso, my_imf1, cluster_mass, ifmr=my_ifmr) clust1 = 
cluster1.star_systems print('Constructed cluster: %d seconds' % (time.time() - startTime)) @@ -628,7 +759,7 @@ def test_cluster_mass(): my_imf2 = imf.IMF_broken_powerlaw(imf_mass_limits, imf_powers, multiplicity=multi) print('Constructed IMF with multiples: %d seconds' % (time.time() - startTime)) - + cluster2 = syn.ResolvedCluster(iso, my_imf2, cluster_mass, ifmr=my_ifmr) clust2 = cluster2.star_systems print('Constructed cluster with multiples: %d seconds' % (time.time() - startTime)) @@ -642,7 +773,7 @@ def test_cluster_mass(): def test_keep_low_mass_stars(): """ - Test "keep_low_mass_stars = True" functionality introduced in v2.2 + Test "keep_low_mass_stars = True" functionality introduced in v2.2 """ # Define cluster parameters, pulling on an isochrone generated in an earlier test (since # we don't care about isochrone generation here @@ -651,26 +782,34 @@ def test_keep_low_mass_stars(): distance = 4000 cluster_mass = 10**5. mass_sampling = 5 + iso_dir = f'{spisea_path}/tests/isochrones' # Test filters filt_list = ['nirc2,J', 'nirc2,Kp'] - + # Define evolution/atmosphere models and extinction law - evo = evolution.MISTv1() + evo = evolution.MISTv1() atm_func = atmospheres.get_merged_atmosphere red_law = reddening.RedLawHosek18b() - - iso = syn.IsochronePhot(logAge, AKs, distance, - evo_model=evo, atm_func=atm_func, - red_law=red_law, filters=filt_list, - mass_sampling=mass_sampling) + + iso = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + mass_sampling=mass_sampling, + iso_dir=iso_dir + ) # Get the minimum mass in the isochrones. This should be the lowest # mass psosbile when keep_low_mass_stars == False. # Make sure this min mass is low enough for a reasonalbe test min_mass_iso = np.min(iso.points['mass']) assert min_mass_iso >= 0.05 - + # Define IMF + IFMR. 
Make sure IMF goes to really low masses, # below the 0.08 Msun limit of the MIST isochrones imf_min = 0.01 @@ -698,33 +837,41 @@ def test_keep_low_mass_stars(): return - + def test_compact_object_companions(): - + # Define cluster parameters logAge = 6.7 AKs = 2.4 distance = 4000 cluster_mass = 10**4. mass_sampling=5 + iso_dir = f'{spisea_path}/tests/isochrones' # Test filters filt_list = ['nirc2,J', 'nirc2,Kp'] startTime = time.time() - + evo = evolution.MergedBaraffePisaEkstromParsec() atm_func = atmospheres.get_merged_atmosphere red_law = reddening.RedLawNishiyama09() - - iso = syn.IsochronePhot(logAge, AKs, distance, - evo_model=evo, atm_func=atm_func, - red_law=red_law, filters=filt_list, - mass_sampling=mass_sampling) + + iso = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + mass_sampling=mass_sampling, + iso_dir=iso_dir + ) print('Constructed isochrone: %d seconds' % (time.time() - startTime)) - + clust_multiplicity = multiplicity.MultiplicityResolvedDK() massLimits = np.array([0.2, 0.5, 1, 120]) # mass segments @@ -748,17 +895,25 @@ def time_test_cluster(): AKs = 2.7 distance = 4000 cluster_mass = 10**4 + iso_dir = f'{spisea_path}/tests/isochrones' startTime = time.time() - + evo = evolution.MergedBaraffePisaEkstromParsec() atm_func = atmospheres.get_merged_atmosphere red_law = reddening.RedLawNishiyama09() filt_list = ['nirc2,J', 'nirc2,Kp'] - - iso = syn.IsochronePhot(logAge, AKs, distance, - evo_model=evo, atm_func=atm_func, - red_law=red_law, filters=filt_list) + + iso = syn.IsochronePhot( + logAge, + AKs, + distance, + evo_model=evo, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + iso_dir=iso_dir + ) print('Constructed isochrone: %d seconds' % (time.time() - startTime)) imf_limits = np.array([0.07, 0.5, 150]) @@ -766,23 +921,30 @@ def time_test_cluster(): multi = multiplicity.MultiplicityUnresolved() my_imf = imf.IMF_broken_powerlaw(imf_limits, imf_powers, 
multiplicity=multi) print('Constructed IMF with multiples: %d seconds' % (time.time() - startTime)) - + cluster = syn.ResolvedCluster(iso, my_imf, cluster_mass) print('Constructed cluster: %d seconds' % (time.time() - startTime)) return - + def model_young_cluster_object(resolved=False): log_age = 6.5 AKs = 0.1 distance = 8000.0 cluster_mass = 10000. - + multi = multiplicity.MultiplicityUnresolved() imf_in = imf.Kroupa_2001(multiplicity=multi) - evo = evolution.MergedPisaEkstromParsec() + evo = evolution.MergedBaraffePisaEkstromParsec() atm_func = atmospheres.get_merged_atmosphere - iso = syn.Isochrone(log_age, AKs, distance, evo, mass_sampling=10) + iso = syn.Isochrone( + log_age, + AKs, + distance, + evo_model=evo, + atm_func=atm_func, + mass_sampling=10 + ) if resolved: cluster = syn.ResolvedCluster(iso, imf_in, cluster_mass) @@ -804,30 +966,31 @@ def model_young_cluster_object(resolved=False): plt.plot(wave, flux, 'k.') return - + def time_test_mass_match(): log_age = 6.7 AKs = 2.7 distance = 4000 cluster_mass = 5e3 - + iso_dir = f'{spisea_path}/tests/isochrones' + imf_in = imf.Kroupa_2001(multiplicity=None) start_time = time.time() - iso = syn.IsochronePhot(log_age, AKs, distance) + iso = syn.IsochronePhot(log_age, AKs, distance, iso_dir=iso_dir) iso_masses = iso.points['mass'] print('Generated iso masses in {0:.0f} s'.format(time.time() - start_time)) start_time = time.time() star_masses, isMulti, compMass, sysMass = imf_in.generate_cluster(cluster_mass) print('Generated cluster masses in {0:.0f} s'.format(time.time() - start_time)) - + def match_model_masses1(isoMasses, starMasses): indices = np.empty(len(starMasses), dtype=int) - + for ii in range(len(starMasses)): theMass = starMasses[ii] - + dm = np.abs(isoMasses - theMass) mdx = dm.argmin() @@ -838,12 +1001,12 @@ def match_model_masses1(isoMasses, starMasses): indices[ii] = mdx return indices - + def match_model_masses2(isoMasses, starMasses): isoMasses_tmp = isoMasses.reshape((len(isoMasses), 1)) kdt = 
KDTree(isoMasses_tmp) - + starMasses_tmp = starMasses.reshape((len(starMasses), 1)) q_results = kdt.query(starMasses_tmp, k=1) indices = q_results[1] @@ -852,7 +1015,7 @@ def match_model_masses2(isoMasses, starMasses): idx = np.where(dm_frac > 0.1)[0] indices[idx] = -1 - + return indices print('Test #1 START') @@ -883,7 +1046,7 @@ def FeH_from_Z(Z): metal = np.array([2.0e-4, 1.0e-3, 2.0e-3, 2.0e-2]) #ensure that all Spera metallicity regimes are represented FeH = FeH_from_Z(metal) #generate death mass takes metallicty as [Fe/H] - #want to get a good range of masses for Spera, should expect 8 invalids, 8 WDs, 3 NSs, and 9 BHs + #want to get a good range of masses for Spera, should expect 8 invalids, 8 WDs, 3 NSs, and 9 BHs ZAMS = np.array([-0.2*np.ones(len(FeH)), 0.2*np.ones(len(FeH)), 4.0*np.ones(len(FeH)), 9.2*np.ones(len(FeH)), 15.0*np.ones(len(FeH)), 30.0*np.ones(len(FeH)), 150.0*np.ones(len(FeH))]) @@ -975,7 +1138,7 @@ def test_Spera15_IFMR_7(): assert len(BH_idx) == 10 , "There are not the right number of BHs for the Spera15 IFMR" return - + def generate_Raithel18_IFMR(): """ Make a set of objects using the Raithel18 IFMR for the purposes of testing @@ -984,7 +1147,7 @@ def generate_Raithel18_IFMR(): """ Raithel = ifmr.IFMR_Raithel18() - ZAMS = np.array([-0.2, 0.2, 1.0, 7.0, 10.0, 14.0, 16.0, 18.0, 18.6, 22.0, 26.0, 28.0, 50.0, 61.0, 119.0, 121.0]) + ZAMS = np.array([-0.2, 0.2, 1.0, 7.0, 10.0, 14.0, 16.0, 18.0, 18.6, 22.0, 26.0, 28.0, 50.0, 61.0, 119.0, 121.0]) #3 invalid indices, 2 WDs, cannot make statements about #of BHs and NSs because the Raithel IFMR has some randomness output_array = Raithel.generate_death_mass(ZAMS) @@ -1052,3 +1215,62 @@ def test_Raithel18_IFMR_5(): assert len(WD_idx) == 2 , "There are not the right number of WDs for the Raithel18 IFMR" return + +def test_ResolvedCluster_random_state(): + """ + Test that the random state is properly set in ResolvedCluster, such that two clusters with the same seed have the same stars. 
+ """ + log_age = 6.7 + AKs = 2.7 + distance = 4000 + cluster_mass = 10**4. + iso_dir = f'{spisea_path}/tests/isochrones' + + evo = evolution.MergedBaraffePisaEkstromParsec() + atm_func = atmospheres.get_merged_atmosphere + red_law = reddening.RedLawNishiyama09() + filt_list = ['nirc2,J', 'nirc2,Kp'] + + iso = syn.IsochronePhot( + log_age, + AKs, + distance, + evo_model=evo, + atm_func=atm_func, + red_law=red_law, + filters=filt_list, + mass_sampling=10, + iso_dir=iso_dir + ) + + imf_limits = np.array([0.07, 0.5, 150]) + imf_powers = np.array([-1.3, -2.35]) + imf_multi = multiplicity.MultiplicityUnresolved() + imf_test = imf.IMF_broken_powerlaw(imf_limits, imf_powers, multiplicity=imf_multi) + + # Test that the same random seed produces the same cluster + imf_test.rng = np.random.default_rng(seed=42) + result1 = imf_test.generate_cluster(cluster_mass) + imf_test.rng = np.random.default_rng(seed=42) + result2 = imf_test.generate_cluster(cluster_mass) + np.testing.assert_equal(result1, result2) + + # Test that two clusters generated with the same seed have the same star systems and companions + cluster1 = syn.ResolvedCluster(iso, imf_test, cluster_mass, seed=42) + cluster2 = syn.ResolvedCluster(iso, imf_test, cluster_mass, seed=42) + np.testing.assert_array_equal(cluster1.star_systems, cluster2.star_systems) + + with open(f'{spisea_path}/tests/test_data/star_systems.pkl', 'rb') as file: + old_star_systems = pickle.load(file) + with open(f'{spisea_path}/tests/test_data/companions.pkl', 'rb') as file: + old_companion = pickle.load(file) + + for key in old_star_systems.colnames: + #np.testing.assert_array_equal(cluster1.star_systems[key], old_star_systems[key]) + assert np.all(np.isclose(cluster1.star_systems[key], old_star_systems[key], rtol=1e-05, atol=1e-08)) + + for key in old_companion.colnames: + # Require values are consistent within reasonable bounds + assert np.all(np.isclose(cluster1.companions[key], old_companion[key], rtol=1e-05, atol=1e-08)) + + return