diff --git a/.gitignore b/.gitignore index b51dfd6d59909a2a6d0f116b0742e88361a1028a..9328fc49fcd5e7d6877907bf420ec2c28845daf7 100755 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,8 @@ dummy*.txt # cmilos/* cmilos-fits/milos cmilos-fits/*.o +cmilos/milos +cmilos/*.o _RTE.npz *.npz .env @@ -36,4 +38,7 @@ input_jsons/feb_2k_2021_L1.txt input_jsons/nov_2020_L1_feb_flats.txt input_jsons/nov_2020_L1.txt src/create_input_json.py -*.pyc \ No newline at end of file +*.pyc +input_jsons/sep_2021_L1_west.json +input_jsons/sep_2021_L1_south_noflat.json +.ipynb_checkpoints/hrt_pipeline_notebook-checkpoint.ipynb diff --git a/LICENSE b/LICENSE index fd9c55f77540bd269a32d284bbef0bf64219b5b8..fea10a1c0c10a654aeb10cbcb3bc59939b17cb26 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2021 jonas.sinjan +Copyright (c) 2021 Jonas Sinjan Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index 89437818e638c52ef36844934cb2c04011451078..57ef7af6c3df59d7c113e3d3b71a18379ace3e5c 100755 --- a/README.md +++ b/README.md @@ -3,113 +3,132 @@ Reduction software for SO/PHI-HRT instrument on the ESA Solar Orbiter ## **PHI-HRT data reduction** 1. read in science data (+scaling) open path option + open for several scans at once -2. read in flat field (+scaling)- just accepts one flat field fits file +2. read in flat field (+scaling)- accepts only one flat field fits file 3. read in dark field (+scaling) -4. apply dark field -5. option to clean flat field with unsharp masking (Stokes V only) +4. apply dark field (to only science - assumes flat is dark fielded) +5. option to clean flat field with unsharp masking (Stokes QUV, UV or V) 6. normalise flat field 7. apply flat field 8. prefilter correction -9. read in field stop -10. apply field stop -11. demodulate with const demod matrix
+9. apply field stop
+10. demodulate with const demod matrix
a) option to output demod to fits file
-12. normalise to quiet sun
-13. calibration
+11. normalise to quiet sun
+12. calibration
a) cross talk correction
- (if required) b) ghost correction - **not implemented yet**
-14. rte inversion with cmilos
- a) output rte data products to fits file
+13. rte inversion with cmilos
+ a) output rte data products to fits files (a minimal input-JSON sketch follows this list)
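The steps above are switched on or off with keywords in the input JSON (see **CONFIGURATION** below). A minimal sketch, using only keywords visible in `example_input_json.py` later in this diff — the science/flat/dark file-path keys sit earlier in that file and are omitted, the output filename is a placeholder, and the step numbers in the comments are an indicative mapping:

```python
import json

# Sketch of the step toggles taken from example_input_json.py (not the full key set).
# The science/flat/dark file-path keys are required too but are not shown in this hunk.
input_dict = {
    'fs_c': True,            # step 9: apply field stop
    'demod': True,           # step 10: demodulate with const demod matrix
    'norm_stokes': True,     # step 11: normalise to quiet sun
    'ItoQUV': False,         # step 12a: cross talk correction (I -> QUV)
    'ghost_c': True,         # ghost correction option
    'rte': False,            # step 13: 'RTE', 'CE' or 'CE+RTE' to run the inversion
    'cmilos_fits': False,    # use cmilos with .fits IO
    'out_dir': './',
    'out_stokes_file': False,
}

json.dump(input_dict, open("./input_jsons/my_run.json", "w"))  # placeholder filename
```

`run.py` then passes this JSON to `hrt_pipe` (step 6 of **SETUP**).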
-#### **CONFIGURATION** -Any and all steps can be turned on or off as you wish using the keywords in the input json file passed to the `phihrt_pipe` function - -See `/input_jsons/create_input_json.py` for example to create json file - -## **DOWNLOAD INPUT FILES** - -This needs the 'dataproc' environment - see **SETUP** - -EITHER: download from the PHI Image Database (recommended): https://www2.mps.mpg.de/services/proton/phi/imgdb/ -Suggested filters for HRT science data: -- **KEYWORD DETECTOR = 'HRT'**
-- **Filename\* like \*L1_phi-hrt-ilam_date\*** - -To download via the command line (eg: if you want to save the files on a server and not locally) -``` -wget --user yourusername --password yourpassword file_web_address -gunzip file.gz -``` -Gunzip used to unpack the .gz to the file you want
+## **SETUP** -Can also use `/download/download_from_db.py` to perform multi download from database +Operating System Required: Linux -Instructions: - 1. From the database find the files you wish to download - 2. Copy the 'Download File List' that the database will generate - 3. Paste into the `file_names.txt` file - 4. Create a `.env` file with your MPS windows login:
- ```text= - USER_NAME = - PHIDATAPASSWORD = - ``` - 5. Set the target download folder in the `download_from_db.py` file - 6. Run the file (will require dotenv python module to be installed) +############################################################## -OR : use `download_files.py` to download images from the attic repository: https://www2.mps.mpg.de/services/proton/phi/fm/attic/ +If running on bob MPS server and you have access +### RUN bash script - setup.sh - to skip the first 4 steps +############################################################## -## **SETUP** -IMPORTANT -Operating System: Linux +OTHERWISE: -1. Compile milos in both cmilos-fits folder and cmilos folder (cmilos-fits faster): +1. Compile milos in both cmilos-fits folder and cmilos folder (cmilos-fits): ```bash make clear make ``` -2. FOR P-MILOS - multiple steps required - ASSUMING RUNNING ON **BOB** SERVER -- load openmpi_gcc -- load fftw +2. P-MILOS - multiple steps required - ASSUMING RUNNING ON **BOB** SERVER +- module load openmpi_gcc +- module load fftw - load cfitsio (or export through .bashrc) -COMPILE ```bash make clean make ``` -3. Setup virtual environment from requirements.txt +3. Setup virtual environment from requirements.txt with PIP or CONDA using pip - REQUIRES PYTHON >= 3.6 ```bash pip install -r requirements.txt ``` -using conda (Anaconda3) - creates virtual environment called 'dataproc' +OR using conda (Anaconda3) - creates virtual environment called 'hrt_pipeline_env' ```bash conda env create -f environment.yml ``` -4. Activate 'dataproc' +4. Activate 'hrt_pipeline_env' ```bash -source activate dataproc +source activate hrt_pipeline_env +``` +############################################################## +### OPTIONAL - LOAD JUPYTER NOTEBOOK `hrt_pipeline_notebook.ipynb` WHICH HAS ALL THE FOLLOWING STEPS + +(once environment loaded and activated - need the environment to start and use all the steps in the notebook) + +First let jupyter notebook know the environment exists: + +``` +python -m ipykernel install --user --name hrt_pipeline_env ``` +Now start the notebook
+############################################################## 5. Download files - see **DOWNLOAD INPUT FILES** Section -5. Genetate json files with the science, dark and flat you desire to reduce +5. Generate json files with the science, dark and flat you desire to reduce, ensure that all keywords from in the example are used (for limb images you must know the limb in the FOV - can only reduce multiple limb files at the same runtime if all the same limb in FOV) -6. Make sure the correct input.json file is being given to `hrt_pipe` in ```run.py``` +6. Make sure the correct input.json file is given to `hrt_pipe` in ```run.py``` 7. Execute ```run.py``` ```bash python run.py ``` + +## **CONFIGURATION** + +Any and all steps can be turned on or off as you wish using the keywords in the input json file passed to the `phihrt_pipe` function + +See `/input_jsons/create_input_json.py` for example to create json file + +## **DOWNLOAD INPUT FILES** + +This needs the 'dataproc' environment - see **SETUP** + +EITHER: download from the PHI Image Database (recommended): https://www2.mps.mpg.de/services/proton/phi/imgdb/ + +Suggested filters for HRT science data: +- **KEYWORD DETECTOR = 'HRT'**
+- **Filename\* like \*L1_phi-hrt-ilam_date\*** + +To download via the command line (eg: if you want to save the files on a server and not locally) +``` +wget --user yourusername --password yourpassword file_web_address +gunzip file.gz +``` +Gunzip used to unpack the .gz to the file you want
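If preferred, the same fetch-and-unpack can be scripted in Python — an illustrative sketch only, not a repository helper (those are `download_files.py` and `/download/download_from_db.py` below). The file URL is a placeholder, the credentials are your MPS login, and it assumes the `requests` package is available and that the database accepts HTTP basic auth, as the `wget --user/--password` call implies:

```python
import gzip
import requests

# Placeholder file path under the image database URL from this README.
url = 'https://www2.mps.mpg.de/services/proton/phi/imgdb/placeholder_file.fits.gz'
resp = requests.get(url, auth=('yourusername', 'yourpassword'))
resp.raise_for_status()

# Equivalent of `gunzip file.gz`: write out the decompressed FITS file.
with open('placeholder_file.fits', 'wb') as f:
    f.write(gzip.decompress(resp.content))
```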
+ +Can also use `/download/download_from_db.py` to perform multi download from database: + +Instructions: + 1. From the database find the files you wish to download + 2. Copy the 'Download File List' that the database will generate + 3. Paste into the `file_names.txt` file + 4. Create a `.env` file with your MPS Windows login:
+ ```text= + USER_NAME = + PHIDATAPASSWORD = + ``` + 5. Set the target download folder in the `download_from_db.py` file + 6. Run the file (will require dotenv python module to be installed - included in `hrt_pipeline_env`) + +OR : use `download_files.py` to download images from the attic repository: https://www2.mps.mpg.de/services/proton/phi/fm/attic/ ## **OUTPUT** #### **Stokes File** @@ -181,4 +200,5 @@ Daniele Calchetti - Max Planck Institute for Solar System Research, Goettingen, - SPGPylibs for the foundation, from which it was expanded upon - CMILOS: RTE INVERSION C code for SO-PHI (based on the IDL code MILOS by D. Orozco) Author: juanp (IAA-CSIC) -- CMILOS-FITS: RTE INVERSION C code with fits interace, fits interfacing developed by Philipp Loeschl (MPS) \ No newline at end of file +- CMILOS-FITS: CMILOS with fits interace, fits interfacing developed by Philipp Loeschl (MPS) +- P-MILOS: Parallellised RTE INVERSION C code Authors: Manuel Cabrera, Juan P. Cobos, Luis Bellot Rubio (IAA-CSIC) \ No newline at end of file diff --git a/cmilos-fits/defines.h b/cmilos-fits/defines.h index 19a2626ff26f4631519bd03579d4648caac381bb..9d74070ff24ded5631c227a12924a0d8a35e8731 100755 --- a/cmilos-fits/defines.h +++ b/cmilos-fits/defines.h @@ -17,7 +17,7 @@ //--------------------------------------------------------- // USER CONFIGURATION -#define CENTRAL_WL 6173.34 +#define CENTRAL_WL 6173.335400 //############################################## @@ -33,15 +33,40 @@ #define LIMITE_INFERIOR_PRECISION_SINCOS pow(2.0,-39) //############################################# -//INITIAL MODEL -#define INITIAL_MODEL_B 1200 -#define INITIAL_MODEL_GM 170 -#define INITIAL_MODEL_AZI 20 -#define INITIAL_MODEL_ETHA0 8 -#define INITIAL_MODEL_LAMBDADOPP 0.04 //en A -#define INITIAL_MODEL_AA 0.18 -#define INITIAL_MODEL_VLOS 0.05 // Km/s -#define INITIAL_MODEL_S0 0.35 +//INITIAL MODEL +// #define INITIAL_MODEL_B 1200 +// #define INITIAL_MODEL_GM 170 +// #define INITIAL_MODEL_AZI 20 +// #define INITIAL_MODEL_ETHA0 8 +// #define INITIAL_MODEL_LAMBDADOPP 0.04 //en A +// #define INITIAL_MODEL_AA 0.18 +// #define INITIAL_MODEL_VLOS 0.05 // Km/s +// #define INITIAL_MODEL_S0 0.35 +// #define INITIAL_MODEL_S1 0.85 + +// RTE init model config ONBOARD +// Inv Iterations = 15 +// Initial Model Continuum Absoprtion = 12.0000 // INITIAL_MODEL_ETHA0 +// Initial Model Vector Magnetic Field Strength = 1200.00000 // INITIAL_MODEL_B +// Initial Model Line-Of-Sight Velocity = 0.05000 // INITIAL_MODEL_VLOS +// Initial Model Doppler Width Of Line = 0.05000 // INITIAL_MODEL_LAMBDADOPP +// Initial Model Damping Parameter = 0.05000 // INITIAL_MODEL_AA +// Initial Model Vector Magnetic Field Inclination = 170.00000 // INITIAL_MODEL_GM +// Initial Model Vector Magnetic Field Azimuth = 25.00000 // INITIAL_MODEL_AZI +// Initial Model Source Function Ordinate At Origin = 0.30000 // INITIAL_MODEL_S0 +// Initial Model Source Function Slope = 0.80000 // INITIAL_MODEL_S1 +// Wavelength Setting Initial Sampling Wavelength = 6173.20117 +// Wavelength Setting Spectral Line Step = 0.07000 +// Wavelength Setting Wavelength Of Line Continuum = 6173.04117 (blue) 6173.64117 (red) + +#define INITIAL_MODEL_B 400 +#define INITIAL_MODEL_GM 30 +#define INITIAL_MODEL_AZI 120 +#define INITIAL_MODEL_ETHA0 3 +#define INITIAL_MODEL_LAMBDADOPP 0.025 //en A +#define INITIAL_MODEL_AA 1.0 +#define INITIAL_MODEL_VLOS 0.01 // Km/s +#define INITIAL_MODEL_S0 0.15 #define INITIAL_MODEL_S1 0.85 diff --git a/cmilos-fits/lib.c b/cmilos-fits/lib.c index 
8450760a6949375ce64eb4b8674163a7a16baea6..47615dd205fb4b477838be9c394a35f9a4326ba2 100755 --- a/cmilos-fits/lib.c +++ b/cmilos-fits/lib.c @@ -81,8 +81,8 @@ double fchisqr(PRECISION * spectra,int nspectro,PRECISION *spectro,PRECISION *w, dif=spectra[i+nspectro*j]-spectro[i+nspectro*j]; opa+= (dif*dif); } - // TOT+=((w[j]*opa)/(sig[j]*sig[j])); - TOT+= opa;///(sig[j]*sig[j]); + TOT+=((w[j]*opa)/(sig[j]*sig[j])); + //TOT+= opa;///(sig[j]*sig[j]); } //return TOT/15; diff --git a/cmilos-fits/milos b/cmilos-fits/milos new file mode 100644 index 0000000000000000000000000000000000000000..5c2ff54a991c7742470af8d8af89dd43fef2fb76 Binary files /dev/null and b/cmilos-fits/milos differ diff --git a/cmilos-fits/milos.c b/cmilos-fits/milos.c index a089d77f98b222fe55b4b74d72df66c62aff489a..b4ebb58d1040a0461f3002f31c55976bdea35017 100644 --- a/cmilos-fits/milos.c +++ b/cmilos-fits/milos.c @@ -138,7 +138,7 @@ int main(int argc,char **argv){ double slight; double toplim; int miter; - PRECISION weight[4]={1.,1.,1.,1.}; + PRECISION weight[4]={1.,10.,10.,4.}; //{1.,1.,1.,1.}; int nweight; // CONFIGURACION DE PARAMETROS A INVERTIR @@ -688,12 +688,12 @@ int main_sequential(int argc,char **argv){ * spectra : IQUV por filas, longitud ny=nlambda */ -int lm_mils(Cuantic *cuantic,double * wlines,int nwlines,double *lambda,int nlambda,PRECISION *spectro,int nspectro, +int lm_mils(Cuantic *cuantic,double * wlines,int nwlines,double *lambda,int nlambda,PRECISION *spectro,int nspectro, Init_Model *initModel, PRECISION *spectra,int err,double *chisqrf, int *iterOut, - double slight, double toplim, int miter, PRECISION * weight,int nweight, int * fix, + double slight, double toplim, int miter, PRECISION * weight,int nweight, int * fix, PRECISION *sigma, double filter, double ilambda, double noise, double *pol, double getshi,int triplete) -{ +{ int * diag; int iter; @@ -706,7 +706,7 @@ int lm_mils(Cuantic *cuantic,double * wlines,int nwlines,double *lambda,int nlam double chisqr,ochisqr; int nspectra,nd_spectra,clanda,ind; Init_Model model; - + //Genera aleatoriamente los componentes del vector tiempo(semi); //semilla para generar los valores de la lista de forma aleatoria con srand srand((char)semi); @@ -718,19 +718,19 @@ int lm_mils(Cuantic *cuantic,double * wlines,int nwlines,double *lambda,int nlam nfree=CalculaNfree(spectro,nspectro); //printf("\n nfree! 
%d:\n",nfree); //exit(-1); - - + + if(nfree==0){ return -1; //'NOT ENOUGH POINTS' } - + flambda=ilambda; - + if(fix==NULL){ fixed=calloc(NTERMS,sizeof(double)); for(i=0;iB); + printf("%f\n",initModel->gm); + printf("%f\n",initModel->az); + printf("%f \n",initModel->eta0); + printf("%f\n",initModel->dopp); + printf("%f\n",initModel->aa); + printf("%f\n",initModel->vlos); //km/s + //printf("alfa \t:%f\n",initModel.alfa); //stay light factor + printf("%f\n",initModel->S0); + printf("%f\n",initModel->S1); + printf("%.10e\n",ochisqr); +*/ }while(iter<=miter); // && !clanda); @@ -848,37 +884,39 @@ int lm_mils(Cuantic *cuantic,double * wlines,int nwlines,double *lambda,int nlam *chisqrf=ochisqr; - + //For debugging -> flambda en lugar de AA + //initModel->aa = flambda; + if(fix==NULL) free(fixed); return 1; -} +} int CalculaNfree(PRECISION *spectro,int nspectro){ int nfree,i,j; nfree=0; - - /* - for(j=0;j<4*nspectro;j++){ - if(spectro[j]!=0.0){ - nfree++; - } - } - nfree=nfree-NTERMS;//NTERMS; - */ - + + + // for(j=0;j<4*nspectro;j++){ + // if(spectro[j]!=0.0){ + // nfree++; + // } + // } + // nfree=nfree-NTERMS;//NTERMS; + + nfree = (nspectro*NPARMS) - NTERMS; - + return nfree; } /* -* +* * * Cálculo de las estimaciones clásicas. * @@ -891,7 +929,7 @@ int CalculaNfree(PRECISION *spectro,int nspectro){ * * * -* @Author: Juan Pedro Cobos Carrascosa (IAA-CSIC) +* @Author: Juan Pedro Cobos Carrascosa (IAA-CSIC) * jpedro@iaa.es * @Date: Nov. 2011 * @@ -903,59 +941,244 @@ void estimacionesClasicas(PRECISION lambda_0,double *lambda,int nlambda,PRECISIO PRECISION L,m,gamma, gamma_rad,tan_gamma,maxV,minV,C,maxWh,minWh; int i,j; + + //Es necesario crear un lambda en FLOAT para probar como se hace en la FPGA + PRECISION *lambda_aux; + lambda_aux= (PRECISION*) calloc(nlambda,sizeof(PRECISION)); + PRECISION lambda0,lambda1,lambda2,lambda3,lambda4; + + lambda0 = 6.1732012e+3 + 0; // RTE_WL_0 + lambda1 = lambda0 + 0.070000000; //RTE_WL_STEP + lambda2 = lambda1 + 0.070000000; + lambda3 = lambda2 + 0.070000000; + lambda4 = lambda3 + 0.070000000; + + lambda_aux[0]=lambda0; + lambda_aux[1]=lambda1; + lambda_aux[2]=lambda2; + lambda_aux[3]=lambda3; + lambda_aux[4]=lambda4; + + //Sino queremos usar el lambda de la FPGA + for(i=0;i1e-15) LM_lambda_plus = x / y; else LM_lambda_plus = 0; - + // LM_lambda_plus = x / y; x=0; y=0; for(i=0;i1e-15) LM_lambda_minus = x / y; else LM_lambda_minus = 0; + // LM_lambda_minus = x / y; C = (CTE4_6_13 * lambda_0 * lambda_0 * cuantic->GEFF); beta_B = 1 / C; - Blos = beta_B * ((LM_lambda_plus - LM_lambda_minus)/2); - Vlos = ( VLIGHT / lambda_0) * ((LM_lambda_plus + LM_lambda_minus)/2); - - - - Blos=Blos ; //factor de correción x campo debil - Vlos = Vlos ; //factor de correción ... 
+ // printf("beta_B %20.12e \n",beta_B/2); + // printf("beta_v %20.12e \n",( VLIGHT / (lambda_0))/2); + // printf("cuantic->GEFF %f \n",cuantic->GEFF); + // exit(-1); - //inclinacion + Blos = beta_B * ((LM_lambda_plus - LM_lambda_minus)/2); + Vlos = ( VLIGHT / (lambda_0)) * ((LM_lambda_plus + LM_lambda_minus)/2); + + + //------------------------------------------------------------------------------------------------------------ + // //Para test del modo "non-polarimetric" --> Calculo de Vlos solo con I + + // x=0; + // y=0; + // for(i=0;i1e-12) + // // LM_lambda_plus = x / y; + // // else + // // LM_lambda_plus = 0; + // + // Vlos = ( VLIGHT / (lambda_0)) * ((LM_lambda_plus)); + + //------------------------------------------------------------------------------------------------------------ + + + //------------------------------------------------------------------------------------------------------------ + // //Para test del modo "Longitudinal" --> Calculo de Blos y Vlos solo con I+V y I-V + // //Los datos vienen en la forma de siempre salvo que spectroI contiene I+V y spectroV contiene I-V + // + // //Para usar los mismos perfiles de siempre es necesario tunearlos y convertirlos a I+V y I-V + // for(i=0;i1e-12) + // // LM_lambda_plus = x / y; + // // else + // // LM_lambda_plus = 0; + // + // + // x=0; + // y=0; + // for(i=0;i1e-12) + // // LM_lambda_minus = x / y; + // // else + // // LM_lambda_minus = 0; + // + // + // C = ((float)CTE4_6_13 * (float)lambda_0 * (float)lambda_0 * (float)cuantic->GEFF); + // beta_B = 1 / C; + // + // // printf("beta_B %20.12e \n",(float)beta_B/2); + // // printf("beta_B (no float cast) %20.12e \n",((1/(CTE4_6_13 * lambda_0 * lambda_0 * cuantic->GEFF))/2)); + // // printf("beta_v %20.12e \n",(float)(( (float)VLIGHT / ((float)lambda_0))/2)); + // // printf("cuantic->GEFF %f \n",cuantic->GEFF); + // // exit(-1); + // + // Blos = beta_B * ((LM_lambda_plus - LM_lambda_minus)/2); + // Vlos = ( VLIGHT / (lambda_0)) * ((LM_lambda_plus + LM_lambda_minus)/2); + // + // // printf("spectroI_0 %20.7e \n",(float)spectroI[0]); + // // printf("spectroI_1 %20.7e \n",(float)spectroI[1]); + // // printf("spectroI_2 %20.7e \n",(float)spectroI[2]); + // // printf("spectroI_3 %20.7e \n",(float)spectroI[3]); + // // printf("spectroI_4 %20.7e \n",(float)spectroI[4]); + // // printf("spectroI_5 %20.7e \n",(float)spectroI[5]); + // // printf("Blos %20.12e \n",(float)Blos); + // // printf("Vlos %20.12e \n",(float)Vlos); + // // + // // exit(-1); + //------------------------------------------------------------------------------------------------------------ + + //------------------------------------------------------------------------------------------------------------ + // //Para probar fórmulación propuesta por D. Orozco (Junio 2017) + //La formula es la 2.7 que proviene del paper: + // Diagnostics for spectropolarimetry and magnetography by Jose Carlos del Toro Iniesta and Valent´ýn Mart´ýnez Pillet + //el 0.08 Es la anchura de la línea en lugar de la resuloción del etalón. + + //Vlos = ( 2*(VLIGHT)*0.08 / (PI*lambda_0)) * atan((spectroI[0]+spectroI[1]-spectroI[3]-spectroI[4])/(spectroI[0]-spectroI[1]-spectroI[3]+spectroI[4])); + + //------------------------------------------------------------------------------------------------------------ + + + Blos=Blos*1 ; //factor de correción x campo debil + Vlos = Vlos * 1 ; //factor de correción ... + + //inclinacion x = 0; y = 0; for(i=0;i 1e-7 ? x : 0; + // y = fabs(y) > 1e-7 ? 
y : 0; + + tan_gamma = fabs(sqrtf(x/y)); + + // tan_gamma = fabs(tan_gamma) > 1e-7 ? tan_gamma : 0; + gamma_rad = atan(tan_gamma); //gamma en radianes - - gamma = gamma_rad * (180/ PI); //gamma en grados + // if(sqrt(y)<1e-12) + // gamma_rad = PI/2; + // else + // gamma_rad = atan2(sqrt(x),sqrt(y)); //gamma en radianes + + //gamma_rad = atan(sqrtf(x),y)); //gamma en radianes + + // gamma_rad = fabs(gamma_rad) > 1e-7 ? gamma_rad : 0; + + gamma = gamma_rad * (180/ PI); //gamma en grados - //correccion + //correccion //utilizamos el signo de Blos para ver corregir el cuadrante + PRECISION gamma_out = gamma; + if (Blos<0) - gamma = 180-gamma; - - + gamma = (180)-gamma; + + + //azimuth PRECISION tan2phi,phi; int muestra; - + if(nlambda==6) muestra = CLASSICAL_ESTIMATES_SAMPLE_REF; else muestra = nlambda*0.75; - - + + tan2phi=spectroU[muestra]/spectroQ[muestra]; -// printf("tan2phi : %f \n",tan2phi); + // printf("tan2phi : %f \n",tan2phi); + // printf("%.10e \n",spectroU[muestra]); + // printf("%.10e \n",spectroQ[muestra]); - phi= (atan(tan2phi)*180/PI) / 2; -// printf("atan : %f \n",phi*2); - + phi= (atan(tan2phi)*180/PI) / 2; //atan con paso a grados + + //printf("atan : %f \n",phi*2); + + // printf("%.10e \n",atan(tan2phi)); + if(spectroU[muestra] > 0 && spectroQ[muestra] > 0 ) phi=phi; - else + else if (spectroU[muestra] < 0 && spectroQ[muestra] > 0 ) phi=phi + 180; else if (spectroU[muestra] < 0 && spectroQ[muestra] < 0 ) phi=phi + 90; - else + else if (spectroU[muestra] > 0 && spectroQ[muestra]< 0 ) phi=phi + 90; - + // printf("%.10e \n",phi); //printf("Blos : %f \n",Blos); //printf("vlos : %f \n",Vlos); @@ -1018,25 +1265,32 @@ void estimacionesClasicas(PRECISION lambda_0,double *lambda,int nlambda,PRECISIO PRECISION B_aux; - - B_aux = fabs(Blos/cos(gamma_rad));//*2; - - if(Vlos < (-5)) - Vlos= -5; - if(Vlos >(5)) - Vlos=(5); - - if(phi< 0) - phi = 180 + (phi); - if(phi > 180){ - phi = phi -180.0; - } - + B_aux = fabs(Blos/cos(gamma_rad)) * 2; // 2 factor de corrección + + //Vlos = Vlos * 1.5; + if(Vlos < (-20)) + Vlos= -20; + if(Vlos >(20)) + Vlos=(20); + + // if(phi< 0) + // phi = 180 + (phi); + // if(phi > 180){ + // phi = phi -180.0; + // } + + // printf("%.10e \n",phi); + initModel->B = (B_aux>4000?4000:B_aux); initModel->vlos=Vlos;//(Vlos*1.5);//1.5; initModel->gm=gamma; initModel->az=phi; + initModel->S0= Blos; + + + //Liberar memoria del vector de lambda auxiliar + free(lambda_aux); } @@ -1056,35 +1310,46 @@ void AplicaDelta(Init_Model *model,PRECISION * delta,int * fixed,Init_Model *mod if(fixed[0]){ modelout->eta0=model->eta0-delta[0]; // 0 - } + } if(fixed[1]){ if(delta[1]< -800) //300 delta[1]=-800; else if(delta[1] >800) delta[1]=800; - modelout->B=model->B-delta[1];//magnetic field + modelout->B=model->B-delta[1];//magnetic field } if(fixed[2]){ - - if(delta[2]>5) - delta[2] = 5; - if(delta[2]<-5) - delta[2] = -5; + if(delta[2]>2) + delta[2] = 2; + + if(delta[2]<-2) + delta[2] = -2; modelout->vlos=model->vlos-delta[2]; } - if(fixed[3]) + + if(fixed[3]){ + + if(delta[3]>1e-2) + delta[3] = 1e-2; + else + if(delta[3]<-1e-2) + delta[3] = -1e-2; + modelout->dopp=model->dopp-delta[3]; + } + if(fixed[4]) modelout->aa=model->aa-delta[4]; + if(fixed[5]){ - if(delta[5]< -45) //15 - delta[5]=-45; + if(delta[5]< -15) //15 + delta[5]=-15; else - if(delta[5] > 45) - delta[5]=45; + if(delta[5] > 15) + delta[5]=15; modelout->gm=model->gm-delta[5]; //5 } @@ -1093,7 +1358,7 @@ void AplicaDelta(Init_Model *model,PRECISION * delta,int * fixed,Init_Model *mod delta[6]=-15; else if(delta[6] > 15) 
- delta[6]=15; + delta[6]= 15; modelout->az=model->az-delta[6]; } @@ -1120,14 +1385,14 @@ int mil_svd(PRECISION *h,PRECISION *beta,PRECISION *delta){ static PRECISION v2[TAMANIO_SVD][TAMANIO_SVD],w2[TAMANIO_SVD],v[NTERMS*NTERMS],w[NTERMS]; static PRECISION h1[NTERMS*NTERMS],h_svd[TAMANIO_SVD*TAMANIO_SVD]; static PRECISION aux[NTERMS*NTERMS]; - int i,j,j1; + int i,j; // static double aux2[NTERMS*NTERMS]; static PRECISION aux2[NTERMS]; int aux_nf,aux_nc; PRECISION factor,maximo,minimo; int posi,posj; - epsilon= 1e-15; + epsilon= 1e-12; top=1.0; factor=0; @@ -1139,29 +1404,45 @@ int mil_svd(PRECISION *h,PRECISION *beta,PRECISION *delta){ for(j=0;jmaximo){ + if(fabs(h[j])>maximo){ maximo=fabs(h[j]); } } factor=maximo; - + + //printf("maximo : %.12e \n",maximo); + //exit(-1); + + if(!NORMALIZATION_SVD) factor = 1; for(j=0;j epsilon) ? (1/waux[i]) : 0.0); + aux2[i]= aux2[i]*((fabs(waux[i]) > epsilon) ? (1/waux[i]): 0.0);//((waux[i]>0)?(1/epsilon):(-1/epsilon))); //(1/waux[i]) : 0);// +// aux2[i]= aux2[i]*((fabs(waux[i]) > epsilon) ? (1/waux[i]): (1/epsilon));//((waux[i]>0)?(1/epsilon):(-1/epsilon))); //(1/waux[i]) : 0);// } multmatrix(vaux,NTERMS,NTERMS,aux2,NTERMS,1,delta,&aux_nf,&aux_nc); +/* + printf("\n"); + printf("#################################### delta \n"); + int j1; + for(j1=0;j1gm < 0) model->gm = -(model->gm); if(model->gm > 180) model->gm =180-(((int)floor(model->gm) % 180)+(model->gm-floor(model->gm)));//180-((int)model->gm % 180);*/ - - - //Magnetic field + + //Magnetic field if(model->B < 0){ - model->B = -(model->B); - model->gm = 180.0 -(model->gm); - } + //model->B = 190; + model->B = -(model->B); + model->gm = 180.0 -(model->gm); + } if(model->B > 5000) - model->B= 5000; - + model->B= 5000; + + //Inclination if(model->gm < 0) model->gm = -(model->gm); - if(model->gm > 180) - model->gm = 360.0 - model->gm; + if(model->gm > 180){ + model->gm = 360.0 - model->gm; + // model->gm = 179; //360.0 - model->gm; + } //azimuth if(model->az < 0) - model->az= 180 + (model->az); + model->az= 180 + (model->az); //model->az= 180 + (model->az); if(model->az > 180){ model->az =model->az -180.0; + // model->az = 179.0; } //RANGOS //Eta0 if(model->eta0 < 0.1) model->eta0=0.1; - if(model->eta0 >8) - model->eta0=8; - + + // if(model->eta0 >8) + // model->eta0=8; + if(model->eta0 >2500) //idl 2500 + model->eta0=2500; + //velocity - if(model->vlos < (-5)) //20 - model->vlos= (-5); - if(model->vlos >5) - model->vlos=5; + if(model->vlos < (-20)) //20 + model->vlos= (-20); + if(model->vlos >20) + model->vlos=20; //doppler width ;Do NOT CHANGE THIS if(model->dopp < 0.0001) model->dopp = 0.0001; - - if(model->dopp > 0.800) - model->dopp = 0.800; - - if(model->aa < 0.00001) - model->aa = 0.00001; - if(model->aa > 10) + if(model->dopp > 1.6) // idl 0.6 + model->dopp = 1.6; + + + if(model->aa < 0.0001) // idl 1e-4 + model->aa = 0.0001; + if(model->aa > 10) //10 model->aa = 10; - + //S0 if(model->S0 < 0.0001) model->S0 = 0.0001; - if(model->S0 > 2.000) - model->S0 = 2.000; + if(model->S0 > 1.500) + model->S0 = 1.500; //S1 if(model->S1 < 0.0001) model->S1 = 0.0001; if(model->S1 > 2.000) model->S1 = 2.000; - + //macroturbulence if(model->mac < 0) model->mac = 0; if(model->mac > 4) model->mac = 4; - + //filling factor /* if(model->S1 < 0) model->S1 = 0; if(model->S1 > 1) model->S1 = 1;*/ - + return 1; } - - - - void spectral_synthesis_convolution(){ - + int i; int nlambda=NLAMBDA; - + //convolucionamos los perfiles IQUV (spectra) if(INSTRUMENTAL_CONVOLUTION){ - + PRECISION Ic; - - - + + 
if(!INSTRUMENTAL_CONVOLUTION_INTERPOLACION){ //convolucion de I Ic=spectra[nlambda-1]; - + for(i=0;i> milos NLAMBDA MAX_ITER CLASSICAL_ESTIMATES [FWHM DELTA NPOINTS] profiles_file.txt > output.txt +// +// NLAMBDA number of lambda of input profiles +// MAX_ITER of inversion +// CLASSICAL_ESTIMATES use classical estimates? 1 yes, 0 no +// [FWHM DELTA NPOINTS] use convolution with a gaussian? if the tree parameteres are defined yes, else no. Units in A. NPOINTS has to be odd. +// profiles_file.txt name of input profiles file +// output.txt name of output file +// +// + +#include "defines.h" + +#include "nrutil.h" +#include "svdcmp.c" +#include "svdcordic.c" +//#include "tridiagonal.c" +#include "convolution.c" + +#include + +float pythag(float a, float b); + +void weights_init(int nlambda,double *sigma,PRECISION *weight,int nweight,PRECISION **wOut,PRECISION **sigOut,double noise); + +int check(Init_Model *Model); +int lm_mils(Cuantic *cuantic,double * wlines,int nwlines,double *lambda,int nlambda,PRECISION *spectro,int nspectro, + Init_Model *initModel, PRECISION *spectra,int err,double *chisqrf, int *iterOut, + double slight, double toplim, int miter, PRECISION * weight,int nweight, int * fix, + PRECISION *sigma, double filter, double ilambda, double noise, double *pol, + double getshi,int triplete); + +int mil_svd(PRECISION *h,PRECISION *beta,PRECISION *delta); + +int multmatrixIDL(double *a,int naf,int nac, double *b,int nbf,int nbc,double **resultOut,int *fil,int *col); +int multmatrix_transposeD(double *a,int naf,int nac, double *b,int nbf,int nbc,double *result,int *fil,int *col); +int multmatrix3(PRECISION *a,int naf,int nac,double *b,int nbf,int nbc,double **result,int *fil,int *col); +double * leeVector(char *nombre,int tam); +double * transpose(double *mat,int fil,int col); + +double total(double * A, int f,int c); +int multmatrix(PRECISION *a,int naf,int nac, PRECISION *b,int nbf,int nbc,PRECISION *result,int *fil,int *col); +int multmatrix2(double *a,int naf,int nac, PRECISION *b,int nbf,int nbc,double **result,int *fil,int *col); + +int covarm(PRECISION *w,PRECISION *sig,int nsig,PRECISION *spectro,int nspectro,PRECISION *spectra,PRECISION *d_spectra, + PRECISION *beta,PRECISION *alpha); + +int CalculaNfree(PRECISION *spectro,int nspectro); + +double fchisqr(PRECISION * spectra,int nspectro,PRECISION *spectro,PRECISION *w,PRECISION *sig,double nfree); + +void AplicaDelta(Init_Model *model,PRECISION * delta,int * fixed,Init_Model *modelout); +void FijaACeroDerivadasNoNecesarias(PRECISION * d_spectra,int *fixed,int nlambda); +void reformarVector(PRECISION **spectro,int neje); +void spectral_synthesis_convolution(); +void response_functions_convolution(); + +void estimacionesClasicas(PRECISION lambda_0,double *lambda,int nlambda,PRECISION *spectro,Init_Model *initModel); + +#define tiempo(ciclos) asm volatile ("rdtsc \n\t":"=A"(ciclos)) +long long int c1,c2,cd,semi,c1a,c2a,cda; //variables de 64 bits para leer ciclos de reloj +long long int c1total,c2total,cdtotal,ctritotal; + + +Cuantic* cuantic; // Variable global, está hecho así, de momento,para parecerse al original +char * concatena(char *a, int n,char*b); + +PRECISION ** PUNTEROS_CALCULOS_COMPARTIDOS; +int POSW_PUNTERO_CALCULOS_COMPARTIDOS; +int POSR_PUNTERO_CALCULOS_COMPARTIDOS; + +PRECISION * gp1,*gp2,*dt,*dti,*gp3,*gp4,*gp5,*gp6,*etai_2; + +//PRECISION gp4_gp2_rhoq[NLAMBDA],gp5_gp2_rhou[NLAMBDA],gp6_gp2_rhov[NLAMBDA]; +PRECISION *gp4_gp2_rhoq,*gp5_gp2_rhou,*gp6_gp2_rhov; + + +PRECISION 
*dgp1,*dgp2,*dgp3,*dgp4,*dgp5,*dgp6,*d_dt; +PRECISION * d_ei,*d_eq,*d_eu,*d_ev,*d_rq,*d_ru,*d_rv; +PRECISION *dfi,*dshi; +PRECISION CC,CC_2,sin_gm,azi_2,sinis,cosis,cosis_2,cosi,sina,cosa,sinda,cosda,sindi,cosdi,sinis_cosa,sinis_sina; +PRECISION *fi_p,*fi_b,*fi_r,*shi_p,*shi_b,*shi_r; +PRECISION *etain,*etaqn,*etaun,*etavn,*rhoqn,*rhoun,*rhovn; +PRECISION *etai,*etaq,*etau,*etav,*rhoq,*rhou,*rhov; +PRECISION *parcial1,*parcial2,*parcial3; +PRECISION *nubB,*nupB,*nurB; +PRECISION **uuGlobalInicial; +PRECISION **HGlobalInicial; +PRECISION **FGlobalInicial; +PRECISION *perfil_instrumental; +PRECISION * G; +int FGlobal,HGlobal,uuGlobal; + +PRECISION *d_spectra,*spectra; + +//Number of lambdas in the input profiles +int NLAMBDA = 0; + +//Convolutions values +int NMUESTRAS_G = 0; +PRECISION FWHM = 0; +PRECISION DELTA = 0; + +int INSTRUMENTAL_CONVOLUTION = 0; +int CLASSICAL_ESTIMATES = 0; +int RFS = 0; + + +int main(int argc,char **argv){ + + double * wlines; + int nwlines; + double *lambda; + int nlambda; + PRECISION *spectro; + int ny,i,j; + Init_Model initModel; + int err; + double chisqrf; + int iter; + double slight; + double toplim; + int miter; + PRECISION weight[4]={1.,1.,1.,1.}; + int nweight; + + // CONFIGURACION DE PARAMETROS A INVERTIR + //INIT_MODEL=[eta0,magnet,vlos,landadopp,aa,gamma,azi,B1,B2,macro,alfa] + int fix[]={1.,1.,1.,1.,1.,1.,1.,1.,1.,0.,0.}; //Parametros invertidos + //---------------------------------------------- + + double sigma[NPARMS]; + double vsig; + double filter; + double ilambda; + double noise; + double *pol; + double getshi; + + double dat[7]={CUANTIC_NWL,CUANTIC_SLOI,CUANTIC_LLOI,CUANTIC_JLOI,CUANTIC_SUPI,CUANTIC_LUPI,CUANTIC_JUPI}; + + char *nombre,*input_iter; + int Max_iter; + + //milos NLAMBDA MAX_ITER CLASSICAL_ESTIMATES RFS [FWHM DELTA NPOINTS] perfil.txt + + if(argc!=4 && argc!=5 && argc !=8 ){ // changed argc !=8 to !=9 (for RF output) + printf("milos: Error en el numero de parametros: %d .\n Pruebe: milos NLAMBDA MAX_ITER CLASSICAL_ESTIMATES RFS [FWHM(in A) DELTA(in A) NPOINTS] perfil.txt\n",argc); + return -1; + } + + NLAMBDA = atoi(argv[1]); + + input_iter = argv[2]; + Max_iter = atoi(input_iter); + CLASSICAL_ESTIMATES = atoi(argv[3]); + + if(CLASSICAL_ESTIMATES!=0 && CLASSICAL_ESTIMATES != 1){ + printf("milos: Error in CLASSICAL_ESTIMATES parameter. 0 or 1 are valid values. 
Not accepted: \n",CLASSICAL_ESTIMATES); + return -1; + } + + if(argc ==5){ + nombre = argv[4]; + } + else if(argc == 6) { //DOS + RFS = 1; + } + else { + INSTRUMENTAL_CONVOLUTION = 1; + NMUESTRAS_G = atoi(argv[6]); + FWHM = atof(argv[4]); + DELTA = atof(argv[5]); + nombre = argv[7]; + } + + + //Generamos la gaussiana -> perfil instrumental + if(INSTRUMENTAL_CONVOLUTION){ + G=vgauss(FWHM,NMUESTRAS_G,DELTA); + + //if you wish to convolution with other instrumental profile you have to declare here and to asign it to "G" + } + + + cuantic=create_cuantic(dat); + Inicializar_Puntero_Calculos_Compartidos(); + + toplim=1e-18; + + CC=PI/180.0; + CC_2=CC*2; + + filter=0; + getshi=0; + nweight=4; + + nwlines=1; + wlines=(double*) calloc(2,sizeof(double)); + wlines[0]=1; + wlines[1]= CENTRAL_WL; + + vsig=NOISE_SIGMA; //original 0.001 + sigma[0]=vsig; + sigma[1]=vsig; + sigma[2]=vsig; + sigma[3]=vsig; + pol=NULL; + + noise=NOISE_SIGMA; + ilambda=ILAMBDA; + iter=0; + miter=Max_iter; + + nlambda=NLAMBDA; + lambda=calloc(nlambda,sizeof(double)); + spectro=calloc(nlambda*4,sizeof(PRECISION)); + + FILE *fichero, *fichero_estimacioneClasicas; + + fichero= fopen(nombre,"r"); + if(fichero==NULL){ + printf("Error de apertura, es posible que el fichero no exista.\n"); + printf("Milos: Error de lectura del fichero. ++++++++++++++++++\n"); + return 1; + } + + char * buf; + buf=calloc(strlen(nombre)+15,sizeof(char)); + buf = strcat(buf,nombre); + buf = strcat(buf,"_CL_ESTIMATES"); + + if(CLASSICAL_ESTIMATES){ + fichero_estimacioneClasicas= fopen(buf,"w"); + if(fichero_estimacioneClasicas==NULL){ + printf("Error de apertura ESTIMACIONES CLASICAS, es posible que el fichero no exista.\n"); + printf("Milos: Error de lectura del fichero. ++++++++++++++++++\n"); + return 1; + } + } + int neje; + double lin; + double iin,qin,uin,vin; + int rfscanf; + int contador; + + int totalIter=0; + + contador=0; + + ReservarMemoriaSinteisisDerivadas(nlambda); + + //initializing weights + PRECISION *w,*sig; + weights_init(nlambda,sigma,weight,nweight,&w,&sig,noise); + + c2total=0; + ctritotal=0; + + int nsub,indaux; + indaux=0; + + + + do{ + neje=0; + nsub=0; + while (nejegms de initmodel + nfree=CalculaNfree(spectro,nspectro); + //printf("\n nfree! %d:\n",nfree); + //exit(-1); + + + if(nfree==0){ + return -1; //'NOT ENOUGH POINTS' + } + + flambda=ilambda; + + if(fix==NULL){ + fixed=calloc(NTERMS,sizeof(double)); + for(i=0;iGEFF); + beta_B = 1 / C; + + Blos = beta_B * ((LM_lambda_plus - LM_lambda_minus)/2); + Vlos = ( VLIGHT / lambda_0) * ((LM_lambda_plus + LM_lambda_minus)/2); + + + + Blos=Blos ; //factor de correción x campo debil + Vlos = Vlos ; //factor de correción ... 
+ + //inclinacion + x = 0; + y = 0; + for(i=0;i 0 && spectroQ[muestra] > 0 ) + phi=phi; + else + if (spectroU[muestra] < 0 && spectroQ[muestra] > 0 ) + phi=phi + 180; + else + if (spectroU[muestra] < 0 && spectroQ[muestra] < 0 ) + phi=phi + 90; + else + if (spectroU[muestra] > 0 && spectroQ[muestra]< 0 ) + phi=phi + 90; + + + + //printf("Blos : %f \n",Blos); + //printf("vlos : %f \n",Vlos); + //printf("gamma : %f \n",gamma); + //printf("phi : %f \n",phi); + + + PRECISION B_aux; + + B_aux = fabs(Blos/cos(gamma_rad));//*2; + + + if(Vlos < (-5)) + Vlos= -5; + if(Vlos >(5)) + Vlos=(5); + + if(phi< 0) + phi = 180 + (phi); + if(phi > 180){ + phi = phi -180.0; + } + + initModel->B = (B_aux>4000?4000:B_aux); + initModel->vlos=Vlos;//(Vlos*1.5);//1.5; + initModel->gm=gamma; + initModel->az=phi; + +} + +void FijaACeroDerivadasNoNecesarias(PRECISION * d_spectra,int *fixed,int nlambda){ + + int In,j,i; + for(In=0;Ineta0=model->eta0-delta[0]; // 0 + } + if(fixed[1]){ + if(delta[1]< -800) //300 + delta[1]=-800; + else + if(delta[1] >800) + delta[1]=800; + modelout->B=model->B-delta[1];//magnetic field + } + if(fixed[2]){ + + if(delta[2]>5) + delta[2] = 5; + + if(delta[2]<-5) + delta[2] = -5; + + modelout->vlos=model->vlos-delta[2]; + } + if(fixed[3]) + modelout->dopp=model->dopp-delta[3]; + if(fixed[4]) + modelout->aa=model->aa-delta[4]; + if(fixed[5]){ + if(delta[5]< -45) //15 + delta[5]=-45; + else + if(delta[5] > 45) + delta[5]=45; + + modelout->gm=model->gm-delta[5]; //5 + } + if(fixed[6]){ + if(delta[6]< -15) + delta[6]=-15; + else + if(delta[6] > 15) + delta[6]=15; + + modelout->az=model->az-delta[6]; + } + if(fixed[7]) + modelout->S0=model->S0-delta[7]; + if(fixed[8]) + modelout->S1=model->S1-delta[8]; + if(fixed[9]) + modelout->mac=model->mac-delta[9]; //9 + if(fixed[10]) + modelout->alfa=model->alfa-delta[10]; +} + +/* + Tamaño de H es NTERMS x NTERMS + Tamaño de beta es 1xNTERMS + + return en delta tam 1xNTERMS +*/ + +int mil_svd(PRECISION *h,PRECISION *beta,PRECISION *delta){ + + double epsilon,top; + static PRECISION v2[TAMANIO_SVD][TAMANIO_SVD],w2[TAMANIO_SVD],v[NTERMS*NTERMS],w[NTERMS]; + static PRECISION h1[NTERMS*NTERMS],h_svd[TAMANIO_SVD*TAMANIO_SVD]; + static PRECISION aux[NTERMS*NTERMS]; + int i,j,j1; +// static double aux2[NTERMS*NTERMS]; + static PRECISION aux2[NTERMS]; + int aux_nf,aux_nc; + PRECISION factor,maximo,minimo; + int posi,posj; + + epsilon= 1e-15; + top=1.0; + + factor=0; + maximo=0; + minimo=1000000000; + + + /**/ + for(j=0;jmaximo){ + maximo=fabs(h[j]); + } + } + + factor=maximo; + + if(!NORMALIZATION_SVD) + factor = 1; + + for(j=0;j epsilon) ? 
(1/waux[i]) : 0.0); + } + + multmatrix(vaux,NTERMS,NTERMS,aux2,NTERMS,1,delta,&aux_nf,&aux_nc); + + return 1; + +} + + + +void weights_init(int nlambda,double *sigma,PRECISION *weight,int nweight,PRECISION **wOut,PRECISION **sigOut,double noise) +{ + int i,j; + PRECISION *w,*sig; + + + sig=calloc(4,sizeof(PRECISION)); + if(sigma==NULL){ + for(i=0;i<4;i++) + sig[i]= noise* noise; + } + else{ + + for(i=0;i<4;i++) + sig[i]= (*sigma);// * (*sigma); + } + + *wOut=w; + *sigOut=sig; + +} + + +int check(Init_Model *model){ + + double offset=0; + double inter; + + + + + //Inclination + /* if(model->gm < 0) + model->gm = -(model->gm); + if(model->gm > 180) + model->gm =180-(((int)floor(model->gm) % 180)+(model->gm-floor(model->gm)));//180-((int)model->gm % 180);*/ + + + //Magnetic field + if(model->B < 0){ + model->B = -(model->B); + model->gm = 180.0 -(model->gm); + } + if(model->B > 5000) + model->B= 5000; + + if(model->gm < 0) + model->gm = -(model->gm); + if(model->gm > 180) + model->gm = 360.0 - model->gm; + + //azimuth + if(model->az < 0) + model->az= 180 + (model->az); + if(model->az > 180){ + model->az =model->az -180.0; + } + + //RANGOS + //Eta0 + if(model->eta0 < 0.1) + model->eta0=0.1; + if(model->eta0 >8) + model->eta0=8; + + //velocity + if(model->vlos < (-5)) //20 + model->vlos= (-5); + if(model->vlos >5) + model->vlos=5; + + //doppler width ;Do NOT CHANGE THIS + if(model->dopp < 0.0001) + model->dopp = 0.0001; + + if(model->dopp > 0.800) + model->dopp = 0.800; + + + if(model->aa < 0.00001) + model->aa = 0.00001; + if(model->aa > 10) + model->aa = 10; + + //S0 + if(model->S0 < 0.0001) + model->S0 = 0.0001; + if(model->S0 > 2.000) + model->S0 = 2.000; + + //S1 + if(model->S1 < 0.0001) + model->S1 = 0.0001; + if(model->S1 > 2.000) + model->S1 = 2.000; + + //macroturbulence + if(model->mac < 0) + model->mac = 0; + if(model->mac > 4) + model->mac = 4; + + //filling factor +/* if(model->S1 < 0) + model->S1 = 0; + if(model->S1 > 1) + model->S1 = 1;*/ + + return 1; +} + + + + + +void spectral_synthesis_convolution(){ + + int i; + int nlambda=NLAMBDA; + + //convolucionamos los perfiles IQUV (spectra) + if(INSTRUMENTAL_CONVOLUTION){ + + PRECISION Ic; + + + + if(!INSTRUMENTAL_CONVOLUTION_INTERPOLACION){ + //convolucion de I + Ic=spectra[nlambda-1]; + + for(i=0;i> milos NLAMBDA MAX_ITER CLASSICAL_ESTIMATES [FWHM DELTA NPOINTS] profiles_file.txt > output.txt +// +// NLAMBDA number of lambda of input profiles +// MAX_ITER of inversion +// CLASSICAL_ESTIMATES use classical estimates? 1 yes, 0 no +// [FWHM DELTA NPOINTS] use convolution with a gaussian? if the tree parameteres are defined yes, else no. Units in A. NPOINTS has to be odd. 
+// profiles_file.txt name of input profiles file +// output.txt name of output file +// +// + +#include +#include "defines.h" + +#include "nrutil.h" +#include "svdcmp.c" +#include "svdcordic.c" +//#include "tridiagonal.c" +#include "convolution.c" + +#include + +float pythag(float a, float b); + +void weights_init(int nlambda,double *sigma,PRECISION *weight,int nweight,PRECISION **wOut,PRECISION **sigOut,double noise); + +int check(Init_Model *Model); +int lm_mils(Cuantic *cuantic,double * wlines,int nwlines,double *lambda,int nlambda,PRECISION *spectro,int nspectro, + Init_Model *initModel, PRECISION *spectra,int err,double *chisqrf, int *iterOut, + double slight, double toplim, int miter, PRECISION * weight,int nweight, int * fix, + PRECISION *sigma, double filter, double ilambda, double noise, double *pol, + double getshi,int triplete); + +int mil_svd(PRECISION *h,PRECISION *beta,PRECISION *delta); + +int multmatrixIDL(double *a,int naf,int nac, double *b,int nbf,int nbc,double **resultOut,int *fil,int *col); +int multmatrix_transposeD(double *a,int naf,int nac, double *b,int nbf,int nbc,double *result,int *fil,int *col); +int multmatrix3(PRECISION *a,int naf,int nac,double *b,int nbf,int nbc,double **result,int *fil,int *col); +double * leeVector(char *nombre,int tam); +double * transpose(double *mat,int fil,int col); + +double total(double * A, int f,int c); +int multmatrix(PRECISION *a,int naf,int nac, PRECISION *b,int nbf,int nbc,PRECISION *result,int *fil,int *col); +int multmatrix2(double *a,int naf,int nac, PRECISION *b,int nbf,int nbc,double **result,int *fil,int *col); + +int covarm(PRECISION *w,PRECISION *sig,int nsig,PRECISION *spectro,int nspectro,PRECISION *spectra,PRECISION *d_spectra, + PRECISION *beta,PRECISION *alpha); + +int CalculaNfree(PRECISION *spectro,int nspectro); + +double fchisqr(PRECISION * spectra,int nspectro,PRECISION *spectro,PRECISION *w,PRECISION *sig,double nfree); + +void AplicaDelta(Init_Model *model,PRECISION * delta,int * fixed,Init_Model *modelout); +void FijaACeroDerivadasNoNecesarias(PRECISION * d_spectra,int *fixed,int nlambda); +void reformarVector(PRECISION **spectro,int neje); +void spectral_synthesis_convolution(); +void response_functions_convolution(); + +void estimacionesClasicas(PRECISION lambda_0,double *lambda,int nlambda,PRECISION *spectro,Init_Model *initModel); + +#define tiempo(ciclos) asm volatile ("rdtsc \n\t":"=A"(ciclos)) +long long int c1,c2,cd,semi,c1a,c2a,cda; //variables de 64 bits para leer ciclos de reloj +long long int c1total,c2total,cdtotal,ctritotal; + + +Cuantic* cuantic; // Variable global, está hecho así, de momento,para parecerse al original +char * concatena(char *a, int n,char*b); + +PRECISION ** PUNTEROS_CALCULOS_COMPARTIDOS; +int POSW_PUNTERO_CALCULOS_COMPARTIDOS; +int POSR_PUNTERO_CALCULOS_COMPARTIDOS; + +PRECISION * gp1,*gp2,*dt,*dti,*gp3,*gp4,*gp5,*gp6,*etai_2; + +//PRECISION gp4_gp2_rhoq[NLAMBDA],gp5_gp2_rhou[NLAMBDA],gp6_gp2_rhov[NLAMBDA]; +PRECISION *gp4_gp2_rhoq,*gp5_gp2_rhou,*gp6_gp2_rhov; + + +PRECISION *dgp1,*dgp2,*dgp3,*dgp4,*dgp5,*dgp6,*d_dt; +PRECISION * d_ei,*d_eq,*d_eu,*d_ev,*d_rq,*d_ru,*d_rv; +PRECISION *dfi,*dshi; +PRECISION CC,CC_2,sin_gm,azi_2,sinis,cosis,cosis_2,cosi,sina,cosa,sinda,cosda,sindi,cosdi,sinis_cosa,sinis_sina; +PRECISION *fi_p,*fi_b,*fi_r,*shi_p,*shi_b,*shi_r; +PRECISION *etain,*etaqn,*etaun,*etavn,*rhoqn,*rhoun,*rhovn; +PRECISION *etai,*etaq,*etau,*etav,*rhoq,*rhou,*rhov; +PRECISION *parcial1,*parcial2,*parcial3; +PRECISION *nubB,*nupB,*nurB; +PRECISION **uuGlobalInicial; 
+PRECISION **HGlobalInicial; +PRECISION **FGlobalInicial; +PRECISION *perfil_instrumental; +PRECISION * G; +int FGlobal,HGlobal,uuGlobal; + +PRECISION *d_spectra,*spectra; + +//Number of lambdas in the input profiles +int NLAMBDA = 0; + +//Convolutions values +int NMUESTRAS_G = 0; +PRECISION FWHM = 0; +PRECISION DELTA = 0; + +int INSTRUMENTAL_CONVOLUTION = 0; +int INSTRUMENTAL_CONVOLUTION_WITH_PSF = 0; +int CLASSICAL_ESTIMATES = 0; +int RFS = 0; + +const PRECISION crisp_psf[141] = {0.0004,0.0004,0.0005,0.0005,0.0005,0.0005,0.0006,0.0006,0.0006,0.0007,0.0007,0.0008,0.0008,0.0009,0.0009,0.0010,0.0010,0.0011,0.0012,0.0012,0.0013,0.0014, + 0.0015,0.0016,0.0017,0.0019,0.0020,0.0021,0.0023,0.0025,0.0027,0.0029,0.0031,0.0034,0.0037,0.0040,0.0044,0.0048,0.0052,0.0057,0.0062,0.0069,0.0076,0.0083, + 0.0092,0.0102,0.0114,0.0127,0.0142,0.0159,0.0178,0.0202,0.0229,0.0261,0.0299,0.0343,0.0398,0.0465,0.0546,0.0645,0.0765,0.0918,0.1113,0.1363,0.1678,0.2066, + 0.2501,0.2932,0.3306,0.3569,0.3669,0.3569,0.3306,0.2932,0.2501,0.2066,0.1678,0.1363,0.1113,0.0918,0.0765,0.0645,0.0546,0.0465,0.0398,0.0343,0.0299,0.0261, + 0.0229,0.0202,0.0178,0.0159,0.0142,0.0127,0.0114,0.0102,0.0092,0.0083,0.0076,0.0069,0.0062,0.0057,0.0052,0.0048,0.0044,0.0040,0.0037,0.0034,0.0031,0.0029, + 0.0027,0.0025,0.0023,0.0021,0.0020,0.0019,0.0017,0.0016,0.0015,0.0014,0.0013,0.0012,0.0012,0.0011,0.0010,0.0010,0.0009,0.0009,0.0008,0.0008,0.0007,0.0007, + 0.0006,0.0006,0.0006,0.0005,0.0005,0.0005,0.0005,0.0004,0.0004}; + + + + +int main(int argc,char **argv){ + + double * wlines; + int nwlines; + double *lambda; + int nlambda; + PRECISION *spectro; + int ny,i,j; + Init_Model initModel; + int err; + double chisqrf; + int iter; + double slight; + double toplim; + int miter; + PRECISION weight[4]={1.,1.,1.,1.}; + int nweight; + + clock_t t_ini, t_fin; + double secs, total_secs; + + + // CONFIGURACION DE PARAMETROS A INVERTIR + //INIT_MODEL=[eta0,magnet,vlos,landadopp,aa,gamma,azi,B1,B2,macro,alfa] + int fix[]={1.,1.,1.,1.,1.,1.,1.,1.,1.,0.,0.}; //Parametros invertidos + //---------------------------------------------- + + double sigma[NPARMS]; + double vsig; + double filter; + double ilambda; + double noise; + double *pol; + double getshi; + + double dat[7]={CUANTIC_NWL,CUANTIC_SLOI,CUANTIC_LLOI,CUANTIC_JLOI,CUANTIC_SUPI,CUANTIC_LUPI,CUANTIC_JUPI}; + + char *nombre,*input_iter; + int Max_iter; + + //milos NLAMBDA MAX_ITER CLASSICAL_ESTIMATES RFS [FWHM DELTA NPOINTS] perfil.txt + + if(argc!=6 && argc != 7 && argc !=9){ + printf("milos: Error en el numero de parametros: %d .\n Pruebe: milos NLAMBDA MAX_ITER CLASSICAL_ESTIMATES RFS [FWHM(in A) DELTA(in A) NPOINTS] perfil.txt\n",argc); + printf("O bien: milos NLAMBDA MAX_ITER CLASSICAL_ESTIMATES RFS [DELTA(in A)] perfil.txt\n --> for using stored CRISP's PSF\n"); + printf("Note : CLASSICAL_ESTIMATES=> 0: Disabled, 1: Enabled, 2: Only Classical Estimates.\n"); + printf("RFS : 0: Disabled 1: Synthesis 2: Synthesis and Response Functions\n"); + printf("Note when RFS>0: perfil.txt is considered as models.txt. \n"); + + return -1; + } + + NLAMBDA = atoi(argv[1]); + + input_iter = argv[2]; + Max_iter = atoi(input_iter); + CLASSICAL_ESTIMATES = atoi(argv[3]); + RFS = atoi(argv[4]); + + if(CLASSICAL_ESTIMATES!=0 && CLASSICAL_ESTIMATES != 1 && CLASSICAL_ESTIMATES != 2){ + printf("milos: Error in CLASSICAL_ESTIMATES parameter. [0,1,2] are valid values. Not accepted: %d\n",CLASSICAL_ESTIMATES); + return -1; + } + + if(RFS != 0 && RFS != 1 && RFS != 2){ + printf("milos: Error in RFS parameter. 
[0,1,2] are valid values. Not accepted: %d\n",RFS); + return -1; + } + + if(argc ==6){ //if no filter declared + nombre = argv[5]; //input file name + } + else{ + INSTRUMENTAL_CONVOLUTION = 1; + if(argc ==7){ + INSTRUMENTAL_CONVOLUTION_WITH_PSF = 1; + DELTA = atof(argv[5]); + nombre = argv[6]; + FWHM = 0.035; + } + else{ + INSTRUMENTAL_CONVOLUTION_WITH_PSF = 0; + FWHM = atof(argv[5]); + DELTA = atof(argv[6]); + NMUESTRAS_G = atoi(argv[7]); + nombre = argv[8]; + } + } + + + nlambda=NLAMBDA; + //Generamos la gaussiana -> perfil instrumental + + + + if(INSTRUMENTAL_CONVOLUTION){ + G=vgauss(FWHM,NMUESTRAS_G,DELTA); + + + if(INSTRUMENTAL_CONVOLUTION_WITH_PSF){ + + free(G); + NMUESTRAS_G = 17; + + G=vgauss(FWHM,NMUESTRAS_G,DELTA); //solo para reservar memoria + + int kk; + PRECISION sum =0; + for(kk=0;kk 140) //140 length de crisp_psf + G[kk]=0; + else + G[kk] = crisp_psf[pos]; //70 es el centro de psf + + sum += G[kk]; + } + + for(kk=0;kk0.25?(lambda[nlambda-1] * 0.35):0.1); //David 0I.3 y Ic*0.7 + // initModel.S1 = (lambda[nlambda-1]>0.25?(lambda[nlambda-1] * 0.65):0.25); + + } + + //inversion + if(CLASSICAL_ESTIMATES!=2 || RFS) + lm_mils(cuantic,wlines,nwlines,lambda, nlambda,spectro,nlambda,&initModel,spectra,err,&chisqrf,&iter,slight,toplim,miter, + weight,nweight,fix,sig,filter,ilambda,noise,pol,getshi,0); + + + secs = (double)(t_fin - t_ini) / CLOCKS_PER_SEC; + //printf("\n\n%.16g milisegundos\n", secs * 1000.0); + + total_secs += secs; + + totalIter+=iter; + + //chisqrf_array[contador]=chisqrf; + + //[contador;iter;B;GM;AZI;etha0;lambdadopp;aa;vlos;S0;S1;final_chisqr]; + printf("%d\n",contador); + printf("%d\n",iter); + printf("%f\n",initModel.B); + printf("%f\n",initModel.gm); + printf("%f\n",initModel.az); + printf("%f \n",initModel.eta0); + printf("%f\n",initModel.dopp); + printf("%f\n",initModel.aa); + printf("%f\n",initModel.vlos); //km/s + //printf("alfa \t:%f\n",initModel.alfa); //stay light factor + printf("%f\n",initModel.S0); + printf("%f\n",initModel.S1); + printf("%.10e\n",chisqrf); + + /* + //For debugging: escribe tambien los perfiles + int kk; + for(kk=0;kkgms de initmodel + nfree=CalculaNfree(spectro,nspectro); + //printf("\n nfree! %d:\n",nfree); + //exit(-1); + + + if(nfree==0){ + return -1; //'NOT ENOUGH POINTS' + } + + flambda=ilambda; + + if(fix==NULL){ + fixed=calloc(NTERMS,sizeof(double)); + for(i=0;iB); + printf("%f\n",initModel->gm); + printf("%f\n",initModel->az); + printf("%f \n",initModel->eta0); + printf("%f\n",initModel->dopp); + printf("%f\n",initModel->aa); + printf("%f\n",initModel->vlos); //km/s + //printf("alfa \t:%f\n",initModel.alfa); //stay light factor + printf("%f\n",initModel->S0); + printf("%f\n",initModel->S1); + printf("%.10e\n",ochisqr); +*/ + + }while(iter<=miter); // && !clanda); + + *iterOut=iter; + + *chisqrf=ochisqr; + + + + if(fix==NULL) + free(fixed); + + + return 1; +} + +int CalculaNfree(PRECISION *spectro,int nspectro){ + int nfree,i,j; + nfree=0; + + /* + for(j=0;j<4*nspectro;j++){ + if(spectro[j]!=0.0){ + nfree++; + } + } + nfree=nfree-NTERMS;//NTERMS; + */ + + nfree = (nspectro*NPARMS) - NTERMS; + + + return nfree; +} + + +/* +* +* +* Cálculo de las estimaciones clásicas. +* +* +* lambda_0 : centro de la línea +* lambda : vector de muestras +* nlambda : numero de muesras +* spectro : vector [I,Q,U,V] +* initModel: Modelo de atmosfera a ser modificado +* +* +* +* @Author: Juan Pedro Cobos Carrascosa (IAA-CSIC) +* jpedro@iaa.es +* @Date: Nov. 
2011 +* +*/ +void estimacionesClasicas(PRECISION lambda_0,double *lambda,int nlambda,PRECISION *spectro,Init_Model *initModel){ + + PRECISION x,y,aux,LM_lambda_plus,LM_lambda_minus,Blos,beta_B,Ic,Vlos; + PRECISION *spectroI,*spectroQ,*spectroU,*spectroV; + PRECISION L,m,gamma, gamma_rad,tan_gamma,maxV,minV,C,maxWh,minWh; + int i,j; + + spectroI=spectro; + spectroQ=spectro+nlambda; + spectroU=spectro+nlambda*2; + spectroV=spectro+nlambda*3; + + Ic= spectro[nlambda-1]; // Continuo ultimo valor de I + + + x=0; + y=0; + for(i=0;iGEFF); + beta_B = 1 / C; + + Blos = beta_B * ((LM_lambda_plus - LM_lambda_minus)/2); + Vlos = ( VLIGHT / lambda_0) * ((LM_lambda_plus + LM_lambda_minus)/2); + + + + Blos=Blos ; //factor de correción x campo debil + Vlos = Vlos ; //factor de correción ... + + //inclinacion + x = 0; + y = 0; + for(i=0;i 1e-5 ? x : 0; + y = fabs(y) > 1e- ? y : 0; + + tan_gamma = fabs(sqrtf(x/y)); + + tan_gamma = fabs(tan_gamma) > 1e-5 ? tan_gamma : 0; + + gamma_rad = atanf(tan_gamma); //gamma en radianes + + gamma_rad = fabs(gamma_rad) > 1e-5 ? gamma_rad : 0; + + gamma = gamma_rad * (180/ PI); //gamma en grados + + + //correccion + //utilizamos el signo de Blos para ver corregir el cuadrante + if (Blos<0) + gamma = 180-gamma; + + + + //azimuth + + PRECISION tan2phi,phi; + int muestra; + + if(nlambda==6) + muestra = CLASSICAL_ESTIMATES_SAMPLE_REF; + else + muestra = nlambda*0.5; + + + tan2phi=spectroU[muestra]/spectroQ[muestra]; + + // printf("tan2phi : %f \n",tan2phi); + // printf("%.10e \n",spectroU[muestra]); + // printf("%.10e \n",spectroQ[muestra]); + + + phi= (atan(tan2phi)*180/PI) / 2; //atan con paso a grados + + //printf("atan : %f \n",phi*2); + + // printf("%.10e \n",atan(tan2phi)); + + if(spectroU[muestra] > 0 && spectroQ[muestra] > 0 ) + phi=phi; + else + if (spectroU[muestra] < 0 && spectroQ[muestra] > 0 ) + phi=phi + 180; + else + if (spectroU[muestra] < 0 && spectroQ[muestra] < 0 ) + phi=phi + 90; + else + if (spectroU[muestra] > 0 && spectroQ[muestra]< 0 ) + phi=phi + 90; + + // printf("%.10e \n",phi); + + //printf("Blos : %f \n",Blos); + //printf("vlos : %f \n",Vlos); + //printf("gamma : %f \n",gamma); + //printf("phi : %f \n",phi); + + + PRECISION B_aux; + + B_aux = fabs(Blos/cos(gamma_rad))*1.5; + + //Vlos = Vlos * 1.5; + if(Vlos < (-5)) + Vlos= -5; + if(Vlos >(5)) + Vlos=(5); + + if(phi< 0) + phi = 180 + (phi); + if(phi > 180){ + phi = phi -180.0; + } + + // printf("%.10e \n",phi); + + initModel->B = (B_aux>4000?4000:B_aux); + initModel->vlos=Vlos;//(Vlos*1.5);//1.5; + initModel->gm=gamma; + initModel->az=phi; + initModel->S0= Blos; + +} + +void FijaACeroDerivadasNoNecesarias(PRECISION * d_spectra,int *fixed,int nlambda){ + + int In,j,i; + for(In=0;Ineta0=model->eta0-delta[0]; // 0 + } + if(fixed[1]){ + if(delta[1]< -800) //300 + delta[1]=-800; + else + if(delta[1] >800) + delta[1]=800; + modelout->B=model->B-delta[1];//magnetic field + } + if(fixed[2]){ + + if(delta[2]>2) + delta[2] = 2; + + if(delta[2]<-2) + delta[2] = -2; + + modelout->vlos=model->vlos-delta[2]; + } + + if(fixed[3]){ + + // if(delta[3]>1e-2) + // delta[3] = 1e-2; + // else + // if(delta[3]<-1e-2) + // delta[3] = -1e-2; + + modelout->dopp=model->dopp-delta[3]; + } + + if(fixed[4]) + modelout->aa=model->aa-delta[4]; + + if(fixed[5]){ + if(delta[5]< -30) //15 + delta[5]=-30; + else + if(delta[5] > 30) + delta[5]=30; + + modelout->gm=model->gm-delta[5]; //5 + } + if(fixed[6]){ + if(delta[6]< -30) + delta[6]=-30; + else + if(delta[6] > 30) + delta[6]= 30; + + modelout->az=model->az-delta[6]; + } + 
if(fixed[7]) + modelout->S0=model->S0-delta[7]; + if(fixed[8]) + modelout->S1=model->S1-delta[8]; + if(fixed[9]) + modelout->mac=model->mac-delta[9]; //9 + if(fixed[10]) + modelout->alfa=model->alfa-delta[10]; +} + +/* + Tamaño de H es NTERMS x NTERMS + Tamaño de beta es 1xNTERMS + + return en delta tam 1xNTERMS +*/ + +int mil_svd(PRECISION *h,PRECISION *beta,PRECISION *delta){ + + double epsilon,top; + static PRECISION v2[TAMANIO_SVD][TAMANIO_SVD],w2[TAMANIO_SVD],v[NTERMS*NTERMS],w[NTERMS]; + static PRECISION h1[NTERMS*NTERMS],h_svd[TAMANIO_SVD*TAMANIO_SVD]; + static PRECISION aux[NTERMS*NTERMS]; + int i,j; +// static double aux2[NTERMS*NTERMS]; + static PRECISION aux2[NTERMS]; + int aux_nf,aux_nc; + PRECISION factor,maximo,minimo; + int posi,posj; + + epsilon= 1e-12; + top=1.0; + + factor=0; + maximo=0; + minimo=1000000000; + + + /**/ + for(j=0;jmaximo){ + maximo=fabs(h[j]); + } + } + + factor=maximo; + + //printf("maximo : %.12e \n",maximo); + //exit(-1); + + + if(!NORMALIZATION_SVD) + factor = 1; + + for(j=0;j epsilon) ? (1/waux[i]): (1/epsilon));//((waux[i]>0)?(1/epsilon):(-1/epsilon))); //(1/waux[i]) : 0);// + } + + multmatrix(vaux,NTERMS,NTERMS,aux2,NTERMS,1,delta,&aux_nf,&aux_nc); + +/* + printf("\n"); + printf("#################################### delta \n"); + int j1; + for(j1=0;j1gm < 0) + model->gm = -(model->gm); + if(model->gm > 180) + model->gm =180-(((int)floor(model->gm) % 180)+(model->gm-floor(model->gm)));//180-((int)model->gm % 180);*/ + + //Magnetic field + if(model->B < 0){ + //model->B = 190; + model->B = -(model->B); + // model->gm = 180.0 -(model->gm); + } + if(model->B > 5000) + model->B= 5000; + + //Inclination + if(model->gm < 0) + model->gm = -(model->gm); + if(model->gm > 180){ + model->gm = 360.0 - model->gm; + // model->gm = 179; //360.0 - model->gm; + } + + //azimuth + if(model->az < 0) + model->az= 180 + (model->az); //model->az= 180 + (model->az); + if(model->az > 180){ + model->az =model->az -180.0; + // model->az = 179.0; + } + + //RANGOS + //Eta0 + if(model->eta0 < 1) + model->eta0=1; + + // if(model->eta0 >8) + // model->eta0=8; + if(model->eta0 >2500) //idl 2500 + model->eta0=2500; + + //velocity + if(model->vlos < (-20)) //20 + model->vlos= (-20); + if(model->vlos >20) + model->vlos=20; + + //doppler width ;Do NOT CHANGE THIS + if(model->dopp < 0.0001) + model->dopp = 0.0001; + + if(model->dopp > 0.6) // idl 0.6 + model->dopp = 0.6; + + + if(model->aa < 0.0001) // idl 1e-4 + model->aa = 0.0001; + if(model->aa > 10) //10 + model->aa = 10; + + //S0 + if(model->S0 < 0.0001) + model->S0 = 0.0001; + if(model->S0 > 1.500) + model->S0 = 1.500; + + //S1 + if(model->S1 < 0.0001) + model->S1 = 0.0001; + if(model->S1 > 2.000) + model->S1 = 2.000; + + //macroturbulence + if(model->mac < 0) + model->mac = 0; + if(model->mac > 4) + model->mac = 4; + + //filling factor +/* if(model->S1 < 0) + model->S1 = 0; + if(model->S1 > 1) + model->S1 = 1;*/ + + return 1; +} + + + + + +void spectral_synthesis_convolution(){ + + int i; + int nlambda=NLAMBDA; + + //convolucionamos los perfiles IQUV (spectra) + if(INSTRUMENTAL_CONVOLUTION){ + + PRECISION Ic; + + + + if(!INSTRUMENTAL_CONVOLUTION_INTERPOLACION){ + //convolucion de I + Ic=spectra[nlambda-1]; + + for(i=0;i + +clock_t t_ini, t_fin; +double secs, total_secs; + +t_ini = clock(); + +call FUNCTION to TEST! 
+ +t_fin = clock(); + +secs = (double)(t_fin - t_ini) / CLOCKS_PER_SEC; +printf("\n\n%.16g milisegundos\n", secs * 1000.0); \ No newline at end of file diff --git a/cmilos/calculosCompartidos.o b/cmilos/calculosCompartidos.o deleted file mode 100644 index 7a2c59611575655749f93e96db2437d6898e3ef3..0000000000000000000000000000000000000000 Binary files a/cmilos/calculosCompartidos.o and /dev/null differ diff --git a/cmilos/create_cuantic.o b/cmilos/create_cuantic.o deleted file mode 100644 index d0de498c77b7c2a9927f5998d27c0f0d58d59e67..0000000000000000000000000000000000000000 Binary files a/cmilos/create_cuantic.o and /dev/null differ diff --git a/cmilos/defines.h b/cmilos/defines.h index c9a5abbda3f33cae70362ae8d3f6085b1a932937..c9ea1a5caf37744b58318c5f9cc3550f7bab63bf 100755 --- a/cmilos/defines.h +++ b/cmilos/defines.h @@ -44,6 +44,21 @@ // #define INITIAL_MODEL_S0 0.35 // #define INITIAL_MODEL_S1 0.85 +// RTE init model config ONBOARD +// Inv Iterations = 15 +// Initial Model Continuum Absoprtion = 12.0000 // INITIAL_MODEL_ETHA0 +// Initial Model Vector Magnetic Field Strength = 1200.00000 // INITIAL_MODEL_B +// Initial Model Line-Of-Sight Velocity = 0.05000 // INITIAL_MODEL_VLOS +// Initial Model Doppler Width Of Line = 0.05000 // INITIAL_MODEL_LAMBDADOPP +// Initial Model Damping Parameter = 0.05000 // INITIAL_MODEL_AA +// Initial Model Vector Magnetic Field Inclination = 170.00000 // INITIAL_MODEL_GM +// Initial Model Vector Magnetic Field Azimuth = 25.00000 // INITIAL_MODEL_AZI +// Initial Model Source Function Ordinate At Origin = 0.30000 // INITIAL_MODEL_S0 +// Initial Model Source Function Slope = 0.80000 // INITIAL_MODEL_S1 +// Wavelength Setting Initial Sampling Wavelength = 6173.20117 +// Wavelength Setting Spectral Line Step = 0.07000 +// Wavelength Setting Wavelength Of Line Continuum = 6173.04117 (blue) 6173.64117 (red) + #define INITIAL_MODEL_B 400 #define INITIAL_MODEL_GM 30 #define INITIAL_MODEL_AZI 120 @@ -54,7 +69,6 @@ #define INITIAL_MODEL_S0 0.15 #define INITIAL_MODEL_S1 0.85 - //NumeroS cuanticos #define CUANTIC_NWL 1 #define CUANTIC_SLOI 2 diff --git a/cmilos/fgauss.o b/cmilos/fgauss.o deleted file mode 100644 index a468fbe1dbae07db2291b511df58c05bd78d91d7..0000000000000000000000000000000000000000 Binary files a/cmilos/fgauss.o and /dev/null differ diff --git a/cmilos/fvoigt.o b/cmilos/fvoigt.o deleted file mode 100644 index edd7471f00fdb77324a5378841f8727845ce5e15..0000000000000000000000000000000000000000 Binary files a/cmilos/fvoigt.o and /dev/null differ diff --git a/cmilos/lib.o b/cmilos/lib.o deleted file mode 100644 index 1592b40703a2afb0158c87683ff72dd07a11512f..0000000000000000000000000000000000000000 Binary files a/cmilos/lib.o and /dev/null differ diff --git a/cmilos/me_der.o b/cmilos/me_der.o deleted file mode 100644 index c320a0e887cadd70e0ef82314b59e3942eab2c8e..0000000000000000000000000000000000000000 Binary files a/cmilos/me_der.o and /dev/null differ diff --git a/cmilos/mil_sinrf.o b/cmilos/mil_sinrf.o deleted file mode 100644 index 28697c53615b100ffad0d437cf9082b4df917cd6..0000000000000000000000000000000000000000 Binary files a/cmilos/mil_sinrf.o and /dev/null differ diff --git a/cmilos/milos b/cmilos/milos index 689942e938c9bba93c356050761be8a9c112e56d..7c45baad52d7649986c524584258b9ee66fb0b82 100755 Binary files a/cmilos/milos and b/cmilos/milos differ diff --git a/cmilos/milos.o b/cmilos/milos.o deleted file mode 100644 index 67b1d878d99ee0315ff4b3458dfb2d3eaf800913..0000000000000000000000000000000000000000 Binary files a/cmilos/milos.o and 
/dev/null differ diff --git a/environment.yml b/environment.yml index 5a82a79088151171cafbfcb4c21909f4ef45f63f..69189d5905f190d2775dbb08c8f6cbe5b1a8ab8e 100644 --- a/environment.yml +++ b/environment.yml @@ -1,4 +1,4 @@ -name: dataproc +name: hrt_pipeline_env channels: - conda-forge - defaults @@ -77,5 +77,5 @@ dependencies: - xz=5.2.5=h7b6447c_0 - zlib=1.2.11=h7b6447c_3 - zstd=1.4.5=h9ceee32_0 -prefix: /home/sinjan/.conda/envs/dataproc +prefix: /home/sinjan/.conda/envs/hrt_pipeline_env diff --git a/example_input_json.py b/example_input_json.py index 6279074d096a49b9b5634edd66455e151f1490ac..4bd5aa0a628d773277d4bfdeaa8bc21dcc880d79 100644 --- a/example_input_json.py +++ b/example_input_json.py @@ -36,20 +36,23 @@ input_dict = { 'flat_states' : 24, #options 4 (one each pol state), 6 (one each wavelength), 24 'prefilter_f': None, 'fs_c' : True, - 'limb' : 'W', # for limb images - must know what limb in FOV: 'N','E','W','S' + 'limb' : None, # for limb images - must know what limb in FOV: 'N','E','W','S' 'demod' : True, 'norm_stokes' : True, 'ItoQUV' : False, #missing VtoQU - not developed yet + 'ghost_c' : True, 'ctalk_params' : None, #VtoQU parameters will be required in this argument once ready 'rte' : False, #options: ''RTE', 'CE', 'CE+RTE' 'p_milos' : False, #attempted, ran into problems - on hold - 'cmilos_fits_opt': False, #use cmilos with .fits IO - 16% speed up + 'cmilos_fits': False, #use cmilos with .fits IO - 16% speed up #output dir/filenames 'out_dir' : './', 'out_stokes_file' : False, #if True, will save stokes array to fits, the array that is fed into the RTE inversions 'out_stokes_filename' : None, #if specific and not default name 'out_rte_filename' : None, #if specific and not default name + 'config': True, + 'out_intermediate': False, } json.dump(input_dict, open(f"./input_jsons/nov_2020_L1.txt", "w")) diff --git a/hrt_pipeline_notebook.ipynb b/hrt_pipeline_notebook.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..f74226c8a149444e475f8543091ef42b0d36470b --- /dev/null +++ b/hrt_pipeline_notebook.ipynb @@ -0,0 +1,356 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# **PHI-HRT Pipeline**\n", + "\n", + "_____________\n", + "\n", + "Author: Jonas Sinjan (MPS)
\n", + "Contributor: Daniele Calchetti (MPS)\n", + "\n", + "Reductions Steps\n", + "\n", + "1. read in science data (+scaling) open path option + open for several scans at once\n", + "2. read in flat field (+scaling)- accepts only one flat field fits file\n", + "3. read in dark field (+scaling)\n", + "4. apply dark field (to only science - assumes flat is dark fielded)\n", + "5. option to clean flat field with unsharp masking (Stokes QUV, UV or V)\n", + "6. normalise flat field\n", + "7. apply flat field\n", + "8. prefilter correction\n", + "9. apply field stop\n", + "10. demodulate with const demod matrix\n", + "11. normalise to quiet sun\n", + "12. I -> Q,U,V cross talk correction\n", + "13. rte inversion with cmilos\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 1: Download files\n", + "\n", + "(This requires the hrt_pipeline_env environment - or the correct modules installed with pip (see `requirements.txt`)\n", + "\n", + "Options:\n", + "\n", + "1. Can download manually yourself from the PHI Image Database: https://www2.mps.mpg.de/services/proton/phi/imgdb/\n", + "\n", + " Suggested filters on the database for HRT science data: \n", + " - KEYWORD DETECTOR = 'HRT'
\n", + " - Filename\\* like \\*L1_phi-hrt-ilam_date\\*\n", + " \n", + " Once you find the file you want use the Command Line: \n", + " \n", + " ```\n", + " wget --user yourusername --password yourpassword file_web_address_from_database\n", + " gunzip file.gz\n", + " ```\n", + " \n", + "2. Use the provided script in 'downloads' folder (**Benefit: can download multiple files at once**): `downloads/download_from_db.py`\n", + "\n", + " Instructions:\n", + " 1. From the database find the files you wish to download\n", + " 2. Copy the 'Download File List' that the database will generate\n", + " 3. Paste into the `downloads/file_names.txt` file\n", + " 4. Create a `downloads/.env` file with your MPS Windows login:
\n", + " ```text=\n", + " USER_NAME =\n", + " PHIDATAPASSWORD =\n", + " ``` \n", + " 5. Set the **target download folder** in the `download_from_db.py` file\n", + " 6. Run the file (will require dotenv python module to be installed - included in `hrt_pipeline_env`):\n", + " ```text=\n", + " python download_from_db.py\n", + " ```\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 2: Create Input JSON (config) file" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "\n", + "#example\n", + "\n", + "data_path = ['data_one.fits', 'data_two.fits', 'etc.fits...'] \n", + "\n", + "#can have as many files as you like, contintue the list\n", + "\n", + "\"\"\"\n", + "IMPORTANT\n", + "\n", + "- files will be procssed with the same flat, dark\n", + "- must have the same continuum position, pmp temp etc - checks will be run, and pipeline will exit if they fail\n", + "- if data is a limb image, the limb in the FOV must be known - ie N, S, E, W etc. before processing\n", + "\"\"\"\n", + "\n", + "dir_data_path = '/path/to/your/data/folder'\n", + "data = [dir_data_path + i for i in data_path]\n", + "\n", + "flat_path = '/path/to/your/flat.fits'\n", + "dark_path = '/path/to/your/dark.fits'\n", + "\n", + "input_dict = {\n", + "\n", + " #input data\n", + " 'data_f' : data, #hrt pipeline allows multiple files at once to be processed \n", + " 'flat_f' : flat_path,\n", + " 'dark_f' : dark_path,\n", + " \n", + " #input/output type + scaling\n", + " 'L1_input' : False, #ignore - in development\n", + " 'L1_8_generate': False, #ignore - in development\n", + " 'scale_data' : True, #leave as True if L1 data\n", + " 'accum_scaling' : True, #leave as True if L1 data\n", + " 'bit_conversion' : True, #leave as True if L1 data\n", + " \n", + " #reduction\n", + " 'dark_c' : True, #apply dark field correction\n", + " 'flat_c' : True, #apply flat field correction\n", + " 'norm_f' : True, #normalise the flats before used (DEFAULT: True)\n", + " 'clean_f' : True, #clean the flat fields with unsharp masking\n", + " 'sigma' : 59, #unsharp masking gaussian width\n", + " 'clean_mode' : \"V\", #options 'QUV', 'UV', 'V' for the unsharp masking\n", + " 'flat_states' : 24, #options 24 (DEFAULT), 4 (one each pol state), 6 (one each wavelength), \n", + " 'prefilter_f': None, #provide the path for the prefilter .fits file\n", + " 'fs_c' : True, #apply the field stop\n", + " 'limb' : None, #for limb images - must know what limb in FOV: 'N','E','W','S'\n", + " 'demod' : True, #demodulate to create the stokes maps\n", + " 'norm_stokes' : True, #normalise the stokes maps to I_continuum\n", + " 'ItoQUV' : False, #apply I-> Q,U,V cross talk correction\n", + " 'ghost_c' : False, #if True, excludes ghost region for ItoQUV ctalk calculation \n", + " 'rte' : False, #options: RTE', 'CE' (classical estimates), 'CE+RTE'\n", + " 'p_milos' : False, #DO NOT USE\n", + " 'cmilos_fits': False, #use cmilos with .fits IO - 16% speed up\n", + " \n", + " #output dir/filenames\n", + " 'out_dir' : None, #directory where you want the output files to go (string)\n", + " 'out_stokes_file' : False, #if True, will save the final stokes array to fits file(s)\n", + " 'out_stokes_filename' : None, #if specific required otherwise will default to standard convention\n", + " 'out_rte_filename' : None, #if specific required otherwise will default to standard convention\n", + " 'config' : True, #if True, saves a copy of the input file with runtime in the output 
folder\n", + " 'out_intermediate': False, #if True, save intermediate steps to fits files in the output folder (OUT_STOKES_FILE MUST BE SET TO TRUE)\n", + " 'vers': \"01\", #desired version number, only 2 characters 01 -> 99, if not specified, '01' default\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "##################################################################\n", + "\n", + "# CHANGE NAME OF CONFIG FILE + CREATE\n", + "\n", + "##################################################################\n", + "\n", + "#json.dump(input_dict, open(f\"./input_jsons/name_your_input_file.json\", \"w\")) #uncomment to run" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Step 3: Run the Pipeline\n", + "\n", + "Two options\n", + "1. Run within notebook
\n", + "2. Run in Terminal" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The autoreload extension is already loaded. To reload it, use:\n", + " %reload_ext autoreload\n", + "-------------------------------------------------------------- \u001b[92m\n", + "PHI HRT data reduction software \u001b[92m\n", + "-------------------------------------------------------------- \u001b[92m\n", + " \u001b[0m\n", + "-->>>>>>> Reading Data\n", + "Input contains 1 scan(s)\n", + "Dividing by number of accumulations: 20\n", + "Continuum position at wave: 0\n", + "This scan has been flipped in the Y axis to conform to orientation standards. \n", + " File: /data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits\n", + "All the scan(s) have the same dimension\n", + "All the scan(s) have the same continuum wavelength position\n", + "All the scan(s) have the same PMP Temperature Set Point: 50\n", + "Data shape is (2048, 2048, 24, 1)\n", + "All the scan(s) have the same IMGDIRX keyword: YES\n", + "Data reshaped to: (2048, 2048, 4, 6, 1)\n", + "-------------------------------------------------------------- \u001b[92m\n", + "------------ Load science data time: 0.589 seconds \u001b[92m\n", + "-------------------------------------------------------------- \u001b[92m\n", + " \u001b[0m\n", + "-->>>>>>> Reading Flats\n", + "Dividing by number of accumulations: 150\n", + "Flat field shape is (24, 2048, 2048)\n", + "(2048, 2048, 4, 6)\n", + "Continuum position at wave: 5\n", + "The continuum position of the flat field is at 5 index position\n", + "The flat field continuum position is not the same as the data, trying to correct.\n", + "Flat PMP Temperature Set Point: 50\n", + "-------------------------------------------------------------- \u001b[92m\n", + "------------ Load flats time: 0.615 seconds \u001b[92m\n", + "-------------------------------------------------------------- \u001b[92m\n", + " \u001b[0m\n", + "-->>>>>>> Reading Darks \n", + "Dividing by number of accumulations: 20\n", + "-------------------------------------------------------------- \u001b[92m\n", + "------------ Load darks time: 0.035 seconds \u001b[92m\n", + "-------------------------------------------------------------- \u001b[92m\n", + " \u001b[0m\n", + "-->>>>>>> Subtracting dark field\n", + "-------------------------------------------------------------- \u001b[92m\n", + "------------- Dark Field correction time: 0.068 seconds \u001b[92m\n", + "-------------------------------------------------------------- \u001b[92m\n", + " \u001b[0m\n", + "-->>>>>>> No clean flats mode\n", + " \u001b[0m\n", + "-->>>>>>> Normalising Flats\n", + "-------------------------------------------------------------- \u001b[92m\n", + "------------- Normalising flat time: 0.079 seconds \u001b[92m\n", + "-------------------------------------------------------------- \u001b[92m\n", + " \u001b[0m\n", + "-->>>>>>> Correcting Flatfield\n", + "Dividing by 24 flats, one for each image\n", + "-------------------------------------------------------------- \u001b[92m\n", + "------------- Flat Field correction time: 0.356 seconds \u001b[92m\n", + "-------------------------------------------------------------- \u001b[92m\n", + " \u001b[0m\n", + "-->>>>>>> No prefilter mode\n", + " \u001b[0m\n", + "-->>>>>>> Applying field stop\n", + "-------------------------------------------------------------- 
\u001b[92m\n", + "------------- Field stop time: 0.252 seconds \u001b[92m\n", + "-------------------------------------------------------------- \u001b[92m\n", + " \u001b[0m\n", + "-->>>>>>> Demodulating data\n", + "Using a constant demodulation matrix for a PMP TEMP of 50 deg\n", + "-------------------------------------------------------------- \u001b[92m\n", + "------------- Demodulation time: 1.819 seconds \u001b[92m\n", + "-------------------------------------------------------------- \u001b[92m\n", + " \u001b[0m\n", + "-->>>>>>> Normalising Stokes to Quiet Sun\n", + "-------------------------------------------------------------- \u001b[92m\n", + "------------- Stokes Normalising time: 1.706 seconds \u001b[92m\n", + "-------------------------------------------------------------- \u001b[92m\n", + " \u001b[0m\n", + "-->>>>>>> Cross-talk correction I to Q,U,V \n", + " ---- >>>>> CT parameters computation of data scan number: 0 .... \n", + "Cross-talk from I to Q: slope = -0.0030 ; off-set = -0.0012 \n", + "Cross-talk from I to U: slope = -0.0031 ; off-set = -0.0008 \n", + "Cross-talk from I to V: slope = -0.0025 ; off-set = -0.0001 \n", + "-------------------------------------------------------------- \u001b[92m\n", + "------------- I -> Q,U,V cross talk correction time: 4.084 seconds \u001b[92m\n", + "-------------------------------------------------------------- \u001b[92m\n", + " \u001b[0m\n", + "Saving demodulated data to one 'stokes' file per scan\n", + "The scans' DIDs are all unique\n", + "Writing out stokes file as: solo_L2_phi-hrt-stokes_20210914T071515_V02_0149140401.fits\n", + " \n", + "-->>>>>>> RUNNING PMILOS \n", + "Pmilos executable located at: /scratch/slam/sinjan/solo_attic_fits/hrt_pipeline/p-milos/\n", + "It is assumed the wavelength array is given by the hdr\n", + "Wave axis is: [-296.1459 -137.007 -67.0983 0. 70.6113 139.4661]\n", + "Saving data into ./p-milos/run/data/input_tmp.fits for pmilos RTE input\n", + "[6173.0392541 6173.198393 6173.2683017 6173.3354 6173.4060113\n", + " 6173.4748661]\n", + " ---- >>>>> Inverting data scan number: 0 .... 
\n", + "(2048, 2048, 13)\n", + "-------------------------------------------------------------- \u001b[92m\n", + "------------- PMILOS RTE Run Time: 32.692 seconds \u001b[92m\n", + "-------------------------------------------------------------- \u001b[92m\n", + " \u001b[0m\n", + "--------------------------------------------------------------\n", + "------------ Reduction Complete: 44.921 seconds\n", + "--------------------------------------------------------------\n", + "\u001b\u001b[0m\r" + ] + } + ], + "source": [ + "#Example RUN the pipe\n", + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import sys\n", + "sys.path.insert(1, './src/')\n", + "from hrt_pipe import phihrt_pipe\n", + "\n", + "#insert name of your input file\n", + "\n", + "stokes = phihrt_pipe('./input_jsons/name_of_your_input_file.json')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Or can run in terminal (either through jupyter or yourself):\n", + "import os\n", + "cwd = os.getcwd()\n", + "os.chdir(cwd + './src/')\n", + "\n", + "#####################################################################\n", + "\n", + "# CHANGE INPUT FILE LOC IN 'phihrt_pipe' function in 'run.py'\n", + "\n", + "#####################################################################\n", + "\n", + "!python run.py\n", + "\n", + "os.chdir(cwd)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "hrt_pipeline_env", + "language": "python", + "name": "hrt_pipeline_env" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/input_jsons/sep_2021_L1_north.json b/input_jsons/sep_2021_L1_north.json index fde8afe0c486e60e0b88ea6e4e14a3fd8fa95148..4f271bf29fffaae184e7d3cc745cfb7750db771c 100644 --- a/input_jsons/sep_2021_L1_north.json +++ b/input_jsons/sep_2021_L1_north.json @@ -1 +1 @@ -{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T034515_V202110211713C_0149140201.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/0169111100_DC_9data.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": true, "L1_8_generate": false, "scale_data": true, "accum_scaling": true, "bit_conversion": true, "dark_c": true, "flat_c": true, "norm_f": true, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 24, "prefilter_f": null, "fs_c": true, "demod": true, "norm_stokes": true, "ItoQUV": false, "ctalk_params": null, "rte": "none", "p_milos": false, "cmilos_fits_opt": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/sep_2021/", "out_demod_file": true, "out_demod_filename": null, "out_rte_filename": null, "config_file": true} \ No newline at end of file +"/data/slam/home/sinjan/hrt_pipe_results/sep_2021_north/config_file_12_11_2021T10_46_54.json" \ No newline at end of file diff --git a/input_jsons/sep_2021_L1_south_noflat.json b/input_jsons/sep_2021_L1_south_noflat.json index a5839b600c00e9406714206518ee4005a46e1e5e..7a16804b4fc3e84b0bb8600ef0ab58cbda412490 100644 --- a/input_jsons/sep_2021_L1_south_noflat.json +++ b/input_jsons/sep_2021_L1_south_noflat.json @@ -1 +1 @@ -{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T053015_V202110150939C_0149140301.fits", 
"/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T053445_V202110150939C_0149140302.fits", "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T053909_V202110150939C_0149140303.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/0169111100_DC_9data.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": false, "L1_8_generate": false, "scale_data": true, "accum_scaling": true, "bit_conversion": true, "dark_c": true, "flat_c": false, "norm_f": false, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 24, "prefilter_f": null, "fs_c": true, "limb": "S", "demod": true, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": "none", "p_milos": false, "cmilos_fits_opt": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/sep_2021_south_no_flat/", "out_stokes_file": true, "out_stokes_filename": null, "out_rte_filename": null, "config_file": false} \ No newline at end of file +"/data/slam/home/sinjan/hrt_pipe_results/sep_2021_south/config_file_12_11_2021T11_00_13.json" \ No newline at end of file diff --git a/input_jsons/sep_2021_L1_west.json b/input_jsons/sep_2021_L1_west.json index a5c8721fa20fe8d58a90ff3979b9f7abf4aec55f..e9e2451d43e9da35e1d11923b9234970905e75d1 100644 --- a/input_jsons/sep_2021_L1_west.json +++ b/input_jsons/sep_2021_L1_west.json @@ -1 +1 @@ -{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits", "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071945_V202110260809C_0149140402.fits", "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T072409_V202110260809C_0149140403.fits", "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T072833_V202110260809C_0149140404.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/0169111100_DC_9data.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": true, "L1_8_generate": false, "scale_data": true, "accum_scaling": true, "bit_conversion": true, "dark_c": true, "flat_c": true, "norm_f": true, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 24, "prefilter_f": null, "fs_c": true, "demod": true, "norm_stokes": true, "ItoQUV": false, "ctalk_params": null, "rte": "none", "p_milos": false, "cmilos_fits_opt": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/sep_2021/", "out_demod_file": true, "out_demod_filename": null, "out_rte_filename": null, "config_file": true} \ No newline at end of file +{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/0169111100_DC_9data.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": false, "L1_8_generate": false, "scale_data": true, "accum_scaling": true, "bit_conversion": true, "dark_c": true, "flat_c": true, "norm_f": true, "clean_f": true, "sigma": 59, "clean_mode": "UV", "flat_states": 24, "prefilter_f": null, "fs_c": true, "limb": "W", "demod": true, "norm_stokes": true, "ItoQUV": false, "ctalk_params": null, "rte": "RTE", "p_milos": false, "cmilos_fits_opt": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/sep_2021_west_us_check/", "out_stokes_file": true, "out_stokes_filename": null, "out_rte_filename": null, "config": true} \ No newline at end of file diff --git 
a/setup.sh b/setup.sh new file mode 100644 index 0000000000000000000000000000000000000000..962432613a5d25a4854c952cc35f6c85a9552483 --- /dev/null +++ b/setup.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +module load openmpi_gcc +module load anaconda/3-5.0.1 +modlue load fftw/3.3.5 + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/local/cfitsio/cfitsio-3.350/lib + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/local/cfitsio/cfitsio-3.350/include + +#compile the rte codes + +cd ./cmilos +make clear +make + +cd .. + +cd ./cmilos-fits +make clear +make + +cd .. + +cd p-milos +make clean +make + +cd .. + +conda env create -f environment.yml +#pip install -r requrements.txt + +source activate hrt_pipeline_env + diff --git a/src/__pycache__/hrt_pipe.cpython-38.pyc b/src/__pycache__/hrt_pipe.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e24829f8e4c11a3a6ad075eebcf3378a3984a14 Binary files /dev/null and b/src/__pycache__/hrt_pipe.cpython-38.pyc differ diff --git a/src/__pycache__/hrt_pipe_sub.cpython-38.pyc b/src/__pycache__/hrt_pipe_sub.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29f6a089aa6f0119e9fb22094e2a99673eeee6b3 Binary files /dev/null and b/src/__pycache__/hrt_pipe_sub.cpython-38.pyc differ diff --git a/src/__pycache__/utils.cpython-38.pyc b/src/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7365465bff033080488c00ba841060b2ccf973fa Binary files /dev/null and b/src/__pycache__/utils.cpython-38.pyc differ diff --git a/src/hrt_pipe.py b/src/hrt_pipe.py index 486736cdf789a256732961d053e30bc982eecb27..815d5cdeccd0234d4dfb01a40e2d320d7147dacd 100755 --- a/src/hrt_pipe.py +++ b/src/hrt_pipe.py @@ -2,7 +2,6 @@ from posix import listdir import numpy as np import os.path from astropy.io import fits -# from scipy.ndimage import gaussian_filter import time import datetime from operator import itemgetter @@ -10,9 +9,9 @@ import json import matplotlib.pyplot as plt from numpy.core.numeric import True_ -from .utils import * -from .hrt_pipe_sub import * - +from utils import * +from processes import * +from inversions import * def phihrt_pipe(input_json_file): @@ -74,6 +73,8 @@ def phihrt_pipe(input_json_file): apply dark field correction fs_c: bool, DEFAULT True apply HRT field stop + ghost_c: bool, DEFAULT True + apply HRT field stop and avoid ghost region in CrossTalk parameters computation limb: str, DEFAULT None specify if it is a limb observation, options are 'N', 'S', 'W', 'E' demod: bool, DEFAULT: True @@ -94,6 +95,8 @@ def phihrt_pipe(input_json_file): invert using cmilos, options: 'RTE' for Milne Eddington Inversion, 'CE' for Classical Estimates, 'CE+RTE' for combined out_rte_filename: str, DEFAULT = '' if '', takes last 10 characters of input scan filename (assumes its a DID), change if want other name + out_intermediate: bool, DEFAULT = False + if True, dark corrected, flat corrected, prefilter corrected and demodulated data will be saved p_milos: bool, DEFAULT = True if True, will execute the RTE inversion using the parallel version of the CMILOS code on 16 processors Returns @@ -108,9 +111,11 @@ def phihrt_pipe(input_json_file): SPGYlib ''' + version = 'V1.1 December 15th 2021' printc('--------------------------------------------------------------',bcolors.OKGREEN) printc('PHI HRT data reduction software ',bcolors.OKGREEN) + printc('Version: '+version,bcolors.OKGREEN) printc('--------------------------------------------------------------',bcolors.OKGREEN) #----------------- 
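# ---------------------------------------------------------------------------
# Illustrative aside (not part of the patch above or below): a minimal sketch
# of how the config-driven entry point phihrt_pipe is called, based on the
# notebook cells and the example input_dict shown earlier in this diff. The
# light key check below is an assumption added for illustration only; the
# real pipeline performs its own checks when it parses the input JSON.
import json
import os
import sys

def check_input_json(json_path):
    """Very light sanity check of an hrt_pipeline input JSON before a run."""
    with open(json_path) as f:
        cfg = json.load(f)
    for key in ('data_f', 'flat_f', 'dark_f', 'out_dir'):  # keys from the example config
        if key not in cfg:
            raise KeyError(f"missing key in input config file: {key}")
    data_files = cfg['data_f'] if isinstance(cfg['data_f'], list) else [cfg['data_f']]
    missing = [p for p in data_files if not os.path.isfile(p)]
    if missing:
        print(f"warning: {len(missing)} science file(s) not found on disk")
    return cfg

if __name__ == '__main__':
    config_path = './input_jsons/name_of_your_input_file.json'  # placeholder name from the notebook
    check_input_json(config_path)
    sys.path.insert(1, './src/')          # import path used in the notebook
    from hrt_pipe import phihrt_pipe
    stokes = phihrt_pipe(config_path)
# ---------------------------------------------------------------------------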
@@ -139,14 +144,17 @@ def phihrt_pipe(input_json_file): flat_states = input_dict['flat_states'] prefilter_f = input_dict['prefilter_f'] fs_c = input_dict['fs_c'] + ghost_c = input_dict['ghost_c'] # DC 20211116 limb = input_dict['limb'] demod = input_dict['demod'] norm_stokes = input_dict['norm_stokes'] - CT_ItoQUV = input_dict['ItoQUV'] - ctalk_params = input_dict['ctalk_params'] + ItoQUV = input_dict['ItoQUV'] +# ctalk_params = input_dict['ctalk_params'] + out_intermediate = input_dict['out_intermediate'] # DC 20211116 + rte = input_dict['rte'] p_milos = input_dict['p_milos'] - cmilos_fits_opt = input_dict['cmilos_fits_opt'] + cmilos_fits_opt = input_dict['cmilos_fits'] out_dir = input_dict['out_dir'] out_stokes_file = input_dict['out_stokes_file'] @@ -157,15 +165,23 @@ def phihrt_pipe(input_json_file): config = True else: config = input_dict['config'] + + if 'vers' not in input_dict: + vrs = '01' + else: + vrs = input_dict['vers'] + if len(vrs) != 2: + print(f"Desired Version 'vers' from the input file is not 2 characters long: {vrs}") + raise KeyError except Exception as e: print(f"Missing key(s) in the input config file: {e}") raise KeyError - overall_time = time.time() + overall_time = time.perf_counter() if L1_input: - print("L1_input param set to True - Assuming L1 science data") + #print("L1_input param set to True - Assuming L1 science data") accum_scaling = True bit_conversion = True scale_data = True @@ -177,7 +193,7 @@ def phihrt_pipe(input_json_file): print(" ") printc('-->>>>>>> Reading Data',color=bcolors.OKGREEN) - start_time = time.time() + start_time = time.perf_counter() if isinstance(data_f, str): data_f = [data_f] @@ -263,12 +279,16 @@ def phihrt_pipe(input_json_file): cols = slice(start_col,start_col + data_size[1]) ceny = slice(data_size[0]//2 - data_size[0]//4, data_size[0]//2 + data_size[0]//4) cenx = slice(data_size[1]//2 - data_size[1]//4, data_size[1]//2 + data_size[1]//4) + + hdr_arr = setup_header(hdr_arr) printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------ Load science data time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN) + printc(f"------------ Load science data time: {np.round(time.perf_counter() - start_time,3)} seconds",bcolors.OKGREEN) printc('--------------------------------------------------------------',bcolors.OKGREEN) - + for hdr in hdr_arr: + hdr['VERS_SW'] = version #version of pipeline + hdr['VERSION'] = vrs #version of the file V01 #----------------- # READ FLAT FIELDS #----------------- @@ -277,7 +297,7 @@ def phihrt_pipe(input_json_file): print(" ") printc('-->>>>>>> Reading Flats',color=bcolors.OKGREEN) - start_time = time.time() + start_time = time.perf_counter() # flat from IP-5 if '0024151020000' in flat_f or '0024150020000' in flat_f: @@ -326,14 +346,12 @@ def phihrt_pipe(input_json_file): if flat_f[-15:] == '0162201100.fits': # flat_f[-62:] == 'solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits' print("This flat has a missing line - filling in with neighbouring pixels") flat_copy = flat.copy() - flat[:,:,1,1] = filling_data(flat_copy[:,:,1,1], 0, mode = {'exact rows':[1345,1346]}, axis=1) + flat[:,:,1,2] = filling_data(flat_copy[:,:,1,2], 0, mode = {'exact rows':[1345,1346]}, axis=1) -# flat[1345, 296:, 1, 1] = flat_copy[1344, 296:, 1, 1] -# flat[1346, :291, 1, 1] = flat_copy[1345, :291, 1, 1] del flat_copy printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------ Load flats time: 
{np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN) + printc(f"------------ Load flats time: {np.round(time.perf_counter() - start_time,3)} seconds",bcolors.OKGREEN) printc('--------------------------------------------------------------',bcolors.OKGREEN) @@ -350,7 +368,7 @@ def phihrt_pipe(input_json_file): print(" ") printc('-->>>>>>> Reading Darks ',color=bcolors.OKGREEN) - start_time = time.time() + start_time = time.perf_counter() try: @@ -382,7 +400,7 @@ def phihrt_pipe(input_json_file): dark = compare_IMGDIRX(dark[np.newaxis],header_imgdirx_exists,imgdirx_flipped,header_drkdirx_exists,drkdirx_flipped)[0] printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------ Load darks time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN) + printc(f"------------ Load darks time: {np.round(time.perf_counter() - start_time,3)} seconds",bcolors.OKGREEN) printc('--------------------------------------------------------------',bcolors.OKGREEN) except Exception: @@ -401,8 +419,13 @@ def phihrt_pipe(input_json_file): if flat_c == False: flat = np.empty((2048,2048,4,6)) - # if out_intermediate: - # data_darkc = data.copy() + if out_intermediate: + data_darkc = data.copy() + + DID_dark = h['FILENAME'] + + for hdr in hdr_arr: + hdr['CAL_DARK'] = DID_dark else: print(" ") @@ -413,16 +436,22 @@ def phihrt_pipe(input_json_file): # OPTIONAL Unsharp Masking clean the flat field stokes Q, U or V images #----------------- - if clean_f is not None and flat_c: + flat_copy = flat.copy() + + if clean_f and flat_c: print(" ") printc('-->>>>>>> Cleaning flats with Unsharp Masking',color=bcolors.OKGREEN) - start_time = time.time() + start_time = time.perf_counter() flat = unsharp_masking(flat,sigma,flat_pmp_temp,cpos_arr,clean_mode, clean_f = "blurring") + + for hdr in hdr_arr: + hdr['CAL_USH'] = clean_mode + hdr['SIGM_USH'] = sigma printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- Cleaning flat time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN) + printc(f"------------- Cleaning flat time: {np.round(time.perf_counter() - start_time,3)} seconds",bcolors.OKGREEN) printc('--------------------------------------------------------------',bcolors.OKGREEN) else: @@ -451,11 +480,20 @@ def phihrt_pipe(input_json_file): try: data = flat_correction(data,flat,flat_states,rows,cols) - # if out_intermediate: - # data_flatc = data.copy() + if out_intermediate: + data_flatc = data.copy() + #DID_flat = header_flat['PHIDATID'] + if '/' in flat_f: + filename = flat_f.split('/')[-1] + else: + filename = flat_f + + for hdr in hdr_arr: + hdr['CAL_FLAT'] = filename#DID_flat - not the FILENAME keyword, in case we are trying with extra cleaned flats + printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- Flat Field correction time: {np.round(time.time() - start_time,3)} seconds ",bcolors.OKGREEN) + printc(f"------------- Flat Field correction time: {np.round(time.perf_counter() - start_time,3)} seconds ",bcolors.OKGREEN) printc('--------------------------------------------------------------',bcolors.OKGREEN) except: printc("ERROR, Unable to apply flat fields",color=bcolors.FAIL) @@ -473,7 +511,7 @@ def phihrt_pipe(input_json_file): print(" ") printc('-->>>>>>> Prefilter Correction',color=bcolors.OKGREEN) - start_time = time.time() + start_time = time.perf_counter() prefilter_voltages = 
[-1300.00,-1234.53,-1169.06,-1103.59,-1038.12,-972.644,-907.173,-841.702,-776.231,-710.760,-645.289, -579.818,-514.347,-448.876,-383.404,-317.933,-252.462,-186.991,-121.520,-56.0490,9.42212,74.8932, @@ -487,9 +525,13 @@ def phihrt_pipe(input_json_file): data = prefilter_correction(data,voltagesData_arr,prefilter,prefilter_voltages) + for hdr in hdr_arr: + hdr['CAL_PRE'] = prefilter_f + + data_PFc = data.copy() # DC 20211116 printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- Prefilter correction time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN) + printc(f"------------- Prefilter correction time: {np.round(time.perf_counter() - start_time,3)} seconds",bcolors.OKGREEN) printc('--------------------------------------------------------------',bcolors.OKGREEN) else: @@ -504,6 +546,9 @@ def phihrt_pipe(input_json_file): if fs_c: data, field_stop = apply_field_stop(data, rows, cols, header_imgdirx_exists, imgdirx_flipped) + if ghost_c: + field_stop_ghost = load_ghost_field_stop(header_imgdirx_exists, imgdirx_flipped) + else: print(" ") @@ -519,12 +564,16 @@ def phihrt_pipe(input_json_file): print(" ") printc('-->>>>>>> Demodulating data',color=bcolors.OKGREEN) - start_time = time.time() + start_time = time.perf_counter() data,_ = demod_hrt(data, pmp_temp) + + for hdr in hdr_arr: + hdr['CAL_IPOL'] = 'HRT'+pmp_temp + printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- Demodulation time: {np.round(time.time() - start_time,3)} seconds ",bcolors.OKGREEN) + printc(f"------------- Demodulation time: {np.round(time.perf_counter() - start_time,3)} seconds ",bcolors.OKGREEN) printc('--------------------------------------------------------------',bcolors.OKGREEN) else: @@ -541,46 +590,53 @@ def phihrt_pipe(input_json_file): print(" ") printc('-->>>>>>> Normalising Stokes to Quiet Sun',color=bcolors.OKGREEN) - start_time = time.time() + start_time = time.perf_counter() + Ic_mask = np.zeros((data_size[0],data_size[1],data_shape[-1]),dtype=bool) + I_c = np.ones(data_shape[-1]) + if limb is not None: + limb_mask = np.zeros((data_size[0],data_size[1],data_shape[-1])) + for scan in range(data_shape[-1]): - #I_c = np.mean(data[ceny,cenx,0,cpos_arr[0],int(scan)]) #mean of central 1k x 1k of continuum stokes I - #I_c = np.mean(data[50:500,700:1700,0,cpos_arr[0],int(scan)]) # mean in the not-out-of the Sun north limb - #I_c = np.mean(data[1500:2000,800:1300,0,cpos_arr[0],int(scan)]) # mean in the not-out-of the Sun south limb - #I_c = np.mean(data[350:1700,200:900,0,cpos_arr[0],int(scan)]) # West limb - limb_copy = np.copy(data) + #limb_copy = np.copy(data) + #from Daniele Calchetti + if limb is not None: if limb == 'N': - limb_mask, Ic_mask = limb_fitting(data[:,:,0,cpos_arr[0],int(scan)], mode = 'columns', switch = True) + limb_temp, Ic_temp = limb_fitting(data[:,:,0,cpos_arr[0],int(scan)], mode = 'columns', switch = True) if limb == 'S': - limb_mask, Ic_mask = limb_fitting(data[:,:,0,cpos_arr[0],int(scan)], mode = 'columns', switch = False) + limb_temp, Ic_temp = limb_fitting(data[:,:,0,cpos_arr[0],int(scan)], mode = 'columns', switch = False) if limb == 'W': - limb_mask, Ic_mask = limb_fitting(data[:,:,0,cpos_arr[0],int(scan)], mode = 'rows', switch = True) + limb_temp, Ic_temp = limb_fitting(data[:,:,0,cpos_arr[0],int(scan)], mode = 'rows', switch = True) if limb == 'E': - limb_mask, Ic_mask = limb_fitting(data[:,:,0,cpos_arr[0],int(scan)], mode = 'rows', switch = False) 
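# ---------------------------------------------------------------------------
# Illustrative aside (not part of the patch): the hunk around this point
# normalises each scan to the quiet Sun, i.e. it computes a mean continuum
# Stokes I over a mask (a central box, or the on-disc part of a limb image)
# and divides the whole Stokes array by that value. A minimal standalone
# sketch of the idea, with assumed [y, x, pol, wave] shapes and a plain
# central-box mask (not the pipeline's own limb_fitting machinery):
import numpy as np

def normalise_to_quiet_sun(stokes, cpos, mask=None):
    """Divide a [y, x, 4, nwave] Stokes array by its mean continuum intensity."""
    ny, nx = stokes.shape[:2]
    if mask is None:
        mask = np.zeros((ny, nx), dtype=bool)
        mask[ny//4:3*ny//4, nx//4:3*nx//4] = True   # central half of the FOV
    I_c = np.mean(stokes[mask, 0, cpos])            # mean continuum Stokes I
    return stokes / I_c, I_c

# toy usage with random data, continuum at wavelength index 5
demo = np.random.default_rng(0).normal(1.0, 0.01, (64, 64, 4, 6))
normed, I_c = normalise_to_quiet_sun(demo, cpos=5)
# ---------------------------------------------------------------------------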
+ limb_temp, Ic_temp = limb_fitting(data[:,:,0,cpos_arr[0],int(scan)], mode = 'rows', switch = False) - limb_mask = np.where(limb_mask>0,1,0) - Ic_mask = np.where(Ic_mask>0,1,0) + limb_temp = np.where(limb_temp>0,1,0) + Ic_temp = np.where(Ic_temp>0,1,0) - data[:,:,:,:,scan] = data[:,:,:,:,scan] * limb_mask[:,:,np.newaxis,np.newaxis] + data[:,:,:,:,scan] = data[:,:,:,:,scan] * limb_temp[:,:,np.newaxis,np.newaxis] + limb_mask[...,scan] = limb_temp else: - Ic_mask = np.zeros(data_size) - Ic_mask[ceny,cenx] = 1 - Ic_mask = np.where(Ic_mask>0,1,0) + Ic_temp = np.zeros(data_size) + Ic_temp[ceny,cenx] = 1 + Ic_temp = np.where(Ic_temp>0,1,0) if fs_c: - Ic_mask *= field_stop + Ic_temp *= field_stop[rows,cols] - Ic_mask = np.array(Ic_mask, dtype=bool) - I_c = np.mean(data[Ic_mask,0,cpos_arr[0],int(scan)]) - data[:,:,:,:,scan] = data[:,:,:,:,scan]/I_c - - # if out_intermediate: - # data_demod = data.copy() + Ic_temp = np.array(Ic_temp, dtype=bool) + I_c[scan] = np.mean(data[Ic_temp,0,cpos_arr[0],int(scan)]) + data[:,:,:,:,scan] = data[:,:,:,:,scan]/I_c[scan] + Ic_mask[...,scan] = Ic_temp + hdr_arr[scan]['CAL_NORM'] = round(I_c[scan],4) # DC 20211116 + + if out_intermediate: + data_demod_normed = data.copy() + printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- Stokes Normalising time: {np.round(time.time() - start_time,3)} seconds ",bcolors.OKGREEN) + printc(f"------------- Stokes Normalising time: {np.round(time.perf_counter() - start_time,3)} seconds ",bcolors.OKGREEN) printc('--------------------------------------------------------------',bcolors.OKGREEN) else: @@ -592,40 +648,44 @@ def phihrt_pipe(input_json_file): # CROSS-TALK CALCULATION #----------------- - if CT_ItoQUV: + if ItoQUV: print(" ") printc('-->>>>>>> Cross-talk correction I to Q,U,V ',color=bcolors.OKGREEN) - start_time = time.time() - - num_of_scans = data_shape[-1] - - try: - assert ctalk_params.shape == (2,3) - except AssertionError: - print("ctalk_params is not in the required (2,3) shape, please reconcile") - raise AssertionError - + start_time = time.perf_counter() slope, offset = 0, 1 q, u, v = 0, 1, 2 - - for scan_hdr in hdr_arr: + CTparams = np.zeros((2,3,number_of_scans)) + + for scan, scan_hdr in enumerate(hdr_arr): + printc(f' ---- >>>>> CT parameters computation of data scan number: {scan} .... 
',color=bcolors.OKGREEN) + if ghost_c: # DC 20211116 + ctalk_params = crosstalk_auto_ItoQUV(data[...,scan],cpos_arr[scan],0,roi=np.asarray(Ic_mask[...,scan]*field_stop_ghost,dtype=bool)) # DC 20211116 + else: # DC 20211116 + ctalk_params = crosstalk_auto_ItoQUV(data[...,scan],cpos_arr[scan],0,roi=Ic_mask[...,scan]) # DC 20211116 + + CTparams[...,scan] = ctalk_params + if 'CAL_CRT0' in scan_hdr: #check to make sure the keywords exist - scan_hdr['CAL_CRT0'] = ctalk_params[slope,q] #I-Q slope - scan_hdr['CAL_CRT2'] = ctalk_params[slope,u] #I-U slope - scan_hdr['CAL_CRT4'] = ctalk_params[slope,v] #I-V slope - - scan_hdr['CAL_CRT1'] = ctalk_params[offset,q] #I-Q offset - scan_hdr['CAL_CRT3'] = ctalk_params[offset,u] #I-U offset - scan_hdr['CAL_CRT5'] = ctalk_params[offset,v] #I-V offset - - ctalk_params = np.repeat(ctalk_params[:,:,np.newaxis], num_of_scans, axis = 2) + + scan_hdr['CAL_CRT0'] = round(ctalk_params[slope,q],4) #I-Q slope + scan_hdr['CAL_CRT2'] = round(ctalk_params[slope,u],4) #I-U slope + scan_hdr['CAL_CRT4'] = round(ctalk_params[slope,v],4) #I-V slope + scan_hdr['CAL_CRT1'] = round(ctalk_params[offset,q],4) #I-Q offset + scan_hdr['CAL_CRT3'] = round(ctalk_params[offset,u],4) #I-U offset + scan_hdr['CAL_CRT5'] = round(ctalk_params[offset,v],4) #I-V offset + + scan_hdr['CAL_CRT6'] = 0 #V-Q slope + scan_hdr['CAL_CRT8'] = 0 #V-U slope + scan_hdr['CAL_CRT7'] = 0 #V-Q offset + scan_hdr['CAL_CRT9'] = 0 #V-U offset + + try: + data = CT_ItoQUV(data, CTparams, norm_stokes, cpos_arr, Ic_mask) - try: - data = CT_ItoQUV(data, ctalk_params, norm_stokes, cpos_arr, Ic_mask) except Exception: print("There was an issue applying the I -> Q,U,V cross talk correction") if 'Ic_mask' not in vars(): @@ -650,13 +710,13 @@ def phihrt_pipe(input_json_file): raise KeyError("'norm_f' keyword in input config file not set to True \n Aborting") printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- I -> Q,U,V cross talk correction time: {np.round(time.time() - start_time,3)} seconds ",bcolors.OKGREEN) + printc(f"------------- I -> Q,U,V cross talk correction time: {np.round(time.perf_counter() - start_time,3)} seconds ",bcolors.OKGREEN) printc('--------------------------------------------------------------',bcolors.OKGREEN) data *= field_stop[rows,cols, np.newaxis, np.newaxis, np.newaxis] # DC change 20211019 only for limb if limb is not None: - data *= limb_mask[rows,cols, np.newaxis, np.newaxis, np.newaxis] + data *= limb_mask[rows,cols, np.newaxis, np.newaxis] else: print(" ") @@ -675,6 +735,10 @@ def phihrt_pipe(input_json_file): #WRITE OUT STOKES VECTOR #----------------- + #rewrite the PARENT keyword to the original FILENAME + for count, scan in enumerate(data_f): + hdr_arr[count]['PARENT'] = hdr_arr[count]['FILENAME'] + #these two ifs need to be outside out_stokes_file if statement - needed for inversion if out_dir[-1] != "/": print("Desired Output directory missing / character, will be added") @@ -685,12 +749,12 @@ def phihrt_pipe(input_json_file): print(f"{out_dir} does not exist -->>>>>>> Creating it") os.makedirs(out_dir) - if out_stokes_file: print(" ") - printc('Saving demodulated data to one _reduced.fits file per scan') + printc('Saving demodulated data to one \'stokes\' file per scan') + #check if user set specific output filenames, check for duplicates, otherwise use the DID if out_stokes_filename is not None: if isinstance(out_stokes_filename,str): @@ -700,48 +764,71 @@ def phihrt_pipe(input_json_file): scan_name_list = 
out_stokes_filename scan_name_defined = True else: - print("Input demod filenames do not match the number of input arrays, reverting to default naming") + print("Input stokes filenames do not match the number of input arrays, reverting to default naming") scan_name_defined = False else: scan_name_defined = False - if not scan_name_defined: #check if already defined by input, otherwise generate - scan_name_list = check_filenames(data_f) + if not scan_name_defined: #check if already defined by user + scan_name_list = check_filenames(data_f) #extract the DIDs and check no duplicates - for count, scan in enumerate(data_f): + stokes_file = create_output_filenames(scan, scan_name_list[count], version = vrs)[0] + + ntime = datetime.datetime.now() + hdr_arr[count]['DATE'] = ntime.strftime("%Y-%m-%dT%H:%M:%S") + hdr_arr[count]['FILENAME'] = stokes_file + hdr_arr[count]['HISTORY'] = f"Version: {version}. Dark: {dark_f.split('/')[-1]}. Flat: {flat_f.split('/')[-1]}, Unsharp: {clean_f}. Flat norm: {norm_f}. I->QUV ctalk: {ItoQUV}." + with fits.open(scan) as hdu_list: - print(f"Writing out demod file as: {scan_name_list[count]}_reduced.fits") + print(f"Writing out stokes file as: {stokes_file}") hdu_list[0].data = data[:,:,:,:,count] hdu_list[0].header = hdr_arr[count] #update the calibration keywords - hdu_list.writeto(out_dir + scan_name_list[count] + '_reduced.fits', overwrite=True) + hdu_list.writeto(out_dir + stokes_file, overwrite=True) # DC change 20211014 - """ - if out_intermediate: - with fits.open(scan) as hdu_list: - print(f"Writing out demod file as: {scan_name_list[count]}_dark_corrected.fits") - hdu_list[0].data = data_darkc[:,:,:,:,count] - hdu_list[0].header = hdr_arr[count] #update the calibration keywords - hdu_list.writeto(out_dir + scan_name_list[count] + '_dark_corrected.fits', overwrite=True) - - with fits.open(scan) as hdu_list: - print(f"Writing out demod file as: {scan_name_list[count]}_flat_corrected.fits") - hdu_list[0].data = data_flatc[:,:,:,:,count] - hdu_list[0].header = hdr_arr[count] #update the calibration keywords - hdu_list.writeto(out_dir + scan_name_list[count] + '_flat_corrected.fits', overwrite=True) - - with fits.open(scan) as hdu_list: - print(f"Writing out demod file as: {scan_name_list[count]}_demodulated.fits") - hdu_list[0].data = data_demod[:,:,:,:,count] - hdu_list[0].header = hdr_arr[count] #update the calibration keywords - hdu_list.writeto(out_dir + scan_name_list[count] + '_demodulated.fits', overwrite=True) - """ - - # with fits.open(scan) as hdu_list: - # hdu_list[0].data = limb_copy - # hdu_list.writeto(out_dir+scan_name_list[count]+'_limb_fit_input.fits', overwrite=True) + + if out_intermediate: # DC 20211116 + if dark_c: # DC 20211116 + with fits.open(scan) as hdu_list: + print(f"Writing intermediate file as: {scan_name_list[count]}_dark_corrected.fits") + hdu_list[0].data = data_darkc[:,:,:,:,count] + hdu_list[0].header = hdr_arr[count] #update the calibration keywords + hdu_list.writeto(out_dir + scan_name_list[count] + '_dark_corrected.fits', overwrite=True) + + if flat_c: # DC 20211116 + with fits.open(scan) as hdu_list: + print(f"Writing intermediate file as: {scan_name_list[count]}_flat_corrected.fits") + hdu_list[0].data = data_flatc[:,:,:,:,count] + hdu_list[0].header = hdr_arr[count] #update the calibration keywords + hdu_list.writeto(out_dir + scan_name_list[count] + '_flat_corrected.fits', overwrite=True) + + with fits.open(flat_f) as hdu_list: + print(f"Writing flat field file as: {flat_f.split('/')[-1]}") + hdu_list[0].data = flat + 
#update the calibration keywords + hdu_list.writeto(out_dir + f"{flat_f.split('/')[-1]}", overwrite=True) + + with fits.open(flat_f) as hdu_list: + print(f"Writing flat field copy (before US) file as: copy_{flat_f.split('/')[-1]}") + hdu_list[0].data = flat_copy + #update the calibration keywords + hdu_list.writeto(out_dir + "copy_" + f"{flat_f.split('/')[-1]}", overwrite=True) + + if prefilter_f is not None: # DC 20211116 + with fits.open(scan) as hdu_list: + print(f"Writing intermediate file as: {scan_name_list[count]}_prefilter_corrected.fits") + hdu_list[0].data = data_PFc[:,:,:,:,count] + hdu_list[0].header = hdr_arr[count] #update the calibration keywords + hdu_list.writeto(out_dir + scan_name_list[count] + '_prefilter_corrected.fits', overwrite=True) + + if demod: # DC 20211116 + with fits.open(scan) as hdu_list: + print(f"Writing intermediate file as: {scan_name_list[count]}_demodulated.fits") + hdu_list[0].data = data_demod_normed[:,:,:,:,count] + hdu_list[0].header = hdr_arr[count] #update the calibration keywords + hdu_list.writeto(out_dir + scan_name_list[count] + '_demodulated.fits', overwrite=True) else: print(" ") @@ -762,23 +849,25 @@ def phihrt_pipe(input_json_file): if limb is not None: - mask = limb_mask*field_stop - + mask = limb_mask*field_stop[rows,cols,np.newaxis] + else: + mask = np.ones((data_size[0],data_size[1],data_shape[-1]))*field_stop[rows,cols,np.newaxis] + if p_milos: try: - pmilos(data_f, wve_axis_arr, data_shape, cpos_arr, data, rte, mask, start_row, start_col, out_rte_filename, out_dir) + pmilos(data_f, hdr_arr, wve_axis_arr, data_shape, cpos_arr, data, rte, mask, imgdirx_flipped, out_rte_filename, out_dir, vers = vrs) except ValueError: - print("Running CMILOS instead!") - cmilos(data_f, hdr_arr, wve_axis_arr, data_shape, cpos_arr, data, rte, mask, start_row, start_col, out_rte_filename, out_dir) + print("Running CMILOS txt instead!") + cmilos(data_f, hdr_arr, wve_axis_arr, data_shape, cpos_arr, data, rte, mask,imgdirx_flipped, out_rte_filename, out_dir, vers = vrs) else: if cmilos_fits_opt: - cmilos_fits(data_f, hdr_arr, wve_axis_arr, data_shape, cpos_arr, data, rte, mask, start_row, start_col, out_rte_filename, out_dir) + cmilos_fits(data_f, hdr_arr, wve_axis_arr, data_shape, cpos_arr, data, rte, mask, imgdirx_flipped, out_rte_filename, out_dir, vers = vrs) else: - cmilos(data_f, hdr_arr, wve_axis_arr, data_shape, cpos_arr, data, rte, mask, start_row, start_col, out_rte_filename, out_dir) + cmilos(data_f, hdr_arr, wve_axis_arr, data_shape, cpos_arr, data, rte, mask, imgdirx_flipped, out_rte_filename, out_dir, vers = vrs) else: print(" ") @@ -796,16 +885,13 @@ def phihrt_pipe(input_json_file): dt = datetime.datetime.fromtimestamp(overall_time) runtime = dt.strftime("%d_%m_%YT%H_%M_%S") - with open(input_json_file, "w+") as f: - json.dump(out_dir + f"config_file_{runtime}.json", f) - + json.dump(input_dict, open(out_dir + f"config_file_{runtime}.json", "w")) + print(" ") printc('--------------------------------------------------------------',color=bcolors.OKGREEN) - printc(f'------------ Reduction Complete: {np.round(time.time() - overall_time,3)} seconds',color=bcolors.OKGREEN) + printc(f'------------ Reduction Complete: {np.round(time.perf_counter() - overall_time,3)} seconds',color=bcolors.OKGREEN) printc('--------------------------------------------------------------',color=bcolors.OKGREEN) - if flat_c: - return data, flat - else: - return data + + return data diff --git a/src/hrt_pipe_sub.py b/src/hrt_pipe_sub.py deleted file mode 100644 index 
7578182cbf99f6d4b73c26789d3f04f4cc6fecde..0000000000000000000000000000000000000000 --- a/src/hrt_pipe_sub.py +++ /dev/null @@ -1,1037 +0,0 @@ -import numpy as np -from astropy.io import fits -from scipy.ndimage import gaussian_filter -from operator import itemgetter -from .utils import * -import os -import time -import subprocess - -def load_flat(flat_f, accum_scaling, bit_conversion, scale_data, header_imgdirx_exists, imgdirx_flipped, cpos_arr) -> np.ndarray: - """ - load, scale, flip and correct flat - """ - print(" ") - printc('-->>>>>>> Reading Flats',color=bcolors.OKGREEN) - - start_time = time.time() - - # flat from IP-5 - if '0024151020000' in flat_f or '0024150020000' in flat_f: - flat, header_flat = get_data(flat_f, scaling = accum_scaling, bit_convert_scale=bit_conversion, - scale_data=False) - else: - flat, header_flat = get_data(flat_f, scaling = accum_scaling, bit_convert_scale=bit_conversion, - scale_data=scale_data) - - if 'IMGDIRX' in header_flat: - header_fltdirx_exists = True - fltdirx_flipped = str(header_flat['IMGDIRX']) - else: - header_fltdirx_exists = False - fltdirx_flipped = 'NO' - - print(f"Flat field shape is {flat.shape}") - # correction based on science data - see if flat and science are both flipped or not - flat = compare_IMGDIRX(flat,header_imgdirx_exists,imgdirx_flipped,header_fltdirx_exists,fltdirx_flipped) - - flat = np.moveaxis(flat, 0,-1) #so that it is [y,x,24] - flat = flat.reshape(2048,2048,6,4) #separate 24 images, into 6 wavelengths, with each 4 pol states - flat = np.moveaxis(flat, 2,-1) - - print(flat.shape) - - _, _, _, cpos_f = fits_get_sampling(flat_f,verbose = True) #get flat continuum position - - print(f"The continuum position of the flat field is at {cpos_f} index position") - - #-------- - # test if the science and flat have continuum at same position - #-------- - - flat = compare_cpos(flat,cpos_f,cpos_arr[0]) - - flat_pmp_temp = str(header_flat['HPMPTSP1']) - - print(f"Flat PMP Temperature Set Point: {flat_pmp_temp}") - - #-------- - # correct for missing line in particular flat field - #-------- - - if flat_f[-15:] == '0162201100.fits': # flat_f[-62:] == 'solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits' - print("This flat has a missing line - filling in with neighbouring pixels") - flat_copy = flat.copy() - flat[:,:,1,1] = filling_data(flat_copy[:,:,1,1], 0, mode = {'exact rows':[1345,1346]}, axis=1) - - del flat_copy - - printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------ Load flats time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN) - printc('--------------------------------------------------------------',bcolors.OKGREEN) - - return flat - - -def load_dark(dark_f) -> np.ndarray: - """ - loads dark field from given path - """ - print(" ") - printc('-->>>>>>> Reading Darks',color=bcolors.OKGREEN) - - start_time = time.time() - - try: - dark,_ = get_data(dark_f) - - dark_shape = dark.shape - - if dark_shape != (2048,2048): - - if dark.ndim > 2: - printc("Dark Field Input File has more dimensions than the expected 2048,2048 format: {}",dark_f,color=bcolors.WARNING) - raise ValueError - - printc("Dark Field Input File not in 2048,2048 format: {}",dark_f,color=bcolors.WARNING) - printc("Attempting to correct ",color=bcolors.WARNING) - - - try: - if dark_shape[0] > 2048: - dark = dark[dark_shape[0]-2048:,:] - - except Exception: - printc("ERROR, Unable to correct shape of dark field data: {}",dark_f,color=bcolors.FAIL) - raise ValueError - - 
printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------ Load darks time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN) - printc('--------------------------------------------------------------',bcolors.OKGREEN) - - return dark - - except Exception: - printc("ERROR, Unable to open and process darks file: {}",dark_f,color=bcolors.FAIL) - - -def apply_dark_correction(data, flat, dark, rows, cols) -> np.ndarray: - """ - subtracts dark field from flat field and science data - """ - print(" ") - print("-->>>>>>> Subtracting dark field") - - start_time = time.time() - - data -= dark[rows,cols, np.newaxis, np.newaxis, np.newaxis] - #flat -= dark[..., np.newaxis, np.newaxis] - # all processed flat fields should already be dark corrected - - printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- Dark Field correction time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN) - printc('--------------------------------------------------------------',bcolors.OKGREEN) - - return data, flat - - -def normalise_flat(flat, flat_f, ceny, cenx) -> np.ndarray: - """ - normalise flat fields at each wavelength position to remove the spectral line - """ - print(" ") - printc('-->>>>>>> Normalising Flats',color=bcolors.OKGREEN) - - start_time = time.time() - - try: - norm_fac = np.mean(flat[ceny,cenx, :, :], axis = (0,1))[np.newaxis, np.newaxis, ...] #mean of the central 1k x 1k - flat /= norm_fac - - printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- Normalising flat time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN) - printc('--------------------------------------------------------------',bcolors.OKGREEN) - - return flat - - except Exception: - printc("ERROR, Unable to normalise the flat fields: {}",flat_f,color=bcolors.FAIL) - - -def demod_hrt(data,pmp_temp, verbose = True) -> np.ndarray: - ''' - Use constant demodulation matrices to demodulate input data - ''' - if pmp_temp == '50': - demod_data = np.array([[ 0.28037298, 0.18741922, 0.25307596, 0.28119895], - [ 0.40408596, 0.10412157, -0.7225681, 0.20825675], - [-0.19126636, -0.5348939, 0.08181918, 0.64422774], - [-0.56897295, 0.58620095, -0.2579202, 0.2414017 ]]) - - elif pmp_temp == '40': - demod_data = np.array([[ 0.26450154, 0.2839626, 0.12642948, 0.3216773 ], - [ 0.59873885, 0.11278069, -0.74991184, 0.03091451], - [ 0.10833212, -0.5317737, -0.1677862, 0.5923593 ], - [-0.46916953, 0.47738808, -0.43824592, 0.42579797]]) - - else: - printc("Demodulation Matrix for PMP TEMP of {pmp_temp} deg is not available", color = bcolors.FAIL) - if verbose: - printc(f'Using a constant demodulation matrix for a PMP TEMP of {pmp_temp} deg',color = bcolors.OKGREEN) - - demod_data = demod_data.reshape((4,4)) - shape = data.shape - demod = np.tile(demod_data, (shape[0],shape[1],1,1)) - - if data.ndim == 5: - #if data array has more than one scan - data = np.moveaxis(data,-1,0) #moving number of scans to first dimension - - data = np.matmul(demod,data) - data = np.moveaxis(data,0,-1) #move scans back to the end - - elif data.ndim == 4: - #for if data has just one scan - data = np.matmul(demod,data) - - return data, demod - - -def unsharp_masking(flat,sigma,flat_pmp_temp,cpos_arr,clean_mode,clean_f,pol_end=4,verbose=True): - """ - unsharp masks the flat fields to blur our polarimetric structures due to solar rotation - clean_f = ['blurring', 'fft'] - 
""" - flat_demod, demodM = demod_hrt(flat, flat_pmp_temp,verbose) - - norm_factor = np.mean(flat_demod[512:1536,512:1536,0,cpos_arr[0]]) - - flat_demod /= norm_factor - - new_demod_flats = np.copy(flat_demod) - -# b_arr = np.zeros((2048,2048,3,5)) - - if cpos_arr[0] == 0: - wv_range = range(1,6) - - elif cpos_arr[0] == 5: - wv_range = range(5) - - if clean_mode == "QUV": - start_clean_pol = 1 - if verbose: - print("Unsharp Masking Q,U,V") - - elif clean_mode == "UV": - start_clean_pol = 2 - if verbose: - print("Unsharp Masking U,V") - - elif clean_mode == "V": - start_clean_pol = 3 - if verbose: - print("Unsharp Masking V") - - if clean_f == 'blurring': - blur = lambda a: gaussian_filter(a,sigma) - elif clean_f == 'fft': - x = np.fft.fftfreq(2048,1) - fftgaus2d = np.exp(-2*np.pi**2*(x-0)**2*sigma**2)[:,np.newaxis] * np.exp(-2*np.pi**2*(x-0)**2*sigma**2)[np.newaxis] - blur = lambda a : (np.fft.ifftn(fftgaus2d*np.fft.fftn(a.copy()))).real - - for pol in range(start_clean_pol,pol_end): - - for wv in wv_range: #not the continuum - - a = np.copy(np.clip(flat_demod[:,:,pol,wv], -0.02, 0.02)) - b = a - blur(a) -# b_arr[:,:,pol-1,wv-1] = b - c = a - b - - new_demod_flats[:,:,pol,wv] = c - - invM = np.linalg.inv(demodM) - - return np.matmul(invM, new_demod_flats*norm_factor) - - -def flat_correction(data,flat,flat_states,rows,cols) -> np.ndarray: - """ - correct science data with flat fields - """ - print(" ") - printc('-->>>>>>> Correcting Flatfield',color=bcolors.OKGREEN) - - start_time = time.time() - - try: - if flat_states == 6: - - printc("Dividing by 6 flats, one for each wavelength",color=bcolors.OKGREEN) - - tmp = np.mean(flat,axis=-2) #avg over pol states for the wavelength - - return data / tmp[rows,cols, np.newaxis, :, np.newaxis] - - - elif flat_states == 24: - - printc("Dividing by 24 flats, one for each image",color=bcolors.OKGREEN) - - return data / flat[rows,cols, :, :, np.newaxis] #only one new axis for the scans - - elif flat_states == 4: - - printc("Dividing by 4 flats, one for each pol state",color=bcolors.OKGREEN) - - tmp = np.mean(flat,axis=-1) #avg over wavelength - - return data / tmp[rows,cols, :, np.newaxis, np.newaxis] - else: - print(" ") - printc('-->>>>>>> Unable to apply flat correction. 
Please insert valid flat_states',color=bcolors.WARNING) - - - printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- Flat Field correction time: {np.round(time.time() - start_time,3)} seconds ",bcolors.OKGREEN) - printc('--------------------------------------------------------------',bcolors.OKGREEN) - - return data - - except: - printc("ERROR, Unable to apply flat fields",color=bcolors.FAIL) - - - -def prefilter_correction(data,voltagesData_arr,prefilter,prefilter_voltages): - """ - applies prefilter correction - """ - def _get_v1_index1(x): - index1, v1 = min(enumerate([abs(i) for i in x]), key=itemgetter(1)) - return v1, index1 - - data_shape = data.shape - - for scan in range(data_shape[-1]): - - voltage_list = voltagesData_arr[scan] - - for wv in range(6): - - v = voltage_list[wv] - - vdif = [v - pf for pf in prefilter_voltages] - - v1, index1 = _get_v1_index1(vdif) - - if vdif[index1] >= 0: - v2 = vdif[index1 + 1] - index2 = index1 + 1 - - else: - v2 = vdif[index1-1] - index2 = index1 - 1 - - imprefilter = (prefilter[:,:, index1]*v1 + prefilter[:,:, index2]*v2)/(v1+v2) #interpolation between nearest voltages - - data[:,:,:,wv,scan] /= imprefilter[...,np.newaxis] - - return data - -def apply_field_stop(data, rows, cols, header_imgdirx_exists, imgdirx_flipped) -> np.ndarray: - """ - apply field stop mask to the science data - """ - print(" ") - printc("-->>>>>>> Applying field stop",color=bcolors.OKGREEN) - - start_time = time.time() - - field_stop,_ = load_fits('./field_stop/HRT_field_stop.fits') - - field_stop = np.where(field_stop > 0,1,0) - - if header_imgdirx_exists: - if imgdirx_flipped == 'YES': #should be YES for any L1 data, but mistake in processing software - field_stop = field_stop[:,::-1] #also need to flip the flat data after dark correction - - data *= field_stop[rows,cols,np.newaxis, np.newaxis, np.newaxis] - - printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- Field stop time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN) - printc('--------------------------------------------------------------',bcolors.OKGREEN) - - return data, field_stop - -def CT_ItoQUV(data, ctalk_params, norm_stokes, cpos_arr, Ic_mask): - """ - performs cross talk correction for I -> Q,U,V - """ - before_ctalk_data = np.copy(data) - data_shape = data.shape - -# ceny = slice(data_shape[0]//2 - data_shape[0]//4, data_shape[0]//2 + data_shape[0]//4) -# cenx = slice(data_shape[1]//2 - data_shape[1]//4, data_shape[1]//2 + data_shape[1]//4) - - cont_stokes = np.mean(data[Ic_mask,0,cpos_arr[0],:], axis = 0) - - for i in range(6): - -# stokes_i_wv_avg = np.mean(data[ceny,cenx,0,i,:], axis = (0,1)) - stokes_i_wv_avg = np.mean(data[Ic_mask,0,i,:], axis = 0) - - if norm_stokes: - #if normed, applies normalised offset to normed stokes - - tmp_param = ctalk_params*np.divide(stokes_i_wv_avg,cont_stokes) - - q_slope = tmp_param[0,0,:] - u_slope = tmp_param[0,1,:] - v_slope = tmp_param[0,2,:] - - q_int = tmp_param[1,0,:] - u_int = tmp_param[1,1,:] - v_int = tmp_param[1,2,:] - - data[:,:,1,i,:] = before_ctalk_data[:,:,1,i,:] - before_ctalk_data[:,:,0,i,:]*q_slope - q_int - - data[:,:,2,i,:] = before_ctalk_data[:,:,2,i,:] - before_ctalk_data[:,:,0,i,:]*u_slope - u_int - - data[:,:,3,i,:] = before_ctalk_data[:,:,3,i,:] - before_ctalk_data[:,:,0,i,:]*v_slope - v_int - - else: - #if not normed, applies raw offset cross talk correction to raw stokes counts - - tmp_param = 
ctalk_params[0,:,:]*np.divide(stokes_i_wv_avg,cont_stokes) - - q_slope = tmp_param[0,:] - u_slope = tmp_param[1,:] - v_slope = tmp_param[2,:] - - q_int = ctalk_params[1,0,:] - u_int = ctalk_params[1,1,:] - v_int = ctalk_params[1,2,:] - - data[:,:,1,i,:] = before_ctalk_data[:,:,1,i,:] - before_ctalk_data[:,:,0,i,:]*q_slope - q_int*stokes_i_wv_avg - - data[:,:,2,i,:] = before_ctalk_data[:,:,2,i,:] - before_ctalk_data[:,:,0,i,:]*u_slope - u_int*stokes_i_wv_avg - - data[:,:,3,i,:] = before_ctalk_data[:,:,3,i,:] - before_ctalk_data[:,:,0,i,:]*v_slope - v_int*stokes_i_wv_avg - - return data - - -def cmilos(data_f, hdr_arr, wve_axis_arr, data_shape, cpos_arr, data, rte, field_stop, start_row, start_col, out_rte_filename, out_dir): - """ - RTE inversion using CMILOS - """ - print(" ") - printc('-->>>>>>> RUNNING CMILOS ',color=bcolors.OKGREEN) - - try: - CMILOS_LOC = os.path.realpath(__file__) - - CMILOS_LOC = CMILOS_LOC[:-15] + 'cmilos/' #-11 as hrt_pipe.py is 11 characters - - if os.path.isfile(CMILOS_LOC+'milos'): - printc("Cmilos executable located at:", CMILOS_LOC,color=bcolors.WARNING) - - else: - raise ValueError('Cannot find cmilos:', CMILOS_LOC) - - except ValueError as err: - printc(err.args[0],color=bcolors.FAIL) - printc(err.args[1],color=bcolors.FAIL) - return - - wavelength = 6173.3354 - - for scan in range(int(data_shape[-1])): - - start_time = time.time() - - file_path = data_f[scan] - wave_axis = wve_axis_arr[scan] - hdr = hdr_arr[scan] - - #must invert each scan independently, as cmilos only takes in one dataset at a time - - #get wave_axis from the hdr information of the science scans - if cpos_arr[0] == 0: - shift_w = wave_axis[3] - wavelength - elif cpos_arr[0] == 5: - shift_w = wave_axis[2] - wavelength - # DC TEST - wave_axis = wave_axis - shift_w - - print('It is assumed the wavelength array is given by the hdr') - #print(wave_axis,color = bcolors.WARNING) - print("Wave axis is: ", (wave_axis - wavelength)*1000.) - print('Saving data into dummy_in.txt for RTE input') - - sdata = data[:,:,:,:,scan] - y,x,p,l = sdata.shape - #print(y,x,p,l) - - filename = 'dummy_in.txt' - with open(filename,"w") as f: - for i in range(x): - for j in range(y): - for k in range(l): - f.write('%e %e %e %e %e \n' % (wave_axis[k],sdata[j,i,0,k],sdata[j,i,1,k],sdata[j,i,2,k],sdata[j,i,3,k])) #wv, I, Q, U, V - del sdata - - printc(f' ---- >>>>> Inverting data scan number: {scan} .... ',color=bcolors.OKGREEN) - - cmd = CMILOS_LOC+"./milos" - cmd = fix_path(cmd) - - if rte == 'RTE': - rte_on = subprocess.call(cmd+" 6 15 0 0 dummy_in.txt > dummy_out.txt",shell=True) - if rte == 'CE': - rte_on = subprocess.call(cmd+" 6 15 2 0 dummy_in.txt > dummy_out.txt",shell=True) - if rte == 'CE+RTE': - rte_on = subprocess.call(cmd+" 6 15 1 0 dummy_in.txt > dummy_out.txt",shell=True) - - #print(rte_on) - - printc(' ---- >>>>> Reading results.... ',color=bcolors.OKGREEN) - del_dummy = subprocess.call("rm dummy_in.txt",shell=True) - #print(del_dummy) - - res = np.loadtxt('dummy_out.txt') - npixels = res.shape[0]/12. 
- #print(npixels) - #print(npixels/x) - result = np.zeros((12,y*x)).astype(float) - rte_invs = np.zeros((12,y,x)).astype(float) - for i in range(y*x): - result[:,i] = res[i*12:(i+1)*12] - result = result.reshape(12,y,x) - result = np.einsum('ijk->ikj', result) - rte_invs = result - del result - rte_invs_noth = np.copy(rte_invs) - - """ - From 0 to 11 - Counter (PX Id) - Iterations - Strength - Inclination - Azimuth - Eta0 parameter - Doppler width - Damping - Los velocity - Constant source function - Slope source function - Minimum chisqr value - """ - - noise_in_V = np.mean(data[:,:,3,cpos_arr[0],:]) - low_values_flags = np.max(np.abs(data[:,:,3,:,scan]),axis=-1) < noise_in_V # Where values are low - - rte_invs[2,low_values_flags] = 0 - rte_invs[3,low_values_flags] = 0 - rte_invs[4,low_values_flags] = 0 - - #np.savez_compressed(out_dir+'_RTE', rte_invs=rte_invs, rte_invs_noth=rte_invs_noth) - - del_dummy = subprocess.call("rm dummy_out.txt",shell=True) - #print(del_dummy) - - """ - #vlos S/C vorrection - v_x, v_y, v_z = hdr['HCIX_VOB']/1000, hdr['HCIY_VOB']/1000, hdr['HCIZ_VOB']/1000 - - #need line of sight velocity, should be total HCI velocity in km/s, with sun at origin. - #need to take care for velocities moving towards the sun, (ie negative) #could use continuum position as if towards or away - - if cpos_arr[scan] == 5: #moving away, redshifted - dir_factor = 1 - - elif cpos_arr[scan] == 0: #moving towards, blueshifted - dir_factor == -1 - - v_tot = dir_factor*math.sqrt(v_x**2 + v_y**2+v_z**2) #in km/s - - rte_invs_noth[8,:,:] = rte_invs_noth[8,:,:] - v_tot - """ - - rte_data_products = np.zeros((6,rte_invs_noth.shape[1],rte_invs_noth.shape[2])) - - rte_data_products[0,:,:] = rte_invs_noth[9,:,:] + rte_invs_noth[10,:,:] #continuum - rte_data_products[1,:,:] = rte_invs_noth[2,:,:] #b mag strength - rte_data_products[2,:,:] = rte_invs_noth[3,:,:] #inclination - rte_data_products[3,:,:] = rte_invs_noth[4,:,:] #azimuth - rte_data_products[4,:,:] = rte_invs_noth[8,:,:] #vlos - rte_data_products[5,:,:] = rte_invs_noth[2,:,:]*np.cos(rte_invs_noth[3,:,:]*np.pi/180.) 
#blos - - rte_data_products *= field_stop[np.newaxis,start_row:start_row + data.shape[0],start_col:start_col + data.shape[1]] #field stop, set outside to 0 - - if out_rte_filename is None: - filename_root = str(file_path.split('.fits')[0][-10:]) - else: - if isinstance(out_rte_filename, list): - filename_root = out_rte_filename[scan] - - elif isinstance(out_rte_filename, str): - filename_root = out_rte_filename - - else: - filename_root = str(file_path.split('.fits')[0][-10:]) - print(f"out_rte_filename neither string nor list, reverting to default: {filename_root}") - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products - hdu_list.writeto(out_dir+filename_root+'_rte_data_products.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[5,:,:] - hdu_list.writeto(out_dir+filename_root+'_blos_rte.fits', overwrite=True) - # DC change 20211101 Gherdardo needs separate fits files from inversion - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[3,:,:] - hdu_list.writeto(out_dir+filename_root+'_bazi_rte.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[2,:,:] - hdu_list.writeto(out_dir+filename_root+'_binc_rte.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[1,:,:] - hdu_list.writeto(out_dir+filename_root+'_bmag_rte.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[4,:,:] - hdu_list.writeto(out_dir+filename_root+'_vlos_rte.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[0,:,:] - hdu_list.writeto(out_dir+filename_root+'_Icont_rte.fits', overwrite=True) - - printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- CMILOS RTE Run Time: {np.round(time.time() - start_time,3)} seconds ",bcolors.OKGREEN) - printc('--------------------------------------------------------------',bcolors.OKGREEN) - - -def cmilos_fits(data_f, hdr_arr, wve_axis_arr, data_shape, cpos_arr, data, rte, field_stop, start_row, start_col, out_rte_filename, out_dir): - """ - RTE inversion using CMILOS - """ - print(" ") - printc('-->>>>>>> RUNNING CMILOS ',color=bcolors.OKGREEN) - - try: - CMILOS_LOC = os.path.realpath(__file__) - - CMILOS_LOC = CMILOS_LOC[:-15] + 'cmilos-fits/' #-11 as hrt_pipe.py is 11 characters - - if os.path.isfile(CMILOS_LOC+'milos'): - printc("Cmilos-fits executable located at:", CMILOS_LOC,color=bcolors.WARNING) - - else: - raise ValueError('Cannot find cmilos-fits:', CMILOS_LOC) - - except ValueError as err: - printc(err.args[0],color=bcolors.FAIL) - printc(err.args[1],color=bcolors.FAIL) - return - - wavelength = 6173.3354 - - for scan in range(int(data_shape[-1])): - - start_time = time.time() - - file_path = data_f[scan] - wave_axis = wve_axis_arr[scan] - hdr = hdr_arr[scan] - - #must invert each scan independently, as cmilos only takes in one dataset at a time - - #get wave_axis from the hdr information of the science scans - if cpos_arr[0] == 0: - shift_w = wave_axis[3] - wavelength - elif cpos_arr[0] == 5: - shift_w = wave_axis[2] - wavelength - - wave_axis = wave_axis - shift_w - - print('It is assumed the wavelength array is given by the hdr') - 
#print(wave_axis,color = bcolors.WARNING) - print("Wave axis is: ", (wave_axis - wavelength)*1000.) - print('Saving data into dummy_in.txt for RTE input') - - sdata = data[:,:,:,:,scan] - y,x,p,l = sdata.shape - - #create hdr with wavelength positions - hdr = fits.Header() - print(wave_axis[0]) - hdr['LAMBDA0'] = wave_axis[0]#needs it in Angstrom 6173.1 etc - hdr['LAMBDA1'] = wave_axis[1] - hdr['LAMBDA2'] = wave_axis[2] - hdr['LAMBDA3'] = wave_axis[3] - hdr['LAMBDA4'] = wave_axis[4] - hdr['LAMBDA5'] = wave_axis[5] - - - #write out data to temp fits for cmilos-fits input - input_arr = np.transpose(sdata, axes = (3,2,0,1)) #must transpose due to cfitsio (wl,pol,y,x) #3201 originally - - # DC CHANGE (same number of digits of cmilos) -# hdr['LAMBDA0'] = float('%e' % wave_axis[0])#needs it in Angstrom 6173.1 etc -# hdr['LAMBDA1'] = float('%e' % wave_axis[1]) -# hdr['LAMBDA2'] = float('%e' % wave_axis[2]) -# hdr['LAMBDA3'] = float('%e' % wave_axis[3]) -# hdr['LAMBDA4'] = float('%e' % wave_axis[4]) -# hdr['LAMBDA5'] = float('%e' % wave_axis[5]) -# printc('-->>>>>>> CHANGING DATA PRECISION ',color=bcolors.OKGREEN) -# for w in range(l): -# for s in range(p): -# for i in range(y): -# for j in range(x): -# input_arr[w,s,i,j] = float('%e' % input_arr[w,s,i,j]) - # DC CHANGE END - - hdu1 = fits.PrimaryHDU(data=input_arr, header = hdr) - - #mask - mask = np.ones((sdata.shape[0],sdata.shape[1])) #change this for fdt - hdu2 = fits.ImageHDU(data=mask) - - #write out to temp fits - hdul_tmp = fits.HDUList([hdu1, hdu2]) - hdul_tmp.writeto(out_dir+'temp_cmilos_io.fits', overwrite=True) - - del sdata - - printc(f' ---- >>>>> Inverting data scan number: {scan} .... ',color=bcolors.OKGREEN) - - cmd = CMILOS_LOC+"milos" - #cmd = fix - #fix_path(cmd) - print(cmd) - - if rte == 'RTE': - rte_on = subprocess.call(cmd+f" 6 15 0 {out_dir+'temp_cmilos_io.fits'}",shell=True) - if rte == 'CE': - rte_on = subprocess.call(cmd+f" 6 15 2 {out_dir+'temp_cmilos_io.fits'}",shell=True) - if rte == 'CE+RTE': - rte_on = subprocess.call(cmd+f" 6 15 1 {out_dir+'temp_cmilos_io.fits'}",shell=True) - - print(rte_on) - - printc(' ---- >>>>> Reading results.... ',color=bcolors.OKGREEN) - #print(del_dummy) - - with fits.open(out_dir+'temp_cmilos_io.fits') as hdu_list: - rte_out = hdu_list[0].data - #hdu_list.writeto(out_dir+'rte_out.fits', overwrite=True) - - del input_arr - - """ - From 0 to 11 - Iterations - Strength - Inclination - Azimuth - Eta0 parameter - Doppler width - Damping/aa - Los velocity - alfa? Counter PID? 
- Constant source function - Slope source function - Minimum chisqr value - """ - - """ - Direct from cmilos-fits/milos.c - inv->iter = malloc(npix*sizeof(int)); - inv->B = malloc(npix*sizeof(double)); - inv->gm = malloc(npix*sizeof(double)); - inv->az = malloc(npix*sizeof(double)); - inv->eta0 = malloc(npix*sizeof(double)); - inv->dopp = malloc(npix*sizeof(double)); - inv->aa = malloc(npix*sizeof(double)); - inv->vlos = malloc(npix*sizeof(double)); //km/s - inv->alfa = malloc(npix*sizeof(double)); //stay light factor - inv->S0 = malloc(npix*sizeof(double)); - inv->S1 = malloc(npix*sizeof(double)); - inv->nchisqrf = malloc(npix*sizeof(double)); - - """ - - """ - noise_in_V = np.mean(data[:,:,3,cpos_arr[0],:]) - low_values_flags = np.max(np.abs(data[:,:,3,:,scan]),axis=-1) < noise_in_V # Where values are low - - rte_out[2,low_values_flags] = 0 #not sure about 2,3,4 indexing here - rte_out[3,low_values_flags] = 0 - rte_out[4,low_values_flags] = 0 - """ - - rte_data_products = np.zeros((6,rte_out.shape[1],rte_out.shape[2])) - - rte_data_products[0,:,:] = rte_out[9,:,:] + rte_out[10,:,:] #continuum - rte_data_products[1,:,:] = rte_out[1,:,:] #b mag strength - rte_data_products[2,:,:] = rte_out[2,:,:] #inclination - rte_data_products[3,:,:] = rte_out[3,:,:] #azimuth - rte_data_products[4,:,:] = rte_out[7,:,:] #vlos - rte_data_products[5,:,:] = rte_out[1,:,:]*np.cos(rte_out[2,:,:]*np.pi/180.) #blos - - rte_data_products *= field_stop[np.newaxis,start_row:start_row + data.shape[0],start_col:start_col + data.shape[1]] #field stop, set outside to 0 - - if out_rte_filename is None: - filename_root = str(file_path.split('.fits')[0][-10:]) - else: - if isinstance(out_rte_filename, list): - filename_root = out_rte_filename[scan] - - elif isinstance(out_rte_filename, str): - filename_root = out_rte_filename - - else: - filename_root = str(file_path.split('.fits')[0][-10:]) - print(f"out_rte_filename neither string nor list, reverting to default: {filename_root}") - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products - hdu_list.writeto(out_dir+filename_root+'_rte_data_products.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[5,:,:] - hdu_list.writeto(out_dir+filename_root+'_blos_rte.fits', overwrite=True) - # DC change 20211101 Gherdardo needs separate fits files from inversion - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[3,:,:] - hdu_list.writeto(out_dir+filename_root+'_bazi_rte.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[2,:,:] - hdu_list.writeto(out_dir+filename_root+'_binc_rte.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[1,:,:] - hdu_list.writeto(out_dir+filename_root+'_bmag_rte.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[4,:,:] - hdu_list.writeto(out_dir+filename_root+'_vlos_rte.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[0,:,:] - hdu_list.writeto(out_dir+filename_root+'_Icont_rte.fits', overwrite=True) - - printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- CMILOS-FITS RTE Run Time: {np.round(time.time() - 
start_time,3)} seconds ",bcolors.OKGREEN) - printc('--------------------------------------------------------------',bcolors.OKGREEN) - - - -def pmilos(data_f, wve_axis_arr, data_shape, cpos_arr, data, rte, field_stop, start_row, start_col, out_rte_filename, out_dir): - """ - RTE inversion using PMILOS - """ - print(" ") - printc('-->>>>>>> RUNNING PMILOS ',color=bcolors.OKGREEN) - - try: - PMILOS_LOC = os.path.realpath(__file__) - - PMILOS_LOC = PMILOS_LOC[:-15] + 'p-milos/' #11 as hrt_pipe.py is 11 characters -8 if in utils.py - - if os.path.isfile(PMILOS_LOC+'pmilos.x'): - printc("Pmilos executable located at:", PMILOS_LOC,color=bcolors.WARNING) - - else: - raise ValueError('Cannot find pmilos:', PMILOS_LOC) - - except ValueError as err: - printc(err.args[0],color=bcolors.FAIL) - printc(err.args[1],color=bcolors.FAIL) - return - - wavelength = 6173.3354 - - for scan in range(int(data_shape[-1])): - - start_time = time.time() - - file_path = data_f[scan] - wave_axis = wve_axis_arr[scan] - - #must invert each scan independently, as cmilos only takes in one dataset at a time - - #get wave_axis from the hdr information of the science scans - if cpos_arr[0] == 0: - shift_w = wave_axis[3] - wavelength - elif cpos_arr[0] == 5: - shift_w = wave_axis[2] - wavelength - - wave_axis = wave_axis - shift_w - - print('It is assumed the wavelength array is given by the hdr') - #print(wave_axis,color = bcolors.WARNING) - print("Wave axis is: ", (wave_axis - wavelength)*1000.) - print('Saving data into ./p-milos/run/data/input_tmp.fits for pmilos RTE input') - - #write wavelengths to wavelength.fits file for the settings - - wave_input = np.zeros((2,6)) #cfitsio reads dimensions in opposite order - wave_input[0,:] = 1 - wave_input[1,:] = wave_axis - - print(wave_axis) - - hdr = fits.Header() - - primary_hdu = fits.PrimaryHDU(wave_input, header = hdr) - hdul = fits.HDUList([primary_hdu]) - hdul.writeto(f'./p-milos/run/wavelength_tmp.fits', overwrite=True) - - sdata = data[:,:,:,:,scan].T - sdata = sdata.astype(np.float32) - #create input fits file for pmilos - hdr = fits.Header() - - hdr['CTYPE1'] = 'HPLT-TAN' - hdr['CTYPE2'] = 'HPLN-TAN' - hdr['CTYPE3'] = 'STOKES' #check order of stokes - hdr['CTYPE4'] = 'WAVE-GRI' - - primary_hdu = fits.PrimaryHDU(sdata, header = hdr) - hdul = fits.HDUList([primary_hdu]) - hdul.writeto(f'./p-milos/run/data/input_tmp.fits', overwrite=True) - - if rte == 'RTE': - cmd = "mpiexec -n 64 ../pmilos.x pmilos.minit" #../milos.x pmilos.mtrol" ## - - if rte == 'CE': - cmd = "mpiexec -np 16 ../pmilos.x pmilos_ce.minit" - - if rte == 'CE+RTE': - print("CE+RTE not possible on PMILOS, performing RTE instead") - cmd = "mpiexec -np 16 ../pmilos.x pmilos.minit" - - if rte == 'RTE_seq': - cmd = '../milos.x pmilos.mtrol' - - del sdata - #need to change settings for CE or CE+RTE in the pmilos.minit file here - - printc(f' ---- >>>>> Inverting data scan number: {scan} .... 
',color=bcolors.OKGREEN) - - cwd = os.getcwd() - os.chdir("./p-milos/run/") - rte_on = subprocess.call(cmd,shell=True) - os.chdir(cwd) - - if rte == 'CE': - out_file = 'inv__mod_ce.fits' # not sure about this one - - else: - out_file = 'inv__mod.fits' #only when one datacube and 16 processors - - with fits.open(f'./p-milos/run/results/{out_file}') as hdu_list: - result = hdu_list[0].data - - #del_dummy = subprocess.call(f"rm ./p-milos/run/results/{out_file}.fits",shell=True) - del_dummy = subprocess.call(f"rm ./p-milos/run/results/{out_file}",shell=True) #must delete the output file - - #result has dimensions [rows,cols,13] - result = np.moveaxis(result,0,2) - print(result.shape) - #printc(f' ---- >>>>> You are HERE .... ',color=bcolors.WARNING) - """ - PMILOS Output 13 columns - 0. eta0 = line-to-continuum absorption coefficient ratio - 1. B = magnetic field strength [Gauss] - 2. vlos = line-of-sight velocity [km/s] - 3. dopp = Doppler width [Angstroms] - 4. aa = damping parameter - 5. gm = magnetic field inclination [deg] - 6. az = magnetic field azimuth [deg] - 7. S0 = source function constant - 8. S1 = source function gradient - 9. mac = macroturbulent velocity [km/s] - 10. filling factor of the magnetic component [0-1] - 11. Number of iterations performed - 12. Chisqr value - """ - - rte_data_products = np.zeros((6,result.shape[0],result.shape[1])) - - rte_data_products[0,:,:] = result[:,:,7] + result[:,:,8] #continuum - rte_data_products[2,:,:] = result[:,:,5] #inclination - rte_data_products[3,:,:] = result[:,:,6] #azimuth - rte_data_products[4,:,:] = result[:,:,2] #vlos - rte_data_products[5,:,:] = result[:,:,1]*np.cos(result[:,:,5]*np.pi/180.) #blos - - rte_data_products *= field_stop[np.newaxis,start_row:start_row + data.shape[0],start_col:start_col + data.shape[1]] #field stop, set outside to 0 - - #flipping taken care of for the field stop in the hrt_pipe - #printc(f' ---- >>>>> and HERE now .... 
',color=bcolors.WARNING) - if out_rte_filename is None: - filename_root = str(file_path.split('.fits')[0][-10:]) - else: - filename_root = out_rte_filename - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products - hdu_list.writeto(out_dir+filename_root+'_rte_data_products.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[5,:,:] - hdu_list.writeto(out_dir+filename_root+'_blos_rte.fits', overwrite=True) - # DC change 20211101 Gherdardo needs separate fits files from inversion - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[3,:,:] - hdu_list.writeto(out_dir+filename_root+'_bazi_rte.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[2,:,:] - hdu_list.writeto(out_dir+filename_root+'_binc_rte.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[1,:,:] - hdu_list.writeto(out_dir+filename_root+'_bmag_rte.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[4,:,:] - hdu_list.writeto(out_dir+filename_root+'_vlos_rte.fits', overwrite=True) - - with fits.open(file_path) as hdu_list: - hdu_list[0].hdr = hdr - hdu_list[0].data = rte_data_products[0,:,:] - hdu_list.writeto(out_dir+filename_root+'_Icont_rte.fits', overwrite=True) - - - printc('--------------------------------------------------------------',bcolors.OKGREEN) - printc(f"------------- PMILOS RTE Run Time: {np.round(time.time() - start_time,3)} seconds ",bcolors.OKGREEN) - printc('--------------------------------------------------------------',bcolors.OKGREEN) diff --git a/src/inversions.py b/src/inversions.py new file mode 100644 index 0000000000000000000000000000000000000000..fb7023a645ded43a3d940d77a90cd17563c599ef --- /dev/null +++ b/src/inversions.py @@ -0,0 +1,584 @@ +import numpy as np +from astropy.io import fits +from utils import * +import os +import time +import subprocess +import datetime + +def create_output_filenames(filename, DID, version = '01'): + """ + creating the L2 output filenames from the input, assuming L1 + """ + try: + file_start = filename.split('solo_')[1] + file_start = 'solo_' + file_start + L2_str = file_start.replace('L1', 'L2') + versioned = L2_str.split('V')[0] + 'V' + version + '_' + DID + '.fits' + stokes_file = versioned.replace('ilam', 'stokes') + icnt_file = versioned.replace('ilam', 'icnt') + bmag_file = versioned.replace('ilam', 'bmag') + bazi_file = versioned.replace('ilam', 'bazi') + binc_file = versioned.replace('ilam', 'binc') + blos_file = versioned.replace('ilam', 'blos') + vlos_file = versioned.replace('ilam', 'vlos') + + return stokes_file, icnt_file, bmag_file, bazi_file, binc_file, blos_file, vlos_file + + except Exception: + print("The input file: {file_path} does not contain 'L1'") + raise KeyError + + +def write_output_inversion(rte_data_products, file_path, scan, hdr_scan, imgdirx_flipped, out_dir, out_rte_filename, vers): + """ + write out the L2 files + stokes + taking care of the azimuth definition if the image is flipped + """ + + if imgdirx_flipped: + print("Input image has been flipped as per convention - converting Azimuth to convention") + azi = rte_data_products[3,:,:].copy() + rte_data_products[3,:,:] = 180 - azi + + if out_rte_filename is None: + filename_root = 
str(file_path.split('.fits')[0][-10:]) + stokes_file, icnt_file, bmag_file, bazi_file, binc_file, blos_file, vlos_file = create_output_filenames(file_path, filename_root, version = vers) + + else: + if isinstance(out_rte_filename, list): + filename_root = out_rte_filename[scan] + + elif isinstance(out_rte_filename, str): + filename_root = out_rte_filename + + else: + filename_root = str(file_path.split('.fits')[0][-10:]) + print(f"out_rte_filename neither string nor list, reverting to default: {filename_root}") + + blos_file, icnt_file, bmag_file, bazi_file, binc_file, vlos_file = filename_root, filename_root, filename_root, filename_root, filename_root, filename_root + + ntime = datetime.datetime.now() + hdr_scan['DATE'] = ntime.strftime("%Y-%m-%dT%H:%M:%S") + + version_k = hdr_scan['VERS_SW'] + if '.fits' in hdr_scan['CAL_DARK']: + dark_f_k = 'True' + else: + dark_f_k = 'False' + if '.fits' in hdr_scan['CAL_FLAT']: + flat_f_k = 'True' + else: + flat_f_k = 'False' + clean_f_k = hdr_scan['CAL_USH'] + if hdr_scan['CAL_CRT1'] > 0: + ItoQUV_k ='True' + else: + ItoQUV_k = 'False' + rte_sw_k = hdr_scan['RTE_SW'] + rte_mod_k = hdr_scan['RTE_MOD'] + + with fits.open(file_path) as hdu_list: + hdu_list[0].header = hdr_scan + hdu_list[0].data = rte_data_products + hdu_list.writeto(out_dir+filename_root+'_rte_data_products.fits', overwrite=True) + + #blos + with fits.open(file_path) as hdu_list: + hdr_scan['FILENAME'] = blos_file + hdr_scan['HISTORY'] = f"Reduced with hrt-pipeline {version_k}, to create a Blos file. Dark field Applied: {dark_f_k}. Flat field Applied: {flat_f_k}, Flat Unsharp Masked Mode: {clean_f_k}. I->QUV ctalk: {ItoQUV_k}. RTE software: {rte_sw_k}. RTE mode: {rte_mod_k}." + hdu_list[0].header = hdr_scan + hdu_list[0].data = rte_data_products[5,:,:] + hdu_list.writeto(out_dir+blos_file, overwrite=True) + + #bazi + with fits.open(file_path) as hdu_list: + hdr_scan['FILENAME'] = bazi_file + hdr_scan['HISTORY'] = f"Reduced with hrt-pipeline {version_k}, to create a Bazi file. Dark field Applied: {dark_f_k}. Flat field Applied: {flat_f_k}, Flat Unsharp Masked Mode: {clean_f_k}. I->QUV ctalk: {ItoQUV_k}. RTE software: {rte_sw_k}. RTE mode: {rte_mod_k}." + hdu_list[0].header = hdr_scan + hdu_list[0].data = rte_data_products[3,:,:] + hdu_list.writeto(out_dir+bazi_file, overwrite=True) + + #binc + with fits.open(file_path) as hdu_list: + hdr_scan['FILENAME'] = binc_file + hdr_scan['HISTORY'] = f"Reduced with hrt-pipeline {version_k}, to create a Binc file. Dark field Applied: {dark_f_k}. Flat field Applied: {flat_f_k}, Flat Unsharp Masked Mode: {clean_f_k}. I->QUV ctalk: {ItoQUV_k}. RTE software: {rte_sw_k}. RTE mode: {rte_mod_k}." + hdu_list[0].header = hdr_scan + hdu_list[0].data = rte_data_products[2,:,:] + hdu_list.writeto(out_dir+binc_file, overwrite=True) + + #bmag + with fits.open(file_path) as hdu_list: + hdr_scan['FILENAME'] = bmag_file + hdr_scan['HISTORY'] = f"Reduced with hrt-pipeline {version_k}, to create a Bmag file. Dark field Applied: {dark_f_k}. Flat field Applied: {flat_f_k}, Flat Unsharp Masked Mode: {clean_f_k}. I->QUV ctalk: {ItoQUV_k}. RTE software: {rte_sw_k}. RTE mode: {rte_mod_k}." + hdu_list[0].header = hdr_scan + hdu_list[0].data = rte_data_products[1,:,:] + hdu_list.writeto(out_dir+bmag_file, overwrite=True) + + #vlos + with fits.open(file_path) as hdu_list: + hdr_scan['FILENAME'] = vlos_file + hdr_scan['HISTORY'] = f"Reduced with hrt-pipeline {version_k}, to create a vlos file. Dark field Applied: {dark_f_k}. 
Flat field Applied: {flat_f_k}, Flat Unsharp Masked Mode: {clean_f_k}. I->QUV ctalk: {ItoQUV_k}. RTE software: {rte_sw_k}. RTE mode: {rte_mod_k}." + hdu_list[0].header = hdr_scan + hdu_list[0].data = rte_data_products[4,:,:] + hdu_list.writeto(out_dir+vlos_file, overwrite=True) + + #Icnt + with fits.open(file_path) as hdu_list: + hdr_scan['FILENAME'] = icnt_file + hdr_scan['HISTORY'] = f"Reduced with hrt-pipeline {version_k}, to create a Icnt file. Dark field Applied: {dark_f_k}. Flat field Applied: {flat_f_k}, Flat Unsharp Masked Mode: {clean_f_k}. I->QUV ctalk: {ItoQUV_k}. RTE software: {rte_sw_k}. RTE mode: {rte_mod_k}." + hdu_list[0].header = hdr_scan + hdu_list[0].data = rte_data_products[0,:,:] + hdu_list.writeto(out_dir+icnt_file, overwrite=True) + + +def cmilos(data_f, hdr_arr, wve_axis_arr, data_shape, cpos_arr, data, rte, mask, imgdirx_flipped, out_rte_filename, out_dir, vers = '01'): + """ + RTE inversion using CMILOS + """ + print(" ") + printc('-->>>>>>> RUNNING CMILOS ',color=bcolors.OKGREEN) + + try: + CMILOS_LOC = os.path.realpath(__file__) + + CMILOS_LOC = CMILOS_LOC.split('src/')[0] + 'cmilos/' #-11 as hrt_pipe.py is 11 characters + + if os.path.isfile(CMILOS_LOC+'milos'): + printc("Cmilos executable located at:", CMILOS_LOC,color=bcolors.WARNING) + + else: + raise ValueError('Cannot find cmilos:', CMILOS_LOC) + + except ValueError as err: + printc(err.args[0],color=bcolors.FAIL) + printc(err.args[1],color=bcolors.FAIL) + return + + wavelength = 6173.3354 + + for scan in range(int(data_shape[-1])): + + start_time = time.perf_counter() + + file_path = data_f[scan] + wave_axis = wve_axis_arr[scan] + hdr_scan = hdr_arr[scan] + + #must invert each scan independently, as cmilos only takes in one dataset at a time + + #get wave_axis from the hdr information of the science scans + if cpos_arr[0] == 0: + shift_w = wave_axis[3] - wavelength + elif cpos_arr[0] == 5: + shift_w = wave_axis[2] - wavelength + # DC TEST + wave_axis = wave_axis - shift_w + + print('It is assumed the wavelength array is given by the hdr') + #print(wave_axis,color = bcolors.WARNING) + print("Wave axis is: ", (wave_axis - wavelength)*1000.) + print('Saving data into dummy_in.txt for RTE input') + + sdata = data[:,:,:,:,scan] + y,x,p,l = sdata.shape + #print(y,x,p,l) + + filename = 'dummy_in.txt' + with open(filename,"w") as f: + for i in range(x): + for j in range(y): + for k in range(l): + f.write('%e %e %e %e %e \n' % (wave_axis[k],sdata[j,i,0,k],sdata[j,i,1,k],sdata[j,i,2,k],sdata[j,i,3,k])) #wv, I, Q, U, V + del sdata + + printc(f' ---- >>>>> Inverting data scan number: {scan} .... ',color=bcolors.OKGREEN) + + cmd = CMILOS_LOC+"./milos" + cmd = fix_path(cmd) + + if rte == 'RTE': + rte_on = subprocess.call(cmd+" 6 15 0 0 dummy_in.txt > dummy_out.txt",shell=True) + if rte == 'CE': + rte_on = subprocess.call(cmd+" 6 15 2 0 dummy_in.txt > dummy_out.txt",shell=True) + if rte == 'CE+RTE': + rte_on = subprocess.call(cmd+" 6 15 1 0 dummy_in.txt > dummy_out.txt",shell=True) + + printc(' ---- >>>>> Reading results.... ',color=bcolors.OKGREEN) + del_dummy = subprocess.call("rm dummy_in.txt",shell=True) + + res = np.loadtxt('dummy_out.txt') + npixels = res.shape[0]/12. 
+ #print(npixels) + #print(npixels/x) + result = np.zeros((12,y*x)).astype(float) + rte_invs = np.zeros((12,y,x)).astype(float) + for i in range(y*x): + result[:,i] = res[i*12:(i+1)*12] + result = result.reshape(12,y,x) + result = np.einsum('ijk->ikj', result) + rte_invs = result + del result + rte_invs_noth = np.copy(rte_invs) + + """ + From 0 to 11 + Counter (PX Id) + Iterations + Strength + Inclination + Azimuth + Eta0 parameter + Doppler width + Damping + Los velocity + Constant source function + Slope source function + Minimum chisqr value + """ + + noise_in_V = np.mean(data[:,:,3,cpos_arr[0],:]) + low_values_flags = np.max(np.abs(data[:,:,3,:,scan]),axis=-1) < noise_in_V # Where values are low + + rte_invs[2,low_values_flags] = 0 + rte_invs[3,low_values_flags] = 0 + rte_invs[4,low_values_flags] = 0 + + #np.savez_compressed(out_dir+'_RTE', rte_invs=rte_invs, rte_invs_noth=rte_invs_noth) + + del_dummy = subprocess.call("rm dummy_out.txt",shell=True) + + rte_data_products = np.zeros((6,rte_invs_noth.shape[1],rte_invs_noth.shape[2])) + + rte_data_products[0,:,:] = rte_invs_noth[9,:,:] + rte_invs_noth[10,:,:] #continuum + rte_data_products[1,:,:] = rte_invs_noth[2,:,:] #b mag strength + rte_data_products[2,:,:] = rte_invs_noth[3,:,:] #inclination + rte_data_products[3,:,:] = rte_invs_noth[4,:,:] #azimuth + rte_data_products[4,:,:] = rte_invs_noth[8,:,:] #vlos + rte_data_products[5,:,:] = rte_invs_noth[2,:,:]*np.cos(rte_invs_noth[3,:,:]*np.pi/180.) #blos + + rte_data_products *= mask[np.newaxis, :, :, 0] #field stop, set outside to 0 + + hdr_scan['RTE_MOD'] = rte + hdr_scan['RTE_SW'] = 'cmilos' + hdr_scan['RTE_ITER'] = str(15) + + write_output_inversion(rte_data_products, file_path, scan, hdr_scan, imgdirx_flipped, out_dir, out_rte_filename, vers) + + printc('--------------------------------------------------------------',bcolors.OKGREEN) + printc(f"------------- CMILOS RTE Run Time: {np.round(time.perf_counter() - start_time,3)} seconds ",bcolors.OKGREEN) + printc('--------------------------------------------------------------',bcolors.OKGREEN) + + +def cmilos_fits(data_f, hdr_arr, wve_axis_arr, data_shape, cpos_arr, data, rte, mask, imgdirx_flipped, out_rte_filename, out_dir, vers = '01'): + """ + RTE inversion using CMILOS + """ + print(" ") + printc('-->>>>>>> RUNNING CMILOS ',color=bcolors.OKGREEN) + + try: + CMILOS_LOC = os.path.realpath(__file__) + + CMILOS_LOC = CMILOS_LOC.split('src/')[0] + 'cmilos-fits/' #-11 as hrt_pipe.py is 11 characters + + if os.path.isfile(CMILOS_LOC+'milos'): + printc("Cmilos-fits executable located at:", CMILOS_LOC,color=bcolors.WARNING) + + else: + raise ValueError('Cannot find cmilos-fits:', CMILOS_LOC) + + except ValueError as err: + printc(err.args[0],color=bcolors.FAIL) + printc(err.args[1],color=bcolors.FAIL) + return + + wavelength = 6173.3354 + + for scan in range(int(data_shape[-1])): + + start_time = time.perf_counter() + + file_path = data_f[scan] + wave_axis = wve_axis_arr[scan] + hdr_scan = hdr_arr[scan] # DC 20211117 + + #must invert each scan independently, as cmilos only takes in one dataset at a time + + #get wave_axis from the hdr information of the science scans + if cpos_arr[0] == 0: + shift_w = wave_axis[3] - wavelength + elif cpos_arr[0] == 5: + shift_w = wave_axis[2] - wavelength + + wave_axis = wave_axis - shift_w + + print('It is assumed the wavelength array is given by the hdr') + #print(wave_axis,color = bcolors.WARNING) + print("Wave axis is: ", (wave_axis - wavelength)*1000.) 
+ print('Saving data into dummy_in.txt for RTE input') + + sdata = data[:,:,:,:,scan] + y,x,p,l = sdata.shape + + #create hdr with wavelength positions + hdr = fits.Header() + print(wave_axis[0]) + hdr['LAMBDA0'] = wave_axis[0]#needs it in Angstrom 6173.1 etc + hdr['LAMBDA1'] = wave_axis[1] + hdr['LAMBDA2'] = wave_axis[2] + hdr['LAMBDA3'] = wave_axis[3] + hdr['LAMBDA4'] = wave_axis[4] + hdr['LAMBDA5'] = wave_axis[5] + + #write out data to temp fits for cmilos-fits input + input_arr = np.transpose(sdata, axes = (3,2,0,1)) #must transpose due to cfitsio (wl,pol,y,x) #3201 originally + hdu1 = fits.PrimaryHDU(data=input_arr, header = hdr) + + #mask + mask = np.ones((sdata.shape[0],sdata.shape[1])) #change this for fdt + hdu2 = fits.ImageHDU(data=mask) + + #write out to temp fits + hdul_tmp = fits.HDUList([hdu1, hdu2]) + hdul_tmp.writeto(out_dir+'temp_cmilos_io.fits', overwrite=True) + + del sdata + + printc(f' ---- >>>>> Inverting data scan number: {scan} .... ',color=bcolors.OKGREEN) + + cmd = CMILOS_LOC+"milos" + #cmd = fix + #fix_path(cmd) + print(cmd) + + if rte == 'RTE': + rte_on = subprocess.call(cmd+f" 6 15 0 {out_dir+'temp_cmilos_io.fits'}",shell=True) + if rte == 'CE': + rte_on = subprocess.call(cmd+f" 6 15 2 {out_dir+'temp_cmilos_io.fits'}",shell=True) + if rte == 'CE+RTE': + rte_on = subprocess.call(cmd+f" 6 15 1 {out_dir+'temp_cmilos_io.fits'}",shell=True) + + print(rte_on) + + printc(' ---- >>>>> Reading results.... ',color=bcolors.OKGREEN) + #print(del_dummy) + + with fits.open(out_dir+'temp_cmilos_io.fits') as hdu_list: + rte_out = hdu_list[0].data + #hdu_list.writeto(out_dir+'rte_out.fits', overwrite=True) + + del input_arr + + """ + From 0 to 11 + Iterations + Strength + Inclination + Azimuth + Eta0 parameter + Doppler width + Damping/aa + Los velocity + alfa? Counter PID? + Constant source function + Slope source function + Minimum chisqr value + + Direct from cmilos-fits/milos.c + inv->iter = malloc(npix*sizeof(int)); + inv->B = malloc(npix*sizeof(double)); + inv->gm = malloc(npix*sizeof(double)); + inv->az = malloc(npix*sizeof(double)); + inv->eta0 = malloc(npix*sizeof(double)); + inv->dopp = malloc(npix*sizeof(double)); + inv->aa = malloc(npix*sizeof(double)); + inv->vlos = malloc(npix*sizeof(double)); //km/s + inv->alfa = malloc(npix*sizeof(double)); //stay light factor + inv->S0 = malloc(npix*sizeof(double)); + inv->S1 = malloc(npix*sizeof(double)); + inv->nchisqrf = malloc(npix*sizeof(double)); + + noise_in_V = np.mean(data[:,:,3,cpos_arr[0],:]) + low_values_flags = np.max(np.abs(data[:,:,3,:,scan]),axis=-1) < noise_in_V # Where values are low + + rte_out[2,low_values_flags] = 0 #not sure about 2,3,4 indexing here + rte_out[3,low_values_flags] = 0 + rte_out[4,low_values_flags] = 0 + """ + + rte_data_products = np.zeros((6,rte_out.shape[1],rte_out.shape[2])) + + rte_data_products[0,:,:] = rte_out[9,:,:] + rte_out[10,:,:] #continuum + rte_data_products[1,:,:] = rte_out[1,:,:] #b mag strength + rte_data_products[2,:,:] = rte_out[2,:,:] #inclination + rte_data_products[3,:,:] = rte_out[3,:,:] #azimuth + rte_data_products[4,:,:] = rte_out[7,:,:] #vlos + rte_data_products[5,:,:] = rte_out[1,:,:]*np.cos(rte_out[2,:,:]*np.pi/180.) 
#blos + + rte_data_products *= mask[np.newaxis, :, :, 0] #field stop, set outside to 0 + + hdr_scan['RTE_MOD'] = rte + hdr_scan['RTE_SW'] = 'cmilos-fits' + hdr_scan['RTE_ITER'] = str(15) + + write_output_inversion(rte_data_products, file_path, scan, hdr_scan, imgdirx_flipped, out_dir, out_rte_filename, vers) + + printc('--------------------------------------------------------------',bcolors.OKGREEN) + printc(f"------------- CMILOS-FITS RTE Run Time: {np.round(time.perf_counter() - start_time,3)} seconds ",bcolors.OKGREEN) + printc('--------------------------------------------------------------',bcolors.OKGREEN) + + + +def pmilos(data_f, hdr_arr, wve_axis_arr, data_shape, cpos_arr, data, rte, mask, imgdirx_flipped, out_rte_filename, out_dir, vers = '01'): + """ + RTE inversion using PMILOS + """ + print(" ") + printc('-->>>>>>> RUNNING PMILOS ',color=bcolors.OKGREEN) + + try: + PMILOS_LOC = os.path.realpath(__file__) + + PMILOS_LOC = PMILOS_LOC.split('src/')[0] + 'p-milos/' #11 as hrt_pipe.py is 11 characters -8 if in utils.py + + if os.path.isfile(PMILOS_LOC+'pmilos.x'): + printc("Pmilos executable located at:", PMILOS_LOC,color=bcolors.WARNING) + + else: + raise ValueError('Cannot find pmilos:', PMILOS_LOC) + + except ValueError as err: + printc(err.args[0],color=bcolors.FAIL) + printc(err.args[1],color=bcolors.FAIL) + return + + wavelength = 6173.3354 + + for scan in range(int(data_shape[-1])): + + start_time = time.perf_counter() + + file_path = data_f[scan] + wave_axis = wve_axis_arr[scan] + hdr_scan = hdr_arr[scan] + + #must invert each scan independently, as cmilos only takes in one dataset at a time + + #get wave_axis from the hdr information of the science scans + if cpos_arr[0] == 0: + shift_w = wave_axis[3] - wavelength + elif cpos_arr[0] == 5: + shift_w = wave_axis[2] - wavelength + + wave_axis = wave_axis - shift_w + + print('It is assumed the wavelength array is given by the hdr') + #print(wave_axis,color = bcolors.WARNING) + print("Wave axis is: ", (wave_axis - wavelength)*1000.) + print('Saving data into ./p-milos/run/data/input_tmp.fits for pmilos RTE input') + + #write wavelengths to wavelength.fits file for the settings + + wave_input = np.zeros((2,6)) #cfitsio reads dimensions in opposite order + wave_input[0,:] = 1 + wave_input[1,:] = wave_axis + + print(wave_axis) + + hdr = fits.Header() + + primary_hdu = fits.PrimaryHDU(wave_input, header = hdr) + hdul = fits.HDUList([primary_hdu]) + hdul.writeto(f'./p-milos/run/wavelength_tmp.fits', overwrite=True) + + sdata = data[:,:,:,:,scan].T + sdata = sdata.astype(np.float32) + #create input fits file for pmilos + hdr = fits.Header() + + hdr['CTYPE1'] = 'HPLT-TAN' + hdr['CTYPE2'] = 'HPLN-TAN' + hdr['CTYPE3'] = 'STOKES' #check order of stokes + hdr['CTYPE4'] = 'WAVE-GRI' + + primary_hdu = fits.PrimaryHDU(sdata, header = hdr) + hdul = fits.HDUList([primary_hdu]) + hdul.writeto(f'./p-milos/run/data/input_tmp.fits', overwrite=True) + + if rte == 'RTE': + cmd = "mpiexec -n 64 ../pmilos.x pmilos.minit" #../milos.x pmilos.mtrol" ## + + if rte == 'CE': + cmd = "mpiexec -np 16 ../pmilos.x pmilos_ce.minit" + + if rte == 'CE+RTE': + print("CE+RTE not possible on PMILOS, performing RTE instead") + cmd = "mpiexec -np 16 ../pmilos.x pmilos.minit" + + if rte == 'RTE_seq': + cmd = '../milos.x pmilos.mtrol' + + del sdata + #need to change settings for CE or CE+RTE in the pmilos.minit file here + + printc(f' ---- >>>>> Inverting data scan number: {scan} .... 
',color=bcolors.OKGREEN) + + cwd = os.getcwd() + os.chdir("./p-milos/run/") + rte_on = subprocess.call(cmd,shell=True) + os.chdir(cwd) + + if rte == 'CE': + out_file = 'inv__mod_ce.fits' # not sure about this one + + else: + out_file = 'inv__mod.fits' #only when one datacube and 16 processors + + with fits.open(f'./p-milos/run/results/{out_file}') as hdu_list: + result = hdu_list[0].data + + #del_dummy = subprocess.call(f"rm ./p-milos/run/results/{out_file}.fits",shell=True) + del_dummy = subprocess.call(f"rm ./p-milos/run/results/{out_file}",shell=True) #must delete the output file + + #result has dimensions [rows,cols,13] + result = np.moveaxis(result,0,2) + print(result.shape) + #printc(f' ---- >>>>> You are HERE .... ',color=bcolors.WARNING) + """ + PMILOS Output 13 columns + 0. eta0 = line-to-continuum absorption coefficient ratio + 1. B = magnetic field strength [Gauss] + 2. vlos = line-of-sight velocity [km/s] + 3. dopp = Doppler width [Angstroms] + 4. aa = damping parameter + 5. gm = magnetic field inclination [deg] + 6. az = magnetic field azimuth [deg] + 7. S0 = source function constant + 8. S1 = source function gradient + 9. mac = macroturbulent velocity [km/s] + 10. filling factor of the magnetic component [0-1] + 11. Number of iterations performed + 12. Chisqr value + """ + + rte_data_products = np.zeros((6,result.shape[0],result.shape[1])) + + rte_data_products[0,:,:] = result[:,:,7] + result[:,:,8] #continuum + rte_data_products[1,:,:] = result[:,:,1] #b mag strength + rte_data_products[2,:,:] = result[:,:,5] #inclination + rte_data_products[3,:,:] = result[:,:,6] #azimuth + rte_data_products[4,:,:] = result[:,:,2] #vlos + rte_data_products[5,:,:] = result[:,:,1]*np.cos(result[:,:,5]*np.pi/180.) #blos + + rte_data_products *= mask[np.newaxis, :, :, 0] #field stop, set outside to 0 + + hdr_scan['RTE_MOD'] = rte + hdr_scan['RTE_SW'] = 'pmilos' + hdr_scan['RTE_ITER'] = str(15) + + write_output_inversion(rte_data_products, file_path, scan, hdr_scan, imgdirx_flipped, out_dir, out_rte_filename, vers) + + printc('--------------------------------------------------------------',bcolors.OKGREEN) + printc(f"------------- PMILOS RTE Run Time: {np.round(time.perf_counter() - start_time,3)} seconds ",bcolors.OKGREEN) + printc('--------------------------------------------------------------',bcolors.OKGREEN) diff --git a/src/processes.py b/src/processes.py new file mode 100644 index 0000000000000000000000000000000000000000..ce868bc9318128066a175abe99bfe4458f13d3da --- /dev/null +++ b/src/processes.py @@ -0,0 +1,570 @@ +import numpy as np +from scipy.ndimage import gaussian_filter +from operator import itemgetter +from utils import * +import os +import time + +def setup_header(hdr_arr): + k = ['CAL_FLAT','CAL_USH','SIGM_USH', + 'CAL_PRE','CAL_GHST','CAL_REAL', + 'CAL_CRT0','CAL_CRT1','CAL_CRT2','CAL_CRT3','CAL_CRT4','CAL_CRT5', + 'CAL_CRT6','CAL_CRT7','CAL_CRT8','CAL_CRT9', + 'CAL_NORM','CAL_FRIN','CAL_PSF','CAL_IPOL', + 'CAL_SCIP','RTE_MOD','RTE_SW','RTE_ITER'] + + v = [0,' ',' ', + ' ','None ','NA', + 0,0,0,0,0,0, + 0,0,0,0, + ' ','NA','NA',' ', + 'None',' ',' ',4294967295] + + c = ['Onboard calibrated for gain table','Unsharp masking correction','Sigma for unsharp masking [px]', + 'Prefilter correction (DID/file)','Ghost correction (name + version of module','Prealigment of images before demodulation', + 'cross-talk from I to Q (slope)','cross-talk from I to Q (offset)','cross-talk from I to U (slope)','cross-talk from I to U (offset)','cross-talk from I to V (slope)','cross-talk from I 
to V (offset)', + 'cross-talk from V to Q (slope)','cross-talk from V to Q (offset)','cross-talk from V to U (slope)','cross-talk from V to U (offset)', + 'Normalization (normalization constant PROC_Ic)','Fringe correction (name + version of module)','Onboard calibrated for instrumental PSF','Onboard calibrated for instrumental polarization', + 'Onboard scientific data analysis','Inversion mode','Inversion software','Number RTE inversion iterations'] + + for h in hdr_arr: + for i in range(len(k)): + if k[i] in h: # Check for existence + h[k[i]] = v[i] + else: + if i==0: + h.set(k[i], v[i], c[i], after='CAL_DARK') + else: + h.set(k[i], v[i], c[i], after=k[i-1]) + return hdr_arr + + +def load_flat(flat_f, accum_scaling, bit_conversion, scale_data, header_imgdirx_exists, imgdirx_flipped, cpos_arr) -> np.ndarray: + """ + load, scale, flip and correct flat + """ + print(" ") + printc('-->>>>>>> Reading Flats',color=bcolors.OKGREEN) + + start_time = time.perf_counter() + + # flat from IP-5 + if '0024151020000' in flat_f or '0024150020000' in flat_f: + flat, header_flat = get_data(flat_f, scaling = accum_scaling, bit_convert_scale=bit_conversion, + scale_data=False) + else: + flat, header_flat = get_data(flat_f, scaling = accum_scaling, bit_convert_scale=bit_conversion, + scale_data=scale_data) + + if 'IMGDIRX' in header_flat: + header_fltdirx_exists = True + fltdirx_flipped = str(header_flat['IMGDIRX']) + else: + header_fltdirx_exists = False + fltdirx_flipped = 'NO' + + print(f"Flat field shape is {flat.shape}") + # correction based on science data - see if flat and science are both flipped or not + flat = compare_IMGDIRX(flat,header_imgdirx_exists,imgdirx_flipped,header_fltdirx_exists,fltdirx_flipped) + + flat = np.moveaxis(flat, 0,-1) #so that it is [y,x,24] + flat = flat.reshape(2048,2048,6,4) #separate 24 images, into 6 wavelengths, with each 4 pol states + flat = np.moveaxis(flat, 2,-1) + + print(flat.shape) + + _, _, _, cpos_f = fits_get_sampling(flat_f,verbose = True) #get flat continuum position + + print(f"The continuum position of the flat field is at {cpos_f} index position") + + #-------- + # test if the science and flat have continuum at same position + #-------- + + flat = compare_cpos(flat,cpos_f,cpos_arr[0]) + + flat_pmp_temp = str(header_flat['HPMPTSP1']) + + print(f"Flat PMP Temperature Set Point: {flat_pmp_temp}") + + #-------- + # correct for missing line in particular flat field + #-------- + + if flat_f[-15:] == '0162201100.fits': # flat_f[-62:] == 'solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits' + print("This flat has a missing line - filling in with neighbouring pixels") + flat_copy = flat.copy() + flat[:,:,1,1] = filling_data(flat_copy[:,:,1,1], 0, mode = {'exact rows':[1345,1346]}, axis=1) + + del flat_copy + + printc('--------------------------------------------------------------',bcolors.OKGREEN) + printc(f"------------ Load flats time: {np.round(time.perf_counter() - start_time,3)} seconds",bcolors.OKGREEN) + printc('--------------------------------------------------------------',bcolors.OKGREEN) + + return flat + + +def load_dark(dark_f) -> np.ndarray: + """ + loads dark field from given path + """ + print(" ") + printc('-->>>>>>> Reading Darks',color=bcolors.OKGREEN) + + start_time = time.perf_counter() + + try: + dark,_ = get_data(dark_f) + + dark_shape = dark.shape + + if dark_shape != (2048,2048): + + if dark.ndim > 2: + printc("Dark Field Input File has more dimensions than the expected 2048,2048 format: {}",dark_f,color=bcolors.WARNING) + 
raise ValueError + + printc("Dark Field Input File not in 2048,2048 format: {}",dark_f,color=bcolors.WARNING) + printc("Attempting to correct ",color=bcolors.WARNING) + + + try: + if dark_shape[0] > 2048: + dark = dark[dark_shape[0]-2048:,:] + + except Exception: + printc("ERROR, Unable to correct shape of dark field data: {}",dark_f,color=bcolors.FAIL) + raise ValueError + + printc('--------------------------------------------------------------',bcolors.OKGREEN) + printc(f"------------ Load darks time: {np.round(time.perf_counter() - start_time,3)} seconds",bcolors.OKGREEN) + printc('--------------------------------------------------------------',bcolors.OKGREEN) + + return dark + + except Exception: + printc("ERROR, Unable to open and process darks file: {}",dark_f,color=bcolors.FAIL) + + +def apply_dark_correction(data, flat, dark, rows, cols) -> np.ndarray: + """ + subtracts dark field from flat field and science data + """ + print(" ") + print("-->>>>>>> Subtracting dark field") + + start_time = time.perf_counter() + + data -= dark[rows,cols, np.newaxis, np.newaxis, np.newaxis] + #flat -= dark[..., np.newaxis, np.newaxis] #- # all processed flat fields should already be dark corrected + + printc('--------------------------------------------------------------',bcolors.OKGREEN) + printc(f"------------- Dark Field correction time: {np.round(time.perf_counter() - start_time,3)} seconds",bcolors.OKGREEN) + printc('--------------------------------------------------------------',bcolors.OKGREEN) + + return data, flat + + +def normalise_flat(flat, flat_f, ceny, cenx) -> np.ndarray: + """ + normalise flat fields at each wavelength position to remove the spectral line + """ + print(" ") + printc('-->>>>>>> Normalising Flats',color=bcolors.OKGREEN) + + start_time = time.perf_counter() + + try: + norm_fac = np.mean(flat[ceny,cenx, :, :], axis = (0,1))[np.newaxis, np.newaxis, ...] 
#mean of the central 1k x 1k + flat /= norm_fac + + printc('--------------------------------------------------------------',bcolors.OKGREEN) + printc(f"------------- Normalising flat time: {np.round(time.perf_counter() - start_time,3)} seconds",bcolors.OKGREEN) + printc('--------------------------------------------------------------',bcolors.OKGREEN) + + return flat + + except Exception: + printc("ERROR, Unable to normalise the flat fields: {}",flat_f,color=bcolors.FAIL) + + +def demod_hrt(data,pmp_temp, verbose = True) -> np.ndarray: + ''' + Use constant demodulation matrices to demodulate input data + ''' + if pmp_temp == '50': + demod_data = np.array([[ 0.28037298, 0.18741922, 0.25307596, 0.28119895], + [ 0.40408596, 0.10412157, -0.7225681, 0.20825675], + [-0.19126636, -0.5348939, 0.08181918, 0.64422774], + [-0.56897295, 0.58620095, -0.2579202, 0.2414017 ]]) + + elif pmp_temp == '40': + demod_data = np.array([[ 0.26450154, 0.2839626, 0.12642948, 0.3216773 ], + [ 0.59873885, 0.11278069, -0.74991184, 0.03091451], + [ 0.10833212, -0.5317737, -0.1677862, 0.5923593 ], + [-0.46916953, 0.47738808, -0.43824592, 0.42579797]]) + + else: + printc("Demodulation Matrix for PMP TEMP of {pmp_temp} deg is not available", color = bcolors.FAIL) + if verbose: + printc(f'Using a constant demodulation matrix for a PMP TEMP of {pmp_temp} deg',color = bcolors.OKGREEN) + + demod_data = demod_data.reshape((4,4)) + shape = data.shape + demod = np.tile(demod_data, (shape[0],shape[1],1,1)) + + if data.ndim == 5: + #if data array has more than one scan + data = np.moveaxis(data,-1,0) #moving number of scans to first dimension + + data = np.matmul(demod,data) + data = np.moveaxis(data,0,-1) #move scans back to the end + + elif data.ndim == 4: + #for if data has just one scan + data = np.matmul(demod,data) + + return data, demod + + +def unsharp_masking(flat,sigma,flat_pmp_temp,cpos_arr,clean_mode,clean_f,pol_end=4,verbose=True): + """ + unsharp masks the flat fields to blur our polarimetric structures due to solar rotation + clean_f = ['blurring', 'fft'] + """ + flat_demod, demodM = demod_hrt(flat, flat_pmp_temp,verbose) + + norm_factor = np.mean(flat_demod[512:1536,512:1536,0,cpos_arr[0]]) + + flat_demod /= norm_factor + + new_demod_flats = np.copy(flat_demod) + +# b_arr = np.zeros((2048,2048,3,5)) + + if cpos_arr[0] == 0: + wv_range = range(1,6) + + elif cpos_arr[0] == 5: + wv_range = range(5) + + if clean_mode == "QUV": + start_clean_pol = 1 + if verbose: + print("Unsharp Masking Q,U,V") + + elif clean_mode == "UV": + start_clean_pol = 2 + if verbose: + print("Unsharp Masking U,V") + + elif clean_mode == "V": + start_clean_pol = 3 + if verbose: + print("Unsharp Masking V") + + if clean_f == 'blurring': + blur = lambda a: gaussian_filter(a,sigma) + elif clean_f == 'fft': + x = np.fft.fftfreq(2048,1) + fftgaus2d = np.exp(-2*np.pi**2*(x-0)**2*sigma**2)[:,np.newaxis] * np.exp(-2*np.pi**2*(x-0)**2*sigma**2)[np.newaxis] + blur = lambda a : (np.fft.ifftn(fftgaus2d*np.fft.fftn(a.copy()))).real + + for pol in range(start_clean_pol,pol_end): + + for wv in wv_range: #not the continuum + + a = np.copy(np.clip(flat_demod[:,:,pol,wv], -0.02, 0.02)) + b = a - blur(a) +# b_arr[:,:,pol-1,wv-1] = b + c = a - b + + new_demod_flats[:,:,pol,wv] = c + + invM = np.linalg.inv(demodM) + + return np.matmul(invM, new_demod_flats*norm_factor) + + +def flat_correction(data,flat,flat_states,rows,cols) -> np.ndarray: + """ + correct science data with flat fields + """ + print(" ") + printc('-->>>>>>> Correcting 
+
+
+def unsharp_masking(flat,sigma,flat_pmp_temp,cpos_arr,clean_mode,clean_f,pol_end=4,verbose=True):
+    """
+    unsharp masks the flat fields to blur out polarimetric structures due to solar rotation
+    clean_f = ['blurring', 'fft']
+    """
+    flat_demod, demodM = demod_hrt(flat, flat_pmp_temp,verbose)
+
+    norm_factor = np.mean(flat_demod[512:1536,512:1536,0,cpos_arr[0]])
+
+    flat_demod /= norm_factor
+
+    new_demod_flats = np.copy(flat_demod)
+
+#     b_arr = np.zeros((2048,2048,3,5))
+
+    if cpos_arr[0] == 0:
+        wv_range = range(1,6)
+
+    elif cpos_arr[0] == 5:
+        wv_range = range(5)
+
+    if clean_mode == "QUV":
+        start_clean_pol = 1
+        if verbose:
+            print("Unsharp Masking Q,U,V")
+
+    elif clean_mode == "UV":
+        start_clean_pol = 2
+        if verbose:
+            print("Unsharp Masking U,V")
+
+    elif clean_mode == "V":
+        start_clean_pol = 3
+        if verbose:
+            print("Unsharp Masking V")
+
+    if clean_f == 'blurring':
+        blur = lambda a: gaussian_filter(a,sigma)
+    elif clean_f == 'fft':
+        x = np.fft.fftfreq(2048,1)
+        fftgaus2d = np.exp(-2*np.pi**2*(x-0)**2*sigma**2)[:,np.newaxis] * np.exp(-2*np.pi**2*(x-0)**2*sigma**2)[np.newaxis]
+        blur = lambda a : (np.fft.ifftn(fftgaus2d*np.fft.fftn(a.copy()))).real
+
+    for pol in range(start_clean_pol,pol_end):
+
+        for wv in wv_range: #not the continuum
+
+            a = np.copy(np.clip(flat_demod[:,:,pol,wv], -0.02, 0.02))
+            b = a - blur(a)
+#             b_arr[:,:,pol-1,wv-1] = b
+            c = a - b
+
+            new_demod_flats[:,:,pol,wv] = c
+
+    invM = np.linalg.inv(demodM)
+
+    return np.matmul(invM, new_demod_flats*norm_factor)
+
+
+def flat_correction(data,flat,flat_states,rows,cols) -> np.ndarray:
+    """
+    correct science data with flat fields
+    """
+    print(" ")
+    printc('-->>>>>>> Correcting Flatfield',color=bcolors.OKGREEN)
+
+    start_time = time.perf_counter()
+
+    try:
+        #each branch assigns to data (rather than returning early) so that the timing below is always printed
+        if flat_states == 6:
+
+            printc("Dividing by 6 flats, one for each wavelength",color=bcolors.OKGREEN)
+
+            tmp = np.mean(flat,axis=-2) #avg over pol states for the wavelength
+
+            data = data / tmp[rows,cols, np.newaxis, :, np.newaxis]
+
+        elif flat_states == 24:
+
+            printc("Dividing by 24 flats, one for each image",color=bcolors.OKGREEN)
+
+            data = data / flat[rows,cols, :, :, np.newaxis] #only one new axis for the scans
+
+        elif flat_states == 4:
+
+            printc("Dividing by 4 flats, one for each pol state",color=bcolors.OKGREEN)
+
+            tmp = np.mean(flat,axis=-1) #avg over wavelength
+
+            data = data / tmp[rows,cols, :, np.newaxis, np.newaxis]
+
+        else:
+            print(" ")
+            printc('-->>>>>>> Unable to apply flat correction. Please insert valid flat_states',color=bcolors.WARNING)
+
+        printc('--------------------------------------------------------------',bcolors.OKGREEN)
+        printc(f"------------- Flat Field correction time: {np.round(time.perf_counter() - start_time,3)} seconds ",bcolors.OKGREEN)
+        printc('--------------------------------------------------------------',bcolors.OKGREEN)
+
+        return data
+
+    except Exception:
+        printc("ERROR, Unable to apply flat fields",color=bcolors.FAIL)
+
+
+def prefilter_correction(data,voltagesData_arr,prefilter,prefilter_voltages):
+    """
+    applies prefilter correction
+    adapted from SPGPylibs
+    """
+    def _get_v1_index1(x):
+        index1, v1 = min(enumerate([abs(i) for i in x]), key=itemgetter(1))
+        return v1, index1
+
+    data_shape = data.shape
+    # cop = np.copy(data)
+    # new_data = np.zeros(data_shape)
+
+    for scan in range(data_shape[-1]):
+
+        voltage_list = voltagesData_arr[scan]
+
+        for wv in range(6):
+
+            v = voltage_list[wv]
+
+            vdif = [v - pf for pf in prefilter_voltages]
+
+            v1, index1 = _get_v1_index1(vdif)
+
+            if vdif[index1] >= 0:
+                v2 = vdif[index1 + 1]
+                index2 = index1 + 1
+
+            else:
+                v2 = vdif[index1-1]
+                index2 = index1 - 1
+
+            imprefilter = (prefilter[:,:, index1]*v1 + prefilter[:,:, index2]*v2)/(v1+v2) #interpolation between nearest voltages
+
+            data[:,:,:,wv,scan] /= imprefilter[...,np.newaxis]
+
+    return data
+
+
+def apply_field_stop(data, rows, cols, header_imgdirx_exists, imgdirx_flipped) -> np.ndarray:
+    """
+    apply field stop mask to the science data
+    """
+    print(" ")
+    printc("-->>>>>>> Applying field stop",color=bcolors.OKGREEN)
+
+    start_time = time.perf_counter()
+
+    field_stop_loc = os.path.realpath(__file__)
+
+    field_stop_loc = field_stop_loc.split('src/')[0] + 'field_stop/'
+
+    field_stop,_ = load_fits(field_stop_loc + 'HRT_field_stop.fits')
+
+    field_stop = np.where(field_stop > 0,1,0)
+
+    if header_imgdirx_exists:
+        if imgdirx_flipped == 'YES': #should be YES for any L1 data, but mistake in processing software
+            field_stop = field_stop[:,::-1] #also need to flip the flat data after dark correction
+
+
+    data *= field_stop[rows,cols,np.newaxis, np.newaxis, np.newaxis]
+
+    printc('--------------------------------------------------------------',bcolors.OKGREEN)
+    printc(f"------------- Field stop time: {np.round(time.perf_counter() - start_time,3)} seconds",bcolors.OKGREEN)
+    printc('--------------------------------------------------------------',bcolors.OKGREEN)
+
+    return data, field_stop
+
+
+def load_ghost_field_stop(header_imgdirx_exists, imgdirx_flipped) -> np.ndarray:
+    """
+    load the field stop ghost mask for the science data
+    """
+    print(" ")
+    printc("-->>>>>>> Loading ghost field stop",color=bcolors.OKGREEN)
+
+    start_time = time.perf_counter()
+
+    field_stop_loc 
= os.path.realpath(__file__) + field_stop_loc = field_stop_loc.split('src/')[0] + 'field_stop/' + + field_stop_ghost,_ = load_fits(field_stop_loc + 'HRT_field_stop_ghost.fits') + field_stop_ghost = np.where(field_stop_ghost > 0,1,0) + + if header_imgdirx_exists: + if imgdirx_flipped == 'YES': #should be YES for any L1 data, but mistake in processing software + field_stop_ghost = field_stop_ghost[:,::-1] + + printc('--------------------------------------------------------------',bcolors.OKGREEN) + printc(f"------------- Load Ghost Field Stop time: {np.round(time.perf_counter() - start_time,3)} seconds",bcolors.OKGREEN) + printc('--------------------------------------------------------------',bcolors.OKGREEN) + return field_stop_ghost + + +def crosstalk_auto_ItoQUV(data_demod,cpos,wl,roi=np.ones((2048,2048)),verbose=0,npoints=5000,limit=0.2): + import random, statistics + from scipy.optimize import curve_fit + def linear(x,a,b): + return a*x + b + my = [] + sy = [] + + x = data_demod[roi>0,0,cpos].flatten() + ids = np.logical_and(x > limit, x < 1.5) + x = x[ids].flatten() + + N = x.size + idx = random.sample(range(N),npoints) + mx = x[idx].mean() + sx = x[idx].std() + xp = np.linspace(x.min(), x.max(), 100) + + A = np.vstack([x, np.ones(len(x))]).T + + # I to Q + yQ = data_demod[roi>0,1,wl].flatten() + yQ = yQ[ids].flatten() + my.append(yQ[idx].mean()) + sy.append(yQ[idx].std()) + cQ = curve_fit(linear,x,yQ,p0=[0,0])[0] + pQ = np.poly1d(cQ) + + # I to U + yU = data_demod[roi>0,2,wl].flatten() + yU = yU[ids].flatten() + my.append(yU[idx].mean()) + sy.append(yU[idx].std()) + cU = curve_fit(linear,x,yU,p0=[0,0])[0] + pU = np.poly1d(cU) + + # I to V + yV = data_demod[roi>0,3,wl].flatten() + yV = yV[ids].flatten() + my.append(yV[idx].mean()) + sy.append(yV[idx].std()) + cV = curve_fit(linear,x,yV,p0=[0,0])[0] + pV = np.poly1d(cV) + + if verbose: + + PLT_RNG = 3 + fig, ax = plt.subplots(figsize=(8, 8)) + ax.scatter(x[idx],yQ[idx],color='red',alpha=0.6,s=10) + ax.plot(xp, pQ(xp), color='red', linestyle='dashed',linewidth=3.0) + + ax.scatter(x[idx],yU[idx],color='blue',alpha=0.6,s=10) + ax.plot(xp, pU(xp), color='blue', linestyle='dashed',linewidth=3.0) + + ax.scatter(x[idx],yV[idx],color='green',alpha=0.6,s=10) + ax.plot(xp, pV(xp), color='green', linestyle='dashed',linewidth=3.0) + + ax.set_xlim([mx - PLT_RNG * sx,mx + PLT_RNG * sx]) + ax.set_ylim([min(my) - 1.8*PLT_RNG * statistics.mean(sy),max(my) + PLT_RNG * statistics.mean(sy)]) + ax.set_xlabel('Stokes I') + ax.set_ylabel('Stokes Q/U/V') + ax.text(mx - 0.9*PLT_RNG * sx, min(my) - 1.4*PLT_RNG * statistics.mean(sy), 'Cross-talk from I to Q: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cQ[0],cQ[1],width=8,prec=4), style='italic',bbox={'facecolor': 'red', 'alpha': 0.1, 'pad': 1}, fontsize=15) + ax.text(mx - 0.9*PLT_RNG * sx, min(my) - 1.55*PLT_RNG * statistics.mean(sy), 'Cross-talk from I to U: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cU[0],cU[1],width=8,prec=4), style='italic',bbox={'facecolor': 'blue', 'alpha': 0.1, 'pad': 1}, fontsize=15) + ax.text(mx - 0.9*PLT_RNG * sx, min(my) - 1.7*PLT_RNG * statistics.mean(sy), 'Cross-talk from I to V: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cV[0],cV[1],width=8,prec=4), style='italic',bbox={'facecolor': 'green', 'alpha': 0.1, 'pad': 1}, fontsize=15) +# fig.show() + + print('Cross-talk from I to Q: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cQ[0],cQ[1],width=8,prec=4)) + print('Cross-talk from I to U: 
slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cU[0],cU[1],width=8,prec=4)) + print('Cross-talk from I to V: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cV[0],cV[1],width=8,prec=4)) + +# return cQ,cU,cV, (idx,x,xp,yQ,yU,yV,pQ,pU,pV,mx,sx,my,sy) + else: + printc('Cross-talk from I to Q: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cQ[0],cQ[1],width=8,prec=4),color=bcolors.OKGREEN) + printc('Cross-talk from I to U: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cU[0],cU[1],width=8,prec=4),color=bcolors.OKGREEN) + printc('Cross-talk from I to V: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cV[0],cV[1],width=8,prec=4),color=bcolors.OKGREEN) + ct = np.asarray((cQ,cU,cV)).T + return ct + +def CT_ItoQUV(data, ctalk_params, norm_stokes, cpos_arr, Ic_mask): + """ + performs cross talk correction for I -> Q,U,V + """ + before_ctalk_data = np.copy(data) + data_shape = data.shape + +# ceny = slice(data_shape[0]//2 - data_shape[0]//4, data_shape[0]//2 + data_shape[0]//4) +# cenx = slice(data_shape[1]//2 - data_shape[1]//4, data_shape[1]//2 + data_shape[1]//4) + + cont_stokes = np.ones(data_shape[-1]) + + for scan in range(data_shape[-1]): + cont_stokes[scan] = np.mean(data[Ic_mask[...,scan],0,cpos_arr[0],scan]) + + for i in range(6): + +# stokes_i_wv_avg = np.mean(data[ceny,cenx,0,i,:], axis = (0,1)) + stokes_i_wv_avg = np.ones(data_shape[-1]) + for scan in range(data_shape[-1]): + stokes_i_wv_avg[scan] = np.mean(data[Ic_mask[...,scan],0,i,scan]) + + if norm_stokes: + #if normed, applies normalised offset to normed stokes + + tmp_param = ctalk_params*np.divide(stokes_i_wv_avg,cont_stokes) + + q_slope = tmp_param[0,0,:] + u_slope = tmp_param[0,1,:] + v_slope = tmp_param[0,2,:] + + q_int = tmp_param[1,0,:] + u_int = tmp_param[1,1,:] + v_int = tmp_param[1,2,:] + + data[:,:,1,i,:] = before_ctalk_data[:,:,1,i,:] - before_ctalk_data[:,:,0,i,:]*q_slope - q_int + + data[:,:,2,i,:] = before_ctalk_data[:,:,2,i,:] - before_ctalk_data[:,:,0,i,:]*u_slope - u_int + + data[:,:,3,i,:] = before_ctalk_data[:,:,3,i,:] - before_ctalk_data[:,:,0,i,:]*v_slope - v_int + + else: + #if not normed, applies raw offset cross talk correction to raw stokes counts + + tmp_param = ctalk_params[0,:,:]*np.divide(stokes_i_wv_avg,cont_stokes) + + q_slope = tmp_param[0,:] + u_slope = tmp_param[1,:] + v_slope = tmp_param[2,:] + + q_int = ctalk_params[1,0,:] + u_int = ctalk_params[1,1,:] + v_int = ctalk_params[1,2,:] + + data[:,:,1,i,:] = before_ctalk_data[:,:,1,i,:] - before_ctalk_data[:,:,0,i,:]*q_slope - q_int*stokes_i_wv_avg + + data[:,:,2,i,:] = before_ctalk_data[:,:,2,i,:] - before_ctalk_data[:,:,0,i,:]*u_slope - u_int*stokes_i_wv_avg + + data[:,:,3,i,:] = before_ctalk_data[:,:,3,i,:] - before_ctalk_data[:,:,0,i,:]*v_slope - v_int*stokes_i_wv_avg + + return data \ No newline at end of file diff --git a/src/utils.py b/src/utils.py index 6fe3aeec7213914455fd59e87fbdabbeb1439d2a..2944bee6e657466eb7c9d2f91458cd7c43356f57 100755 --- a/src/utils.py +++ b/src/utils.py @@ -63,10 +63,13 @@ def get_data(path, scaling = True, bit_convert_scale = True, scale_data = True): printc(f"Dividing by number of accumulations: {accu}",color=bcolors.OKGREEN) if scale_data: #not for commissioning data - - maxRange = fits.open(path)[9].data['PHI_IMG_maxRange'] + + try: + maxRange = fits.open(path)[9].data['PHI_IMG_maxRange'] - data *= maxRange[0]/maxRange[-1] + data *= maxRange[0]/maxRange[-1] + except IndexError: + data *= 81920/128 return 
data, header @@ -243,7 +246,7 @@ def check_IMGDIRX(hdr_arr): else: header_imgdirx_exists = False print("Not all the scan(s) contain the 'IMGDIRX' keyword! Assuming all not flipped - Proceed with caution") - return header_imgdirx_exists, None + return header_imgdirx_exists, False def compare_IMGDIRX(flat,header_imgdirx_exists,imgdirx_flipped,header_fltdirx_exists,fltdirx_flipped): diff --git a/test/test_jsons/create_input_json.py b/test/test_jsons/create_input_json.py new file mode 100644 index 0000000000000000000000000000000000000000..011362d42e40e6129c94df8bc553bfd694093ec7 --- /dev/null +++ b/test/test_jsons/create_input_json.py @@ -0,0 +1,218 @@ +import json +import numpy as np + + + +""" +#April 2020 L1 + +# CAN also do the L1 files - but must set bit_convert_scale to false, and scale data to False for the flat fields, and presumably same for the input data + +science_april = ['solo_L1_phi-hrt-ilam_20200420T141802_V202107221036C_0024160030.fits', +'solo_L1_phi-hrt-ilam_20200420T142032_V202107221036C_0024160031.fits', +'solo_L1_phi-hrt-ilam_20200420T142302_V202107221036C_0024160032.fits', +'solo_L1_phi-hrt-ilam_20200420T142532_V202107221037C_0024160033.fits', +'solo_L1_phi-hrt-ilam_20200420T142803_V202107221037C_0024160034.fits', +'solo_L1_phi-hrt-ilam_20200420T143033_V202107221037C_0024160035.fits', +'solo_L1_phi-hrt-ilam_20200420T143303_V202107221037C_0024160036.fits', +'solo_L1_phi-hrt-ilam_20200420T143533_V202107221037C_0024160037.fits', +'solo_L1_phi-hrt-ilam_20200420T143803_V202107221037C_0024160038.fits', +'solo_L1_phi-hrt-ilam_20200420T144033_V202107221038C_0024160039.fits'] + +flatfield_fits_filename = '/data/slam/home/sinjan/fits_files/april_avgd_2020_flat.fits' #solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits' + +darkfield_fits_filename = '../fits_files/solo_L0_phi-fdt-ilam_20200228T155100_V202002281636_0022210004_000.fits' + +science_april = ['/data/slam/home/sinjan/fits_files/' + i for i in science_april] + +input_dict = { + 'data_f': science_april, + 'flat_f' : flatfield_fits_filename, + 'dark_f' : darkfield_fits_filename +} + +json.dump(input_dict, open(f"./input_jsons/april_2020_L1.txt", "w")) +""" + +""" +#Nov 17 2020 L1 + +# CAN also do the L1 files - but must set bit_convert_scale to false, and scale data to False for the flat fields, and presumably same for the input data + +science_nov = ['solo_L1_phi-hrt-ilam_20201117T170209_V202108301639C_0051170001.fits']#['solo_L1_phi-hrt-ilam_20201117T170209_V202107060747C_0051170001.fits'] + +flatfield_fits_filename = '/data/slam/home/sinjan/fits_files/april_avgd_2020_flat.fits' #solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits' + +darkfield_fits_filename = '../fits_files/solo_L0_phi-fdt-ilam_20200228T155100_V202002281636_0022210004_000.fits' + +science_nov = ['/data/slam/home/sinjan/fits_files/' + i for i in science_nov] + +input_dict = { + 'data_f': science_nov, + 'flat_f' : flatfield_fits_filename, + 'dark_f' : darkfield_fits_filename +} + +json.dump(input_dict, open(f"./input_jsons/nov_2020_L1.txt", "w")) +""" + + +""" +#Nov 17 2020 L1 Feb Flats + +# CAN also do the L1 files - but must set bit_convert_scale to false, and scale data to False for the flat fields, and presumably same for the input data + +science_nov = ['solo_L1_phi-hrt-ilam_20201117T170209_V202107060747C_0051170001.fits'] + +flatfield_fits_filename = '/data/slam/home/sinjan/fits_files/solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits' #solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits' + 
+darkfield_fits_filename = '../fits_files/solo_L0_phi-fdt-ilam_20200228T155100_V202002281636_0022210004_000.fits' + +science_nov = ['/data/slam/home/sinjan/fits_files/' + i for i in science_nov] + +input_dict = { + 'data_f': science_nov, + 'flat_f' : flatfield_fits_filename, + 'dark_f' : darkfield_fits_filename +} + +json.dump(input_dict, open(f"./input_jsons/nov_2020_L1_feb_flats.txt", "w")) +""" + +""" +#Nov 17 2020 L1 KLL flats + +# CAN also do the L1 files - but must set bit_convert_scale to false, and scale data to False for the flat fields, and presumably same for the input data + +science_nov = ['solo_L1_phi-hrt-ilam_20201117T170209_V202107060747C_0051170001.fits'] + +flatfield_fits_filename = '/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-flat_20210321T210847_V202108301617C_0163211100.fits' #solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits' + +darkfield_fits_filename = '../fits_files/solo_L0_phi-fdt-ilam_20200228T155100_V202002281636_0022210004_000.fits' + +science_nov = ['/data/slam/home/sinjan/fits_files/' + i for i in science_nov] + +input_dict = { + 'data_f': science_nov, + 'flat_f' : flatfield_fits_filename, + 'dark_f' : darkfield_fits_filename +} + +json.dump(input_dict, open(f"./input_jsons/nov_2020_L1_kll.txt", "w")) + +""" + + +""" +#Feb 2021 L1 + + +science_feb = ['solo_L1_phi-hrt-ilam_20210223T170002_V202107221048C_0142230201.fits'] + +flatfield_fits_filename = '/data/slam/home/sinjan/fits_files/solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits' #solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits' + +darkfield_fits_filename = '../fits_files/solo_L0_phi-fdt-ilam_20200228T155100_V202002281636_0022210004_000.fits' + +science_feb = ['/data/slam/home/sinjan/fits_files/' + i for i in science_feb] + +input_dict = { + 'data_f': science_feb, + 'flat_f' : flatfield_fits_filename, + 'dark_f' : darkfield_fits_filename +} + +json.dump(input_dict, open(f"./input_jsons/feb_2k_2021_L1.txt", "w")) + +""" + +""" +#Sep 2021 data + +science_sep = ['solo_L1_phi-hrt-ilam_20210914T053015_V202110150939C_0149140301.fits']#['solo_L1_phi-hrt-ilam_20210914T034515_V202110211713C_0149140201.fits'] + +flat = '/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits' #solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits' + +dark = '/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits' + +science_nov = ['/data/slam/home/sinjan/fits_files/' + i for i in science_sep] + +input_dict = { + 'data_f': science_nov, + 'flat_f' : flat, + 'dark_f' : dark +} + +json.dump(input_dict, open(f"./input_jsons/sep_2021_L1_sl.txt", "w")) + +""" + +science = ['solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits'] + +flat = '/data/slam/home/sinjan/fits_files/0169111100_DC_9data.fits'#solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits' #solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits' + +dark = '/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits' + +science_2 = ['/data/slam/home/sinjan/fits_files/' + i for i in science] + + +""" +c_talk_params = np.zeros((2,3)) + +q_slope = -0.0263#0.0038#-0.0140##-0.0098# +u_slope = 0.0023#-0.0077#-0.0008##-0.0003# +v_slope = -0.0116#-0.0009#-0.0073##-0.0070# + +q_int = 0.0138#-0.0056#0.0016#-0.0056#-0.0015# #the offset, normalised to I_c +u_int = -0.0016#0.0031#0.0016##0.0007# +v_int = 0.0057#-0.0002#0.0007##0.0006# + +c_talk_params[0,0] = q_slope 
+c_talk_params[0,1] = u_slope +c_talk_params[0,2] = v_slope + +c_talk_params[1,0] = q_int +c_talk_params[1,1] = u_int +c_talk_params[1,2] = v_int +""" + + +input_dict = { + #input data + 'data_f': science_2, + 'flat_f' : flat, + 'dark_f' : dark, + + #input/output type + scaling + 'L1_input' : False, + 'L1_8_generate': False, #not developed yet + 'scale_data' : True, #these 3 will be made redundant once L1 data scaling is normalised - needed mainly for comissioning (IP5) data + 'accum_scaling' : True, + 'bit_conversion' : True, + + #reduction + 'dark_c' : True, + 'flat_c' : True, + 'norm_f' : True, + 'clean_f' : False, + 'sigma' : 59, #unsharp masking gaussian width + 'clean_mode' : "UV", #options 'QUV', 'UV', 'V' for the unsharp masking + 'flat_states' : 24, #options 4 (one each pol state), 6 (one each wavelength), 24 + 'prefilter_f': None, + 'fs_c' : True, + 'limb' : 'W', #specify if it is a limb observation, options are 'N', 'S', 'W', 'E' + 'demod' : False, + 'norm_stokes' : False, + 'ItoQUV' : False, #missing VtoQU - not developed yet + 'ctalk_params' : None, #VtoQU parameters will be required in this argument once ready + 'rte' : None, #options: ''RTE', 'CE', 'CE+RTE' + 'p_milos' : False, #attempted, ran into problems - on hold + 'cmilos_fits_opt': False, #whether to use cmilos-fits + + #output dir/filenames + 'out_dir' : '/data/slam/home/sinjan/hrt_pipe_tests/', + 'out_stokes_file' : False, #if True, will save stokes array to fits, the array that is fed into the RTE inversions + 'out_stokes_filename' : None, #if specific and not default name + 'out_rte_filename' : None, #if specific and not default name +} + +json.dump(input_dict, open(f"./dark_flat_test_apr_2020.json", "w")) \ No newline at end of file diff --git a/test/test_jsons/test_1.json b/test/test_jsons/test_1.json index 5938e935726a5f14015720b656aee0171d29e5b3..c9e9d5d89b1273b516160cdb8c554b183eca50c2 100644 --- a/test/test_jsons/test_1.json +++ b/test/test_jsons/test_1.json @@ -1 +1 @@ -{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": true, "L1_8_generate": true, "scale_data": false, "accum_scaling": false, "bit_conversion": true, "dark_c": false, "flat_c": false, "norm_f": false, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 4, "prefilter_f": null, "fs_c": false, "limb":"W", "demod": false, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": null, "p_milos": false, "cmilos_fits_opt": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/tests/", "out_stokes_file": false, "out_stokes_filename": null, "out_rte_filename": null, "config": false} \ No newline at end of file +{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": true, "L1_8_generate": true, "scale_data": false, "accum_scaling": false, "bit_conversion": true, "dark_c": false, "flat_c": false, "norm_f": false, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 4, "prefilter_f": null, "fs_c": false, 
"ghost_c": null, "out_intermediate": null, "limb":"W", "demod": false, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": null, "p_milos": false, "cmilos_fits": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/tests/", "out_stokes_file": false, "out_stokes_filename": null, "out_rte_filename": null, "config": false} \ No newline at end of file diff --git a/test/test_jsons/test_2.json b/test/test_jsons/test_2.json index 3924666efc614473ead4ef60eebcba03a161da6f..9c5c2521e7fcf1f31710b3af1c04f79d66af096c 100644 --- a/test/test_jsons/test_2.json +++ b/test/test_jsons/test_2.json @@ -1 +1 @@ -{"data_f": [""], "flat_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": true, "L1_8_generate": true, "scale_data": false, "accum_scaling": false, "bit_conversion": true, "dark_c": false, "flat_c": false, "norm_f": false, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 4, "prefilter_f": null, "fs_c": false, "limb":"W", "demod": false, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": null, "p_milos": false, "cmilos_fits_opt": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/tests/", "out_stokes_file": false, "out_stokes_filename": null, "out_rte_filename": null, "config":false} \ No newline at end of file +{"data_f": [""], "flat_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": true, "L1_8_generate": true, "scale_data": false, "accum_scaling": false, "bit_conversion": true, "dark_c": false, "flat_c": false, "norm_f": false, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 4, "prefilter_f": null, "ghost_c": null, "out_intermediate": null, "fs_c": false, "limb":"W", "demod": false, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": null, "p_milos": false, "cmilos_fits": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/tests/", "out_stokes_file": false, "out_stokes_filename": null, "out_rte_filename": null, "config":false} \ No newline at end of file diff --git a/test/test_jsons/test_3.json b/test/test_jsons/test_3.json index 9067efeb1e50488f68a4e1cc07209285f9a14af6..2ddcee34f3f6f0bcb5ff4d8ca74f10734e3498d2 100644 --- a/test/test_jsons/test_3.json +++ b/test/test_jsons/test_3.json @@ -1 +1 @@ -{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits"], "flat_f": "", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": true, "L1_8_generate": true, "scale_data": false, "accum_scaling": false, "bit_conversion": true, "dark_c": false, "flat_c": true, "norm_f": false, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 4, "prefilter_f": null, "fs_c": false, "limb":"W", "demod": false, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": null, "p_milos": false, "cmilos_fits_opt": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/tests/", "out_stokes_file": false, "out_stokes_filename": null, "out_rte_filename": null, "config":false} \ No newline at end of file +{"data_f": 
["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits"], "flat_f": "", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": true, "L1_8_generate": true, "scale_data": false, "accum_scaling": false, "bit_conversion": true, "dark_c": false, "flat_c": true, "norm_f": false, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 4, "prefilter_f": null, "ghost_c": null, "out_intermediate": null, "fs_c": false, "limb":"W", "demod": false, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": null, "p_milos": false, "cmilos_fits": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/tests/", "out_stokes_file": false, "out_stokes_filename": null, "out_rte_filename": null, "config":false} \ No newline at end of file diff --git a/test/test_jsons/test_4.json b/test/test_jsons/test_4.json index 0c520dd3c7e5e4cea4afa83a26903773b6741c16..c9fbc4d8d70ae0cd4bde5ccb6d8035777a75db15 100644 --- a/test/test_jsons/test_4.json +++ b/test/test_jsons/test_4.json @@ -1 +1 @@ -{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits", "dark_f": "", "L1_input": false, "L1_8_generate": false, "scale_data": false, "accum_scaling": false, "bit_conversion": false, "dark_c": true, "flat_c": false, "norm_f": false, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 4, "prefilter_f": null, "fs_c": false, "limb": "W", "demod": false, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": null, "p_milos": false, "cmilos_fits_opt": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/sep_2021_cmilos/", "out_stokes_file": false, "out_stokes_filename": null, "out_rte_filename": null, "config": false} \ No newline at end of file +{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits", "dark_f": "", "L1_input": false, "L1_8_generate": false, "scale_data": false, "accum_scaling": false, "bit_conversion": false, "dark_c": true, "flat_c": false, "norm_f": false, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 4, "prefilter_f": null, "fs_c": false, "ghost_c": null, "out_intermediate": null, "limb": "W", "demod": false, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": null, "p_milos": false, "cmilos_fits": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/sep_2021_cmilos/", "out_stokes_file": false, "out_stokes_filename": null, "out_rte_filename": null, "config": false} \ No newline at end of file diff --git a/test/test_jsons/test_5.json b/test/test_jsons/test_5.json index a806872ff4cb8a9e069427b61bcc29289ab3e217..aa0b935d688fe81a4badb82dd749ddd3535ef699 100644 --- a/test/test_jsons/test_5.json +++ b/test/test_jsons/test_5.json @@ -1 +1 @@ -{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits", "L1_input": false, "L1_8_generate": false, "scale_data": false, "accum_scaling": 
false, "bit_conversion": false, "dark_c": true, "flat_c": true, "norm_f": false, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 4, "prefilter_f": null, "fs_c": false, "limb": "W", "demod": false, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": null, "p_milos": false, "cmilos_fits_opt": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/sep_2021_cmilos/", "out_stokes_file": false, "out_stokes_filename": null, "out_rte_filename": null, "config": false} \ No newline at end of file +{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits", "L1_input": false, "L1_8_generate": false, "scale_data": false, "accum_scaling": false, "bit_conversion": false, "dark_c": true, "flat_c": true, "norm_f": false, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 4, "prefilter_f": null, "fs_c": false, "ghost_c": null, "out_intermediate": null, "limb": "W", "demod": false, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": null, "p_milos": false, "cmilos_fits": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/sep_2021_cmilos/", "out_stokes_file": false, "out_stokes_filename": null, "out_rte_filename": null, "config": false} \ No newline at end of file diff --git a/test/test_jsons/test_6.json b/test/test_jsons/test_6.json index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..e2df2bc9717b85f6215048d68dd6569a4f8efc5e 100644 --- a/test/test_jsons/test_6.json +++ b/test/test_jsons/test_6.json @@ -0,0 +1 @@ +{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": false, "L1_8_generate": false, "scale_data": true, "accum_scaling": true, "bit_conversion": true, "dark_c": true, "flat_c": true, "norm_f": true, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 24, "prefilter_f": null, "fs_c": true, "ghost_c": null, "out_intermediate": null, "limb":"W", "demod": true, "norm_stokes": true, "ItoQUV": true, "ctalk_params": null, "rte": "RTE", "p_milos": true, "cmilos_fits": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/tests/", "out_stokes_file": true, "out_stokes_filename": null, "out_rte_filename": null, "config": false, "vers": "02"} \ No newline at end of file diff --git a/test/test_jsons/test_8.json b/test/test_jsons/test_8.json index 0463149e2ed11535f8efb292d6fc6af668dc1664..fd169811ab32c708ac721168e0d67b3f0c6d4a53 100644 --- a/test/test_jsons/test_8.json +++ b/test/test_jsons/test_8.json @@ -1 +1 @@ -{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": false, "L1_8_generate": false, "scale_data": false, "accum_scaling": false, "bit_conversion": false, "dark_c": false, "flat_c": false, "norm_f": false, "clean_f": false, 
"sigma": 59, "clean_mode": "V", "flat_states": 4, "prefilter_f": null, "fs_c": false, "demod": false, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": null, "p_milos": false, "cmilos_fits_opt": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/sep_2021_cmilos/", "out_stokes_file": false, "out_stokes_filename": null, "out_rte_filename": null, "config": false} \ No newline at end of file +{"data_f": ["/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210914T071515_V202110260809C_0149140401.fits"], "flat_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210911T120504_V202110200555C_0169110100.fits", "dark_f": "/data/slam/home/sinjan/fits_files/solo_L1_phi-hrt-ilam_20210428T130238_V202109240900C_0164281001.fits", "L1_input": false, "L1_8_generate": false, "scale_data": false, "accum_scaling": false, "bit_conversion": false, "dark_c": false, "flat_c": false, "norm_f": false, "clean_f": false, "sigma": 59, "clean_mode": "V", "flat_states": 4, "prefilter_f": null, "fs_c": false, "ghost_c": null, "out_intermediate": null, "demod": false, "norm_stokes": false, "ItoQUV": false, "ctalk_params": null, "rte": null, "p_milos": false, "cmilos_fits": false, "out_dir": "/data/slam/home/sinjan/hrt_pipe_results/sep_2021_cmilos/", "out_stokes_file": false, "out_stokes_filename": null, "out_rte_filename": null, "config": false} \ No newline at end of file diff --git a/test/test_settings.py b/test/test_settings.py index 00d724be29f7189aa18af76b90ba3b5a07bf91ea..de613b7fb9e88612a2ccdd11d78534b2c673c9ac 100644 --- a/test/test_settings.py +++ b/test/test_settings.py @@ -6,8 +6,12 @@ import unittest import numpy as np import sys sys.path.append('../') +sys.path.append('../src/') from src.hrt_pipe import phihrt_pipe +from src.utils import * +from src.inversions import * +from src.processes import * def test_one(): #test with almost everything off after reading in all files