#Pandora functions to convert raw counts to corrected count rates or
#(ir)radiances
from pan_params import *
op=pan_params()
from pan_xfus import *
xfus=pan_xfus()


class pan_countconverter:
    #--------------------------------------------------------------------------
    def convert_rc(self,a,cal_data=[],steps=[],slcmeth='simple'):
        """
        cc,ecc,ecc_instr=convert_rc(a,cal_data=[],steps=[],slcmeth='simple')

        <a> is a list of arrays as it comes out of io.combine_data.
        This function converts the raw counts in <a> into 'corrected counts'
        or 'corrected count rates' or 'radiances' or 'irradiances' (output
        <cc>). Output <ecc> is the total uncertainty of <cc>, based on the
        raw count uncertainties in <a>, if these are not empty. The total
        uncertainty is made up of the instrumental uncertainty (output
        <ecc_instr>) and the input-uncertainty, which could be calculated
        using ecc**2=ecc_input**2+ecc_instr**2.
        <cal_data> is the calibration data list.
        <steps> is a (monotonically increasing) list of integers between 1
        and 7. It determines which corrections are applied to the raw data.
        These are the data correction steps:
        Step 1: dark correction, uses function <darkcorr>
        Step 2: non linearity correction, uses function <nonlincorr>
        Step 3: conversion to count rates, uses function <rc2cr>
        Step 4: stray light correction (not implemented yet)
        Step 5: filter correction (not implemented yet)
        Step 6: neutral density correction (not implemented yet)
        Step 7: conversion to (ir)radiances (not implemented yet)
        In every case (even if <steps>==[]) the data will be unscaled, i.e.
        divided by the scale factor in <a>.
        Note that saturated data will "stay" at the nominal saturation value
        through steps 1-2.
        <slcmeth> is the stray light correction method. It can be
        'simple': simply the averaged corrected counts below 280nm are
                  subtracted
        'complicated': not implemented yet
        """
        ##don't forget flat field correction!
        #Step 0: unscale data
        #scaled raw counts and uncertainties
        rc=a[5]
        if len(a)>=7:
            erc=a[6]
        else:
            erc=[]
        #dimension (nmeas,npix)
        dim=rc.shape
        #scale factor and extended scale factor
        scf=a[4][:,0].reshape(dim[0],1)
        scfe=dot(scf,ones((1,dim[1])))
        #unscale
        cc=rc/scfe
        #BUGFIX: test the length instead of "erc==[]", which is an ambiguous
        #elementwise comparison when erc is a numpy array
        if len(erc)==0:
            ecc=[]
        else:
            ecc=erc/scfe
        ecc_instr=[]
        #decompose parts of <a>
        it=a[2][:,0]   #integration time [ms]
        ncy=a[2][:,1]  #number of cycles
        fw1=a[2][:,2]  #filterwheel 1 position
        fw2=a[2][:,3]  #filterwheel 2 position
        #saturation index; needed by steps 1 and 2, which also need other
        #entries of <cal_data>, so <cal_data> must not be empty when any
        #correction step is requested
        if len(cal_data)>0:  #was "cal_data<>[]" (Python 2 only)
            bcsat=cal_data[-1][1]
            isat=cc>=bcsat
        #go through steps
        #Step 1: dark correction, uses function <darkcorr>
        if len(steps)>0 and steps[0]==1:
            steps=steps[1:]
            cc,ecc,ecc_instr,ibc=self.darkcorr(cc,ecc,it,ncy,fw1,fw2,isat,
                cal_data)
            #reduce parameters to bright counts only
            isat=isat[ibc[0],:]
            it=it[ibc[0]]
            ncy=ncy[ibc[0]]
            fw1=fw1[ibc[0]]
            fw2=fw2[ibc[0]]
        #Step 2: non linearity correction, uses function <nonlincorr>
        if len(steps)>0 and steps[0]==2:
            steps=steps[1:]
            cc,ecc,ecc_instr=self.nonlincorr(cc,ecc,ecc_instr,isat,cal_data)
        #Step 3: conversion to count rates, uses function <rc2cr>
        if len(steps)>0 and steps[0]==3:
            steps=steps[1:]
            cc,ecc,ecc_instr=self.rc2cr(cc,ecc,ecc_instr,it,cal_data)
        #Step 4: stray light correction (not implemented yet)
        if len(steps)>0 and steps[0]==4:
            steps=steps[1:]
            pass
        #Step 5: filter correction (not implemented yet)
        if len(steps)>0 and steps[0]==5:
            steps=steps[1:]
            pass
        #Step 6: neutral density correction (not implemented yet)
        if len(steps)>0 and steps[0]==6:
            steps=steps[1:]
            pass
        #Step 7: conversion to (ir)radiances (not implemented yet)
        if len(steps)>0 and steps[0]==7:
            steps=steps[1:]
            pass
        return cc,ecc,ecc_instr

    #--------------------------------------------------------------------------
    def darkcorr(self,rc,erc,it,ncy,fw1,fw2,isat,cal_data):
        """
        cc,ecc,ecc_instr,ibc=darkcorr(rc,erc,it,ncy,fw1,fw2,isat,cal_data)

        <rc> are the raw counts (nmeas,npix).
        <erc> are the raw count uncertainties (nmeas,npix), or [] if no
        uncertainties are available.
        <it> is the integration time.
        <ncy> is the number of cycles.
        <fw1> are the filterwheel 1 positions.
        <fw2> are the filterwheel 2 positions.
        <isat> is the saturation index array.
        <cal_data> is the calibration data list.

        This function looks for dark measurements (=OPAQUE filter in fw1)
        and subtracts from the bright measurements (=any other filter in
        fw1) the first dark measurement taken afterwards with the same
        integration time. Outputs are the dark corrected counts <cc>, the
        total uncertainty <ecc>, the instrumental uncertainty <ecc_instr>,
        and the indices of the bright counts <ibc>. All outputs have
        dimension (nbc,npix) with nbc the number of bright measurements.
        If nbc is 0, all outputs are empty.
        """
        #are uncertainties available?
        haserr=len(erc)>0
        #get indices of dark and bright measurements; the OPAQUE filter
        #positions (0-based) are stored in cal_data[-1][0]
        ig=fw1<0 #none of them
        for i in cal_data[-1][0]:
            ig=ig|(fw1==i+1)
        idc=where(ig)   #dark measurements
        ibc=where(~ig)  #bright measurements
        #BUGFIX: where() returns a tuple, so len(ibc) is always 1; test the
        #number of bright indices instead
        if len(ibc[0])>0:
            bc=rc[ibc[0],:]
            isat=isat[ibc[0],:]
            cc=deepcopy(bc)
            if haserr:
                ebc=erc[ibc[0],:]
                ecc=deepcopy(ebc)
                ecc_instr=deepcopy(ebc)
            else:
                ecc=[]
                ecc_instr=[]
            #sgamma polynomial (used in the instrumental uncertainty),
            #evaluated on scaled pixel numbers
            pix=arange(bc.shape[1])+1
            npix=len(pix)
            pixs=xfus.scale_x(pix,[npix])
            pp=cal_data[1][7]
            sgamma=polyval(pp,pixs)
            if len(idc[0])>0:  #BUGFIX: was len(idc)>0, always true
                #loop through bright measurements
                for i in range(len(ibc[0])):
                    #find next dark measurement with the same IT
                    itbc=it[ibc[0][i]]
                    iga=where(idc[0]>ibc[0][i])
                    idcg=idc[0][iga]
                    if len(idcg)>0:
                        itdc=it[idcg]
                        ind=where(itdc==itbc)
                        indg=idcg[ind[0]]
                        #BUGFIX: guard against no dark with matching IT;
                        #otherwise indg[0] raises IndexError
                        if len(indg)>0:
                            dc=rc[indg[0],:]
                            #dark corrected counts
                            cc[i,:]=bc[i,:]-dc
                            if haserr:
                                edc=erc[indg[0],:]
                                #total uncertainty assuming independent
                                #ebc and edc (B6)
                                ecc[i,:]=(ebc[i,:]**2+edc**2)**(0.5)
                                #instrumental uncertainty (B7)
                                ncydc=ncy[indg[0]]
                                ncybc=ncy[ibc[0][i]]
                                ecc_instr[i,:]=(edc**2*(1+ncydc/ncybc)+
                                    sgamma*cc[i,:]/ncybc)**(0.5)
            #set cc from saturated bc to nominal saturation value
            bcsat=cal_data[-1][1]
            cc[isat]=bcsat
        else:
            #if there are no bright counts
            cc=[]
            ecc=[]
            ecc_instr=[]
        return cc,ecc,ecc_instr,ibc

    #--------------------------------------------------------------------------
    def nonlincorr(self,ccp,eccp,eccp_instr,isat,cal_data):
        """
        cc,ecc,ecc_instr=nonlincorr(ccp,eccp,eccp_instr,isat,cal_data)

        <ccp> are the counts corrected for the previous steps (ncc,npix).
        <eccp> are the total uncertainties of <ccp> (ncc,npix) or [].
        <eccp_instr> are the instrumental uncertainties of <ccp> (ncc,npix)
        or [].
        <isat> is the saturation index array.
        <cal_data> is the calibration data list.

        This function scales the <ccp> using the saturation count and
        applies the non-linearity correction to the inputs. Outputs are the
        linearity corrected counts <cc>, the total uncertainty <ecc> and
        the instrumental uncertainty <ecc_instr>. All outputs have
        dimension (ncc,npix). At this stage no uncertainty is assumed in
        the linearity correction.
        """
        #scale ccp with the nominal saturation value
        bcsat=cal_data[-1][1]
        ccs=xfus.scale_x(ccp,[bcsat])
        #evaluate non-linearity polynomial
        pp=cal_data[1][5]
        nolincorr=polyval(pp,ccs)
        cc=ccp/nolincorr
        #set cc from saturated data to nominal saturation value
        cc[isat]=bcsat
        #BUGFIX: guard empty uncertainties (the original only guarded
        #eccp_instr, and used the Python-2-only "<>" operator)
        if len(eccp)>0:
            ecc=eccp/nolincorr
        else:
            ecc=[]
        if len(eccp_instr)>0:
            ecc_instr=eccp_instr/nolincorr
        else:
            ecc_instr=[]
        return cc,ecc,ecc_instr

    #--------------------------------------------------------------------------
    def rc2cr(self,ccp,eccp,eccp_instr,it,cal_data):
        """
        cc,ecc,ecc_instr=rc2cr(ccp,eccp,eccp_instr,it,cal_data)

        <ccp> are the counts corrected for the previous steps (ncc,npix).
        <eccp> are the total uncertainties of <ccp> (ncc,npix) or [].
        <eccp_instr> are the instrumental uncertainties of <ccp> (ncc,npix)
        or [].
        <it> is the integration time [ms] (ncc,1).
        <cal_data> is the calibration data list (not used here, kept for a
        uniform step interface).

        This function converts the <ccp> into corrected count rates.
        Outputs are the corrected count rate <cc> [s-1], the total
        uncertainty <ecc> and the instrumental uncertainty <ecc_instr>.
        All outputs have dimension (ncc,npix). No uncertainty is assumed
        in the integration time.
        """
        ncc,npix=ccp.shape
        #extend integration time to (ncc,npix); 1000 is from ms to s
        ite=it.reshape((ncc,1))*ones((1,npix))/1000.
        cc=ccp/ite
        #BUGFIX: guard empty uncertainties (the original only guarded
        #eccp_instr, and used the Python-2-only "<>" operator)
        if len(eccp)>0:
            ecc=eccp/ite
        else:
            ecc=[]
        if len(eccp_instr)>0:
            ecc_instr=eccp_instr/ite
        else:
            ecc_instr=[]
        return cc,ecc,ecc_instr

    #--------------------------------------------------------------------------
    def get_wlshift(self,lo,f,lo0,f0,npolshift=1,npolpol=4,
        maxiter=10,tolchi=0.0001):
        """
        res,itres=get_wlshift(lo,f,lo0,f0,npolshift=1,npolpol=4,
            maxiter=10,tolchi=0.0001)

        <lo> is a vector of the best initial guess wavelength centers
        (usually the wavelengths as determined during the calibration).
        <f> is a vector of the same dimension as <lo> with data from a
        measured spectrum (corrected count rates or (ir)radiances).
        <lo0> and <f0> are wavelengths and irradiances of the reference
        spectrum.
        <npolshift> is the polynomial order allowed for the wavelength
        change (e.g. 0=only wavelength shift, 1=shift and squeeze).
        <npolpol> is the polynomial order used for the polynomial
        subtracted to adjust for the intensity difference between the
        measured spectrum and the reference spectrum.
        <maxiter> and <tolchi> are termination criteria for the iterative
        process. <maxiter> is the maximum number of iterations used.
        <tolchi> is the termination tolerance for the cost-function, which
        is the relative change of the wavelength shift (maximum over all
        pixels).

        Output <itres> is a 5 element list with final numbers of the
        iteration process:
        Element 0: wavelength change polynomial, npolshift+1 elements
                   (starting with shift, then squeeze, then higher orders;
                   this refers to the scaled wavelengths!)
        Element 1: intensity adjustment polynomial, npolpol+1 elements
                   (starting with 0-order, 1st-order, etc.)
        Element 2: last cost function
        Element 3: number of iterations needed to minimize cost function
        Element 4: wavelength change polynomial evaluated at <lo>
        If the iterations were successful output res=="OK", otherwise it
        contains an error message.
        """
        err="Could not calculate wavelength shift."+"\n"
        res="OK"
        #check data in f
        eps=1e-3
        #NOTE(review): this check was corrupted in the source; presumably
        #the measured spectrum must be strictly positive so that its
        #logarithm exists - TODO confirm threshold against original code
        iu=where(f<eps)
        if len(iu[0])>0:
            res=err+"Some data in <f> are too small."
            itres=[]
        if res=="OK":
            lnf=log(f)
            #interpolation of F0 onto the measured wavelength grid
            f0i=xfus.linint(lo0,f0,lo)
            lnf0i=log(f0i)
            #difference lnf0 to lnf
            dlnf=lnf0i-lnf
            #scaled wavelengths
            losc=xfus.scale_x(lo,[lo.min(),lo.max()])
            #fit polynomial in dlnf
            pp,resids,rank,sval,rcond=polyfit(losc,dlnf,npolpol,full=True)
            #NOTE(review): the rank check message was lost in the source;
            #pp is currently only used for this check - seeding the
            #intensity polynomial with pp may have been intended, TODO
            if rank<npolpol+1:
                res=err+"The polynomial fit is rank deficient."
                itres=[]
        if res=="OK":
            #NOTE(review): the iteration setup below was corrupted in the
            #source and has been reconstructed from the loop body; verify
            #against the original Pandora code
            nlo=len(lo)
            #measured log-spectrum, constant during the iteration
            lnfc=lnf
            #half-step for the numerical derivative of F0 [nm]
            #TODO confirm value - lost in source
            losh=0.01
            #design matrix: the first npolshift+1 columns hold the
            #(linearized) wavelength change polynomial and are refilled
            #every iteration; the remaining npolpol+1 columns hold the
            #intensity adjustment polynomial and are filled once
            x=zeros((nlo,npolshift+1+npolpol+1))
            for i in range(npolpol+1):
                x[:,npolshift+1+i]=losc**i
            #accumulated solution vector (shift coefficients first, then
            #intensity adjustment coefficients, 0-order first)
            ag=zeros(npolshift+1+npolpol+1)
            hlo=deepcopy(lo)
            dlo=zeros(nlo)
            iters=0
            chi=2*tolchi #make sure the loop is entered
            while (res=="OK")and(iters<maxiter)and(chi>tolchi):
                iters=iters+1
                hlo=hlo+dlo
                #interpolation of F0
                if iters>1: #at iters==1 it has already been calculated
                    f0i=xfus.linint(lo0,f0,hlo)
                f0p=xfus.linint(lo0,f0,hlo+losh)
                f0m=xfus.linint(lo0,f0,hlo-losh)
                #linint returns [] when the requested wavelengths leave the
                #range of <lo0>
                if (len(f0p)==0)or(len(f0m)==0):
                    res=err+"The shift wants to leave the allowed range."
                    itres=[]
                else:
                    #derivative [mW/m2/nm/nm]
                    df0dlo=(f0p-f0m)/2/losh
                    lnf0i=log(f0i)
                    dlnf0i=df0dlo/f0i
                    #compose x and y for inversion
                    y=(lnf0i-lnfc).reshape(nlo,1)
                    for i in range(npolshift+1):
                        x[:,i]=-dlnf0i*losc**i
                    #solve linear system in a least square sense
                    a,resids,rank,s=lstsq(x,y)
                    ag=ag+a[:,0]
                    #wavelength change of this iteration
                    dlo=zeros(nlo)
                    for i in range(npolshift+1):
                        dlo=dlo+a[i]*losc**i
                    chi=abs(dlo).max()
                    ##print iters,chi
            if res=="OK":
                #final total dlo
                dlo=hlo-lo
                #compose <itres>
                itres=[ag[:npolshift+1],ag[npolshift+1:],chi,iters,dlo]
                if iters==maxiter:
                    res=err+"The maximum number of iterations was reached."
        return res,itres

#End class pan_countconverter----------------------------------------------