Astropy.io.fits

1 minute read

Published:

Import packages


import numpy as np
import pandas as pd
import os

from scipy.stats import spearmanr, pearsonr

from astropy.io import fits
from astropy.table import Table
from astropy.cosmology import FlatLambdaCDM, Planck13, Planck15, z_at_value
from astropy.cosmology import LambdaCDM
from astropy import units as u
cosmo = LambdaCDM(H0=70, Om0=0.27, Ode0=0.73)
from astropy.time import Time
from astropy.time import TimeYearDayTime


from datetime import datetime
import time
from time import strftime,strptime
import calendar
from dateutil.parser import parse

#from adjustText import adjust_text
import matplotlib as mpl
import matplotlib.pyplot as plt
from pylab import cm
from collections import OrderedDict

%matplotlib inline
%config InlineBackend.figure_format='svg'
# Edit the font, font size, and axes width
mpl.rcParams['font.family'] = 'Times New Roman' #'Avenir'
plt.rcParams['font.size'] = 18
plt.rcParams['axes.linewidth'] = 2

**FITS**

- [FITS documentation](https://docs.astropy.org/en/stable/io/fits/index.html)

hdul = fits.open(fits_path)   # open the FITS file as an HDU list
hdul.info()                   # summary of all HDUs
hdul[0].header                # primary header

data = hdul[1].data           # data of the first extension

# build a new binary-table HDU and write it to disk
hdu = fits.BinTableHDU(data=newdata, header=hdr, name='data')
hdu.writeto(path+'qsopar.fits', overwrite=True)
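
To build a table extension from scratch rather than from an existing array, a minimal sketch (the column names and values below are placeholders, not from any real dataset):

# a minimal sketch: assemble a binary table from astropy Columns
# ('lambda' and 'flux' are placeholder column names)
col1 = fits.Column(name='lambda', format='D', array=np.array([4861.0, 6563.0]))
col2 = fits.Column(name='flux', format='D', array=np.array([1.2, 3.4]))
newhdu = fits.BinTableHDU.from_columns([col1, col2], name='DATA')
newhdu.writeto('newtable.fits', overwrite=True)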







GTI (Good Time Intervals)

gtipath = '/Volumes/Brettlv_G_m/Astro_sources_data/V404cyg/V404cyg//nustar_v404/90102007002/pipeline_out/V404cygA01_gti.fits'
gti = fits.open(gtipath)
headergti = gti[0].header

gtitime = gti[1].data             # existing START/STOP intervals
# build a replacement GTI table as a record array
gti_made = np.rec.array([(1, 2), (3, 5), (12, 100)],
                        formats='float64,float64',
                        names='START,STOP')
gti[1].data = gti_made
gti.writeto('newgti.fits')
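
As a quick sanity check, the new intervals can be used to mask an array of event times, a minimal sketch (the events array below is made up for illustration):

# keep only times that fall inside any START/STOP interval
# (the `events` array is a placeholder, not from the original observation)
events = np.array([0.5, 1.5, 4.0, 50.0, 200.0])
mask = np.zeros(len(events), dtype=bool)
for start, stop in zip(gti_made['START'], gti_made['STOP']):
    mask |= (events >= start) & (events <= stop)
good_events = events[mask]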
 

Table (ascii.cds)


data_path = '/path/data.dat'
readme_path = '/path/readme'
Tabledata = Table.read(data_path,
                       readme=readme_path,
                       format="ascii.cds")
 

Table (FITS)

from astropy.table import Table
tbdata = Table.read(path+"qsopar.fits")
tbdata[:12]   # first 12 rows
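
Writing the Table back out to FITS works the same way (a minimal sketch; the output file name is arbitrary):

# write the (possibly modified) table back to a FITS file
tbdata.write(path + 'qsopar_new.fits', overwrite=True)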
 

pandas load data

data_path = '/path/data.csv'
data = pd.read_csv(data_path, header=0, delimiter=',')
print(data.columns)

data_path = '/path/data.txt'
data = pd.read_csv(data_path, header=None, sep=r'\s+')   # whitespace-separated
#data.columns = []

data_path = '/path/data.xlsx'
data = pd.read_excel(data_path, header=0, sheet_name='sheet1')





                      
html_url = 'website_url'
data_table_html = pd.read_html(html_url)   # returns a list of DataFrames

data_path = '/path/data.dat'
data = pd.read_csv(data_path, header=0, sep=r'\s+')   # plain-text table: read_csv, not read_excel
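
When a text file has no header row, column names can be assigned at read time (a minimal sketch; 'mjd' and 'flux' are placeholder names):

# assign column names to a headerless, whitespace-separated file
# ('mjd' and 'flux' are placeholder names, not from the original post)
data = pd.read_csv('/path/data.txt', header=None, sep=r'\s+', names=['mjd', 'flux'])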

pandas write data

data.to_csv(data_path, index=False)   # drop the DataFrame index column

data.to_excel(data_path)              # needs openpyxl/xlsxwriter installed
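
Since the post is about FITS, a DataFrame can also be saved as a FITS binary table via astropy (a minimal sketch; the output name is arbitrary):

# round-trip a pandas DataFrame through astropy to a FITS table
Table.from_pandas(data).write('data_from_pandas.fits', overwrite=True)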

json / pickle

from __future__ import unicode_literals
import json
import pickle

with open("Source_Name_dict_pickle.json", "wb") as f:   # write dict (pickle bytes, despite the .json name)
    pickle.dump(Source_Name_dict, f, pickle.HIGHEST_PROTOCOL)

with open("Source_Name_dict_pickle.json", "rb") as f:   # load dict
    Source_Name_dict_load_pickle = pickle.load(f)

# invert the dict so values (names) map back to keys (urls)
Source_Name_dict_load_pickle_Name2url = dict(zip(Source_Name_dict_load_pickle.values(),
                                                 Source_Name_dict_load_pickle.keys()))

with open("Source_Name_dict_pickle.json", "wb") as f:# writr dict
    pickle.dump(Source_Name_dict , f,pickle.HIGHEST_PROTOCOL)
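For a plain dict of strings, the json module (already imported above) works just as well and gives a human-readable file (a minimal sketch):

# save/load the same dict as actual JSON text instead of pickle bytes
with open("Source_Name_dict.json", "w") as f:
    json.dump(Source_Name_dict, f, indent=2)

with open("Source_Name_dict.json", "r") as f:
    Source_Name_dict_load_json = json.load(f)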
    


Welcome to follow the WeChat official account: 曜灵集