如何解决将图像轮廓的顶点从显示转换为原始数据坐标框
我正在编写一些代码来计算干涉仪望远镜观察天空上给定点的点扩散函数。然后在响应水平为 50% 时获取轮廓。
#!/usr/bin/env python
import numpy as np
import csv
import re
import scipy.constants as con
import matplotlib.pyplot as plt
import datetime
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
from skimage import measure
product_id = "array_1"
current_freq = 650000000
current_coords = "290.66200000000003,18.836777777777776"
current_antennas = "00,01,02,03,04,05,06,07,08,09,10,11,15,17,18,19,20,21,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63"
current_time = datetime.datetime.Now()
refAnt = (-30.71106,21.44389,1035)
wavelength = con.c / current_freq
J2000RefTime = datetime.datetime(2000,1,56) # UTC
gridNum = 100000*2
imsize = 500
antlist = [int(a) for a in current_antennas.split(',')]
ants = np.genfromtxt('antenna.csv',delimiter=',',dtype=None,names=["name","","ENU",""],encoding="ascii")
ENUoffsets = []
'''
Get gains for baseline weights
'''
weights = np.zeros(64,)
weights[:] = 1. # equal weight for all antennas
'''
Antenna plot
'''
fig,ax1 = plt.subplots(1,figsize=(10,8))
for a in antlist:
ENUoffsets.append(np.array([float(ants["ENU"][a].split(' ')[2]),float(ants["ENU"][a].split(' ')[3]),float(ants["ENU"][a].split(' ')[4])]))
'''
Create baselines
'''
Baselines = []
for i in range(0,len(antlist)):
row = []
for j in range(0,len(antlist)):
row.append([])
Baselines.append(row)
BaselineList = []
index = 1
for i in range(0,len(antlist)):
for j in range(index,len(antlist)):
#print ENUoffsets[i]-ENUoffsets[j]
Baselines[i][j] = ENUoffsets[i]-ENUoffsets[j]
BaselineList.append(Baselines[i][j])
index += 1
'''
Rotate and project baselines
'''
refLat = np.deg2rad(refAnt[0])
refLon = refAnt[1]
TimeOffset = current_time - J2000RefTime
TimeOffset = TimeOffset.days + TimeOffset.seconds/(60.*60.*24.) + TimeOffset.microseconds/(1000000.*60.*60.*24.)
ObsTime = current_time.hour + current_time.minute/60. + current_time.second/(60.*60.) + current_time.microsecond/(1000000.*60.*60.)
LST = 100.46+0.985647*TimeOffset + refLon + 15*ObsTime
LST = LST % 360.
ra_deg = float(current_coords.split(",")[0])
dec_deg = float(current_coords.split(",")[1])
DEC = np.deg2rad(dec_deg)
HA = np.deg2rad(LST) - np.deg2rad(ra_deg)
RotatedProjectedBaselines = []
maxlength = 0
for b in BaselineList:
epsilon = 0.000000000001
length = np.sqrt(b[0]**2 + b[1]**2 + b[2]**2)
if length > maxlength:
maxlength = length
azim = np.arctan2(b[0],(b[1]+epsilon))
el = np.arcsin(b[2] / (length+epsilon))
# rotation matrix
Rot = np.array([np.cos(refLat)*np.sin(el) - np.sin(refLat) * np.cos(el) * np.cos(azim),np.cos(el)*np.sin(azim),np.sin(refLat) * np.sin(el) + np.cos(refLat)*np.cos(el)*np.cos(azim)])
# projection matrix
Proj = np.array([
[np.sin(HA),np.cos(HA),0],[-np.sin(DEC)*np.cos(HA),np.sin(DEC)*np.sin(HA),np.cos(DEC)],[np.cos(DEC)*np.cos(HA),-np.cos(DEC)*np.sin(HA),np.sin(DEC)]])
RotatedProjectedBaselines.append(np.dot(length*Rot.T,Proj.T))
'''
UV samples
'''
imLength = gridNum/3600 # deg
step = np.deg2rad(imLength)
uvSamples = []
for b in RotatedProjectedBaselines:
u = int(round(b[0]/wavelength/step + (gridNum/2 - 1)))
v = int(round(b[1]/wavelength/step + (gridNum/2 - 1)))
uvSamples.append((u,v))
'''
DFT grid
'''
halfLength = imsize/2
interval = 1
ul = np.mgrid[0:halfLength:interval,0:halfLength:interval]
ur = np.mgrid[0:halfLength:interval,gridNum-halfLength:gridNum:interval]
bl = np.mgrid[gridNum-halfLength:gridNum:interval,0:halfLength:interval]
br = np.mgrid[gridNum-halfLength:gridNum:interval,gridNum-halfLength:gridNum:interval]
imagesCoord = np.array([np.concatenate((np.concatenate((ul[0].T,ur[0].T)).T,np.concatenate((bl[0].T,br[0].T)).T)).flatten(),np.concatenate((np.concatenate((ul[1].T,ur[1].T)).T,np.concatenate((bl[1].T,br[1].T)).T)).flatten()])
'''
DFT
'''
index = 1
weightingList = []
for i in range(0,64):
for j in range(index,64):
weightingList.append(weights[i]*weights[j])
index += 1
weightingList /= np.amax(weightingList)
fringeSum = 0
for p in range(0,len(uvSamples)):
U = imagesCoord[1] * uvSamples[p][0]
V = imagesCoord[0] * uvSamples[p][1]
weight = weightingList[p]
fringeSum += weight*np.exp(1j*2*con.pi*(U + V)/gridNum)
fringeSum = fringeSum.reshape(imsize,imsize)/len(uvSamples)
fringeSum = np.abs(fringeSum)
image = np.fft.fftshift(fringeSum)
image = np.fliplr(image)
image /= np.amax(image)
im = ax1.imshow(image,cmap='inferno',aspect="equal")
ax1.xaxis.set_ticklabels([])
ax1.yaxis.set_ticklabels([])
ax1.set_title('Synthesised Beam',fontdict=dict(fontsize=16))
contours = measure.find_contours(image,0.5)
with open("contour_vertices.csv","w",newline="") as f:
for contour in contours:
ax1.plot(contour[:,1],contour[:,0])
writer = csv.writer(f)
writer.writerows(contour)
fig.colorbar(im,ax=ax1)
scalebar = AnchoredSizeBar(ax1.transData,'1 arcmin','lower left',pad=0.1,color='white',frameon=False,size_vertical=1)
ax1.add_artist(scalebar)
plt.subplots_adjust(left=None,bottom=None,right=None,top=None,wspace=.04,hspace=.2)
plt.savefig('SynthesisedBeam_{}_{}.png'.format(current_coords,current_time),dpi=300)
antenna.csv
是一个逗号分隔的变量,包含有关天线的信息,即
然后我用线 contours = measure.find_contours(image,0.5)
获取轮廓的顶点,并将它们输出到一个 csv 文件。如下图所示,
PSF 的形状被保留,但坐标值是显示/像素坐标而不是数据坐标(度)(对于 imsize = 500
的图像,PSF 的中心在 ({{ 1}}))。尽管图中的 250,250
表明显示坐标和数据坐标之间显然存在映射,并且中心坐标是已知的(AnchoredSizeBar
),但我已尝试使用 { 将坐标转换为原始帧{1}},无济于事。
如何在原始坐标系中输出轮廓的顶点(即以度为单位,以 (290.66200000000003,18.836777777777776
) 为中心?
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。