import time
import numpy as np
from generate_sequence import *
# all data, n_UE, pilots/frame
#csi, samps = samps2csi(samples, num_cl_tmp, symbol_length, fft_size=64, offset=offset, bound=0, cp=0)
def samps2csi(samps, num_users, samps_per_user=224, fft_size=64, offset=0, bound=94, cp=0):
"""Convert an Argos HDF5 log file with raw IQ in to CSI.
Asumes 802.11 style LTS used for trace collection.
Args:
samps: The h5py or numpy array containing the raw IQ samples,
dims = [Frame, Cell, User, Antenna, Sample].
num_users: Number of users used in trace collection. (Last 'user' is noise.)
samps_per_user: Number of samples allocated to each user in each frame.
Returns:
csi: Complex numpy array with [Frame, Cell, User, Pilot Rep, Antenna, Subcarrier]
iq: Complex numpy array of raw IQ samples [Frame, Cell, User, Pilot Rep, Antenna, samples]
Example:
h5log = h5py.File(filename,'r')
csi,iq = samps2csi(h5log['Pilot_Samples'], h5log.attrs['num_mob_ant']+1, h5log.attrs['samples_per_user'])
"""
debug = False
chunkstart = time.time()
usersamps = np.reshape(
samps, (samps.shape[0], samps.shape[1], num_users, samps.shape[3], samps_per_user, 2))
# pilot_rep is either 1 or 2: the number of LTS repetitions that fit in the pilot
pilot_rep = min([(samps_per_user-bound)//(fft_size+cp), 2])
iq = np.empty((samps.shape[0], samps.shape[1], num_users,
samps.shape[3], pilot_rep, fft_size), dtype='complex64')
if debug:
print("chunkstart = {}, usersamps.shape = {}, samps.shape = {}, samps_per_user = {}, nbat= {}, iq.shape = {}".format(chunkstart, usersamps.shape, samps.shape, samps_per_user, nbat, iq.shape))
for i in range(nbat): # 2 first symbols (assumed LTS) seperate estimates
print("chunkstart = {}, usersamps.shape = {}, samps.shape = {}, samps_per_user = {}, nbat= {}, iq.shape = {}".format(
chunkstart, usersamps.shape, samps.shape, samps_per_user, nbat, iq.shape))
for i in range(pilot_rep): # 2 first symbols (assumed LTS) seperate estimates
iq[:, :, :, :, i, :] = (usersamps[:, :, :, :, offset + cp + i*fft_size:offset+cp+(i+1)*fft_size, 0] +
usersamps[:, :, :, :, offset + cp + i*fft_size:offset+cp+(i+1)*fft_size, 1]*1j)*2**-15
iq = iq.swapaxes(3, 4)
if debug:
print("iq.shape after axes swapping: {}".format(iq.shape))
fftstart = time.time()
csi = np.empty(iq.shape, dtype='complex64')
if fft_size == 64:
# Retrieve frequency-domain LTS sequence
_, lts_freq = generate_training_seq(
preamble_type='lts', seq_length=[], cp=32, upsample=1, reps=[])
pre_csi = np.fft.fftshift(np.fft.fft(iq, fft_size, 5), 5)
csi = np.fft.fftshift(np.fft.fft(iq, fft_size, 5), 5) * lts_freq
if debug:
print("csi.shape:{} lts_freq.shape: {}, pre_csi.shape = {}".format(csi.shape, lts_freq.shape, pre_csi.shape))
print("csi.shape:{} lts_freq.shape: {}, pre_csi.shape = {}".format(
csi.shape, lts_freq.shape, pre_csi.shape))
endtime = time.time()
if debug:
print("chunk time: %f fft time: %f" % (fftstart - chunkstart, endtime -fftstart) )
csi = np.delete(csi, [0, 1, 2, 3, 4, 5, 32, 59, 60, 61, 62, 63], 5) # remove zero subcarriers
print("chunk time: %f fft time: %f" %
(fftstart - chunkstart, endtime - fftstart))
# remove zero subcarriers
csi = np.delete(csi, [0, 1, 2, 3, 4, 5, 32, 59, 60, 61, 62, 63], 5)
return csi, iq
def samps2csi_large(samps, num_users, samps_per_user=224, offset=47, chunk_size=1000):
"""Wrapper function for samps2csi_main for to speed up large logs by leveraging data-locality. Chunk_size may need to be adjusted based on your computer."""
if samps.shape[0] > chunk_size:
# rather than memmap let's just increase swap... should be just as fast.
#csi = np.memmap(os.path.join(_here,'temp1.mymemmap'), dtype='complex64', mode='w+', shape=(samps.shape[0], num_users, 2, samps.shape[1],52))
#iq = np.memmap(os.path.join(_here,'temp2.mymemmap'), dtype='complex64', mode='w+', shape=(samps.shape[0], num_users, 2, samps.shape[1],64))
csi = np.empty(
(samps.shape[0], num_users, 2, samps.shape[1], 52), dtype='complex64')
iq = np.empty(
(samps.shape[0], num_users, 2, samps.shape[1], 64), dtype='complex64')
chunk_num = samps.shape[0]//chunk_size
for i in range(chunk_num):
csi[i*chunk_size:i*chunk_size+chunk_size], iq[i*chunk_size:i*chunk_size+chunk_size] = samps2csi(
samps[i*chunk_size:(i*chunk_size+chunk_size), :, :, :], num_users, samps_per_user=samps_per_user)
csi[chunk_num*chunk_size:], iq[chunk_num*chunk_size:] = samps2csi(
samps[chunk_num*chunk_size:, :, :, :], num_users, samps_per_user=samps_per_user)
else:
csi, iq = samps2csi(
samps, num_users, samps_per_user=samps_per_user, offset=offset)
return csi, iq
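# Hedged usage sketch (not part of the original library): convert one trace
# end-to-end, mirroring the samps2csi() docstring example above. The filename
# is hypothetical; for very long traces, samps2csi_large() does the same
# conversion in chunks.
def _example_samps2csi(filename='trace.hdf5'):
    import h5py
    h5log = h5py.File(filename, 'r')
    # Last 'user' is the noise slot, hence the +1.
    csi, iq = samps2csi(h5log['Pilot_Samples'],
                        h5log.attrs['num_mob_ant'] + 1,
                        h5log.attrs['samples_per_user'])
    # csi dims: [Frame, Cell, User, Pilot Rep, Antenna, Subcarrier]
    return csi, iq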
def calCond(userCSI):
"""Calculate the standard matrix condition number.
Args:
userCSI: Complex numpy array with [Frame, User, BS Ant, Subcarrier]
Returns:
condNumber_ave: The average condition number across all frames and subcarriers.
condNumber: Numpy array of condition number [Frame, Subcarrier].
"""
condNumber = np.empty(
(userCSI.shape[0], userCSI.shape[3]), dtype='float32')
for sc in range(userCSI.shape[3]):
condNumber[:, sc] = np.linalg.cond(
userCSI[:, :, :, sc])
condNumber_ave = np.average(condNumber)
return condNumber_ave, condNumber
def calDemmel(userCSI):
"""Calculate the Demmel condition number.
Args:
userCSI: Complex numpy array with [Frame, User, BS Ant, Subcarrier]
Returns:
demmelNumber_ave: The average Demmel condition number across all frames and subcarriers.
demmelNumber: Numpy array of condition number [Frame, Subcarrier].
"""
demmelNumber = np.empty(
(userCSI.shape[0], userCSI.shape[3]), dtype='float32')
for sc in range(userCSI.shape[3]):
# covariance matrix
cov = np.matmul(userCSI[:, :, :, sc], np.transpose(
userCSI[:, :, :, sc], [0, 2, 1]).conj())
eigenvalues = np.abs(np.linalg.eigvals(cov))
demmelNumber[:, sc] = np.sum(
eigenvalues, axis=1)/np.min(eigenvalues, axis=1)
demmelNumber_ave = np.average(demmelNumber)
return demmelNumber_ave, demmelNumber
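# Hedged sketch (illustrative only, not from the original code): sanity-check
# both condition-number metrics on synthetic i.i.d. Rayleigh CSI with
# 100 frames, 4 users, 8 BS antennas, and 52 subcarriers.
def _example_condition_numbers():
    rng = np.random.default_rng(0)
    shape = (100, 4, 8, 52)  # [Frame, User, BS Ant, Subcarrier]
    userCSI = (rng.standard_normal(shape) +
               1j * rng.standard_normal(shape)).astype('complex64')
    cond_ave, cond = calCond(userCSI)
    demmel_ave, demmel = calDemmel(userCSI)
    # Well-conditioned i.i.d. channels give small values for both metrics.
    return cond_ave, demmel_ave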
def calCapacity(userCSI, noise, beamweights, downlink=False):
"""Calculate the capacity of a trace with static beamweights.
Apply a set of beamweights to a set of wideband user channels and calculate the shannon capacity of the resulting channel for every Frame.
Note that if the beamweights are calculated with a frame from the trace, that frame will have unrealistic capacity since it will correlate noise as signal.
Args:
userCSI: Complex numpy array with [Frame, User, BS Ant, Subcarrier]
noise: Complex numpy array with [Frame, LTS, BS Ant, Subcarrier]
beamweights: Set of beamweights to apply to userCSI [BS Ant, User, Subcarrier]
downlink: (Boolean) Compute downlink capacity if True, else Uplink
Returns:
cap_total: Total capacity across all users averaged over subcarriers in bps/Hz [Frame]
cap_u: Capacity per user averaged over subcarriers in bps/Hz [Frame, User]
cap_sc: Capacity per user and subcarrier in bps/Hz [Frame, User, Subcarrier]
SINR: Signal to interference and noise ratio for each frame, user, and subcarrier [Frame, User, Subcarrier]
cap_su_sc: Single-user (no interference) capacity per subcarrier in bps/Hz [Frame, User, Subcarrier]
cap_su_u: Single-user (no interference) capacity averaged over subcarriers in bps/Hz [Frame, User]
SNR: Signal to noise ratio for each frame, user, and subcarrier [Frame, User, Subcarrier]
"""
noise_bs_sc = np.mean(np.mean(np.abs(noise), 0),
0) # average over time and the two ltss
sig_intf = np.empty(
(userCSI.shape[0], userCSI.shape[1], userCSI.shape[1], userCSI.shape[3]), dtype='float32')
noise_sc_u = np.empty(
(userCSI.shape[1], userCSI.shape[3]), dtype='float32')
for sc in range(userCSI.shape[3]):
# TODO: can we get rid of the for loop?
sig_intf[:, :, :, sc] = np.square(
np.abs(np.dot(userCSI[:, :, :, sc], beamweights[:, :, sc])))
# noise is uncorrelated, and all we have is average power here (Evan wants to do it per frame, but I think that's a bad idea)
noise_sc_u[:, sc] = np.dot(
np.square(noise_bs_sc[:, sc]), np.square(np.abs(beamweights[:, :, sc])))
# noise_sc_u *= 4 #fudge factor since our noise doesn't include a lot of noise sources
sig_sc = np.diagonal(sig_intf, axis1=1, axis2=2)
sig_sc = np.swapaxes(sig_sc, 1, 2)
# remove noise from signal power (only matters in low snr really...)
sig_sc = sig_sc - noise_sc_u
sig_sc[sig_sc < 0] = 0 # can't have negative power (prevent errors)
intf_sc = np.sum(sig_intf, axis=1+int(downlink)) - sig_sc
SINR = sig_sc/(noise_sc_u+intf_sc)
cap_sc = np.log2(1+SINR)
cap_u = np.mean(cap_sc, axis=2)
cap_total = np.sum(cap_u, axis=1)
SNR = sig_sc/noise_sc_u
cap_su_sc = np.log2(1+SNR)
cap_su_u = np.mean(cap_su_sc, axis=2)
return cap_total, cap_u, cap_sc, SINR, cap_su_sc, cap_su_u, SNR
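# Hedged sketch (not from the original code): build static conjugate
# beamweights from one reference frame and evaluate the whole trace with them.
# userCSI and noise are assumed to come from the trace, shaped as described in
# the docstring above.
def _example_static_capacity(userCSI, noise, frame_ref=0):
    # userCSI: [Frame, User, BS Ant, Subcarrier] -> beamweights: [BS Ant, User, Subcarrier]
    beamweights = np.transpose(np.conj(userCSI[frame_ref]), (1, 0, 2))
    caps = calCapacity(userCSI, noise, beamweights, downlink=False)
    # Caution (from the docstring): frame_ref itself will show inflated
    # capacity, since its beamweights correlate noise as signal.
    return caps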
def calContCapacity(csi, conj=True, downlink=False, offset=1):
"""Calculate the capacity of a trace with continuous beamforming.
For every frame in a trace, calculate beamweights (either conjugate or ZF),
apply them to a set of wideband user channels either from the same frame or some constant offset (delay),
then calculate the shannon capacity of the resulting channel.
The main difference between uplink and downlink is the source of interference (and power allocation).
In uplink, the intended user's interference is a result of every other user's signal passed through that user's beamweights.
In downlink, the intended user's interference is a result of every other user's signal passed through their beamweights (applied to the intended user's channel).
Note that every user has a full 802.11 LTS, which is a repetition of the same symbol.
This method uses the first half of the LTS to make beamweights, then applies them to the second half.
Otherwise, noise is correlated, resulting in inaccurate results.
Args:
csi: Full complex numpy array with separate LTSs and noise [Frame, User, LTS, BS Ant, Subcarrier] (noise is last user)
conj: (Boolean) If True use conjugate beamforming, else use zeroforcing beamforming.
downlink: (Boolean) Compute downlink capacity if True, else Uplink
offset: Number of frames to delay beamweight application.
Returns:
cap_total: Total capacity across all users averaged over subcarriers in bps/Hz [Frame]
cap_u: Capacity per user averaged over subcarriers in bps/Hz [Frame, User]
cap_sc: Capacity per user and subcarrier in bps/Hz [Frame, User, Subcarrier]
SINR: Signal to interference and noise ratio for each frame, user, and subcarrier [Frame, User, Subcarrier]
cap_su_sc: Single-user (no interference) capacity per subcarrier in bps/Hz [Frame, User, Subcarrier]
cap_su_u: Single-user (no interference) capacity averaged over subcarriers in bps/Hz [Frame, User]
SNR: Signal to noise ratio for each frame, user, and subcarrier [Frame, User, Subcarrier]
"""
csi_sw = np.transpose(
csi, (0, 4, 1, 3, 2)) # hack to avoid for loop (matmul requires last two axes to be matrix) #frame, sc, user, bsant, lts
# noise is last set of data. #frame, sc, bsant, lts
noise = csi_sw[:, :, -1, :, :]
# don't include noise, use first LTS for CSI #frame, sc, user, bsant, lts
userCSI_sw = csi_sw[:, :, :-1, :, 0]
# average over time and the two ltss
noise_sc_bs = np.mean(np.mean(np.abs(noise), 3), 0)
if conj:
'''Calculate weights as conjugate.'''
beamweights = np.transpose(
np.conj(csi_sw[:, :, :-1, :, 1]), (0, 1, 3, 2))
else:
'''Calculate weights using zeroforcing.'''
beamweights = np.empty(
(userCSI_sw.shape[0], userCSI_sw.shape[1], userCSI_sw.shape[3], userCSI_sw.shape[2]), dtype='complex64')
for frame in range(userCSI_sw.shape[0]):
for sc in range(userCSI_sw.shape[1]):
# * np.linalg.norm(csi[frame,:4,0,:,sc]) #either this, or the noise power has to be scaled back accordingly
beamweights[frame, sc, :, :] = np.linalg.pinv(
csi_sw[frame, sc, :-1, :, 1])
if offset > 0:
# delay offset samples
beamweights = np.roll(beamweights, offset, axis=0)
sig_intf = np.square(
np.abs(np.matmul(userCSI_sw[offset:, :, :, :], beamweights[offset:, :, :, :])))
noise_sc_u = np.transpose(np.sum(np.square(
noise_sc_bs)*np.square(np.abs(np.transpose(beamweights, (0, 3, 1, 2)))), 3), (0, 2, 1))
noise_sc_u = noise_sc_u[offset:]
# noise_sc_u *= 4 #fudge factor since our noise doesn't include a lot of noise sources. this should probably be justified/measured or removed
sig_sc = np.diagonal(sig_intf, axis1=2, axis2=3)
# remove noise from signal power (only matters in low snr really...)
sig_sc = sig_sc - noise_sc_u
sig_sc[sig_sc < 0] = 0 # can't have negative power (prevent errors)
# lazy hack -- just sum then subtract the intended signal.
intf_sc = np.sum(sig_intf, axis=2+int(downlink)) - sig_sc
SINR = sig_sc/(noise_sc_u+intf_sc)
cap_sc = np.log2(1+SINR)
cap_u = np.mean(cap_sc, axis=1)
cap_total = np.sum(cap_u, axis=1)
SNR = sig_sc/noise_sc_u
cap_su_sc = np.log2(1+SNR)
cap_su_u = np.mean(cap_su_sc, axis=1)
return cap_total, cap_u, cap_sc, SINR, cap_su_sc, cap_su_u, SNR
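# Hedged sketch (not from the original code): feed samps2csi() output into
# calContCapacity(). samps2csi() returns [Frame, Cell, User, Pilot Rep,
# Antenna, Subcarrier]; this function expects [Frame, User, LTS, BS Ant,
# Subcarrier] with noise as the last 'user', so select a single cell first
# (cell 0 assumed here).
def _example_cont_capacity(csi):
    csi_cell0 = csi[:, 0, :, :, :, :]
    cap_total, cap_u, cap_sc, SINR, cap_su_sc, cap_su_u, SNR = calContCapacity(
        csi_cell0, conj=True, downlink=False, offset=1)
    return cap_total  # total uplink capacity per frame, bps/Hz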
def calExpectedCapacity(csi, user=0, max_delay=100, conj=True, downlink=False):
"""Calculate the expected capacity for beamweights calculated with delayed stale CSI.
Args:
csi: Full complex numpy array with separate LTSs and noise [Frame, User, LTS, BS Ant, Subcarrier] (noise is last user)
user: Index of user to compute for (note that other users still affect capacity due to their interference)
max_delay: Maximum delay (in frames) to delay the beamweight computation.
conj: (Boolean) If True use conjugate beamforming, else use zeroforcing beamforming.
downlink: (Boolean) Compute downlink capacity if True, else Uplink
Returns:
cap: Average capacity across all frames for a given delay (in frames) in bps/Hz [Delay]
"""
cap = []
for d in range(max_delay):
# print([d,time.time()])
delayed = calContCapacity(
csi, conj=conj, downlink=downlink, offset=d)
cap.append(np.mean(delayed[1][:, user]))
return cap
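# Hedged sketch (not from the original code): visualize how capacity decays
# as the beamweights go stale, using the per-cell 5-D CSI described above.
def _example_capacity_vs_delay(csi_cell0, user=0):
    import matplotlib.pyplot as plt
    cap = calExpectedCapacity(csi_cell0, user=user, max_delay=50, conj=True)
    plt.plot(cap)
    plt.xlabel('Beamweight delay (frames)')
    plt.ylabel('Capacity (bps/Hz)')
    plt.show()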
def calCorr(userCSI, corr_vec):
"""
Calculate the instantaneous correlation with a given correlation vector
"""
sig_intf = np.empty(
(userCSI.shape[0], userCSI.shape[1], userCSI.shape[1], userCSI.shape[3]), dtype='float32')
for sc in range(userCSI.shape[3]):
sig_intf[:, :, :, sc] = np.abs(np.dot(userCSI[:, :, :, sc], corr_vec[:, :, sc])) / np.dot(
np.abs(userCSI[:, :, :, sc]), np.abs(corr_vec[:, :, sc]))
# gets correlation of subcarriers for each user across bs antennas
sig_sc = np.diagonal(sig_intf, axis1=1, axis2=2)
sig_sc = np.swapaxes(sig_sc, 1, 2)
corr_total = np.mean(sig_sc, axis=1)  # average correlation across users
return corr_total, sig_sc
def demult(csi, data, method='zf'):
"""data: Frame, Antenna, Subcarrier"""
# Compute beamweights based on the specified frame.
userCSI = np.mean(csi, 2) # average over both LTSs
sig_intf = np.empty(
(userCSI.shape[0], userCSI.shape[1], userCSI.shape[3]), dtype='complex64')
for frame in range(csi.shape[0]):
for sc in range(userCSI.shape[3]):
if method == 'zf':
sig_intf[frame, :, sc] = np.dot(
data[frame, :, sc], np.linalg.pinv(userCSI[frame, :, :, sc]))
else:
sig_intf[frame, :, sc] = np.dot(data[frame, :, sc], np.transpose(
np.conj(userCSI[frame, :, :, sc]), (1, 0)))
return sig_intf
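# Hedged sketch (not from the original code): zero-forcing demultiplexing of
# frequency-domain uplink symbols. 'data' is a hypothetical array of received
# uplink samples with dims [Frame, BS Ant, Subcarrier]; csi_cell0 is per-cell
# CSI [Frame, User, LTS, BS Ant, Subcarrier], with the noise 'user' dropped
# before equalizing.
def _example_demult(csi_cell0, data):
    streams = demult(csi_cell0[:, :-1], data, method='zf')
    # streams: [Frame, User, Subcarrier] -- per-user equalized symbols
    return streams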
#!/usr/bin/python3
"""
hdf5_lib.py
Library handling recorded hdf5 file from channel sounding (see Sounder/).
Author(s):
C. Nicolas Barati: nicobarati@rice.edu
Oscar Bejarano: obejarano@rice.edu
Rahman Doost-Mohammady: doost@rice.edu
---------------------------------------------------------------------
Copyright © 2018-2019. Rice University.
RENEW OPEN SOURCE LICENSE: http://renew-wireless.org/license
---------------------------------------------------------------------
"""
import sys
import numpy as np
import h5py
import matplotlib.pyplot as plt
import collections
import time
from optparse import OptionParser
from channel_analysis import *
class hdf5_lib:
def __init__(self, filename, n_frames_to_inspect=0, n_fr_insp_st = 0):
self.h5file = None
self.filename = filename
self.h5struct = []
self.data = []
self.metadata = {}
self.pilot_samples = []
self.uplink_samples = []
self.n_frm_st = n_fr_insp_st # index of the first frame to inspect
self.n_frm_end = self.n_frm_st + n_frames_to_inspect # index of last frame in the range of n_frames_to_inspect
def open_hdf5(self):
"""
Get the most recent log file, open it if necessary.
"""
if (not self.h5file) or (self.filename != self.h5file.filename):
# if it's closed, e.g. for the C version, open it
print('Opening %s...' % self.filename)
try:
self.h5file = h5py.File(self.filename, 'r')
except OSError:
print("File not found. Terminating program now")
sys.exit(0)
# return self.h5file
def log2csi_hdf5(self, filename, offset=0):
"""Convert raw IQ log to CSI.
Converts input Argos HDF5 trace to frequency domain CSI and writes it to the same filename with -csi appended.
Args: filename of log
Returns: None
"""
symbol_len = int(self.h5file.attrs['SYMBOL_LEN'])
num_cl = int(self.h5file.attrs['CL_NUM'])
# compute CSI for each user and get a nice numpy array
# Returns csi with Frame, User, LTS (there are 2), BS ant, Subcarrier
# also returns iq samples nicely chunked out, same dims, but subcarrier is sample. (Last 'user' is noise.)
csi, iq = samps2csi(self.pilot_samples, num_cl, symbol_len, offset=offset)
# create hdf5 file to dump csi to
h5f = h5py.File(filename[:-5]+'-csi.hdf5', 'w')
h5f.create_dataset('csi', data=csi)
#todo: copy gains
#todo check if file exists os.path.isfile(fname) if f in glob.glob(f):
for k in self.h5file.attrs:
h5f.attrs[k] = self.h5file.attrs[k]
h5f.close()
self.h5file.close()
def get_data(self):
"""
Parse file to retrieve metadata and data.
HDF5 file has been written in DataRecorder.cpp (in Sounder folder)
Output:
Data (hierarchy):
-Path
-Pilot_Samples
--Samples
-UplinkData
--Samples
Dimensions of input sample data (as shown in DataRecorder.cpp in Sounder):
- Pilots
dims_pilot[0] = maxFrame
dims_pilot[1] = number of cells
dims_pilot[2] = number of UEs
dims_pilot[3] = number of antennas (at BS)
dims_pilot[4] = samples per symbol * 2 (IQ)
- Uplink Data
dims_data[0] = maxFrame
dims_data[1] = number of cells
dims_data[2] = uplink symbols per frame
dims_data[3] = number of antennas (at BS)
dims_data[4] = samples per symbol * 2 (IQ)
"""
g = self.h5file
prefix = ''
self.data = collections.defaultdict(lambda: collections.defaultdict(dict))
for key in g.keys():
item = g[key]
path = '{}/{}'.format(prefix, key)
keys = [i for i in item.keys()]
if isinstance(item[keys[0]], h5py.Dataset): # test for dataset
# Path
self.data['path'] = path
# Pilot and UplinkData Samples
for k in keys:
if not isinstance(item[k], h5py.Group):
# dataset = np.array(item[k].value) # dataset.value has been deprecated. dataset[()] instead
dtst_ptr = item[(k)]
n_frm = np.abs(self.n_frm_end - self.n_frm_st)
# check if the number of requested frames and the upper and lower bounds make sense
# also check that end_frame > start_frame:
if (n_frm > 0 and self.n_frm_st >= 0 and (self.n_frm_end >= 0 and self.n_frm_end > self.n_frm_st)):
dataset = np.array(dtst_ptr[self.n_frm_st:self.n_frm_end, ...])
else:
# if the previous check is False, do as usual:
print("WARNING: No frames_to_inspect given and/or boundaries don't make sense. Will process the whole dataset.")
dataset = np.array(dtst_ptr)
self.n_frm_end = self.n_frm_st + dataset.shape[0]
if type(dataset) is np.ndarray:
if dataset.size != 0:
if type(dataset[0]) is np.bytes_:
dataset = [a.decode('ascii') for a in dataset]
# Store samples
self.data[k]['Samples'] = dataset
else:
raise Exception("No datasets found")
if bool(self.data['Pilot_Samples']):
self.pilot_samples = self.data['Pilot_Samples']['Samples']
if bool(self.data['UplinkData']):
self.uplink_samples = self.data['UplinkData']['Samples']
return self.data
def get_metadata(self):
"""
-Attributes
{FREQ, RATE, SYMBOL_LEN_NO_PAD, PREFIX_LEN, POSTFIX_LEN, SYMBOL_LEN, FFT_SIZE, CP_LEN,
BEACON_SEQ_TYPE, PILOT_SEQ_TYPE, BS_HUB_ID, BS_SDR_NUM_PER_CELL, BS_SDR_ID, BS_NUM_CELLS,
BS_CH_PER_RADIO, BS_FRAME_SCHED, BS_RX_GAIN_A, BS_TX_GAIN_A, BS_RX_GAIN_B, BS_TX_GAIN_B,
BS_BEAMSWEEP, BS_BEACON_ANT, BS_NUM_ANT, BS_FRAME_LEN, CL_NUM, CL_CH_PER_RADIO, CL_AGC_EN,
CL_RX_GAIN_A, CL_TX_GAIN_A, CL_RX_GAIN_B, CL_TX_GAIN_B, CL_FRAME_SCHED, CL_SDR_ID,
CL_MODULATION, UL_SYMS}
"""
# Retrieve attributes, translate into python dictionary
#data = self.data
self.metadata = dict(self.h5file['Data'].attrs)
if "CL_SDR_ID" in self.metadata.keys():
cl_present = True
else:
cl_present = False
print('Client information not present. It is likely the client was run separately')
bs_id = self.metadata['BS_SDR_ID'].astype(str)
if bs_id.size == 0:
raise Exception('Base Station information not present')
# Data cleanup
# In OFDM_DATA_CLx and OFDM_PILOT, we have stored both real and imaginary in same vector
# (i.e., RE1,IM1,RE2,IM2,...,REm,IMm)
# Pilots
pilot_vec = self.metadata['OFDM_PILOT']
# some_list[start:stop:step]
I = pilot_vec[0::2]
Q = pilot_vec[1::2]
pilot_complex = I + Q * 1j
self.metadata['OFDM_PILOT'] = pilot_complex
if cl_present:
# Time-domain OFDM data
num_cl = np.squeeze(self.metadata['CL_NUM'])
ofdm_data_time = [] # np.zeros((num_cl, 320)).astype(complex)
for clIdx in range(num_cl):
this_str = 'OFDM_DATA_TIME_CL' + str(clIdx)
data_per_cl = np.squeeze(self.metadata[this_str])
# some_list[start:stop:step]
if np.any(data_per_cl):
# If data present
I = np.double(data_per_cl[0::2])
Q = np.double(data_per_cl[1::2])
IQ = I + Q * 1j
ofdm_data_time.append(IQ)
self.metadata[this_str] = ofdm_data_time
# Frequency-domain OFDM data
ofdm_data = [] # np.zeros((num_cl, 320)).astype(complex)
for clIdx in range(num_cl):
this_str = 'OFDM_DATA_CL' + str(clIdx)
data_per_cl = np.squeeze(self.metadata[this_str])
# some_list[start:stop:step]
if np.any(data_per_cl):
# If data present
I = np.double(data_per_cl[0::2])
Q = np.double(data_per_cl[1::2])
IQ = I + Q * 1j
ofdm_data.append(IQ)
self.metadata[this_str] = ofdm_data
return self.metadata
def csi_from_pilots(self, pilots_dump, z_padding=150, fft_size=64, cp=16, frm_st_idx=0, frame_to_plot=0, ref_ant=0):
"""
Finds the end of the pilots' frames and all the LTS indices relative to that.
Divides the data by the LTS sequences, then calculates CSI per LTS, per frame, and in total.
"""
print("********************* csi_from_pilots(): *********************")
# Reviewing options and vars:
show_plot = True
debug = False
test_mf = False
write_to_file = True
legacy = False
# dimensions of pilots_dump
n_frame = pilots_dump.shape[0] # no. of captured frames
n_cell = pilots_dump.shape[1] # no. of cells
n_ue = pilots_dump.shape[2] # no. of UEs
n_ant = pilots_dump.shape[3] # no. of BS antennas
n_iq = pilots_dump.shape[4] # no. of IQ samples per frame
if debug:
print("input : z_padding = {}, fft_size={}, cp={}, frm_st_idx = {}, frame_to_plot = {}, ref_ant={}".format(
z_padding, fft_size, cp, frm_st_idx, frame_to_plot, ref_ant))
print("n_frame = {}, n_cell = {}, n_ue = {}, n_ant = {}, n_iq = {}".format(
n_frame, n_cell, n_ue, n_ant, n_iq))
if ((n_iq % 2) != 0):
print("Size of iq samples:".format(n_iq))
raise Exception(
' **** The length of iq samples per frames HAS to be an even number! **** ')
n_cmpx = n_iq // 2 # no. of complex samples
# no. of complex samples in a P subframe without pre- and post- fixes
n_csamp = n_cmpx - z_padding
if legacy:
# even indices: real part of iq --> ATTENTION: I and Q are flipped at RX for some weird reason! So, even starts from 1!
idx_e = np.arange(1, n_iq, 2)
# odd indices: imaginary part of iq --> ATTENTION: I and Q are flipped at RX for some weird reason! So, odd starts from 0!
idx_o = np.arange(0, n_iq, 2)
else:
idx_e = np.arange(0, n_iq, 2) # even indices: real part of iq
# odd indices: imaginary part of iq
idx_o = np.arange(1, n_iq, 2)
# make a new data structure where the iq samples become complex numbers
cmpx_pilots = (pilots_dump[:, :, :, :, idx_e] +
1j*pilots_dump[:, :, :, :, idx_o])*2**-15
# take a time-domain lts sequence, concatenate more copies, flip, conjugate
lts_t, lts_f = generate_training_seq(
preamble_type='lts', seq_length=[], cp=32, upsample=1, reps=[])  # TD LTS sequences (x2.5), FD LTS sequences
# last 80 samps (assume 16 cp)
lts_tmp = lts_t[-80:]
n_lts = len(lts_tmp)
# no. of LTS sequences in a pilot SF
k_lts = n_csamp // n_lts
# concatenate k LTS's to filter/correlate below
lts_seq = np.tile(lts_tmp, k_lts)
lts_seq = lts_seq[::-1] # flip
# conjugate the local LTS sequence
lts_seq_conj = np.conjugate(lts_seq)
# length of the local LTS seq.
l_lts_fc = len(lts_seq_conj)
if debug:
print("cmpx_pilots.shape = {}, lts_t.shape = {}".format(
cmpx_pilots.shape, lts_t.shape))
#print("idx_e= {}, idx_o= {}".format(idx_e, idx_o))
print("n_cmpx = {}, n_csamp = {}, n_lts = {}, k_lts = {}, lts_seq_conj.shape = {}".format(
n_cmpx, n_csamp, n_lts, k_lts, lts_seq_conj.shape))
# debug/ testing
if debug:
z_pre = np.zeros(82, dtype='complex64')
z_post = np.zeros(68, dtype='complex64')
lts_t_rep = np.tile(lts_tmp, k_lts)
lts_t_rep_tst = np.append(z_pre, lts_t_rep)
lts_t_rep_tst = np.append(lts_t_rep_tst, z_post)
if test_mf:
w = np.random.normal(0, 0.1/2, len(lts_t_rep_tst)) + \
1j*np.random.normal(0, 0.1/2, len(lts_t_rep_tst))
lts_t_rep_tst = lts_t_rep_tst + w
cmpx_pilots = np.tile(
lts_t_rep_tst, (n_frame, cmpx_pilots.shape[1], cmpx_pilots.shape[2], cmpx_pilots.shape[3], 1))
print("if test_mf: Shape of lts_t_rep_tst: {} , cmpx_pilots.shape = {}".format(
lts_t_rep_tst.shape, cmpx_pilots.shape))
# normalized matched filter
a = 1
unos = np.ones(l_lts_fc)
v0 = signal.lfilter(lts_seq_conj, a, cmpx_pilots, axis=4)
v1 = signal.lfilter(unos, a, (abs(cmpx_pilots)**2), axis=4)
m_filt = (np.abs(v0)**2)/v1
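# Added explanatory note (assumption based on the code above): the two
# lfilter() calls form a normalized matched filter:
#   v0[n] = sum_k lts_seq_conj[k] * x[n-k]  (correlation of the received
#           samples with the time-reversed, conjugated LTS train), and
#   v1[n] = sliding sum of |x|^2 over the same window (received energy),
# so m_filt = |v0|^2 / v1 peaks where the samples align with the LTS train,
# independent of the received signal's scale (by Cauchy-Schwarz it is
# bounded by the energy of the local LTS template).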
# clean up NaN samples: replace NaN with a negative sentinel
nan_indices = np.argwhere(np.isnan(m_filt))
m_filt[np.isnan(m_filt)] = -0.5 # the only negative value in m_filt
if write_to_file:
# write the nan_indices into a file
np.savetxt("nan_indices.txt", nan_indices, fmt='%i')
if debug:
print("Shape of truncated complex pilots: {} , l_lts_fc = {}, v0.shape = {}, v1.shape = {}, m_filt.shape = {}".
format(cmpx_pilots.shape, l_lts_fc, v0.shape, v1.shape, m_filt.shape))
rho_max = np.amax(m_filt, axis=4) # maximum peak per SF per antenna
rho_min = np.amin(m_filt, axis=4) # minimum peak per SF per antenna
ipos = np.argmax(m_filt, axis=4)  # positions of the max peaks
sf_start = ipos - l_lts_fc + 1 # start of every received SF
# get rid of negative indices in case of an incorrect peak
sf_start = np.where(sf_start < 0, 0, sf_start)
# get the pilot samples from the cmpx_pilots array and reshape for k_lts LTS pilots:
pilots_rx_t = np.empty(
[n_frame, n_cell, n_ue, n_ant, k_lts * n_lts], dtype='complex64')
indexing_start = time.time()
for i in range(n_frame):
for j in range(n_cell):
for k in range(n_ue):
for l in range(n_ant):
pilots_rx_t[i, j, k, l, :] = cmpx_pilots[i, j, k, l,
sf_start[i, j, k, l]: sf_start[i, j, k, l] + (k_lts * n_lts)]
indexing_end = time.time()
# *************** This fancy indexing is slower than the for loop! **************
# aaa= np.reshape(cmpx_pilots, (n_frame*n_cell* n_ue * n_ant, n_cmpx))
# idxx = np.expand_dims(sf_start.flatten(), axis=1)
# idxx = np.tile(idxx, (1,k_lts*n_lts))
# idxx = idxx + np.arange(k_lts*n_lts)
# indexing_start2 = time.time()
# m,n = aaa.shape
# #bb = aaa[np.arange(aaa.shape[0])[:,None],idxx]
# bb = np.take(aaa,idxx + n*np.arange(m)[:,None])
# indexing_end2 = time.time()
# cc = np.reshape(bb,(n_frame,n_cell, n_ue , n_ant, k_lts * n_lts) )
# if debug:
# print("Shape of: aaa = {}, bb: {}, cc: {}, flattened sf_start: {}\n".format(aaa.shape, bb.shape, cc.shape, sf_start.flatten().shape))
# print("Indexing time 2: %f \n" % ( indexing_end2 -indexing_start2) )
if debug:
print("Shape of: pilots_rx_t before truncation: {}\n".format(
pilots_rx_t.shape))
pilots_rx_t = np.reshape(
pilots_rx_t, (n_frame, n_cell, n_ue, n_ant, k_lts, n_lts))
pilots_rx_t = np.delete(pilots_rx_t, range(fft_size, n_lts), 5)
if debug:
print("Indexing time: %f \n" % (indexing_end - indexing_start))
print("Shape of: pilots_rx_t = {}\n".format(pilots_rx_t.shape))
print("Shape of: rho_max = {}, rho_min = {}, ipos = {}, sf_start = {}".format(
rho_max.shape, rho_min.shape, ipos.shape, sf_start.shape))
# take fft and get the raw CSI matrix (no averaging)
# align SCs based on how they were Tx-ed
lts_f_shft = np.fft.fftshift(lts_f)
pilots_rx_f = np.fft.fft(pilots_rx_t, fft_size, 5) # take FFT
# find the zero SCs corresponding to lts_f_shft
zero_sc = np.where(lts_f_shft == 0)[0]
# remove zero subcarriers
lts_f_nzsc = np.delete(lts_f_shft, zero_sc)
# remove zero subcarriers
pilots_rx_f = np.delete(pilots_rx_f, zero_sc, 5)
# take channel estimate by dividing with the non-zero elements of lts_f_shft
csi = pilots_rx_f / lts_f_nzsc
# unnecessary step: just to make it consistent with lts_f as returned by generate_training_seq()
csi = np.fft.fftshift(csi, 5)
if debug:
print(">>>> number of NaN indices = {} NaN indices =\n{}".format(
nan_indices.shape, nan_indices))
print("Shape of: csi = {}\n".format(csi.shape))
# plot something to see if it worked!
if show_plot:
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax1.grid(True)
ax1.set_title(
'channel_analysis:csi_from_pilots(): Re of Rx pilot - ref frame {} and ref ant. {} (UE 0)'.format(frame_to_plot, ref_ant))
if debug:
print("cmpx_pilots.shape = {}".format(cmpx_pilots.shape))
ax1.plot(
np.real(cmpx_pilots[frame_to_plot - frm_st_idx, 0, 0, ref_ant, :]))
if debug:
loc_sec = lts_t_rep_tst
else:
z_pre = np.zeros(82, dtype='complex64')
z_post = np.zeros(68, dtype='complex64')
lts_t_rep = np.tile(lts_tmp, k_lts)
loc_sec = np.append(z_pre, lts_t_rep)
loc_sec = np.append(loc_sec, z_post)
ax2 = fig.add_subplot(3, 1, 2)
ax2.grid(True)
ax2.set_title(
'channel_analysis:csi_from_pilots(): Local LTS sequence zero padded')
ax2.plot(loc_sec)
ax3 = fig.add_subplot(3, 1, 3)
ax3.grid(True)
ax3.set_title(
'channel_analysis:csi_from_pilots(): MF (uncleared peaks) - ref frame {} and ref ant. {} (UE 0)'.format(frame_to_plot, ref_ant))
ax3.stem(m_filt[frame_to_plot - frm_st_idx, 0, 0, ref_ant, :])
ax3.set_xlabel('Samples')
# plt.show()
print("********************* ******************** *********************\n")
return csi, m_filt, sf_start, cmpx_pilots, k_lts, n_lts
# add frame_start for plot indexing!
def frame_sanity(self, match_filt, k_lts, n_lts, st_frame = 0, frame_to_plot = 0, plt_ant=0, cp=16):
"""
Creates a map of the frames per antenna. 3 categories: Good frames, bad frames, probably partial frames.
Good frames are those where all k_lts peaks are present and spaced n_lts samples apart.
Bad frames are those with random peaks.
Potentially partial frames are those with some peaks at the right positions.
This is a random event: some frames may accidentally have peaks at the right places.
First the largest peak is detected, and peaks at +1/-1 (probably due to multipath) and +CP/-CP samples are cleared out.
Then, the positions of the largest k_lts peaks are determined.
Finally, the function checks whether these k_lts peaks are at the correct n_lts offsets.
Disclaimer: This function is good only for a high-SNR scenario!
"""
debug = False
dtct_eal_tx = True # Detect early transmission: further processing if this is desired
n_frame = match_filt.shape[0] # no. of captured frames
n_cell = match_filt.shape[1] # no. of cells
n_ue = match_filt.shape[2] # no. of UEs
n_ant = match_filt.shape[3] # no. of BS antennas
n_corr = match_filt.shape[4] # no. of corr. samples
if debug:
print("frame_sanity(): n_frame = {}, n_cell = {}, n_ue = {}, n_ant = {}, n_corr = {}, k_lts = {}".format(
n_frame, n_cell, n_ue, n_ant, n_corr, k_lts) )
# clean up the matched filter of extra peaks:
mf_amax = np.argmax(match_filt, axis = -1)
base_arr = np.arange(0,k_lts*n_lts, n_lts)
for i in range(n_frame):
for j in range(n_cell):
for k in range(n_ue):
for l in range(n_ant):
mfa = mf_amax[i,j,k,l]
# NB: addition: try to detect early packets: TEST it!
if dtct_eal_tx:
sim_thrsh = 0.95 # similarity threshold between two consecutive peaks
for ik in range(k_lts):
mf_prev = match_filt[i,j,k,l, (mfa - n_lts) if (mfa - n_lts) >= 0 else 0]
if 1 - np.abs(match_filt[i,j,k,l, mfa] - mf_prev)/match_filt[i,j,k,l, mfa] >= sim_thrsh:
mfa = (mfa - n_lts) if (mfa - n_lts) >= 0 else 0
else:
break
# NB: addition: Clean everything right of the largest peak.
match_filt[i,j,k,l, mfa+1:] = 0 # we don't care about the peaks after the largest.
# misleading peaks seem to appear at argmax -1/+1, at argmax +CP (-1/+1 around it), and at argmax +29/+30
for m in range(base_arr.shape[0]):
adj_idx1 = (mfa - 1) - base_arr[m]
adj_idx2 = (mfa + 1) - base_arr[m]
cp_idx1 = (mfa + cp) - base_arr[m]
cp_idx2 = (mfa + 1 + cp) - base_arr[m]
cp_idx3 = (mfa + -1 + cp) - base_arr[m]
idx_30 = (mfa + 30) - base_arr[m]
idx_29 = (mfa + 29) - base_arr[m]
if adj_idx1 >= 0 and adj_idx2 >=0 and adj_idx2 < n_corr:
match_filt[i,j,k,l, adj_idx1 ] = 0
match_filt[i,j,k,l, adj_idx2 ] = 0
if (cp_idx1 >=0) and (cp_idx1 < n_corr) and (cp_idx2 < n_corr) and (cp_idx3 < n_corr):
match_filt[i,j,k,l, cp_idx1 ] = 0
match_filt[i,j,k,l, cp_idx2 ] = 0
match_filt[i,j,k,l, cp_idx3 ] = 0
if (idx_30 >=0) and (idx_30 < n_corr) and (idx_29 >=0) and (idx_29 < n_corr):
match_filt[i,j,k,l,idx_30] = 0
match_filt[i,j,k,l,idx_29] = 0
# get the k_lts largest peaks and their position
k_max = np.sort(match_filt, axis = -1)[:,:,:,:, -k_lts:]
k_amax =np.argsort(match_filt, axis = -1)[:,:,:,:, -k_lts:]
# If the frame is good, the largest peak is in the last place of k_amax
lst_pk_idx = np.expand_dims(k_amax[:,:,:,:,-1], axis = 4)
lst_pk_idx = np.tile(lst_pk_idx, (1,1,1,1,base_arr.shape[0]))
# create an array with indices n_lts apart from each other relative to lst_pk_idx
pk_idx = lst_pk_idx - np.tile(base_arr[::-1], (n_frame, n_cell, n_ue, n_ant,1))
# subtract. In the case of a good frame there should only be zeros in every position
idx_diff = k_amax - pk_idx
frame_map = (idx_diff == 0).astype(int)
# count the 0 and non-zero elements and reshape to n_frame-by-n_ant
frame_map = np.sum(frame_map, axis =-1)
#NB:
zetas = frame_map*n_lts
f_st = mf_amax - zetas
if debug:
print("f_st = {}".format(f_st[frame_to_plot - st_frame,0,0,:]))
print("frame_sanity(): Shape of k_max.shape = {}, k_amax.shape = {}, lst_pk_idx.shape = {}".format(
k_max.shape, k_amax.shape, lst_pk_idx.shape) )
print("frame_sanity(): k_amax = {}".format(k_amax))
print("frame_sanity(): frame_map.shape = {}\n".format(frame_map.shape))
print(k_amax[frame_to_plot - st_frame,0,0,plt_ant,:])
print(idx_diff[frame_to_plot - st_frame,0,0,plt_ant,:])
frame_map[frame_map == 1] = -1
frame_map[frame_map >= (k_lts -1)] = 1
frame_map[frame_map > 1] = 0
if debug:
print("frame_sanity(): frame_map = \n{}".format(frame_map))
print(frame_to_plot - st_frame)
#print results:
n_rf = frame_map.size
n_gf = frame_map[frame_map == 1].size
n_bf = frame_map[frame_map == -1].size
n_pr = frame_map[frame_map == 0].size
print("===================== frame_sanity(): frame status: ============")
print("Out of total {} received frames: \nGood frames: {}\nBad frames: {}\nProbably Partially received or corrupt: {}".format(
n_rf, n_gf, n_bf, n_pr,))
print("===================== ============================= ============")
return match_filt, frame_map, f_st
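# Hedged sketch (not part of the original file): the intended end-to-end flow
# of this class. The filename is hypothetical; attribute names follow
# get_metadata() above.
def _example_hdf5_flow(filename='logs/trace.hdf5'):
    hd = hdf5_lib(filename, n_frames_to_inspect=2000, n_fr_insp_st=0)
    hd.open_hdf5()
    hd.get_data()                 # fills hd.pilot_samples / hd.uplink_samples
    meta = hd.get_metadata()      # python dict of the HDF5 attributes
    csi, m_filt, sf_start, cmpx_pilots, k_lts, n_lts = hd.csi_from_pilots(
        hd.pilot_samples, z_padding=150,
        fft_size=int(meta['FFT_SIZE']), cp=int(meta['CP_LEN']))
    match_filt, frame_map, f_st = hd.frame_sanity(m_filt, k_lts, n_lts,
                                                  st_frame=hd.n_frm_st)
    return csi, frame_map, f_st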
#!/usr/bin/python3
"""
hdfDump.py
Plotting from HDF5 file
Script to analyze recorded hdf5 file from channel sounding (see Sounder/).
Usage format is:
./hdfDump.py <hdf5_file_name> <frame_to_plot (optional, default=100)>
Example:
./hdfDump.py ../Sounder/logs/test-hdf5.py 150
Author(s):
C. Nicolas Barati: nicobarati@rice.edu
Oscar Bejarano: obejarano@rice.edu
Rahman Doost-Mohammady: doost@rice.edu
---------------------------------------------------------------------
Copyright © 2018-2019. Rice University.
RENEW OPEN SOURCE LICENSE: http://renew-wireless.org/license
---------------------------------------------------------------------
"""
import sys
import numpy as np
import h5py
import matplotlib.pyplot as plt
import collections
import time
from optparse import OptionParser
from channel_analysis import *
do_corr = False
# add frame_start for plot indexing!
def frame_sanity(match_filt, k_lts, n_lts, st_frame = 0, frame_to_plot = 0, plt_ant=0, cp=16):
"""
Creates a map of the frames per antenna. 3 categories: Good frames, bad frames, probably partial frames.
Good frames are those where all k_lts peaks are present and spaced n_lts samples apart.
Bad frames are those with random peaks.
Potentially partial frames are those with some peaks at the right positions.
This is a random event: some frames may accidentally have peaks at the right places.
First the largest peak is detected, and peaks at +1/-1 (probably due to multipath) and +CP/-CP samples are cleared out.
Then, the positions of the largest k_lts peaks are determined.
Finally, the function checks whether these k_lts peaks are at the correct n_lts offsets.
Disclaimer: This function is good only for a high-SNR scenario!
"""
debug = False
dtct_eal_tx = True # Detect early transmission: further processing if this is desired
n_frame = match_filt.shape[0] # no. of captured frames
n_cell = match_filt.shape[1] # no. of cells
n_ue = match_filt.shape[2] # no. of UEs
n_ant = match_filt.shape[3] # no. of BS antennas
n_corr = match_filt.shape[4] # no. of corr. samples
if debug:
print("frame_sanity(): n_frame = {}, n_cell = {}, n_ue = {}, n_ant = {}, n_corr = {}, k_lts = {}".format(
n_frame, n_cell, n_ue, n_ant, n_corr, k_lts) )
# clean up the matched filter of extra peaks:
mf_amax = np.argmax(match_filt, axis = -1)
base_arr = np.arange(0,k_lts*n_lts, n_lts)
for i in range(n_frame):
for j in range(n_cell):
for k in range(n_ue):
for l in range(n_ant):
mfa = mf_amax[i,j,k,l]
# NB: addition: try to detect early packets: TEST it!
if dtct_eal_tx:
sim_thrsh = 0.95 # similarity threshold between two consecutive peaks
for ik in range(k_lts):
mf_prev = match_filt[i,j,k,l, (mfa - n_lts) if (mfa - n_lts) >= 0 else 0]
if 1 - np.abs(match_filt[i,j,k,l, mfa] - mf_prev)/match_filt[i,j,k,l, mfa] >= sim_thrsh:
mfa = (mfa - n_lts) if (mfa - n_lts) >= 0 else 0
else:
break
# NB: addition: Clean everything right of the largest peak.
match_filt[i,j,k,l, mfa+1:] = 0 # we don't care about the peaks after the largest.
# misleading peaks seem to appear at argmax -1/+1, at argmax +CP (-1/+1 around it), and at argmax +29/+30
for m in range(base_arr.shape[0]):
adj_idx1 = (mfa - 1) - base_arr[m]
adj_idx2 = (mfa + 1) - base_arr[m]
cp_idx1 = (mfa + cp) - base_arr[m]
cp_idx2 = (mfa + 1 + cp) - base_arr[m]
cp_idx3 = (mfa + -1 + cp) - base_arr[m]
idx_30 = (mfa + 30) - base_arr[m]
idx_29 = (mfa + 29) - base_arr[m]
if adj_idx1 >= 0 and adj_idx2 >=0 and adj_idx2 < n_corr:
match_filt[i,j,k,l, adj_idx1 ] = 0
match_filt[i,j,k,l, adj_idx2 ] = 0
if (cp_idx1 >=0) and (cp_idx1 < n_corr) and (cp_idx2 < n_corr) and (cp_idx3 < n_corr):
match_filt[i,j,k,l, cp_idx1 ] = 0
match_filt[i,j,k,l, cp_idx2 ] = 0
match_filt[i,j,k,l, cp_idx3 ] = 0
if (idx_30 >=0) and (idx_30 < n_corr) and (idx_29 >=0) and (idx_29 < n_corr):
match_filt[i,j,k,l,idx_30] = 0
match_filt[i,j,k,l,idx_29] = 0
# get the k_lts largest peaks and their position
k_max = np.sort(match_filt, axis = -1)[:,:,:,:, -k_lts:]
k_amax =np.argsort(match_filt, axis = -1)[:,:,:,:, -k_lts:]
# If the frame is good, the largest peak is in the last place of k_amax
lst_pk_idx = np.expand_dims(k_amax[:,:,:,:,-1], axis = 4)
lst_pk_idx = np.tile(lst_pk_idx, (1,1,1,1,base_arr.shape[0]))
# create an array with indices n_lts apart from each other relative to lst_pk_idx
pk_idx = lst_pk_idx - np.tile(base_arr[::-1], (n_frame, n_cell, n_ue, n_ant,1))
# subtract. In the case of a good frame there should only be zeros in every position
idx_diff = k_amax - pk_idx
frame_map = (idx_diff == 0).astype(int)
# count the 0 and non-zero elements and reshape to n_frame-by-n_ant
frame_map = np.sum(frame_map, axis =-1)
#NB:
zetas = frame_map*n_lts
f_st = mf_amax - zetas
if debug:
print("f_st = {}".format(f_st[frame_to_plot - st_frame,0,0,:]))
print("frame_sanity(): Shape of k_max.shape = {}, k_amax.shape = {}, lst_pk_idx.shape = {}".format(
k_max.shape, k_amax.shape, lst_pk_idx.shape) )
print("frame_sanity(): k_amax = {}".format(k_amax))
print("frame_sanity(): frame_map.shape = {}\n".format(frame_map.shape))
print(k_amax[frame_to_plot - st_frame,0,0,plt_ant,:])
print(idx_diff[frame_to_plot - st_frame,0,0,plt_ant,:])
frame_map[frame_map == 1] = -1
frame_map[frame_map >= (k_lts -1)] = 1
frame_map[frame_map > 1] = 0
if debug:
print("frame_sanity(): frame_map = \n{}".format(frame_map))
print(frame_to_plot - st_frame)
#print results:
n_rf = frame_map.size
n_gf = frame_map[frame_map == 1].size
n_bf = frame_map[frame_map == -1].size
n_pr = frame_map[frame_map == 0].size
print("===================== frame_sanity(): frame status: ============")
print("Out of total {} received frames: \nGood frames: {}\nBad frames: {}\nProbably Partially received or corrupt: {}".format(
n_rf, n_gf, n_bf, n_pr,))
print("===================== ============================= ============")
return match_filt, frame_map, f_st
class hdfDump:
def __init__(self, filename, n_frames_to_inspect=0, n_fr_insp_st = 0):
self.h5file = None
self.filename = filename
self.h5struct = []
self.data = []
self.metadata = {}
self.samples = {}
self.n_frm_st = n_fr_insp_st # index of the first frame to inspect
self.n_frm_end = self.n_frm_st + n_frames_to_inspect # index of last frame in the range of n_frames_to_inspect
def get_hdf5(self):
"""
Get the most recent log file, open it if necessary.
"""
if (not self.h5file) or (self.filename != self.h5file.filename):
# if it's closed, e.g. for the C version, open it
print('Opening %s...' % self.filename)
try:
self.h5file = h5py.File(self.filename, 'r')
except OSError:
print("File not found. Terminating program now")
sys.exit(0)
# return self.h5file
def parse_hdf5(self):
"""
Parse file to retrieve metadata and data.
HDF5 file has been written in DataRecorder.cpp (in Sounder folder)
Output:
Data (hierarchy):
-Path
-Pilot_Samples
--Samples
-UplinkData
--Samples
-Attributes
{FREQ, RATE, SYMBOL_LEN_NO_PAD, PREFIX_LEN, POSTFIX_LEN, SYMBOL_LEN, FFT_SIZE, CP_LEN,
BEACON_SEQ_TYPE, PILOT_SEQ_TYPE, BS_HUB_ID, BS_SDR_NUM_PER_CELL, BS_SDR_ID, BS_NUM_CELLS,
BS_CH_PER_RADIO, BS_FRAME_SCHED, BS_RX_GAIN_A, BS_TX_GAIN_A, BS_RX_GAIN_B, BS_TX_GAIN_B,
BS_BEAMSWEEP, BS_BEACON_ANT, BS_NUM_ANT, BS_FRAME_LEN, CL_NUM, CL_CH_PER_RADIO, CL_AGC_EN,
CL_RX_GAIN_A, CL_TX_GAIN_A, CL_RX_GAIN_B, CL_TX_GAIN_B, CL_FRAME_SCHED, CL_SDR_ID,
CL_MODULATION, UL_SYMS}
Dimensions of input sample data (as shown in DataRecorder.cpp in Sounder):
- Pilots
dims_pilot[0] = maxFrame
dims_pilot[1] = number of cells
dims_pilot[2] = number of UEs
dims_pilot[3] = number of antennas (at BS)
dims_pilot[4] = samples per symbol * 2 (IQ)
- Uplink Data
dims_data[0] = maxFrame
dims_data[1] = number of cells
dims_data[2] = uplink symbols per frame
dims_data[3] = number of antennas (at BS)
dims_data[4] = samples per symbol * 2 (IQ)
"""
g = self.h5file
prefix = ''
self.data = collections.defaultdict(lambda: collections.defaultdict(dict))
for key in g.keys():
item = g[key]
path = '{}/{}'.format(prefix, key)
keys = [i for i in item.keys()]
if isinstance(item[keys[0]], h5py.Dataset): # test for dataset
# Path
self.data['path'] = path
# Attributes
for attribute in item.attrs.keys():
# Store attributes
self.data['Attributes'][attribute] = item.attrs[attribute]
# Pilot and UplinkData Samples
for k in keys:
if not isinstance(item[k], h5py.Group):
# dataset = np.array(item[k].value) # dataset.value has been deprecated. dataset[()] instead
dtst_ptr = item[(k)]
n_frm = np.abs(self.n_frm_end - self.n_frm_st)
# check if the number of requested frames and the upper and lower bounds make sense
# also check that end_frame > start_frame:
if (n_frm > 0 and self.n_frm_st >= 0 and (self.n_frm_end >= 0 and self.n_frm_end > self.n_frm_st)):
dataset = np.array(dtst_ptr[self.n_frm_st:self.n_frm_end, ...])
else:
# if the previous check is False, do as usual:
print("WARNING: No frames_to_inspect given and/or boundaries don't make sense. Will process the whole dataset.")
dataset = np.array(dtst_ptr)
self.n_frm_end = self.n_frm_st + dataset.shape[0]
if type(dataset) is np.ndarray:
if dataset.size != 0:
if type(dataset[0]) is np.bytes_:
dataset = [a.decode('ascii') for a in dataset]
# Store samples
self.data[k]['Samples'] = dataset
# for attribute in item[k].attrs.keys():
# # Store attributes
# self.data[k]['Attributes'][attribute] = item[k].attrs[attribute]
else:
raise Exception("No datasets found")
return self.data
def get_attributes(self):
# Retrieve attributes, translate into python dictionary
data = self.data
# Check if attributes are there
client_id = data['Attributes']['CL_SDR_ID'].astype(str)
if client_id.size == 0:
cl_present = False
print('Client information not present. It is likely the client was run separately')
else:
cl_present = True
bs_id = data['Attributes']['BS_SDR_ID'].astype(str)
if bs_id.size == 0:
raise Exception('Base Station information not present')
# Data cleanup
# In OFDM_DATA_CLx and OFDM_PILOT, we have stored both real and imaginary in same vector
# (i.e., RE1,IM1,RE2,IM2,...,REm,IMm)
# Pilots
pilot_vec = data['Attributes']['OFDM_PILOT']
# some_list[start:stop:step]
I = pilot_vec[0::2]
Q = pilot_vec[1::2]
pilot_complex = I + Q * 1j
if cl_present:
# Time-domain OFDM data
num_cl = np.squeeze(data['Attributes']['CL_NUM'])
ofdm_data_time = [] # np.zeros((num_cl, 320)).astype(complex)
for clIdx in range(num_cl):
this_str = 'OFDM_DATA_TIME_CL' + str(clIdx)
data_per_cl = np.squeeze(data['Attributes'][this_str])
# some_list[start:stop:step]
if np.any(data_per_cl):
# If data present
I = np.double(data_per_cl[0::2])
Q = np.double(data_per_cl[1::2])
IQ = I + Q * 1j
ofdm_data_time.append(IQ)
# Frequency-domain OFDM data
ofdm_data = [] # np.zeros((num_cl, 320)).astype(complex)
for clIdx in range(num_cl):
this_str = 'OFDM_DATA_CL' + str(clIdx)
data_per_cl = np.squeeze(data['Attributes'][this_str])
# some_list[start:stop:step]
if np.any(data_per_cl):
# If data present
I = np.double(data_per_cl[0::2])
Q = np.double(data_per_cl[1::2])
IQ = I + Q * 1j
ofdm_data.append(IQ)
# Populate dictionary
self.metadata = {
'CLIENT_PRESENT': cl_present,
'FREQ': np.squeeze(data['Attributes']['FREQ']),
'RATE': np.squeeze(data['Attributes']['RATE']),
'SYM_LEN_NO_PAD': np.squeeze(data['Attributes']['SYMBOL_LEN_NO_PAD']),
'PREFIX_LEN': np.squeeze(data['Attributes']['PREFIX_LEN']),
'POSTFIX_LEN': np.squeeze(data['Attributes']['POSTFIX_LEN']),
'SYM_LEN': np.squeeze(data['Attributes']['SYMBOL_LEN']),
'FFT_SIZE': np.squeeze(data['Attributes']['FFT_SIZE']),
'CP_LEN': np.squeeze(data['Attributes']['CP_LEN']),
'BEACON_SEQ': np.squeeze(data['Attributes']['BEACON_SEQ_TYPE']).astype(str),
'PILOT_SEQ': np.squeeze(data['Attributes']['PILOT_SEQ_TYPE']).astype(str),
'BS_HUB_ID': np.squeeze(data['Attributes']['BS_HUB_ID']).astype(str),
'BS_SDR_NUM_PER_CELL': np.squeeze(data['Attributes']['BS_SDR_NUM_PER_CELL']).astype(int),
'BS_SDR_ID': np.squeeze(data['Attributes']['BS_SDR_ID']).astype(str),
'BS_NUM_CELLS': np.squeeze(data['Attributes']['BS_NUM_CELLS']),
'BS_CH_PER_RADIO': np.squeeze(data['Attributes']['BS_CH_PER_RADIO']),
'BS_FRAME_SCHED': np.squeeze(data['Attributes']['BS_FRAME_SCHED']).astype(str),
'BS_RX_GAIN_A': np.squeeze(data['Attributes']['BS_RX_GAIN_A']),
'BS_TX_GAIN_A': np.squeeze(data['Attributes']['BS_TX_GAIN_A']),
'BS_RX_GAIN_B': np.squeeze(data['Attributes']['BS_RX_GAIN_B']),
'BS_TX_GAIN_B': np.squeeze(data['Attributes']['BS_TX_GAIN_B']),
'BS_BEAMSWEEP': np.squeeze(data['Attributes']['BS_BEAMSWEEP']),
'BS_BEACON_ANT': np.squeeze(data['Attributes']['BS_BEACON_ANT']),
'BS_NUM_ANT': np.squeeze(data['Attributes']['BS_NUM_ANT']),
'BS_FRAME_LEN': np.squeeze(data['Attributes']['BS_FRAME_LEN']),
'NUM_CLIENTS': np.squeeze(data['Attributes']['CL_NUM']),
'CL_CH_PER_RADIO': np.squeeze(data['Attributes']['CL_CH_PER_RADIO']),
'CL_AGC_EN': np.squeeze(data['Attributes']['CL_AGC_EN']),
'CL_RX_GAIN_A': np.squeeze(data['Attributes']['CL_RX_GAIN_A']),
'CL_TX_GAIN_A': np.squeeze(data['Attributes']['CL_TX_GAIN_A']),
'CL_RX_GAIN_B': np.squeeze(data['Attributes']['CL_RX_GAIN_B']),
'CL_TX_GAIN_B': np.squeeze(data['Attributes']['CL_TX_GAIN_B']),
'CL_FRAME_SCHED': np.squeeze(data['Attributes']['CL_FRAME_SCHED']).astype(str),
'CL_SDR_ID': np.squeeze(data['Attributes']['CL_SDR_ID']).astype(str),
'CL_MODULATION': np.squeeze(data['Attributes']['CL_MODULATION']).astype(str),
'UL_SYMS': np.squeeze(data['Attributes']['UL_SYMS']),
'OFDM_DATA_SC': np.squeeze(data['Attributes']['OFDM_DATA_SC']),
'OFDM_PILOT_SC': np.squeeze(data['Attributes']['OFDM_PILOT_SC']),
'OFDM_PILOT_SC_VALS': np.squeeze(data['Attributes']['OFDM_PILOT_SC_VALS']),
'OFDM_PILOT_TIME': pilot_complex,
'OFDM_DATA': ofdm_data,
'OFDM_DATA_TIME': ofdm_data_time,
}
else:
# Client not present
# Populate dictionary
self.metadata = {
'CLIENT_PRESENT': cl_present,
'FREQ': np.squeeze(data['Attributes']['FREQ']),
'RATE': np.squeeze(data['Attributes']['RATE']),
'SYM_LEN_NO_PAD': np.squeeze(data['Attributes']['SYMBOL_LEN_NO_PAD']),
'PREFIX_LEN': np.squeeze(data['Attributes']['PREFIX_LEN']),
'POSTFIX_LEN': np.squeeze(data['Attributes']['POSTFIX_LEN']),
'SYM_LEN': np.squeeze(data['Attributes']['SYMBOL_LEN']),
'FFT_SIZE': np.squeeze(data['Attributes']['FFT_SIZE']),
'CP_LEN': np.squeeze(data['Attributes']['CP_LEN']),