Commit 797e8483 authored by Rahman's avatar Rahman

moved samps2csi to hdf5_lib

parent 14e3ea80
......@@ -24,89 +24,6 @@ from scipy import signal
import matplotlib.pyplot as plt
from generate_sequence import *
# all data, n_UE, pilots/frame
#csi, samps = samps2csi(samples, num_cl_tmp, symbol_length, fft_size=64, offset=offset, bound=0, cp=0)
def samps2csi(samps, num_users, samps_per_user=224, fft_size=64, offset=0, bound=94, cp=0):
    """Convert an Argos HDF5 log of raw IQ samples into CSI.

    Assumes 802.11-style LTS symbols were used for trace collection.

    Args:
        samps: The h5py or numpy array containing the raw IQ samples,
            dims = [Frame, Cell, User, Antenna, Sample] (last axis holds
            interleaved int16 I/Q pairs).
        num_users: Number of users used in trace collection. (Last 'user' is noise.)
        samps_per_user: Number of samples allocated to each user in each frame.
        fft_size: FFT length per LTS repetition. LTS equalization is applied
            only when fft_size == 64 (the 802.11 LTS length).
        offset: Sample offset into each user's slot before the first symbol.
        bound: Trailing samples excluded when counting LTS repetitions.
        cp: Cyclic-prefix length per repetition.

    Returns:
        csi: Complex numpy array with [Frame, Cell, User, Pilot Rep, Antenna, Subcarrier]
        iq: Complex numpy array of raw IQ samples [Frame, Cell, User, Pilot Rep, Antenna, samples]

    Example:
        h5log = h5py.File(filename,'r')
        csi,iq = samps2csi(h5log['Pilot_Samples'], h5log.attrs['num_mob_ant']+1, h5log.attrs['samples_per_user'])
    """
    debug = False
    chunkstart = time.time()
    # Split the flat sample axis into per-user slots with interleaved I/Q pairs.
    usersamps = np.reshape(
        samps, (samps.shape[0], samps.shape[1], num_users, samps.shape[3], samps_per_user, 2))
    # Number of LTS repetitions that fit in a slot, capped at 2 (802.11 sends two LTSs).
    pilot_rep = min([(samps_per_user-bound)//(fft_size+cp), 2])
    iq = np.empty((samps.shape[0], samps.shape[1], num_users,
                   samps.shape[3], pilot_rep, fft_size), dtype='complex64')
    if debug:
        # BUGFIX: the original print referenced an undefined name `nbat`,
        # raising NameError whenever debug was enabled; report pilot_rep instead.
        print("chunkstart = {}, usersamps.shape = {}, samps.shape = {}, samps_per_user = {}, pilot_rep = {}, iq.shape = {}".format(
            chunkstart, usersamps.shape, samps.shape, samps_per_user, pilot_rep, iq.shape))
    for i in range(pilot_rep):  # first symbols (assumed LTS) give separate estimates
        # Combine interleaved int16 I/Q into complex samples, scaled by 2**-15.
        iq[:, :, :, :, i, :] = (usersamps[:, :, :, :, offset + cp + i*fft_size:offset+cp+(i+1)*fft_size, 0] +
                                usersamps[:, :, :, :, offset + cp + i*fft_size:offset+cp+(i+1)*fft_size, 1]*1j)*2**-15
    iq = iq.swapaxes(3, 4)  # -> [Frame, Cell, User, Pilot Rep, Antenna, Sample]
    if debug:
        print("iq.shape after axes swapping: {}".format(iq.shape))
    fftstart = time.time()
    # Compute the spectrum once; the original computed the identical FFT twice
    # (as `pre_csi` and again for `csi`).
    # BUGFIX: the original returned uninitialized np.empty garbage in `csi`
    # whenever fft_size != 64; the raw shifted spectrum is now returned instead.
    csi = np.fft.fftshift(np.fft.fft(iq, fft_size, 5), 5)
    if fft_size == 64:
        # Retrieve the frequency-domain LTS sequence and equalize against it.
        _, lts_freq = generate_training_seq(
            preamble_type='lts', seq_length=[], cp=32, upsample=1, reps=[])
        if debug:
            print("pre-equalization csi.shape:{} lts_freq.shape: {}".format(
                csi.shape, np.asarray(lts_freq).shape))
        csi = csi * lts_freq
    endtime = time.time()
    if debug:
        print("chunk time: %f fft time: %f" %
              (fftstart - chunkstart, endtime - fftstart))
    # Remove guard bands and the DC subcarrier (802.11 64-point layout).
    # NOTE(review): these indices assume fft_size >= 64 — confirm for other sizes.
    csi = np.delete(csi, [0, 1, 2, 3, 4, 5, 32, 59, 60, 61, 62, 63], 5)
    return csi, iq
def samps2csi_large(samps, num_users, samps_per_user=224, offset=47, chunk_size=1000):
    """Chunked wrapper around samps2csi to speed up large logs via data locality.

    chunk_size may need to be adjusted based on your machine's memory.

    Args:
        samps: Raw IQ sample array, frames on axis 0 (see samps2csi).
        num_users: Number of users used in trace collection.
        samps_per_user: Number of samples allocated to each user per frame.
        offset: Sample offset forwarded to samps2csi.
        chunk_size: Number of frames processed per samps2csi call.

    Returns:
        (csi, iq) tuple with the same layout samps2csi produces.
    """
    if samps.shape[0] > chunk_size:
        # Rather than memmap, just rely on swap — should be just as fast.
        # Process the first chunk up front so the output buffers can be sized
        # from what samps2csi actually returns.
        # BUGFIX: the original preallocated stale hard-coded 5-D shapes
        # (..., 52)/(..., 64) that do not match samps2csi's 6-D output, and it
        # silently dropped `offset` on the chunked path (only the small-log
        # branch forwarded it).
        first_csi, first_iq = samps2csi(
            samps[:chunk_size, :, :, :], num_users,
            samps_per_user=samps_per_user, offset=offset)
        csi = np.empty((samps.shape[0],) + first_csi.shape[1:], dtype='complex64')
        iq = np.empty((samps.shape[0],) + first_iq.shape[1:], dtype='complex64')
        csi[:chunk_size] = first_csi
        iq[:chunk_size] = first_iq
        chunk_num = samps.shape[0] // chunk_size
        for i in range(1, chunk_num):
            csi[i*chunk_size:(i+1)*chunk_size], iq[i*chunk_size:(i+1)*chunk_size] = samps2csi(
                samps[i*chunk_size:(i+1)*chunk_size, :, :, :], num_users,
                samps_per_user=samps_per_user, offset=offset)
        # Only process the tail when frames are not an exact multiple of
        # chunk_size (the original always issued a final, possibly empty, call).
        if chunk_num * chunk_size < samps.shape[0]:
            csi[chunk_num*chunk_size:], iq[chunk_num*chunk_size:] = samps2csi(
                samps[chunk_num*chunk_size:, :, :, :], num_users,
                samps_per_user=samps_per_user, offset=offset)
    else:
        csi, iq = samps2csi(
            samps, num_users, samps_per_user=samps_per_user, offset=offset)
    return csi, iq
def calCond(userCSI):
"""Calculate the standard matrix condition number.
......
......@@ -70,7 +70,7 @@ class hdf5_lib:
#compute CSI for each user and get a nice numpy array
#Returns csi with Frame, User, LTS (there are 2), BS ant, Subcarrier
#also, iq samples nic(Last 'user' is noise.)ely chunked out, same dims, but subcarrier is sample.
csi,iq = samps2csi(pilot_samples, num_cl, symbol_len, offset=offset)
csi,iq = self.samps2csi(pilot_samples, num_cl, symbol_len, offset=offset)
# create hdf5 file to dump csi to
h5f = h5py.File(filename[:-5]+'-csi.hdf5', 'w')
......@@ -189,6 +189,86 @@ class hdf5_lib:
return self.metadata
def samps2csi(self, samps, num_users, samps_per_user=224, fft_size=64, offset=0, bound=94, cp=0):
    """Convert an Argos HDF5 log of raw IQ samples into CSI.

    Assumes 802.11-style LTS symbols were used for trace collection.

    Args:
        samps: The h5py or numpy array containing the raw IQ samples,
            dims = [Frame, Cell, User, Antenna, Sample] (last axis holds
            interleaved int16 I/Q pairs).
        num_users: Number of users used in trace collection. (Last 'user' is noise.)
        samps_per_user: Number of samples allocated to each user in each frame.
        fft_size: FFT length per LTS repetition. LTS equalization is applied
            only when fft_size == 64 (the 802.11 LTS length).
        offset: Sample offset into each user's slot before the first symbol.
        bound: Trailing samples excluded when counting LTS repetitions.
        cp: Cyclic-prefix length per repetition.

    Returns:
        csi: Complex numpy array with [Frame, Cell, User, Pilot Rep, Antenna, Subcarrier]
        iq: Complex numpy array of raw IQ samples [Frame, Cell, User, Pilot Rep, Antenna, samples]

    Example:
        h5log = h5py.File(filename,'r')
        csi,iq = samps2csi(h5log['Pilot_Samples'], h5log.attrs['num_mob_ant']+1, h5log.attrs['samples_per_user'])
    """
    debug = False
    chunkstart = time.time()
    # Split the flat sample axis into per-user slots with interleaved I/Q pairs.
    usersamps = np.reshape(
        samps, (samps.shape[0], samps.shape[1], num_users, samps.shape[3], samps_per_user, 2))
    # Number of LTS repetitions that fit in a slot, capped at 2 (802.11 sends two LTSs).
    pilot_rep = min([(samps_per_user-bound)//(fft_size+cp), 2])
    iq = np.empty((samps.shape[0], samps.shape[1], num_users,
                   samps.shape[3], pilot_rep, fft_size), dtype='complex64')
    if debug:
        # BUGFIX: the original print referenced an undefined name `nbat`,
        # raising NameError whenever debug was enabled; report pilot_rep instead.
        print("chunkstart = {}, usersamps.shape = {}, samps.shape = {}, samps_per_user = {}, pilot_rep = {}, iq.shape = {}".format(
            chunkstart, usersamps.shape, samps.shape, samps_per_user, pilot_rep, iq.shape))
    for i in range(pilot_rep):  # first symbols (assumed LTS) give separate estimates
        # Combine interleaved int16 I/Q into complex samples, scaled by 2**-15.
        iq[:, :, :, :, i, :] = (usersamps[:, :, :, :, offset + cp + i*fft_size:offset+cp+(i+1)*fft_size, 0] +
                                usersamps[:, :, :, :, offset + cp + i*fft_size:offset+cp+(i+1)*fft_size, 1]*1j)*2**-15
    iq = iq.swapaxes(3, 4)  # -> [Frame, Cell, User, Pilot Rep, Antenna, Sample]
    if debug:
        print("iq.shape after axes swapping: {}".format(iq.shape))
    fftstart = time.time()
    # Compute the spectrum once; the original computed the identical FFT twice
    # (as `pre_csi` and again for `csi`).
    # BUGFIX: the original returned uninitialized np.empty garbage in `csi`
    # whenever fft_size != 64; the raw shifted spectrum is now returned instead.
    csi = np.fft.fftshift(np.fft.fft(iq, fft_size, 5), 5)
    if fft_size == 64:
        # Retrieve the frequency-domain LTS sequence and equalize against it.
        _, lts_freq = generate_training_seq(
            preamble_type='lts', seq_length=[], cp=32, upsample=1, reps=[])
        if debug:
            print("pre-equalization csi.shape:{} lts_freq.shape: {}".format(
                csi.shape, np.asarray(lts_freq).shape))
        csi = csi * lts_freq
    endtime = time.time()
    if debug:
        print("chunk time: %f fft time: %f" %
              (fftstart - chunkstart, endtime - fftstart))
    # Remove guard bands and the DC subcarrier (802.11 64-point layout).
    # NOTE(review): these indices assume fft_size >= 64 — confirm for other sizes.
    csi = np.delete(csi, [0, 1, 2, 3, 4, 5, 32, 59, 60, 61, 62, 63], 5)
    return csi, iq
def samps2csi_large(self, samps, num_users, samps_per_user=224, offset=47, chunk_size=1000):
    """Chunked wrapper around samps2csi to speed up large logs via data locality.

    chunk_size may need to be adjusted based on your machine's memory.

    Args:
        samps: Raw IQ sample array, frames on axis 0 (see samps2csi).
        num_users: Number of users used in trace collection.
        samps_per_user: Number of samples allocated to each user per frame.
        offset: Sample offset forwarded to samps2csi.
        chunk_size: Number of frames processed per samps2csi call.

    Returns:
        (csi, iq) tuple with the same layout samps2csi produces.
    """
    if samps.shape[0] > chunk_size:
        # Rather than memmap, just rely on swap — should be just as fast.
        # Process the first chunk up front so the output buffers can be sized
        # from what samps2csi actually returns.
        # BUGFIX: the original preallocated stale hard-coded 5-D shapes
        # (..., 52)/(..., 64) that do not match samps2csi's 6-D output, and it
        # silently dropped `offset` on the chunked path (only the small-log
        # branch forwarded it).
        first_csi, first_iq = self.samps2csi(
            samps[:chunk_size, :, :, :], num_users,
            samps_per_user=samps_per_user, offset=offset)
        csi = np.empty((samps.shape[0],) + first_csi.shape[1:], dtype='complex64')
        iq = np.empty((samps.shape[0],) + first_iq.shape[1:], dtype='complex64')
        csi[:chunk_size] = first_csi
        iq[:chunk_size] = first_iq
        chunk_num = samps.shape[0] // chunk_size
        for i in range(1, chunk_num):
            csi[i*chunk_size:(i+1)*chunk_size], iq[i*chunk_size:(i+1)*chunk_size] = self.samps2csi(
                samps[i*chunk_size:(i+1)*chunk_size, :, :, :], num_users,
                samps_per_user=samps_per_user, offset=offset)
        # Only process the tail when frames are not an exact multiple of
        # chunk_size (the original always issued a final, possibly empty, call).
        if chunk_num * chunk_size < samps.shape[0]:
            csi[chunk_num*chunk_size:], iq[chunk_num*chunk_size:] = self.samps2csi(
                samps[chunk_num*chunk_size:, :, :, :], num_users,
                samps_per_user=samps_per_user, offset=offset)
    else:
        csi, iq = self.samps2csi(
            samps, num_users, samps_per_user=samps_per_user, offset=offset)
    return csi, iq
def csi_from_pilots(self, pilots_dump, z_padding=150, fft_size=64, cp=16, frm_st_idx=0, frame_to_plot=0, ref_ant=0):
"""
Finds the end of the pilots' frames, finds all the lts indices relative to that.
......
......@@ -80,7 +80,7 @@ def verify_hdf5(hdf5, default_frame=100, ant_i =0, n_frm_st=0, deep_inspect=Fals
if deep_inspect:
csi_from_pilots_start = time.time()
csi_mat, match_filt, sub_fr_strt, cmpx_pilots, k_lts, n_lts = hdf5.csi_from_pilots(
pilot_samples, z_padding, frm_st_idx=n_frm_st, ref_frame=frm_plt, ref_ant=ant_i)
pilot_samples, z_padding, frm_st_idx=n_frm_st, frame_to_plot=frm_plt, ref_ant=ant_i)
csi_from_pilots_end = time.time()
frame_sanity_start = time.time()
......@@ -110,7 +110,7 @@ def verify_hdf5(hdf5, default_frame=100, ant_i =0, n_frm_st=0, deep_inspect=Fals
# CSI: #Frames, #Cell, #Users, #Pilot Rep, #Antennas, #Subcarrier
# For correlation use a fft size of 64
print("*verify_hdf5(): Calling samps2csi with fft_size = 64, offset = {}, bound = cp = 0 *".format(offset))
csi, samps = samps2csi(samples, num_cl_tmp, symbol_length, fft_size=64, offset=offset, bound=0, cp=0)
csi, samps = hdf5.samps2csi(samples, num_cl_tmp, symbol_length, fft_size=64, offset=offset, bound=0, cp=0)
# Correlation (Debug plot useful for checking sync)
amps = np.mean(np.abs(samps[:, 0, 0, 0, 0, :]), axis=1)
......@@ -128,7 +128,7 @@ def verify_hdf5(hdf5, default_frame=100, ant_i =0, n_frm_st=0, deep_inspect=Fals
# For looking at the whole picture, use a fft size of whole symbol_length as fft window (for visualization),
# and no offset
print("*verify_hdf5():Calling samps2csi *AGAIN*(?) with fft_size = symbol_length, no offset*")
csi, samps = samps2csi(samples, num_cl_tmp, symbol_length, fft_size=symbol_length, offset=0, bound=0, cp=0)
csi, samps = hdf5.samps2csi(samples, num_cl_tmp, symbol_length, fft_size=symbol_length, offset=0, bound=0, cp=0)
# Verify default_frame does not exceed max number of collected frames
ref_frame = min(default_frame - n_frm_st, samps.shape[0])
......@@ -270,7 +270,7 @@ def analyze_hdf5(hdf5, frame=10, cell=0, zoom=0, pl=0):
# compute CSI for each user and get a nice numpy array
# Returns csi with Frame, User, LTS (there are 2), BS ant, Subcarrier
#also, iq samples nicely chunked out, same dims, but subcarrier is sample.
csi, _ = samps2csi(pilot_samples, num_cl, symbol_length, offset=offset)
csi, _ = hdf5.samps2csi(pilot_samples, num_cl, symbol_length, offset=offset)
csi = csi[:, cell, :, :, :, :]
# zoom in too look at behavior around peak (and reduce processing time)
if zoom > 0:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment