# code.txt — toy PyTorch semantic-communication pipeline (encoder -> channel -> decoder)
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import math
# Toy corpus used as data A
data_A = [
    "Hello there!",
    "How are you doing?",
    "PyTorch is great.",
    "Machine learning is fascinating.",
    "Let's build something amazing.",
    "Semantic communication matters.",
    "Understanding context is important.",
    "AI is changing the world.",
    "Keep learning and growing.",
    "Innovation drives progress."
]
# Whitespace-tokenise every sentence into words
words = [s.split() for s in data_A]
# Vocabulary: the set of distinct words, frozen into a list
vocab = list({w for s in words for w in s})
vocab_size = len(vocab)
# Bidirectional word <-> index lookup tables
word_to_idx = {w: i for i, w in enumerate(vocab)}
idx_to_word = dict(enumerate(vocab))
# Each sentence as a list of vocabulary indices
numerical_sentences = [[word_to_idx[w] for w in s] for s in words]
# Semantic Encoder: token indices -> per-step LSTM hidden features
class SemanticEncoder(nn.Module):
    """Embed a sequence of token indices and run it through an LSTM.

    forward takes a 1-D LongTensor of shape (seq_len,) and returns the
    LSTM outputs of shape (seq_len, hidden_size).
    """

    def __init__(self, vocab_size, embedding_size, hidden_size):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_size)
        self.lstm = nn.LSTM(embedding_size, hidden_size)

    def forward(self, input_seq):
        # (seq_len,) -> (seq_len, embedding_size) -> (seq_len, hidden_size)
        states, _ = self.lstm(self.embedding(input_seq))
        return states
# Channel Encoder (a simple identity mapping — a placeholder for a real code)
class ChannelEncoder(nn.Module):
    """Pass semantic features through unchanged."""

    def __init__(self, hidden_size):
        super().__init__()
        # nn.Identity has no parameters; kept as a module for symmetry
        self.identity = nn.Identity()

    def forward(self, input_features):
        return self.identity(input_features)
class ChannelDecoder(nn.Module):
    """Channel decoder counterpart: also an identity mapping."""

    def __init__(self, hidden_size):
        super().__init__()
        # parameter-free pass-through, mirrors ChannelEncoder
        self.identity = nn.Identity()

    def forward(self, received_features):
        return self.identity(received_features)
# Physical channel: additive white Gaussian noise at a fixed SNR (in dB)
class PhysicalChannel(nn.Module):
    """Add zero-mean Gaussian noise whose power is set by the SNR.

    The noise power is 10**(-snr/10), i.e. the signal is assumed to
    have unit power.
    """

    def __init__(self, snr):
        super().__init__()
        self.snr = snr  # signal-to-noise ratio in dB

    def forward(self, x):
        # dB -> linear noise power, then standard deviation
        sigma = math.sqrt(10 ** (-self.snr / 10))
        return x + sigma * torch.randn_like(x)
# Semantic Decoder: noisy features -> per-step vocabulary logits
class SemanticDecoder(nn.Module):
    """Decode received features back into vocabulary logits.

    forward takes (seq_len, hidden_size) features and returns
    (seq_len, vocab_size) unnormalised scores.
    """

    def __init__(self, hidden_size, vocab_size):
        super().__init__()
        self.lstm = nn.LSTM(hidden_size, hidden_size)
        self.linear = nn.Linear(hidden_size, vocab_size)

    def forward(self, hidden):
        states, _ = self.lstm(hidden)
        return self.linear(states)
# Instantiate every stage of the pipeline
embedding_size = 64
hidden_size = 128
snr = -10  # dB: a deliberately very noisy channel
semantic_encoder = SemanticEncoder(vocab_size, embedding_size, hidden_size)
channel_encoder = ChannelEncoder(hidden_size)
channel_decoder = ChannelDecoder(hidden_size)
semantic_decoder = SemanticDecoder(hidden_size, vocab_size)
physical_channel = PhysicalChannel(snr)
# Per-token classification loss and a single optimizer over all stages
criterion = nn.CrossEntropyLoss()
trainable_params = (
    list(semantic_encoder.parameters())
    + list(channel_encoder.parameters())
    + list(channel_decoder.parameters())
    + list(semantic_decoder.parameters())
)
optimizer = optim.Adam(trainable_params, lr=0.001)
# Train the model: reconstruction objective — the target equals the input,
# so the pipeline learns to push the sequence through the noisy channel intact.
num_epochs = 100
for epoch in range(num_epochs):
    total_loss = 0.0
    for sentence in numerical_sentences:
        optimizer.zero_grad()
        input_seq = torch.tensor(sentence)  # full index sequence, shape (seq_len,)
        target_seq = torch.tensor(sentence)  # same sequence: autoencoder-style target
        semantic_feature = semantic_encoder(input_seq)
        encoded_features = channel_encoder(semantic_feature)
        # Simulate the channel (add SNR-controlled Gaussian noise)
        received_features = physical_channel(encoded_features)
        decoded_features = channel_decoder(received_features)
        output = semantic_decoder(decoded_features)
        # output: (seq_len, vocab_size) logits vs (seq_len,) class indices
        loss = criterion(output, target_seq)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    # Report the summed (not averaged) per-sentence loss every 10 epochs
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {total_loss:.4f}')
# Evaluate the pipeline on the training sentences.
# Fix: the original evaluation bypassed `physical_channel` and injected an
# ad-hoc fixed-sigma (0.1) noise, so the model was tested under a different
# channel than it was trained on; evaluation now uses the same AWGN channel.
for i, sentence in enumerate(numerical_sentences):
    test_input = torch.tensor(sentence)
    with torch.no_grad():
        semantic_feature = semantic_encoder(test_input)
        encoded_features = channel_encoder(semantic_feature)
        # Simulate the channel exactly as during training
        received_features = physical_channel(encoded_features)
        decoded_features = channel_decoder(received_features)
        output = semantic_decoder(decoded_features)
    # Greedy decoding: most likely word at each position
    predicted_indices = torch.argmax(output, dim=1).tolist()
    predicted_sentence = ' '.join(idx_to_word[idx] for idx in predicted_indices)
    print("Original Sentence:", data_A[i])
    print("Predicted Sentence:", predicted_sentence)