A Coding Implementation of End-to-End Brain Decoding from MEG Signals Using NeuralSet and Deep Learning for Predicting Linguistic Features
# Optimizer: AdamW with mild weight decay; cosine-annealed LR over the full run.
opt = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-4)
sched = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=EPOCHS)
loss_fn = nn.MSELoss()
# Per-epoch history: "tr" = train MSE, "va" = val MSE, "r" = val Pearson r.
hist = {"tr": [], "va": [], "r": []}
def pearson(a, b):
    """Pearson correlation between two flattened tensors.

    Both inputs are mean-centered, then r = cov / (||a|| * ||b||).
    The 1e-8 term guards against division by zero when either input
    is constant (zero norm after centering).
    """
    a, b = a - a.mean(), b - b.mean()
    return (a * b).sum() / (a.norm() * b.norm() + 1e-8)
# Per-epoch metrics table header.
print("\n" + "=" * 64)
print(f"{'Epoch':>5} | {'train':>9} | {'val':>9} | {'val_r':>7}")
print("=" * 64)

for ep in range(EPOCHS):
    # --- Training pass ---
    model.train()
    tr = []
    for batch in train_loader:
        x, y = prep(batch)
        loss = loss_fn(model(x), y)
        opt.zero_grad()
        loss.backward()
        # Clip gradient norm to 1.0 to keep updates stable.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        opt.step()
        tr.append(loss.item())
    sched.step()  # cosine schedule advances once per epoch

    # --- Validation pass (no gradients) ---
    model.eval()
    va, P, T = [], [], []
    with torch.no_grad():
        for batch in val_loader:
            x, y = prep(batch)
            p = model(x)
            va.append(loss_fn(p, y).item())
            P.append(p.cpu())
            T.append(y.cpu())
    P, T = torch.cat(P), torch.cat(T)
    r = pearson(P, T).item()

    hist["tr"].append(np.mean(tr))
    hist["va"].append(np.mean(va))
    hist["r"].append(r)
    print(f"{ep+1:>5d} | {np.mean(tr):>9.4f} | {np.mean(va):>9.4f} | {r:>+7.3f}")
# Final evaluation on the held-out test split (no gradients).
model.eval()
P, T = [], []
with torch.no_grad():
    for batch in test_loader:
        x, y = prep(batch)
        P.append(model(x).cpu())
        T.append(y.cpu())
P, T = torch.cat(P), torch.cat(T)
test_r = pearson(P, T).item()
test_mse = ((P - T) ** 2).mean().item()
print(f"\nTEST | Pearson r = {test_r:+.3f} MSE = {test_mse:.3f}")
print("(Synthetic-MEG signals are random by design — small/zero r is expected.)")
# Diagnostics: (0) loss curves, (1) validation correlation, (2) test scatter.
fig, ax = plt.subplots(1, 3, figsize=(15, 4))

ax[0].plot(hist["tr"], label="train")
ax[0].plot(hist["va"], label="val")
ax[0].set(xlabel="Epoch", ylabel="MSE", title="Loss curves")
ax[0].legend()
ax[0].grid(alpha=.3)

ax[1].plot(hist["r"], color="C2")
ax[1].axhline(0, color="k", ls="--", alpha=.4)  # zero-correlation baseline
ax[1].set(xlabel="Epoch", ylabel="Pearson r", title="Validation correlation")
ax[1].grid(alpha=.3)

# Symmetric axis limit so the identity line spans the full scatter range.
m = float(max(T.abs().max(), P.abs().max()))
ax[2].scatter(T.numpy(), P.numpy(), s=10, alpha=.35)
ax[2].plot([-m, m], [-m, m], "k--", alpha=.4)  # perfect-prediction line
ax[2].set(xlabel="True (z-scored char count)", ylabel="Predicted",
          title=f"Test predictions (r = {test_r:+.3f})")
ax[2].grid(alpha=.3)
plt.tight_layout()
plt.show()
# Closing summary of the pipeline components used in the tutorial.
print("\n✅ Tutorial complete!")
print(f" • Study used : {study_name}")
print(" • Pipeline : Chain → Segmenter → SegmentDataset → DataLoader")
print(" • Custom extractor : CharCount (subclass of BaseStatic)")
print(" • Built-in extractor: MegExtractor @ 100 Hz")
print(" • Model : 1×1 spatial conv + 2 temporal convs + linear head")



