Datasets:

Modalities: Image
Formats: parquet
Size: < 1K
ArXiv:
Libraries: Datasets, pandas
License:
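The card lists parquet files usable with the Datasets and pandas libraries; a minimal loading sketch follows (the repo id and split name are assumptions inferred from the committer and dataset name, not stated on the card):

    from datasets import load_dataset

    # Assumed repo id and split; substitute the actual path shown on this dataset card.
    ds = load_dataset("YutingHe-list/SimNICT", split="train")
    print(ds[0])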
YutingHe-list committed · Commit a899ab4 · verified · 1 Parent(s): 7362cbd

Upload 2 files

Files changed (2)
  1. simnict_download.py +585 -0
  2. simnict_generator.py +268 -0
simnict_download.py ADDED
@@ -0,0 +1,585 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SimNICT Dataset Batch Downloader
Download complete SimNICT datasets from Internet Archive

IMPORTANT: This downloader provides access to 8 out of 10 original SimNICT datasets.
AutoPET and HECKTOR22 are excluded from public release due to licensing restrictions.

Usage:
    python simnict_download.py --datasets AMOS COVID_19_NY_SBU --output_dir ./data
    python simnict_download.py --all --output_dir ./data
    python simnict_download.py --list    # Show available datasets

Author: TAMP Research Group
Version: 1.0
"""

import os
import sys
import argparse
import time
from pathlib import Path
from typing import List, Dict, Optional
import logging

try:
    import internetarchive as ia
except ImportError:
    print("❌ Error: internetarchive library not found")
    print("Please install it using: pip install internetarchive")
    sys.exit(1)

# =============================================================================
# Dataset Configuration
# =============================================================================

SIMNICT_DATASETS = {
    "AMOS": {
        "identifier": "simnict-amos",
        "description": "Abdominal multi-organ segmentation dataset",
        "volumes": 500,
        "files": 504,
        "size_gb": "~22 GB"
    },
    "COVID_19_NY_SBU": {
        "identifier": "simnict-covid-19-ny-sbu",
        "description": "COVID-19 NY-SBU chest CT dataset",
        "volumes": 459,
        "files": 463,
        "size_gb": "~30 GB"
    },
    "CT_Images_COVID19": {
        "identifier": "simnict-ct-images-in-covid-19",
        "description": "CT Images in COVID-19 dataset",
        "volumes": 771,
        "files": 775,
        "size_gb": "~13 GB"
    },
    "CT_COLONOGRAPHY": {
        "identifier": "simnict-ct-colonography",
        "description": "CT colonography screening dataset",
        "volumes": 1730,
        "files": 1734,
        "size_gb": "~271 GB"
    },
    "LNDb": {
        "identifier": "simnict-lndb",
        "description": "Lung nodule database",
        "volumes": 294,
        "files": 298,
        "size_gb": "~34 GB"
    },
    "LUNA": {
        "identifier": "simnict-luna",
        "description": "Lung nodule analysis dataset",
        "volumes": 888,
        "files": 892,
        "size_gb": "~63 GB"
    },
    "MELA": {
        "identifier": "simnict-mela",
        "description": "Mediastinal lesion analysis dataset",
        "volumes": 1100,
        "files": 1104,
        "size_gb": "~147 GB"
    },
    "STOIC": {
        "identifier": "simnict-stoic",
        "description": "COVID-19 AI challenge dataset",
        "volumes": 2000,
        "files": 2004,
        "size_gb": "~243 GB"
    }
}

# =============================================================================
# Logging Configuration
# =============================================================================

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('simnict_download.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# =============================================================================
# SimNICT Downloader Class
# =============================================================================

class SimNICTDownloader:
    def __init__(self, output_dir: str = "./simnict_data",
                 max_retries: int = 3, chunk_size: int = 1024*1024):
        """
        Initialize SimNICT downloader

        Args:
            output_dir: Directory to save downloaded datasets
            max_retries: Maximum retry attempts for failed downloads
            chunk_size: Download chunk size in bytes (default 1MB)
        """
        self.output_dir = Path(output_dir)
        self.max_retries = max_retries
        self.chunk_size = chunk_size

        # Create output directory
        self.output_dir.mkdir(parents=True, exist_ok=True)
        logger.info(f"📁 Output directory: {self.output_dir.absolute()}")

    def list_available_datasets(self) -> None:
        """Display all available SimNICT datasets"""
        print("\n" + "="*80)
        print("📋 Available SimNICT Datasets (8 out of 10 original datasets)")
        print("="*80)
        print("ℹ️ Note: AutoPET and HECKTOR22 excluded due to licensing restrictions")
        print("="*80)

        total_size = 0
        total_volumes = 0

        for name, info in SIMNICT_DATASETS.items():
            print(f"\n🔹 {name}")
            print(f" 📝 Description: {info['description']}")
            print(f" 📊 Volumes: {info['volumes']:,}")
            print(f" 📄 Files: {info['files']:,}")
            print(f" 💾 Size: {info['size_gb']}")
            print(f" 🏷️ ID: {info['identifier']}")
            print(f" 🔗 URL: https://archive.org/details/{info['identifier']}")

            total_volumes += info['volumes']
            # Extract numeric size for total calculation
            size_str = info['size_gb'].replace('~', '').replace(' GB', '')
            try:
                total_size += float(size_str)
            except ValueError:
                pass

        print(f"\n📈 Total Statistics:")
        print(f" 🗂️ Datasets: {len(SIMNICT_DATASETS)}")
        print(f" 📊 Total Volumes: {total_volumes:,}")
        print(f" 💾 Total Size: ~{total_size:.0f} GB")
        print("="*80)

    def check_dataset_exists(self, identifier: str) -> bool:
        """Check if dataset exists on Internet Archive"""
        try:
            item = ia.get_item(identifier)
            return item.exists
        except Exception as e:
            logger.error(f"Error checking dataset {identifier}: {e}")
            return False

    def get_dataset_files(self, identifier: str) -> List[str]:
        """Get list of files in a dataset"""
        try:
            item = ia.get_item(identifier)
            if not item.exists:
                return []

            files = []
            for file_obj in item.files:
                if isinstance(file_obj, dict) and 'name' in file_obj:
                    # Only include .nii.gz files (skip metadata)
                    filename = file_obj['name']
                    if filename.endswith('.nii.gz'):
                        files.append(filename)

            return sorted(files)
        except Exception as e:
            logger.error(f"Error getting files for {identifier}: {e}")
            return []

    def download_dataset(self, dataset_name: str,
                         resume: bool = True,
                         verify_checksum: bool = True) -> bool:
        """
        Download a specific SimNICT dataset

        Args:
            dataset_name: Name of dataset to download
            resume: Whether to resume partial downloads
            verify_checksum: Whether to verify file checksums

        Returns:
            True if download successful, False otherwise
        """
        if dataset_name not in SIMNICT_DATASETS:
            logger.error(f"❌ Unknown dataset: {dataset_name}")
            logger.info(f"Available datasets: {list(SIMNICT_DATASETS.keys())}")
            return False

        dataset_info = SIMNICT_DATASETS[dataset_name]
        identifier = dataset_info['identifier']

        logger.info(f"\n{'='*60}")
        logger.info(f"📤 Starting download: {dataset_name}")
        logger.info(f"🏷️ Identifier: {identifier}")
        logger.info(f"📊 Expected volumes: {dataset_info['volumes']}")
        logger.info(f"💾 Estimated size: {dataset_info['size_gb']}")
        logger.info(f"{'='*60}")

        # Check if dataset exists
        if not self.check_dataset_exists(identifier):
            logger.error(f"❌ Dataset not found on Internet Archive: {identifier}")
            return False

        # Create dataset directory
        dataset_dir = self.output_dir / dataset_name
        dataset_dir.mkdir(exist_ok=True)

        # Get files to download
        files_to_download = self.get_dataset_files(identifier)
        if not files_to_download:
            logger.error(f"❌ No files found for dataset: {dataset_name}")
            return False

        logger.info(f"📋 Found {len(files_to_download)} files to download")

        # Check existing files if resuming
        existing_files = set()
        if resume:
            for file_path in dataset_dir.iterdir():
                if file_path.is_file() and file_path.suffix == '.gz':
                    existing_files.add(file_path.name)

            if existing_files:
                logger.info(f"📂 Found {len(existing_files)} existing files (resume mode)")

        # Download files
        successful_downloads = 0
        failed_downloads = 0
        skipped_files = 0

        for i, filename in enumerate(files_to_download, 1):
            file_path = dataset_dir / filename

            # Skip if file exists and resuming
            if resume and filename in existing_files:
                logger.info(f"⏭️ Skipping existing file [{i}/{len(files_to_download)}]: {filename}")
                skipped_files += 1
                continue

            logger.info(f"📥 Downloading [{i}/{len(files_to_download)}]: {filename}")

            success = self._download_file_with_retry(
                identifier, filename, file_path, verify_checksum
            )

            if success:
                successful_downloads += 1
                logger.info(f"✅ Downloaded: {filename}")
            else:
                failed_downloads += 1
                logger.error(f"❌ Failed: {filename}")

            # Brief pause between downloads
            time.sleep(0.5)

        # Summary
        logger.info(f"\n📊 Download Summary for {dataset_name}:")
        logger.info(f" ✅ Successful: {successful_downloads}")
        logger.info(f" ⏭️ Skipped: {skipped_files}")
        logger.info(f" ❌ Failed: {failed_downloads}")
        logger.info(f" 📁 Location: {dataset_dir.absolute()}")

        return failed_downloads == 0

    def _download_file_with_retry(self, identifier: str, filename: str,
                                  file_path: Path, verify_checksum: bool) -> bool:
        """Download single file with retry logic"""
        for attempt in range(self.max_retries):
            try:
                # Use internetarchive library to download
                item = ia.get_item(identifier)

                # Find the file object
                file_obj = None
                for f in item.files:
                    if isinstance(f, dict) and f.get('name') == filename:
                        file_obj = f
                        break

                if not file_obj:
                    logger.error(f"File not found in item: {filename}")
                    return False

                # Download the file
                success = item.download(
                    files=[filename],
                    destdir=file_path.parent,
                    verify=verify_checksum,
                    verbose=False,
                    retries=1  # Handle retries at our level
                )

                if success and file_path.exists():
                    return True
                else:
                    raise Exception("Download failed or file not created")

            except Exception as e:
                logger.warning(f"⚠️ Attempt {attempt + 1}/{self.max_retries} failed for {filename}: {e}")

                if attempt < self.max_retries - 1:
                    wait_time = (attempt + 1) * 2  # Linear backoff: 2s, 4s, 6s, ...
                    logger.info(f"🔄 Retrying in {wait_time} seconds...")
                    time.sleep(wait_time)
                else:
                    logger.error(f"💔 All {self.max_retries} attempts failed for {filename}")
                    return False

        return False

    def download_multiple_datasets(self, dataset_names: List[str],
                                   resume: bool = True) -> Dict[str, bool]:
        """
        Download multiple SimNICT datasets

        Args:
            dataset_names: List of dataset names to download
            resume: Whether to resume partial downloads

        Returns:
            Dictionary mapping dataset names to success status
        """
        if not dataset_names:
            logger.error("❌ No datasets specified")
            return {}

        logger.info(f"\n🚀 Starting batch download of {len(dataset_names)} datasets")
        logger.info(f"📋 Datasets: {', '.join(dataset_names)}")

        results = {}
        successful = 0

        for i, dataset_name in enumerate(dataset_names, 1):
            logger.info(f"\n{'🔄' * 20} Dataset {i}/{len(dataset_names)} {'🔄' * 20}")

            success = self.download_dataset(dataset_name, resume=resume)
            results[dataset_name] = success

            if success:
                successful += 1
                logger.info(f"🎉 Successfully downloaded: {dataset_name}")
            else:
                logger.error(f"💔 Failed to download: {dataset_name}")

        # Final summary
        logger.info(f"\n{'=' * 80}")
        logger.info(f"🏁 Batch Download Complete")
        logger.info(f"{'=' * 80}")
        logger.info(f"✅ Successful: {successful}/{len(dataset_names)}")
        logger.info(f"❌ Failed: {len(dataset_names) - successful}")

        for dataset_name, success in results.items():
            status = "✅" if success else "❌"
            logger.info(f" {status} {dataset_name}")

        return results

    def validate_downloads(self, dataset_names: List[str]) -> Dict[str, Dict]:
        """
        Validate downloaded datasets

        Args:
            dataset_names: List of dataset names to validate

        Returns:
            Validation results for each dataset
        """
        logger.info(f"\n🔍 Validating {len(dataset_names)} datasets...")

        results = {}

        for dataset_name in dataset_names:
            if dataset_name not in SIMNICT_DATASETS:
                continue

            dataset_dir = self.output_dir / dataset_name
            expected_info = SIMNICT_DATASETS[dataset_name]

            if not dataset_dir.exists():
                results[dataset_name] = {
                    "status": "missing",
                    "message": "Dataset directory not found"
                }
                continue

            # Count downloaded files
            nii_files = list(dataset_dir.glob("*.nii.gz"))
            file_count = len(nii_files)

            expected_files = expected_info['files']
            completion_rate = (file_count / expected_files) * 100

            if file_count == expected_files:
                status = "complete"
                message = f"All {file_count} files downloaded successfully"
            elif file_count > 0:
                status = "partial"
                message = f"Partial download: {file_count}/{expected_files} files ({completion_rate:.1f}%)"
            else:
                status = "empty"
                message = "No files found"

            results[dataset_name] = {
                "status": status,
                "files_found": file_count,
                "files_expected": expected_files,
                "completion_rate": completion_rate,
                "message": message
            }

            logger.info(f"📊 {dataset_name}: {message}")

        return results

# =============================================================================
# Command Line Interface
# =============================================================================

def main():
    parser = argparse.ArgumentParser(
        description="Download SimNICT datasets from Internet Archive",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # List available datasets
  python simnict_download.py --list

  # Download specific datasets
  python simnict_download.py --datasets AMOS COVID_19_NY_SBU --output_dir ./data

  # Download all datasets
  python simnict_download.py --all --output_dir ./data

  # Resume interrupted downloads
  python simnict_download.py --datasets STOIC --resume --output_dir ./data

  # Validate existing downloads
  python simnict_download.py --validate AMOS LUNA --output_dir ./data
        """
    )

    parser.add_argument(
        "--datasets", nargs="+", metavar="DATASET",
        help="List of datasets to download (e.g., AMOS LUNA STOIC)"
    )

    parser.add_argument(
        "--all", action="store_true",
        help="Download all available SimNICT datasets"
    )

    parser.add_argument(
        "--list", action="store_true",
        help="List available datasets and exit"
    )

    parser.add_argument(
        "--validate", nargs="*", metavar="DATASET",
        help="Validate downloaded datasets"
    )

    parser.add_argument(
        "--output_dir", default="./simnict_data",
        help="Output directory for downloads (default: ./simnict_data)"
    )

    parser.add_argument(
        "--resume", action="store_true",
        help="Resume interrupted downloads (skip existing files)"
    )

    parser.add_argument(
        "--no-checksum", action="store_true",
        help="Skip checksum verification (faster but less safe)"
    )

    parser.add_argument(
        "--max-retries", type=int, default=3,
        help="Maximum retry attempts for failed downloads (default: 3)"
    )

    args = parser.parse_args()

    # Handle list command
    if args.list:
        downloader = SimNICTDownloader()
        downloader.list_available_datasets()
        return

    # Handle validation
    if args.validate is not None:
        datasets_to_validate = args.validate if args.validate else list(SIMNICT_DATASETS.keys())
        downloader = SimNICTDownloader(args.output_dir)
        results = downloader.validate_downloads(datasets_to_validate)
        return

    # Determine datasets to download
    if args.all:
        datasets = list(SIMNICT_DATASETS.keys())
    elif args.datasets:
        datasets = args.datasets
    else:
        parser.error("Must specify --datasets, --all, --list, or --validate")

    # Validate dataset names
    invalid_datasets = [d for d in datasets if d not in SIMNICT_DATASETS]
    if invalid_datasets:
        logger.error(f"❌ Invalid dataset names: {invalid_datasets}")
        logger.info(f"Available datasets: {list(SIMNICT_DATASETS.keys())}")
        return

    # Initialize downloader
    downloader = SimNICTDownloader(
        output_dir=args.output_dir,
        max_retries=args.max_retries
    )

    # Show download plan
    logger.info(f"\n📋 Download Plan:")
    total_size = 0
    for dataset in datasets:
        info = SIMNICT_DATASETS[dataset]
        logger.info(f" 🔹 {dataset}: {info['size_gb']} ({info['volumes']} volumes)")
        # Extract size for total calculation
        try:
            size_num = float(info['size_gb'].replace('~', '').replace(' GB', ''))
            total_size += size_num
        except ValueError:
            pass

    logger.info(f" 💾 Total estimated size: ~{total_size:.0f} GB")

    # Confirm download
    try:
        confirm = input("\nProceed with download? (y/N): ").strip().lower()
        if confirm != 'y':
            logger.info("❌ Download cancelled by user")
            return
    except KeyboardInterrupt:
        logger.info("\n❌ Download cancelled by user")
        return

    # Start downloads
    start_time = time.time()
    results = downloader.download_multiple_datasets(datasets, resume=args.resume)
    end_time = time.time()

    # Final report
    elapsed = end_time - start_time
    logger.info(f"\n⏱️ Total time: {elapsed:.1f} seconds ({elapsed/60:.1f} minutes)")

    # Validate downloads
    if any(results.values()):
        logger.info("\n🔍 Validating downloads...")
        validation_results = downloader.validate_downloads(list(results.keys()))


if __name__ == "__main__":
    main()
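Besides the command-line interface, the downloader above can also be driven programmatically; a minimal sketch using only the class and methods defined in simnict_download.py (dataset names and the output path are examples):

    from simnict_download import SimNICTDownloader

    downloader = SimNICTDownloader(output_dir="./simnict_data", max_retries=3)
    downloader.list_available_datasets()
    results = downloader.download_multiple_datasets(["AMOS", "LUNA"], resume=True)
    downloader.validate_downloads([name for name, ok in results.items() if ok])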
simnict_generator.py ADDED
@@ -0,0 +1,268 @@
# -*- coding: utf-8 -*-

"""
SimNICT Dataset Generator
Generates Non-Ideal measurement CT (NICT) simulations from preprocessed ICT data

This script creates three types of NICT simulations:
1. Sparse-View CT (SVCT): Limited projection views (15-360 views)
2. Limited-Angle CT (LACT): Restricted angular range (75°-270°)
3. Low-Dose CT (LDCT): Reduced photon dose (5%-75% of normal dose)

Usage:
    python simnict_generator.py

Dependencies: numpy, torch, nibabel, odl, astra-toolbox, opencv-python, pillow
"""

from __future__ import absolute_import, print_function

import numpy as np
import time
import os
import nibabel as nib
import odl
import random
import astra


# Dataset configuration
DATASETS = ['AMOS', 'COVID_19_NY_SBU', 'CT Images in COVID-19', 'CT_COLONOGRAPHY', 'LNDb', 'LUNA', 'MELA', 'STOIC']

# Path configuration
INPUT_PATH = 'M:/'         # Original ICT data path
OUTPUT_SVCT = 'K:/SpV/'    # Sparse-view CT output path
OUTPUT_LACT = 'K:/LmV/'    # Limited-angle CT output path
OUTPUT_LDCT = 'K:/LD/'     # Low-dose CT output path

# Simulation parameter ranges
SVCT_VIEW_RANGE = (15, 360)    # Number of projection views
LACT_ANGLE_RANGE = (75, 270)   # Angular range in degrees
LDCT_DOSE_RANGE = (5, 75)      # Dose percentage


def process_dataset(input_path, dataset_name):
    """
    Process a complete dataset to generate NICT simulations

    Args:
        input_path (str): Path to input ICT data
        dataset_name (str): Name of the dataset to process
    """
    ict_path = os.path.join(input_path, dataset_name, "int16/")

    if not os.path.exists(ict_path):
        print(f"Warning: Path {ict_path} does not exist, skipping {dataset_name}")
        return

    # Create output directories
    for output_path in [OUTPUT_SVCT, OUTPUT_LACT, OUTPUT_LDCT]:
        os.makedirs(os.path.join(output_path, dataset_name, "int16/"), exist_ok=True)

    files = os.listdir(ict_path)
    num_files = len(files)
    print(f"Processing {dataset_name}: {num_files} files")

    for i, filename in enumerate(files):
        print(f"Processing {dataset_name} - File {i+1}/{num_files}: {filename}")

        # Load ICT volume
        ict_file_path = os.path.join(ict_path, filename)
        image_obj = nib.load(ict_file_path)
        ict_volume = image_obj.get_fdata() + 1024  # Convert to [0, 4096] range
        ict_volume[ict_volume < 0] = 0

        L, W, S = ict_volume.shape

        # Initialize output volumes
        svct_volume = np.zeros((L, W, S), dtype=np.int16)
        lact_volume = np.zeros((L, W, S), dtype=np.int16)
        ldct_volume = np.zeros((L, W, S), dtype=np.int16)

        # Process each slice
        for slice_idx in range(S):
            ict_slice = ict_volume[:, :, slice_idx]

            # Generate SVCT with random view number
            svct_views = random.randint(*SVCT_VIEW_RANGE)
            svct_slice = create_sparse_view_ct(ict_slice, L, W, svct_views)
            svct_volume[:, :, slice_idx] = svct_slice

            # Generate LACT with random angular range
            lact_angle = random.randint(*LACT_ANGLE_RANGE)
            lact_slice = create_limited_angle_ct(ict_slice, L, W, lact_angle)
            lact_volume[:, :, slice_idx] = lact_slice

            # Generate LDCT with random dose level
            ldct_dose = random.randint(*LDCT_DOSE_RANGE)
            ldct_slice = create_low_dose_ct(ict_slice - 1024, L, W, ldct_dose)  # Convert back to [-1024, 3072]
            ldct_volume[:, :, slice_idx] = ldct_slice

        # Save NICT volumes
        save_nict_volume(svct_volume, OUTPUT_SVCT, dataset_name, filename, volume_type="SVCT")
        save_nict_volume(lact_volume, OUTPUT_LACT, dataset_name, filename, volume_type="LACT")
        save_nict_volume(ldct_volume, OUTPUT_LDCT, dataset_name, filename, volume_type="LDCT")


def save_nict_volume(volume, output_path, dataset_name, filename, volume_type):
    """Save NICT volume to NIfTI format"""
    if volume_type in ["SVCT", "LACT"]:
        # Convert from [0, 4096] to [-1024, 3072] range
        volume_output = volume - 1024
        volume_output[volume_output < -1024] = -1024
    else:  # LDCT
        # Already in [-1024, 3072] range
        volume_output = volume
        volume_output[volume_output < -1024] = -1024

    nifti_image = nib.Nifti1Image(volume_output, np.eye(4))
    output_file = os.path.join(output_path, dataset_name, "int16/", filename)
    nib.save(nifti_image, output_file)

def create_sparse_view_ct(ict_slice, height, width, num_views):
    """
    Generate Sparse-View CT using ODL

    Args:
        ict_slice: Input ICT slice [0, 4096]
        height, width: Image dimensions
        num_views: Number of projection views
    Returns:
        Reconstructed sparse-view CT slice
    """
    # Create reconstruction space
    reco_space = odl.uniform_discr(
        min_pt=[-height/4, -width/4],
        max_pt=[height/4, width/4],
        shape=[height, width],
        dtype='float32'
    )

    # Define geometry with limited views
    angle_partition = odl.uniform_partition(0, 2 * np.pi, num_views)
    detector_partition = odl.uniform_partition(-360, 360, 1024)
    geometry = odl.tomo.FanBeamGeometry(
        angle_partition, detector_partition,
        src_radius=1270, det_radius=870
    )

    # Create ray transform and reconstruct
    ray_trafo = odl.tomo.RayTransform(reco_space, geometry)
    projection = ray_trafo(ict_slice.astype('float32'))
    fbp = odl.tomo.fbp_op(ray_trafo)
    reconstruction = fbp(projection)

    return reconstruction


def create_limited_angle_ct(ict_slice, height, width, angle_range):
    """
    Generate Limited-Angle CT using ODL

    Args:
        ict_slice: Input ICT slice [0, 4096]
        height, width: Image dimensions
        angle_range: Angular range in degrees
    Returns:
        Reconstructed limited-angle CT slice
    """
    # Create reconstruction space
    reco_space = odl.uniform_discr(
        min_pt=[-height/4, -width/4],
        max_pt=[height/4, width/4],
        shape=[height, width],
        dtype='float32'
    )

    # Define geometry with limited angular range
    angle_fraction = angle_range / 360
    num_angles = int(720 * angle_fraction)
    angle_partition = odl.uniform_partition(0, 2 * np.pi * angle_fraction, num_angles)
    detector_partition = odl.uniform_partition(-360, 360, 1024)
    geometry = odl.tomo.FanBeamGeometry(
        angle_partition, detector_partition,
        src_radius=1270, det_radius=870
    )

    # Create ray transform and reconstruct
    ray_trafo = odl.tomo.RayTransform(reco_space, geometry)
    projection = ray_trafo(ict_slice.astype('float32'))
    fbp = odl.tomo.fbp_op(ray_trafo)
    reconstruction = fbp(projection)

    return reconstruction


def create_low_dose_ct(ict_slice, height, width, dose_percentage):
    """
    Generate Low-Dose CT using ASTRA with Poisson noise simulation

    Args:
        ict_slice: Input ICT slice [-1024, 3072]
        height, width: Image dimensions
        dose_percentage: Dose level as percentage of normal dose
    Returns:
        Reconstructed low-dose CT slice
    """
    dose_fraction = dose_percentage / 100.0

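    # Note on the conversion below: CT numbers are defined as HU = 1000 * (mu - mu_water) / mu_water,
    # so inverting gives mu = mu_water * (HU / 1000 + 1). The constant u plays the role of mu_water;
    # 0.0192 per mm is a commonly used value for water in this kind of simulation.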
    # Convert to attenuation coefficients
    u = 0.0192  # Linear attenuation coefficient
    attenuation_map = ict_slice * u / 1000.0 + u

    # ASTRA geometry setup
    vol_geom = astra.create_vol_geom([height, width])
    angles = np.linspace(np.pi, -np.pi, 720)
    proj_geom = astra.create_proj_geom(
        'fanflat', 1.685839319229126, 1024, angles,
        600.4500331878662, 485.1499423980713
    )

    # Create projector and forward project
    proj_id = astra.create_projector('cuda', proj_geom, vol_geom)
    operator = astra.OpTomo(proj_id)

    # Forward projection
    sinogram = operator * np.mat(attenuation_map) / 2

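    # The low-dose degradation is modelled by adding zero-mean Gaussian noise whose variance
    # matches the extra Poisson uncertainty of a reduced-dose, post-log sinogram: taking the 1e6
    # in the scaling below as the blank-scan photon count N0, a line integral y has variance of
    # roughly exp(y) / N0 at full dose and exp(y) / (dose_fraction * N0) at the reduced dose, so
    # the injected noise variance is (1 - dose_fraction) / dose_fraction * exp(y) / N0.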
    # Add Poisson noise based on dose level
    noise = np.random.normal(0, 1, 720 * 1024)
    noise_scaling = np.sqrt((1 - dose_fraction) / dose_fraction * (np.exp(sinogram) / 1e6))
    noisy_sinogram = sinogram + noise * noise_scaling

    # Reconstruct with FBP
    noisy_sinogram_2d = np.reshape(noisy_sinogram, [720, -1])
    reconstruction = operator.reconstruct('FBP_CUDA', noisy_sinogram_2d)

    # Convert back to HU values
    reconstruction = reconstruction.reshape((height, width))
    return (reconstruction * 2 - u) / u * 1000


def main():
    """Main processing function"""
    print('SimNICT Dataset Generator Started')
    start_time = time.time()

    # Process each dataset
    for dataset_name in DATASETS:
        print(f"\n{'='*50}")
        print(f"Processing Dataset: {dataset_name}")
        print(f"{'='*50}")

        try:
            dataset_start = time.time()
            process_dataset(INPUT_PATH, dataset_name)
            duration = time.time() - dataset_start
            print(f"Completed {dataset_name} in {duration:.1f} seconds")
        except Exception as e:
            print(f"Error processing {dataset_name}: {str(e)}")
            continue

    total_duration = time.time() - start_time
    print(f"\nSimNICT Generation Complete - Total time: {total_duration/3600:.2f} hours")


if __name__ == "__main__":
    main()