@article{oai:nifs-repository.repo.nii.ac.jp:00010787,
  author   = {Nakanishi, Hideya and Ohsuna, Masaki and Kojima, Mamoru and Imazu, Setsuo and Nonomura, Miki and Emoto, Masahiko and Yoshida, Masanobu and Iwata, Chie and Ida, Katsumi},
  title    = {Real-Time Data Streaming and Storing Structure for the {LHD}'s Fusion Plasma Experiments},
  journal  = {IEEE Transactions on Nuclear Science},
  volume   = {63},
  number   = {1},
  pages    = {222--227},
  month    = feb,
  year     = {2016},
  orcid    = {0000-0001-6388-4489},
  abstract = {The LHD data acquisition and archiving system, i.e., LABCOM system, has been fully equipped with high-speed real-time acquisition, streaming, and storage capabilities. To deal with more than 100 MB/s continuously generated data at each data acquisition (DAQ) node, DAQ tasks have been implemented as multitasking and multithreaded ones in which the shared memory plays the most important role for inter-process fast and massive data handling. By introducing a 10-second time chunk named ``subshot,'' endless data streams can be stored into a consecutive series of fixed length data blocks so that they will soon become readable by other processes even while the write process is continuing. Real-time device and environmental monitoring are also implemented in the same way with further sparse resampling. The central data storage has been separated into two layers to be capable of receiving multiple 100 MB/s inflows in parallel. For the frontend layer, high-speed SSD arrays are used as the GlusterFS distributed filesystem which can provide max. 2 GB/s throughput. Those design optimizations would be informative for implementing the next-generation data archiving system in big physics, such as ITER.},
}