{{ item1.CnName_FinancialProductName }}
{{ item1.FixedDailyResponseRate }}%
{{ item1.MinDailyResponseRate }}% - {{ item1.MaxDailyResponseRate }}%
{{ $t("home_0089") }}
{{ currencySymbol }}{{ item1.MinSingleSubscriptionAmount | formatCurrency2 }} {{ $t("home_0090") }}
{{ item.GameName }}
Sorry
Requested content not found.
{{ $t('limited_access_0002') }}:
{{ $t('limited_access_0003') }}
{{ $t('limited_access_0004') }}
{{ domainLimit }}
Dear Valued Customers:
Viewing and using this website from your current location is prohibited by local regulatory rules.
We apologize for any inconvenience.
From {{ domainLimit }}
{{ value.name }}
{{ value.ChannelName.split("(")[0] }}
({{ value.ChannelName.split("(")[1] }}
{{ $t("deposit_0014") }}
1. {{ $t("deposit_0015") }}
2. {{ $t("deposit_0016") }}
3. {{ $t("deposit_0017") }}
● {{ $t("deposit_0018") }}
● {{ $t("deposit_0019") }}
● {{ $t("deposit_0020") }}
\r\n1. {{ $t(\"v1_deposit_0002\") }}
\r\n2. {{ $t(\"v1_deposit_0003\") }}
\r\n3. {{ $t(\"v1_deposit_0004\") }}
\r\n{{ selectedPay.account }} ({{ selectedPay.name }})
\r\n{{ $t(\"deposit_0021\") }}
\r\n \r\n1. {{ $t(\"deposit_0028\") }}
\r\n2. {{ $t(\"deposit_0029\") }}
\r\n3. {{ $t(\"deposit_0030\") }}
\r\n{{ selectedPay.account }} ({{ selectedPay.name }})
\r\n{{ $t(\"deposit_0021\") }}
\r\n \r\n1. {{ $t(\"deposit_0028\") }}
\r\n \r\n2. {{ $t(\"deposit_0030\") }}
\r\n{{ $t(\"vip_manager_0006\") }}
\r\n{{ $t(\"vip_manager_0005\") }}
\r\n{{ $t(\"common_0004\") }}:
\r\n{{ $t(\"withdrawal_0016\") }}
\r\n{{ $t(\"withdrawal_0039\") }}
\r\n{{ $t(\"withdrawal_0040\") }}
\r\n{{ $t(\"withdrawal_0017\") }}
\r\n{{ $t(\"withdrawal_0018\") }}
\r\n{{ notWithdrawText }}
{{ $t('vip_club_0023', [$f7.params.name]) }} VIP0, VIP1, VIP2, VIP3, VIP4, VIP5, VIP6, VIP7, VIP8, {{ $t('vip_club_0024') }}
{{ $t('vip_club_0010') }}

{{ $t('vip_club_0011') }} | {{ $t('vip_club_0012') }} | {{ $t('vip_club_0011') }} | {{ $t('vip_club_0012') }} |
---|---|---|---|
VIP0 | 0 | VIP1 | 2000 |
VIP2 | 5000 | VIP3 | 10000 |
VIP4 | 20000 | VIP5 | 50000 |
VIP6 | 150000 | VIP7 | 200000 |
VIP8 | 300000 | | |
{{ $t('vip_club_0014', [$f7.params.name]) }}
{{ $t('vip_club_0016') }}
{{ $t('vip_club_0018', [$f7.params.name]) }}
{{ $t('vip_club_0020', [$f7.params.name]) }}
{{ $t('vip_club_0022') }}
{{ item.GameName }}
{{ $t('v1_ar_0009') }}
{{ $t('v1_ar_0010') }}
{{ $t("common_0004") }}:
1. {{ $t("v1_bp_0006") }}
2. {{ $t("v1_bp_0007") }}
3. {{ $t("v1_bp_0008", [$f7.params.name]) }}
{{ $t("v1_pc_0006") }}
{{ item.BonusPointGoodsName }}
{{ item.Price }}{{ $t("v1_pc_0007") }}
{{ $t("common_0004") }}:
1. {{ $t("v1_pc_0009") }}
2. {{ $t("v1_pc_0010") }}
3. {{ $t("v1_pc_0011", [$f7.params.name]) }}
{{ userLevelName }} {{ $t('v1_sign_in_0004') }} {{ dailyAmount ? dailyAmount : 0 }}{{ $t('v1_sign_in_0005') }}
{{ $t('v1_sign_in_0016') }} | {{ $t('v1_sign_in_0017') }} | {{ $t('v1_sign_in_0020') }} |
---|---|---|
{{ item.Name }} | {{ item.DailyAmount }} | {{ item.ActivateMonthlyDeposit }} |
{{ birthdayInfo.BonusRemark }}
\r\n{{ $t(\"common_0004\") }}:
\r\n1. {{ $t(\"v1_vbg_0005\") }}
\r\n2. {{ $t(\"v1_vbg_0006\") }}
\r\n3. {{ $t(\"v1_vbg_0007\") }}
\r\n4. {{ $t(\"v1_vbg_0008\", [$f7.params.name]) }}
\r\n{{ festivalInfo.BonusRemark }}
\r\n{{ $t(\"common_0004\") }}:
\r\n1. {{ $t(\"v1_vhg_0006\", [$f7.params.name]) }}
\r\n2. {{ $t(\"v1_vhg_0007\") }}
\r\n3. {{ $t(\"v1_vhg_0008\") }}
\r\n4. {{ $t(\"v1_vhg_0009\", [$f7.params.name]) }}
\r\n{{monthlySalaryInfo.SalaryRemark}}
\r\n{{ $t('common_0004') }}:
\r\n1. {{ $t('v1_vms_0004') }}
\r\n2. {{ $t('v1_vms_0005') }}
\r\n3. {{ $t('v1_vms_0006') }}
\r\n4. {{ $t('v1_vms_0007', [$f7.params.name]) }}
{{ item.GameName }}
× 1
({{ $t("betting_records_0021") }}) ({{ $t("betting_records_0003") }}) ({{ $t("betting_records_0020") }}) ({{ $t("betting_records_0008") }}) ({{ $t("betting_records_0004") }})
{{ $t("winloserp_0015") }} {{ currencySymbol }}{{ item.APIWLAmount }}
{{ $t("winloserp_0015") }} {{ currencySymbol }}0.00
\r\n{{ $t(\"common_0004\") }}:
\r\n{{ $t(\"bank_info_0015\") }}
\r\n \r\n{{ $t(\"common_0004\") }}:
\r\n{{ $t(\"referrer_share_0004\") }}
{{ orderInfo.GameName }}
1. {{ $t("invite_friends_0013") }}
2. {{ $t("invite_friends_0014") }}
3. {{ $t("invite_friends_0015") }}
4. {{ $t("invite_friends_0016") }}
\r\n{{ $t(\"invite_overview_0019\") }}:{{ userInfo.VipLevelName }}
\r\n{{ $t(\"invite_overview_0020\") }}
\r\n \r\n{{ todayProfitAmount | formatCurrency}}
\r\n{{ $t(\"commission_detail_0002\") }}(Rp)
\r\n{{ monthProfitAmount | formatCurrency}}
\r\n{{ $t(\"commission_detail_0015\") }}(Rp)
\r\n{{ totalProfitAmount | formatCurrency}}
\r\n{{ $t(\"commission_detail_0003\") }}(Rp)
\r\n{{ $t(\"vip_detail_0020\") }} | \r\n{{ $t(\"vip_detail_0021\") }} | \r\n{{ $t(\"vip_detail_0022\") }} | \r\n
---|---|---|
{{ $t(\"vip_detail_0023\") }} | \r\n{{ vipInfo.DailyOrderCountLimit }} | \r\n{{ (vipInfo.DailyOrderCountLimit*vipInfo.FixedProfitAmount) | formatCurrency2 }} | \r\n \r\n
{{ $t(\"vip_detail_0024\") }} | \r\n{{ vipInfo.DailyOrderCountLimit*30 }} | \r\n{{ (vipInfo.DailyOrderCountLimit*30*vipInfo.FixedProfitAmount) | formatCurrency2 }} | \r\n
{{ $t(\"vip_detail_0025\") }} | \r\n{{ vipInfo.DailyOrderCountLimit*365 }} | \r\n{{ (vipInfo.DailyOrderCountLimit*365*vipInfo.FixedProfitAmount) | formatCurrency2 }} | \r\n
{{ $t(\"vip_detail_0027\") }} | \r\n{{ $t(\"vip_detail_0028\") }} | \r\n{{ $t(\"vip_detail_0029\") }} | \r\n
---|---|---|
{{ $t(\"vip_detail_0030\") }} | \r\n{{ vipInfo.VIPLevelUpRebatesRateL1 }}% | \r\n{{ (vipInfo.VIPLevelUpRebatesRateL1*vipInfo.DepositTotalAmount/100) | formatCurrency2 }} | \r\n \r\n
{{ $t(\"vip_detail_0031\") }} | \r\n{{ vipInfo.VIPLevelUpRebatesRateL2 }}% | \r\n{{ (vipInfo.VIPLevelUpRebatesRateL2*vipInfo.DepositTotalAmount/100) | formatCurrency2 }} | \r\n
{{ $t(\"vip_detail_0032\") }} | \r\n{{ vipInfo.VIPLevelUpRebatesRateL3 }}% | \r\n{{ (vipInfo.VIPLevelUpRebatesRateL3*vipInfo.DepositTotalAmount/100) | formatCurrency2 }} | \r\n
{{ $t(\"vip_detail_0034\") }} | \r\n{{ $t(\"vip_detail_0035\") }} | \r\n{{ $t(\"vip_detail_0036\") }} | \r\n
---|---|---|
{{ $t(\"vip_detail_0037\") }} | \r\n{{ vipInfo.OrdersRebatesRateL1 }}% | \r\n{{ (vipInfo.OrdersRebatesRateL1*vipInfo.FixedProfitAmount*vipInfo.DailyOrderCountLimit/100) | formatCurrency2 }} | \r\n
{{ $t(\"vip_detail_0038\") }} | \r\n{{ vipInfo.OrdersRebatesRateL2 }}% | \r\n{{ (vipInfo.OrdersRebatesRateL2*vipInfo.FixedProfitAmount*vipInfo.DailyOrderCountLimit/100) | formatCurrency2 }} | \r\n
{{ $t(\"vip_detail_0039\") }} | \r\n{{ vipInfo.OrdersRebatesRateL3 }}% | \r\n{{ (vipInfo.OrdersRebatesRateL3*vipInfo.FixedProfitAmount*vipInfo.DailyOrderCountLimit/100) | formatCurrency2 }} | \r\n
{{ $t(\"award_0004\") }}
\r\n{{ item.SubCurrActiveCount }}/{{ item.SubActiveCount }}
{{ $t(\"award_0010\") }}
\r\n{{ item.TeamCurrActiveCount }}/{{ item.TeamActiveCount }}
\n * use timestamp unless it is undefined, NaN or Infinity\n *
\n */\nconst initPTSFn = (timestamp, timeOffset, initPTS) => {\n if (isFiniteNumber(timestamp)) {\n return timestamp * 90;\n }\n const init90kHz = initPTS ? initPTS.baseTime * 90000 / initPTS.timescale : 0;\n return timeOffset * 90000 + init90kHz;\n};\n\n/**\n * ADTS parser helper\n * @link https://wiki.multimedia.cx/index.php?title=ADTS\n */\nfunction getAudioConfig(observer, data, offset, audioCodec) {\n let adtsObjectType;\n let adtsExtensionSamplingIndex;\n let adtsChannelConfig;\n let config;\n const userAgent = navigator.userAgent.toLowerCase();\n const manifestCodec = audioCodec;\n const adtsSamplingRates = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];\n // byte 2\n adtsObjectType = ((data[offset + 2] & 0xc0) >>> 6) + 1;\n const adtsSamplingIndex = (data[offset + 2] & 0x3c) >>> 2;\n if (adtsSamplingIndex > adtsSamplingRates.length - 1) {\n const error = new Error(`invalid ADTS sampling index:${adtsSamplingIndex}`);\n observer.emit(Events.ERROR, Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.FRAG_PARSING_ERROR,\n fatal: true,\n error,\n reason: error.message\n });\n return;\n }\n adtsChannelConfig = (data[offset + 2] & 0x01) << 2;\n // byte 3\n adtsChannelConfig |= (data[offset + 3] & 0xc0) >>> 6;\n logger.log(`manifest codec:${audioCodec}, ADTS type:${adtsObjectType}, samplingIndex:${adtsSamplingIndex}`);\n // firefox: freq less than 24kHz = AAC SBR (HE-AAC)\n if (/firefox/i.test(userAgent)) {\n if (adtsSamplingIndex >= 6) {\n adtsObjectType = 5;\n config = new Array(4);\n // HE-AAC uses SBR (Spectral Band Replication) , high frequencies are constructed from low frequencies\n // there is a factor 2 between frame sample rate and output sample rate\n // multiply frequency by 2 (see table below, equivalent to substract 3)\n adtsExtensionSamplingIndex = adtsSamplingIndex - 3;\n } else {\n adtsObjectType = 2;\n config = new Array(2);\n adtsExtensionSamplingIndex = adtsSamplingIndex;\n }\n // Android : always use AAC\n } else if (userAgent.indexOf('android') !== -1) {\n adtsObjectType = 2;\n config = new Array(2);\n adtsExtensionSamplingIndex = adtsSamplingIndex;\n } else {\n /* for other browsers (Chrome/Vivaldi/Opera ...)\n always force audio type to be HE-AAC SBR, as some browsers do not support audio codec switch properly (like Chrome ...)\n */\n adtsObjectType = 5;\n config = new Array(4);\n // if (manifest codec is HE-AAC or HE-AACv2) OR (manifest codec not specified AND frequency less than 24kHz)\n if (audioCodec && (audioCodec.indexOf('mp4a.40.29') !== -1 || audioCodec.indexOf('mp4a.40.5') !== -1) || !audioCodec && adtsSamplingIndex >= 6) {\n // HE-AAC uses SBR (Spectral Band Replication) , high frequencies are constructed from low frequencies\n // there is a factor 2 between frame sample rate and output sample rate\n // multiply frequency by 2 (see table below, equivalent to substract 3)\n adtsExtensionSamplingIndex = adtsSamplingIndex - 3;\n } else {\n // if (manifest codec is AAC) AND (frequency less than 24kHz AND nb channel is 1) OR (manifest codec not specified and mono audio)\n // Chrome fails to play back with low frequency AAC LC mono when initialized with HE-AAC. 
This is not a problem with stereo.\n if (audioCodec && audioCodec.indexOf('mp4a.40.2') !== -1 && (adtsSamplingIndex >= 6 && adtsChannelConfig === 1 || /vivaldi/i.test(userAgent)) || !audioCodec && adtsChannelConfig === 1) {\n adtsObjectType = 2;\n config = new Array(2);\n }\n adtsExtensionSamplingIndex = adtsSamplingIndex;\n }\n }\n /* refer to http://wiki.multimedia.cx/index.php?title=MPEG-4_Audio#Audio_Specific_Config\n ISO 14496-3 (AAC).pdf - Table 1.13 — Syntax of AudioSpecificConfig()\n Audio Profile / Audio Object Type\n 0: Null\n 1: AAC Main\n 2: AAC LC (Low Complexity)\n 3: AAC SSR (Scalable Sample Rate)\n 4: AAC LTP (Long Term Prediction)\n 5: SBR (Spectral Band Replication)\n 6: AAC Scalable\n sampling freq\n 0: 96000 Hz\n 1: 88200 Hz\n 2: 64000 Hz\n 3: 48000 Hz\n 4: 44100 Hz\n 5: 32000 Hz\n 6: 24000 Hz\n 7: 22050 Hz\n 8: 16000 Hz\n 9: 12000 Hz\n 10: 11025 Hz\n 11: 8000 Hz\n 12: 7350 Hz\n 13: Reserved\n 14: Reserved\n 15: frequency is written explictly\n Channel Configurations\n These are the channel configurations:\n 0: Defined in AOT Specifc Config\n 1: 1 channel: front-center\n 2: 2 channels: front-left, front-right\n */\n // audioObjectType = profile => profile, the MPEG-4 Audio Object Type minus 1\n config[0] = adtsObjectType << 3;\n // samplingFrequencyIndex\n config[0] |= (adtsSamplingIndex & 0x0e) >> 1;\n config[1] |= (adtsSamplingIndex & 0x01) << 7;\n // channelConfiguration\n config[1] |= adtsChannelConfig << 3;\n if (adtsObjectType === 5) {\n // adtsExtensionSamplingIndex\n config[1] |= (adtsExtensionSamplingIndex & 0x0e) >> 1;\n config[2] = (adtsExtensionSamplingIndex & 0x01) << 7;\n // adtsObjectType (force to 2, chrome is checking that object type is less than 5 ???\n // https://chromium.googlesource.com/chromium/src.git/+/master/media/formats/mp4/aac.cc\n config[2] |= 2 << 2;\n config[3] = 0;\n }\n return {\n config,\n samplerate: adtsSamplingRates[adtsSamplingIndex],\n channelCount: adtsChannelConfig,\n codec: 'mp4a.40.' + adtsObjectType,\n manifestCodec\n };\n}\nfunction isHeaderPattern$1(data, offset) {\n return data[offset] === 0xff && (data[offset + 1] & 0xf6) === 0xf0;\n}\nfunction getHeaderLength(data, offset) {\n return data[offset + 1] & 0x01 ? 
7 : 9;\n}\nfunction getFullFrameLength(data, offset) {\n return (data[offset + 3] & 0x03) << 11 | data[offset + 4] << 3 | (data[offset + 5] & 0xe0) >>> 5;\n}\nfunction canGetFrameLength(data, offset) {\n return offset + 5 < data.length;\n}\nfunction isHeader$1(data, offset) {\n // Look for ADTS header | 1111 1111 | 1111 X00X | where X can be either 0 or 1\n // Layer bits (position 14 and 15) in header should be always 0 for ADTS\n // More info https://wiki.multimedia.cx/index.php?title=ADTS\n return offset + 1 < data.length && isHeaderPattern$1(data, offset);\n}\nfunction canParse$1(data, offset) {\n return canGetFrameLength(data, offset) && isHeaderPattern$1(data, offset) && getFullFrameLength(data, offset) <= data.length - offset;\n}\nfunction probe$1(data, offset) {\n // same as isHeader but we also check that ADTS frame follows last ADTS frame\n // or end of data is reached\n if (isHeader$1(data, offset)) {\n // ADTS header Length\n const headerLength = getHeaderLength(data, offset);\n if (offset + headerLength >= data.length) {\n return false;\n }\n // ADTS frame Length\n const frameLength = getFullFrameLength(data, offset);\n if (frameLength <= headerLength) {\n return false;\n }\n const newOffset = offset + frameLength;\n return newOffset === data.length || isHeader$1(data, newOffset);\n }\n return false;\n}\nfunction initTrackConfig(track, observer, data, offset, audioCodec) {\n if (!track.samplerate) {\n const config = getAudioConfig(observer, data, offset, audioCodec);\n if (!config) {\n return;\n }\n track.config = config.config;\n track.samplerate = config.samplerate;\n track.channelCount = config.channelCount;\n track.codec = config.codec;\n track.manifestCodec = config.manifestCodec;\n logger.log(`parsed codec:${track.codec}, rate:${config.samplerate}, channels:${config.channelCount}`);\n }\n}\nfunction getFrameDuration(samplerate) {\n return 1024 * 90000 / samplerate;\n}\nfunction parseFrameHeader(data, offset) {\n // The protection skip bit tells us if we have 2 bytes of CRC data at the end of the ADTS header\n const headerLength = getHeaderLength(data, offset);\n if (offset + headerLength <= data.length) {\n // retrieve frame size\n const frameLength = getFullFrameLength(data, offset) - headerLength;\n if (frameLength > 0) {\n // logger.log(`AAC frame, offset/length/total/pts:${offset+headerLength}/${frameLength}/${data.byteLength}`);\n return {\n headerLength,\n frameLength\n };\n }\n }\n}\nfunction appendFrame$2(track, data, offset, pts, frameIndex) {\n const frameDuration = getFrameDuration(track.samplerate);\n const stamp = pts + frameIndex * frameDuration;\n const header = parseFrameHeader(data, offset);\n let unit;\n if (header) {\n const {\n frameLength,\n headerLength\n } = header;\n const _length = headerLength + frameLength;\n const missing = Math.max(0, offset + _length - data.length);\n // logger.log(`AAC frame ${frameIndex}, pts:${stamp} length@offset/total: ${frameLength}@${offset+headerLength}/${data.byteLength} missing: ${missing}`);\n if (missing) {\n unit = new Uint8Array(_length - headerLength);\n unit.set(data.subarray(offset + headerLength, data.length), 0);\n } else {\n unit = data.subarray(offset + headerLength, offset + _length);\n }\n const _sample = {\n unit,\n pts: stamp\n };\n if (!missing) {\n track.samples.push(_sample);\n }\n return {\n sample: _sample,\n length: _length,\n missing\n };\n }\n // overflow incomplete header\n const length = data.length - offset;\n unit = new Uint8Array(length);\n unit.set(data.subarray(offset, data.length), 
0);\n const sample = {\n unit,\n pts: stamp\n };\n return {\n sample,\n length,\n missing: -1\n };\n}\n\n/**\n * MPEG parser helper\n */\n\nlet chromeVersion$1 = null;\nconst BitratesMap = [32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160];\nconst SamplingRateMap = [44100, 48000, 32000, 22050, 24000, 16000, 11025, 12000, 8000];\nconst SamplesCoefficients = [\n// MPEG 2.5\n[0,\n// Reserved\n72,\n// Layer3\n144,\n// Layer2\n12 // Layer1\n],\n// Reserved\n[0,\n// Reserved\n0,\n// Layer3\n0,\n// Layer2\n0 // Layer1\n],\n// MPEG 2\n[0,\n// Reserved\n72,\n// Layer3\n144,\n// Layer2\n12 // Layer1\n],\n// MPEG 1\n[0,\n// Reserved\n144,\n// Layer3\n144,\n// Layer2\n12 // Layer1\n]];\nconst BytesInSlot = [0,\n// Reserved\n1,\n// Layer3\n1,\n// Layer2\n4 // Layer1\n];\nfunction appendFrame$1(track, data, offset, pts, frameIndex) {\n // Using http://www.datavoyage.com/mpgscript/mpeghdr.htm as a reference\n if (offset + 24 > data.length) {\n return;\n }\n const header = parseHeader(data, offset);\n if (header && offset + header.frameLength <= data.length) {\n const frameDuration = header.samplesPerFrame * 90000 / header.sampleRate;\n const stamp = pts + frameIndex * frameDuration;\n const sample = {\n unit: data.subarray(offset, offset + header.frameLength),\n pts: stamp,\n dts: stamp\n };\n track.config = [];\n track.channelCount = header.channelCount;\n track.samplerate = header.sampleRate;\n track.samples.push(sample);\n return {\n sample,\n length: header.frameLength,\n missing: 0\n };\n }\n}\nfunction parseHeader(data, offset) {\n const mpegVersion = data[offset + 1] >> 3 & 3;\n const mpegLayer = data[offset + 1] >> 1 & 3;\n const bitRateIndex = data[offset + 2] >> 4 & 15;\n const sampleRateIndex = data[offset + 2] >> 2 & 3;\n if (mpegVersion !== 1 && bitRateIndex !== 0 && bitRateIndex !== 15 && sampleRateIndex !== 3) {\n const paddingBit = data[offset + 2] >> 1 & 1;\n const channelMode = data[offset + 3] >> 6;\n const columnInBitrates = mpegVersion === 3 ? 3 - mpegLayer : mpegLayer === 3 ? 3 : 4;\n const bitRate = BitratesMap[columnInBitrates * 14 + bitRateIndex - 1] * 1000;\n const columnInSampleRates = mpegVersion === 3 ? 0 : mpegVersion === 2 ? 1 : 2;\n const sampleRate = SamplingRateMap[columnInSampleRates * 3 + sampleRateIndex];\n const channelCount = channelMode === 3 ? 1 : 2; // If bits of channel mode are `11` then it is a single channel (Mono)\n const sampleCoefficient = SamplesCoefficients[mpegVersion][mpegLayer];\n const bytesInSlot = BytesInSlot[mpegLayer];\n const samplesPerFrame = sampleCoefficient * 8 * bytesInSlot;\n const frameLength = Math.floor(sampleCoefficient * bitRate / sampleRate + paddingBit) * bytesInSlot;\n if (chromeVersion$1 === null) {\n const userAgent = navigator.userAgent || '';\n const result = userAgent.match(/Chrome\\/(\\d+)/i);\n chromeVersion$1 = result ? 
parseInt(result[1]) : 0;\n }\n const needChromeFix = !!chromeVersion$1 && chromeVersion$1 <= 87;\n if (needChromeFix && mpegLayer === 2 && bitRate >= 224000 && channelMode === 0) {\n // Work around bug in Chromium by setting channelMode to dual-channel (01) instead of stereo (00)\n data[offset + 3] = data[offset + 3] | 0x80;\n }\n return {\n sampleRate,\n channelCount,\n frameLength,\n samplesPerFrame\n };\n }\n}\nfunction isHeaderPattern(data, offset) {\n return data[offset] === 0xff && (data[offset + 1] & 0xe0) === 0xe0 && (data[offset + 1] & 0x06) !== 0x00;\n}\nfunction isHeader(data, offset) {\n // Look for MPEG header | 1111 1111 | 111X XYZX | where X can be either 0 or 1 and Y or Z should be 1\n // Layer bits (position 14 and 15) in header should be always different from 0 (Layer I or Layer II or Layer III)\n // More info http://www.mp3-tech.org/programmer/frame_header.html\n return offset + 1 < data.length && isHeaderPattern(data, offset);\n}\nfunction canParse(data, offset) {\n const headerSize = 4;\n return isHeaderPattern(data, offset) && headerSize <= data.length - offset;\n}\nfunction probe(data, offset) {\n // same as isHeader but we also check that MPEG frame follows last MPEG frame\n // or end of data is reached\n if (offset + 1 < data.length && isHeaderPattern(data, offset)) {\n // MPEG header Length\n const headerLength = 4;\n // MPEG frame Length\n const header = parseHeader(data, offset);\n let frameLength = headerLength;\n if (header != null && header.frameLength) {\n frameLength = header.frameLength;\n }\n const newOffset = offset + frameLength;\n return newOffset === data.length || isHeader(data, newOffset);\n }\n return false;\n}\n\n/**\n * AAC demuxer\n */\nclass AACDemuxer extends BaseAudioDemuxer {\n constructor(observer, config) {\n super();\n this.observer = void 0;\n this.config = void 0;\n this.observer = observer;\n this.config = config;\n }\n resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration) {\n super.resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration);\n this._audioTrack = {\n container: 'audio/adts',\n type: 'audio',\n id: 2,\n pid: -1,\n sequenceNumber: 0,\n segmentCodec: 'aac',\n samples: [],\n manifestCodec: audioCodec,\n duration: trackDuration,\n inputTimeScale: 90000,\n dropped: 0\n };\n }\n\n // Source for probe info - https://wiki.multimedia.cx/index.php?title=ADTS\n static probe(data) {\n if (!data) {\n return false;\n }\n\n // Check for the ADTS sync word\n // Look for ADTS header | 1111 1111 | 1111 X00X | where X can be either 0 or 1\n // Layer bits (position 14 and 15) in header should be always 0 for ADTS\n // More info https://wiki.multimedia.cx/index.php?title=ADTS\n const id3Data = getID3Data(data, 0);\n let offset = (id3Data == null ? 
void 0 : id3Data.length) || 0;\n if (probe(data, offset)) {\n return false;\n }\n for (let length = data.length; offset < length; offset++) {\n if (probe$1(data, offset)) {\n logger.log('ADTS sync word found !');\n return true;\n }\n }\n return false;\n }\n canParse(data, offset) {\n return canParse$1(data, offset);\n }\n appendFrame(track, data, offset) {\n initTrackConfig(track, this.observer, data, offset, track.manifestCodec);\n const frame = appendFrame$2(track, data, offset, this.basePTS, this.frameIndex);\n if (frame && frame.missing === 0) {\n return frame;\n }\n }\n}\n\nconst emsgSchemePattern = /\\/emsg[-/]ID3/i;\nclass MP4Demuxer {\n constructor(observer, config) {\n this.remainderData = null;\n this.timeOffset = 0;\n this.config = void 0;\n this.videoTrack = void 0;\n this.audioTrack = void 0;\n this.id3Track = void 0;\n this.txtTrack = void 0;\n this.config = config;\n }\n resetTimeStamp() {}\n resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration) {\n const videoTrack = this.videoTrack = dummyTrack('video', 1);\n const audioTrack = this.audioTrack = dummyTrack('audio', 1);\n const captionTrack = this.txtTrack = dummyTrack('text', 1);\n this.id3Track = dummyTrack('id3', 1);\n this.timeOffset = 0;\n if (!(initSegment != null && initSegment.byteLength)) {\n return;\n }\n const initData = parseInitSegment(initSegment);\n if (initData.video) {\n const {\n id,\n timescale,\n codec\n } = initData.video;\n videoTrack.id = id;\n videoTrack.timescale = captionTrack.timescale = timescale;\n videoTrack.codec = codec;\n }\n if (initData.audio) {\n const {\n id,\n timescale,\n codec\n } = initData.audio;\n audioTrack.id = id;\n audioTrack.timescale = timescale;\n audioTrack.codec = codec;\n }\n captionTrack.id = RemuxerTrackIdConfig.text;\n videoTrack.sampleDuration = 0;\n videoTrack.duration = audioTrack.duration = trackDuration;\n }\n resetContiguity() {\n this.remainderData = null;\n }\n static probe(data) {\n return hasMoofData(data);\n }\n demux(data, timeOffset) {\n this.timeOffset = timeOffset;\n // Load all data into the avc track. The CMAF remuxer will look for the data in the samples object; the rest of the fields do not matter\n let videoSamples = data;\n const videoTrack = this.videoTrack;\n const textTrack = this.txtTrack;\n if (this.config.progressive) {\n // Split the bytestream into two ranges: one encompassing all data up until the start of the last moof, and everything else.\n // This is done to guarantee that we're sending valid data to MSE - when demuxing progressively, we have no guarantee\n // that the fetch loader gives us flush moof+mdat pairs. 
If we push jagged data to MSE, it will throw an exception.\n if (this.remainderData) {\n videoSamples = appendUint8Array(this.remainderData, data);\n }\n const segmentedData = segmentValidRange(videoSamples);\n this.remainderData = segmentedData.remainder;\n videoTrack.samples = segmentedData.valid || new Uint8Array();\n } else {\n videoTrack.samples = videoSamples;\n }\n const id3Track = this.extractID3Track(videoTrack, timeOffset);\n textTrack.samples = parseSamples(timeOffset, videoTrack);\n return {\n videoTrack,\n audioTrack: this.audioTrack,\n id3Track,\n textTrack: this.txtTrack\n };\n }\n flush() {\n const timeOffset = this.timeOffset;\n const videoTrack = this.videoTrack;\n const textTrack = this.txtTrack;\n videoTrack.samples = this.remainderData || new Uint8Array();\n this.remainderData = null;\n const id3Track = this.extractID3Track(videoTrack, this.timeOffset);\n textTrack.samples = parseSamples(timeOffset, videoTrack);\n return {\n videoTrack,\n audioTrack: dummyTrack(),\n id3Track,\n textTrack: dummyTrack()\n };\n }\n extractID3Track(videoTrack, timeOffset) {\n const id3Track = this.id3Track;\n if (videoTrack.samples.length) {\n const emsgs = findBox(videoTrack.samples, ['emsg']);\n if (emsgs) {\n emsgs.forEach(data => {\n const emsgInfo = parseEmsg(data);\n if (emsgSchemePattern.test(emsgInfo.schemeIdUri)) {\n const pts = isFiniteNumber(emsgInfo.presentationTime) ? emsgInfo.presentationTime / emsgInfo.timeScale : timeOffset + emsgInfo.presentationTimeDelta / emsgInfo.timeScale;\n let duration = emsgInfo.eventDuration === 0xffffffff ? Number.POSITIVE_INFINITY : emsgInfo.eventDuration / emsgInfo.timeScale;\n // Safari takes anything <= 0.001 seconds and maps it to Infinity\n if (duration <= 0.001) {\n duration = Number.POSITIVE_INFINITY;\n }\n const payload = emsgInfo.payload;\n id3Track.samples.push({\n data: payload,\n len: payload.byteLength,\n dts: pts,\n pts: pts,\n type: MetadataSchema.emsg,\n duration: duration\n });\n }\n });\n }\n }\n return id3Track;\n }\n demuxSampleAes(data, keyData, timeOffset) {\n return Promise.reject(new Error('The MP4 demuxer does not support SAMPLE-AES decryption'));\n }\n destroy() {}\n}\n\nconst getAudioBSID = (data, offset) => {\n // check the bsid to confirm ac-3 | ec-3\n let bsid = 0;\n let numBits = 5;\n offset += numBits;\n const temp = new Uint32Array(1); // unsigned 32 bit for temporary storage\n const mask = new Uint32Array(1); // unsigned 32 bit mask value\n const byte = new Uint8Array(1); // unsigned 8 bit for temporary storage\n while (numBits > 0) {\n byte[0] = data[offset];\n // read remaining bits, upto 8 bits at a time\n const bits = Math.min(numBits, 8);\n const shift = 8 - bits;\n mask[0] = 0xff000000 >>> 24 + shift << shift;\n temp[0] = (byte[0] & mask[0]) >> shift;\n bsid = !bsid ? 
temp[0] : bsid << bits | temp[0];\n offset += 1;\n numBits -= bits;\n }\n return bsid;\n};\n\nclass AC3Demuxer extends BaseAudioDemuxer {\n constructor(observer) {\n super();\n this.observer = void 0;\n this.observer = observer;\n }\n resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration) {\n super.resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration);\n this._audioTrack = {\n container: 'audio/ac-3',\n type: 'audio',\n id: 2,\n pid: -1,\n sequenceNumber: 0,\n segmentCodec: 'ac3',\n samples: [],\n manifestCodec: audioCodec,\n duration: trackDuration,\n inputTimeScale: 90000,\n dropped: 0\n };\n }\n canParse(data, offset) {\n return offset + 64 < data.length;\n }\n appendFrame(track, data, offset) {\n const frameLength = appendFrame(track, data, offset, this.basePTS, this.frameIndex);\n if (frameLength !== -1) {\n const sample = track.samples[track.samples.length - 1];\n return {\n sample,\n length: frameLength,\n missing: 0\n };\n }\n }\n static probe(data) {\n if (!data) {\n return false;\n }\n const id3Data = getID3Data(data, 0);\n if (!id3Data) {\n return false;\n }\n\n // look for the ac-3 sync bytes\n const offset = id3Data.length;\n if (data[offset] === 0x0b && data[offset + 1] === 0x77 && getTimeStamp(id3Data) !== undefined &&\n // check the bsid to confirm ac-3\n getAudioBSID(data, offset) < 16) {\n return true;\n }\n return false;\n }\n}\nfunction appendFrame(track, data, start, pts, frameIndex) {\n if (start + 8 > data.length) {\n return -1; // not enough bytes left\n }\n if (data[start] !== 0x0b || data[start + 1] !== 0x77) {\n return -1; // invalid magic\n }\n\n // get sample rate\n const samplingRateCode = data[start + 4] >> 6;\n if (samplingRateCode >= 3) {\n return -1; // invalid sampling rate\n }\n const samplingRateMap = [48000, 44100, 32000];\n const sampleRate = samplingRateMap[samplingRateCode];\n\n // get frame size\n const frameSizeCode = data[start + 4] & 0x3f;\n const frameSizeMap = [64, 69, 96, 64, 70, 96, 80, 87, 120, 80, 88, 120, 96, 104, 144, 96, 105, 144, 112, 121, 168, 112, 122, 168, 128, 139, 192, 128, 140, 192, 160, 174, 240, 160, 175, 240, 192, 208, 288, 192, 209, 288, 224, 243, 336, 224, 244, 336, 256, 278, 384, 256, 279, 384, 320, 348, 480, 320, 349, 480, 384, 417, 576, 384, 418, 576, 448, 487, 672, 448, 488, 672, 512, 557, 768, 512, 558, 768, 640, 696, 960, 640, 697, 960, 768, 835, 1152, 768, 836, 1152, 896, 975, 1344, 896, 976, 1344, 1024, 1114, 1536, 1024, 1115, 1536, 1152, 1253, 1728, 1152, 1254, 1728, 1280, 1393, 1920, 1280, 1394, 1920];\n const frameLength = frameSizeMap[frameSizeCode * 3 + samplingRateCode] * 2;\n if (start + frameLength > data.length) {\n return -1;\n }\n\n // get channel count\n const channelMode = data[start + 6] >> 5;\n let skipCount = 0;\n if (channelMode === 2) {\n skipCount += 2;\n } else {\n if (channelMode & 1 && channelMode !== 1) {\n skipCount += 2;\n }\n if (channelMode & 4) {\n skipCount += 2;\n }\n }\n const lfeon = (data[start + 6] << 8 | data[start + 7]) >> 12 - skipCount & 1;\n const channelsMap = [2, 1, 2, 3, 3, 4, 4, 5];\n const channelCount = channelsMap[channelMode] + lfeon;\n\n // build dac3 box\n const bsid = data[start + 5] >> 3;\n const bsmod = data[start + 5] & 7;\n const config = new Uint8Array([samplingRateCode << 6 | bsid << 1 | bsmod >> 2, (bsmod & 3) << 6 | channelMode << 3 | lfeon << 2 | frameSizeCode >> 4, frameSizeCode << 4 & 0xe0]);\n const frameDuration = 1536 / sampleRate * 90000;\n const stamp = pts + frameIndex * frameDuration;\n const unit = data.subarray(start, 
start + frameLength);\n track.config = config;\n track.channelCount = channelCount;\n track.samplerate = sampleRate;\n track.samples.push({\n unit,\n pts: stamp\n });\n return frameLength;\n}\n\nclass BaseVideoParser {\n constructor() {\n this.VideoSample = null;\n }\n createVideoSample(key, pts, dts, debug) {\n return {\n key,\n frame: false,\n pts,\n dts,\n units: [],\n debug,\n length: 0\n };\n }\n getLastNalUnit(samples) {\n var _VideoSample;\n let VideoSample = this.VideoSample;\n let lastUnit;\n // try to fallback to previous sample if current one is empty\n if (!VideoSample || VideoSample.units.length === 0) {\n VideoSample = samples[samples.length - 1];\n }\n if ((_VideoSample = VideoSample) != null && _VideoSample.units) {\n const units = VideoSample.units;\n lastUnit = units[units.length - 1];\n }\n return lastUnit;\n }\n pushAccessUnit(VideoSample, videoTrack) {\n if (VideoSample.units.length && VideoSample.frame) {\n // if sample does not have PTS/DTS, patch with last sample PTS/DTS\n if (VideoSample.pts === undefined) {\n const samples = videoTrack.samples;\n const nbSamples = samples.length;\n if (nbSamples) {\n const lastSample = samples[nbSamples - 1];\n VideoSample.pts = lastSample.pts;\n VideoSample.dts = lastSample.dts;\n } else {\n // dropping samples, no timestamp found\n videoTrack.dropped++;\n return;\n }\n }\n videoTrack.samples.push(VideoSample);\n }\n if (VideoSample.debug.length) {\n logger.log(VideoSample.pts + '/' + VideoSample.dts + ':' + VideoSample.debug);\n }\n }\n}\n\n/**\n * Parser for exponential Golomb codes, a variable-bitwidth number encoding scheme used by h264.\n */\n\nclass ExpGolomb {\n constructor(data) {\n this.data = void 0;\n this.bytesAvailable = void 0;\n this.word = void 0;\n this.bitsAvailable = void 0;\n this.data = data;\n // the number of bytes left to examine in this.data\n this.bytesAvailable = data.byteLength;\n // the current word being examined\n this.word = 0; // :uint\n // the number of bits left to examine in the current word\n this.bitsAvailable = 0; // :uint\n }\n\n // ():void\n loadWord() {\n const data = this.data;\n const bytesAvailable = this.bytesAvailable;\n const position = data.byteLength - bytesAvailable;\n const workingBytes = new Uint8Array(4);\n const availableBytes = Math.min(4, bytesAvailable);\n if (availableBytes === 0) {\n throw new Error('no bytes available');\n }\n workingBytes.set(data.subarray(position, position + availableBytes));\n this.word = new DataView(workingBytes.buffer).getUint32(0);\n // track the amount of this.data that has been processed\n this.bitsAvailable = availableBytes * 8;\n this.bytesAvailable -= availableBytes;\n }\n\n // (count:int):void\n skipBits(count) {\n let skipBytes; // :int\n count = Math.min(count, this.bytesAvailable * 8 + this.bitsAvailable);\n if (this.bitsAvailable > count) {\n this.word <<= count;\n this.bitsAvailable -= count;\n } else {\n count -= this.bitsAvailable;\n skipBytes = count >> 3;\n count -= skipBytes << 3;\n this.bytesAvailable -= skipBytes;\n this.loadWord();\n this.word <<= count;\n this.bitsAvailable -= count;\n }\n }\n\n // (size:int):uint\n readBits(size) {\n let bits = Math.min(this.bitsAvailable, size); // :uint\n const valu = this.word >>> 32 - bits; // :uint\n if (size > 32) {\n logger.error('Cannot read more than 32 bits at a time');\n }\n this.bitsAvailable -= bits;\n if (this.bitsAvailable > 0) {\n this.word <<= bits;\n } else if (this.bytesAvailable > 0) {\n this.loadWord();\n } else {\n throw new Error('no bits available');\n }\n bits = size 
- bits;\n if (bits > 0 && this.bitsAvailable) {\n return valu << bits | this.readBits(bits);\n } else {\n return valu;\n }\n }\n\n // ():uint\n skipLZ() {\n let leadingZeroCount; // :uint\n for (leadingZeroCount = 0; leadingZeroCount < this.bitsAvailable; ++leadingZeroCount) {\n if ((this.word & 0x80000000 >>> leadingZeroCount) !== 0) {\n // the first bit of working word is 1\n this.word <<= leadingZeroCount;\n this.bitsAvailable -= leadingZeroCount;\n return leadingZeroCount;\n }\n }\n // we exhausted word and still have not found a 1\n this.loadWord();\n return leadingZeroCount + this.skipLZ();\n }\n\n // ():void\n skipUEG() {\n this.skipBits(1 + this.skipLZ());\n }\n\n // ():void\n skipEG() {\n this.skipBits(1 + this.skipLZ());\n }\n\n // ():uint\n readUEG() {\n const clz = this.skipLZ(); // :uint\n return this.readBits(clz + 1) - 1;\n }\n\n // ():int\n readEG() {\n const valu = this.readUEG(); // :int\n if (0x01 & valu) {\n // the number is odd if the low order bit is set\n return 1 + valu >>> 1; // add 1 to make it even, and divide by 2\n } else {\n return -1 * (valu >>> 1); // divide by two then make it negative\n }\n }\n\n // Some convenience functions\n // :Boolean\n readBoolean() {\n return this.readBits(1) === 1;\n }\n\n // ():int\n readUByte() {\n return this.readBits(8);\n }\n\n // ():int\n readUShort() {\n return this.readBits(16);\n }\n\n // ():int\n readUInt() {\n return this.readBits(32);\n }\n\n /**\n * Advance the ExpGolomb decoder past a scaling list. The scaling\n * list is optionally transmitted as part of a sequence parameter\n * set and is not relevant to transmuxing.\n * @param count the number of entries in this scaling list\n * @see Recommendation ITU-T H.264, Section 7.3.2.1.1.1\n */\n skipScalingList(count) {\n let lastScale = 8;\n let nextScale = 8;\n let deltaScale;\n for (let j = 0; j < count; j++) {\n if (nextScale !== 0) {\n deltaScale = this.readEG();\n nextScale = (lastScale + deltaScale + 256) % 256;\n }\n lastScale = nextScale === 0 ? lastScale : nextScale;\n }\n }\n\n /**\n * Read a sequence parameter set and return some interesting video\n * properties. 
A sequence parameter set is the H264 metadata that\n * describes the properties of upcoming video frames.\n * @returns an object with configuration parsed from the\n * sequence parameter set, including the dimensions of the\n * associated video frames.\n */\n readSPS() {\n let frameCropLeftOffset = 0;\n let frameCropRightOffset = 0;\n let frameCropTopOffset = 0;\n let frameCropBottomOffset = 0;\n let numRefFramesInPicOrderCntCycle;\n let scalingListCount;\n let i;\n const readUByte = this.readUByte.bind(this);\n const readBits = this.readBits.bind(this);\n const readUEG = this.readUEG.bind(this);\n const readBoolean = this.readBoolean.bind(this);\n const skipBits = this.skipBits.bind(this);\n const skipEG = this.skipEG.bind(this);\n const skipUEG = this.skipUEG.bind(this);\n const skipScalingList = this.skipScalingList.bind(this);\n readUByte();\n const profileIdc = readUByte(); // profile_idc\n readBits(5); // profileCompat constraint_set[0-4]_flag, u(5)\n skipBits(3); // reserved_zero_3bits u(3),\n readUByte(); // level_idc u(8)\n skipUEG(); // seq_parameter_set_id\n // some profiles have more optional data we don't need\n if (profileIdc === 100 || profileIdc === 110 || profileIdc === 122 || profileIdc === 244 || profileIdc === 44 || profileIdc === 83 || profileIdc === 86 || profileIdc === 118 || profileIdc === 128) {\n const chromaFormatIdc = readUEG();\n if (chromaFormatIdc === 3) {\n skipBits(1);\n } // separate_colour_plane_flag\n\n skipUEG(); // bit_depth_luma_minus8\n skipUEG(); // bit_depth_chroma_minus8\n skipBits(1); // qpprime_y_zero_transform_bypass_flag\n if (readBoolean()) {\n // seq_scaling_matrix_present_flag\n scalingListCount = chromaFormatIdc !== 3 ? 8 : 12;\n for (i = 0; i < scalingListCount; i++) {\n if (readBoolean()) {\n // seq_scaling_list_present_flag[ i ]\n if (i < 6) {\n skipScalingList(16);\n } else {\n skipScalingList(64);\n }\n }\n }\n }\n }\n skipUEG(); // log2_max_frame_num_minus4\n const picOrderCntType = readUEG();\n if (picOrderCntType === 0) {\n readUEG(); // log2_max_pic_order_cnt_lsb_minus4\n } else if (picOrderCntType === 1) {\n skipBits(1); // delta_pic_order_always_zero_flag\n skipEG(); // offset_for_non_ref_pic\n skipEG(); // offset_for_top_to_bottom_field\n numRefFramesInPicOrderCntCycle = readUEG();\n for (i = 0; i < numRefFramesInPicOrderCntCycle; i++) {\n skipEG();\n } // offset_for_ref_frame[ i ]\n }\n skipUEG(); // max_num_ref_frames\n skipBits(1); // gaps_in_frame_num_value_allowed_flag\n const picWidthInMbsMinus1 = readUEG();\n const picHeightInMapUnitsMinus1 = readUEG();\n const frameMbsOnlyFlag = readBits(1);\n if (frameMbsOnlyFlag === 0) {\n skipBits(1);\n } // mb_adaptive_frame_field_flag\n\n skipBits(1); // direct_8x8_inference_flag\n if (readBoolean()) {\n // frame_cropping_flag\n frameCropLeftOffset = readUEG();\n frameCropRightOffset = readUEG();\n frameCropTopOffset = readUEG();\n frameCropBottomOffset = readUEG();\n }\n let pixelRatio = [1, 1];\n if (readBoolean()) {\n // vui_parameters_present_flag\n if (readBoolean()) {\n // aspect_ratio_info_present_flag\n const aspectRatioIdc = readUByte();\n switch (aspectRatioIdc) {\n case 1:\n pixelRatio = [1, 1];\n break;\n case 2:\n pixelRatio = [12, 11];\n break;\n case 3:\n pixelRatio = [10, 11];\n break;\n case 4:\n pixelRatio = [16, 11];\n break;\n case 5:\n pixelRatio = [40, 33];\n break;\n case 6:\n pixelRatio = [24, 11];\n break;\n case 7:\n pixelRatio = [20, 11];\n break;\n case 8:\n pixelRatio = [32, 11];\n break;\n case 9:\n pixelRatio = [80, 33];\n break;\n case 10:\n 
pixelRatio = [18, 11];\n break;\n case 11:\n pixelRatio = [15, 11];\n break;\n case 12:\n pixelRatio = [64, 33];\n break;\n case 13:\n pixelRatio = [160, 99];\n break;\n case 14:\n pixelRatio = [4, 3];\n break;\n case 15:\n pixelRatio = [3, 2];\n break;\n case 16:\n pixelRatio = [2, 1];\n break;\n case 255:\n {\n pixelRatio = [readUByte() << 8 | readUByte(), readUByte() << 8 | readUByte()];\n break;\n }\n }\n }\n }\n return {\n width: Math.ceil((picWidthInMbsMinus1 + 1) * 16 - frameCropLeftOffset * 2 - frameCropRightOffset * 2),\n height: (2 - frameMbsOnlyFlag) * (picHeightInMapUnitsMinus1 + 1) * 16 - (frameMbsOnlyFlag ? 2 : 4) * (frameCropTopOffset + frameCropBottomOffset),\n pixelRatio: pixelRatio\n };\n }\n readSliceType() {\n // skip NALu type\n this.readUByte();\n // discard first_mb_in_slice\n this.readUEG();\n // return slice_type\n return this.readUEG();\n }\n}\n\nclass AvcVideoParser extends BaseVideoParser {\n parseAVCPES(track, textTrack, pes, last, duration) {\n const units = this.parseAVCNALu(track, pes.data);\n let VideoSample = this.VideoSample;\n let push;\n let spsfound = false;\n // free pes.data to save up some memory\n pes.data = null;\n\n // if new NAL units found and last sample still there, let's push ...\n // this helps parsing streams with missing AUD (only do this if AUD never found)\n if (VideoSample && units.length && !track.audFound) {\n this.pushAccessUnit(VideoSample, track);\n VideoSample = this.VideoSample = this.createVideoSample(false, pes.pts, pes.dts, '');\n }\n units.forEach(unit => {\n var _VideoSample2;\n switch (unit.type) {\n // NDR\n case 1:\n {\n let iskey = false;\n push = true;\n const data = unit.data;\n // only check slice type to detect KF in case SPS found in same packet (any keyframe is preceded by SPS ...)\n if (spsfound && data.length > 4) {\n // retrieve slice type by parsing beginning of NAL unit (follow H264 spec, slice_header definition) to detect keyframe embedded in NDR\n const sliceType = new ExpGolomb(data).readSliceType();\n // 2 : I slice, 4 : SI slice, 7 : I slice, 9: SI slice\n // SI slice : A slice that is coded using intra prediction only and using quantisation of the prediction samples.\n // An SI slice can be coded such that its decoded samples can be constructed identically to an SP slice.\n // I slice: A slice that is not an SI slice that is decoded using intra prediction only.\n // if (sliceType === 2 || sliceType === 7) {\n if (sliceType === 2 || sliceType === 4 || sliceType === 7 || sliceType === 9) {\n iskey = true;\n }\n }\n if (iskey) {\n var _VideoSample;\n // if we have non-keyframe data already, that cannot belong to the same frame as a keyframe, so force a push\n if ((_VideoSample = VideoSample) != null && _VideoSample.frame && !VideoSample.key) {\n this.pushAccessUnit(VideoSample, track);\n VideoSample = this.VideoSample = null;\n }\n }\n if (!VideoSample) {\n VideoSample = this.VideoSample = this.createVideoSample(true, pes.pts, pes.dts, '');\n }\n VideoSample.frame = true;\n VideoSample.key = iskey;\n break;\n // IDR\n }\n case 5:\n push = true;\n // handle PES not starting with AUD\n // if we have frame data already, that cannot belong to the same frame, so force a push\n if ((_VideoSample2 = VideoSample) != null && _VideoSample2.frame && !VideoSample.key) {\n this.pushAccessUnit(VideoSample, track);\n VideoSample = this.VideoSample = null;\n }\n if (!VideoSample) {\n VideoSample = this.VideoSample = this.createVideoSample(true, pes.pts, pes.dts, '');\n }\n VideoSample.key = true;\n VideoSample.frame = 
true;\n break;\n // SEI\n case 6:\n {\n push = true;\n parseSEIMessageFromNALu(unit.data, 1, pes.pts, textTrack.samples);\n break;\n // SPS\n }\n case 7:\n {\n var _track$pixelRatio, _track$pixelRatio2;\n push = true;\n spsfound = true;\n const sps = unit.data;\n const expGolombDecoder = new ExpGolomb(sps);\n const config = expGolombDecoder.readSPS();\n if (!track.sps || track.width !== config.width || track.height !== config.height || ((_track$pixelRatio = track.pixelRatio) == null ? void 0 : _track$pixelRatio[0]) !== config.pixelRatio[0] || ((_track$pixelRatio2 = track.pixelRatio) == null ? void 0 : _track$pixelRatio2[1]) !== config.pixelRatio[1]) {\n track.width = config.width;\n track.height = config.height;\n track.pixelRatio = config.pixelRatio;\n track.sps = [sps];\n track.duration = duration;\n const codecarray = sps.subarray(1, 4);\n let codecstring = 'avc1.';\n for (let i = 0; i < 3; i++) {\n let h = codecarray[i].toString(16);\n if (h.length < 2) {\n h = '0' + h;\n }\n codecstring += h;\n }\n track.codec = codecstring;\n }\n break;\n }\n // PPS\n case 8:\n push = true;\n track.pps = [unit.data];\n break;\n // AUD\n case 9:\n push = true;\n track.audFound = true;\n if (VideoSample) {\n this.pushAccessUnit(VideoSample, track);\n }\n VideoSample = this.VideoSample = this.createVideoSample(false, pes.pts, pes.dts, '');\n break;\n // Filler Data\n case 12:\n push = true;\n break;\n default:\n push = false;\n if (VideoSample) {\n VideoSample.debug += 'unknown NAL ' + unit.type + ' ';\n }\n break;\n }\n if (VideoSample && push) {\n const units = VideoSample.units;\n units.push(unit);\n }\n });\n // if last PES packet, push samples\n if (last && VideoSample) {\n this.pushAccessUnit(VideoSample, track);\n this.VideoSample = null;\n }\n }\n parseAVCNALu(track, array) {\n const len = array.byteLength;\n let state = track.naluState || 0;\n const lastState = state;\n const units = [];\n let i = 0;\n let value;\n let overflow;\n let unitType;\n let lastUnitStart = -1;\n let lastUnitType = 0;\n // logger.log('PES:' + Hex.hexDump(array));\n\n if (state === -1) {\n // special use case where we found 3 or 4-byte start codes exactly at the end of previous PES packet\n lastUnitStart = 0;\n // NALu type is value read from offset 0\n lastUnitType = array[0] & 0x1f;\n state = 0;\n i = 1;\n }\n while (i < len) {\n value = array[i++];\n // optimization. state 0 and 1 are the predominant case. let's handle them outside of the switch/case\n if (!state) {\n state = value ? 0 : 1;\n continue;\n }\n if (state === 1) {\n state = value ? 
0 : 2;\n continue;\n }\n // here we have state either equal to 2 or 3\n if (!value) {\n state = 3;\n } else if (value === 1) {\n overflow = i - state - 1;\n if (lastUnitStart >= 0) {\n const unit = {\n data: array.subarray(lastUnitStart, overflow),\n type: lastUnitType\n };\n // logger.log('pushing NALU, type/size:' + unit.type + '/' + unit.data.byteLength);\n units.push(unit);\n } else {\n // lastUnitStart is undefined => this is the first start code found in this PES packet\n // first check if start code delimiter is overlapping between 2 PES packets,\n // ie it started in last packet (lastState not zero)\n // and ended at the beginning of this PES packet (i <= 4 - lastState)\n const lastUnit = this.getLastNalUnit(track.samples);\n if (lastUnit) {\n if (lastState && i <= 4 - lastState) {\n // start delimiter overlapping between PES packets\n // strip start delimiter bytes from the end of last NAL unit\n // check if lastUnit had a state different from zero\n if (lastUnit.state) {\n // strip last bytes\n lastUnit.data = lastUnit.data.subarray(0, lastUnit.data.byteLength - lastState);\n }\n }\n // If NAL units are not starting right at the beginning of the PES packet, push preceding data into previous NAL unit.\n\n if (overflow > 0) {\n // logger.log('first NALU found with overflow:' + overflow);\n lastUnit.data = appendUint8Array(lastUnit.data, array.subarray(0, overflow));\n lastUnit.state = 0;\n }\n }\n }\n // check if we can read unit type\n if (i < len) {\n unitType = array[i] & 0x1f;\n // logger.log('find NALU @ offset:' + i + ',type:' + unitType);\n lastUnitStart = i;\n lastUnitType = unitType;\n state = 0;\n } else {\n // not enough byte to read unit type. let's read it on next PES parsing\n state = -1;\n }\n } else {\n state = 0;\n }\n }\n if (lastUnitStart >= 0 && state >= 0) {\n const unit = {\n data: array.subarray(lastUnitStart, len),\n type: lastUnitType,\n state: state\n };\n units.push(unit);\n // logger.log('pushing NALU, type/size/state:' + unit.type + '/' + unit.data.byteLength + '/' + state);\n }\n // no NALu found\n if (units.length === 0) {\n // append pes.data to previous NAL unit\n const lastUnit = this.getLastNalUnit(track.samples);\n if (lastUnit) {\n lastUnit.data = appendUint8Array(lastUnit.data, array);\n }\n }\n track.naluState = state;\n return units;\n }\n}\n\n/**\n * SAMPLE-AES decrypter\n */\n\nclass SampleAesDecrypter {\n constructor(observer, config, keyData) {\n this.keyData = void 0;\n this.decrypter = void 0;\n this.keyData = keyData;\n this.decrypter = new Decrypter(config, {\n removePKCS7Padding: false\n });\n }\n decryptBuffer(encryptedData) {\n return this.decrypter.decrypt(encryptedData, this.keyData.key.buffer, this.keyData.iv.buffer);\n }\n\n // AAC - encrypt all full 16 bytes blocks starting from offset 16\n decryptAacSample(samples, sampleIndex, callback) {\n const curUnit = samples[sampleIndex].unit;\n if (curUnit.length <= 16) {\n // No encrypted portion in this sample (first 16 bytes is not\n // encrypted, see https://developer.apple.com/library/archive/documentation/AudioVideo/Conceptual/HLS_Sample_Encryption/Encryption/Encryption.html),\n return;\n }\n const encryptedData = curUnit.subarray(16, curUnit.length - curUnit.length % 16);\n const encryptedBuffer = encryptedData.buffer.slice(encryptedData.byteOffset, encryptedData.byteOffset + encryptedData.length);\n this.decryptBuffer(encryptedBuffer).then(decryptedBuffer => {\n const decryptedData = new Uint8Array(decryptedBuffer);\n curUnit.set(decryptedData, 16);\n if 
(!this.decrypter.isSync()) {\n this.decryptAacSamples(samples, sampleIndex + 1, callback);\n }\n });\n }\n decryptAacSamples(samples, sampleIndex, callback) {\n for (;; sampleIndex++) {\n if (sampleIndex >= samples.length) {\n callback();\n return;\n }\n if (samples[sampleIndex].unit.length < 32) {\n continue;\n }\n this.decryptAacSample(samples, sampleIndex, callback);\n if (!this.decrypter.isSync()) {\n return;\n }\n }\n }\n\n // AVC - encrypt one 16 bytes block out of ten, starting from offset 32\n getAvcEncryptedData(decodedData) {\n const encryptedDataLen = Math.floor((decodedData.length - 48) / 160) * 16 + 16;\n const encryptedData = new Int8Array(encryptedDataLen);\n let outputPos = 0;\n for (let inputPos = 32; inputPos < decodedData.length - 16; inputPos += 160, outputPos += 16) {\n encryptedData.set(decodedData.subarray(inputPos, inputPos + 16), outputPos);\n }\n return encryptedData;\n }\n getAvcDecryptedUnit(decodedData, decryptedData) {\n const uint8DecryptedData = new Uint8Array(decryptedData);\n let inputPos = 0;\n for (let outputPos = 32; outputPos < decodedData.length - 16; outputPos += 160, inputPos += 16) {\n decodedData.set(uint8DecryptedData.subarray(inputPos, inputPos + 16), outputPos);\n }\n return decodedData;\n }\n decryptAvcSample(samples, sampleIndex, unitIndex, callback, curUnit) {\n const decodedData = discardEPB(curUnit.data);\n const encryptedData = this.getAvcEncryptedData(decodedData);\n this.decryptBuffer(encryptedData.buffer).then(decryptedBuffer => {\n curUnit.data = this.getAvcDecryptedUnit(decodedData, decryptedBuffer);\n if (!this.decrypter.isSync()) {\n this.decryptAvcSamples(samples, sampleIndex, unitIndex + 1, callback);\n }\n });\n }\n decryptAvcSamples(samples, sampleIndex, unitIndex, callback) {\n if (samples instanceof Uint8Array) {\n throw new Error('Cannot decrypt samples of type Uint8Array');\n }\n for (;; sampleIndex++, unitIndex = 0) {\n if (sampleIndex >= samples.length) {\n callback();\n return;\n }\n const curUnits = samples[sampleIndex].units;\n for (;; unitIndex++) {\n if (unitIndex >= curUnits.length) {\n break;\n }\n const curUnit = curUnits[unitIndex];\n if (curUnit.data.length <= 48 || curUnit.type !== 1 && curUnit.type !== 5) {\n continue;\n }\n this.decryptAvcSample(samples, sampleIndex, unitIndex, callback, curUnit);\n if (!this.decrypter.isSync()) {\n return;\n }\n }\n }\n }\n}\n\nconst PACKET_LENGTH = 188;\nclass TSDemuxer {\n constructor(observer, config, typeSupported) {\n this.observer = void 0;\n this.config = void 0;\n this.typeSupported = void 0;\n this.sampleAes = null;\n this.pmtParsed = false;\n this.audioCodec = void 0;\n this.videoCodec = void 0;\n this._duration = 0;\n this._pmtId = -1;\n this._videoTrack = void 0;\n this._audioTrack = void 0;\n this._id3Track = void 0;\n this._txtTrack = void 0;\n this.aacOverFlow = null;\n this.remainderData = null;\n this.videoParser = void 0;\n this.observer = observer;\n this.config = config;\n this.typeSupported = typeSupported;\n this.videoParser = new AvcVideoParser();\n }\n static probe(data) {\n const syncOffset = TSDemuxer.syncOffset(data);\n if (syncOffset > 0) {\n logger.warn(`MPEG2-TS detected but first sync word found @ offset ${syncOffset}`);\n }\n return syncOffset !== -1;\n }\n static syncOffset(data) {\n const length = data.length;\n let scanwindow = Math.min(PACKET_LENGTH * 5, length - PACKET_LENGTH) + 1;\n let i = 0;\n while (i < scanwindow) {\n // a TS init segment should contain at least 2 TS packets: PAT and PMT, each starting with 0x47\n let foundPat = 
false;\n let packetStart = -1;\n let tsPackets = 0;\n for (let j = i; j < length; j += PACKET_LENGTH) {\n if (data[j] === 0x47 && (length - j === PACKET_LENGTH || data[j + PACKET_LENGTH] === 0x47)) {\n tsPackets++;\n if (packetStart === -1) {\n packetStart = j;\n // First sync word found at offset, increase scan length (#5251)\n if (packetStart !== 0) {\n scanwindow = Math.min(packetStart + PACKET_LENGTH * 99, data.length - PACKET_LENGTH) + 1;\n }\n }\n if (!foundPat) {\n foundPat = parsePID(data, j) === 0;\n }\n // Sync word found at 0 with 3 packets, or found at offset least 2 packets up to scanwindow (#5501)\n if (foundPat && tsPackets > 1 && (packetStart === 0 && tsPackets > 2 || j + PACKET_LENGTH > scanwindow)) {\n return packetStart;\n }\n } else if (tsPackets) {\n // Exit if sync word found, but does not contain contiguous packets\n return -1;\n } else {\n break;\n }\n }\n i++;\n }\n return -1;\n }\n\n /**\n * Creates a track model internal to demuxer used to drive remuxing input\n */\n static createTrack(type, duration) {\n return {\n container: type === 'video' || type === 'audio' ? 'video/mp2t' : undefined,\n type,\n id: RemuxerTrackIdConfig[type],\n pid: -1,\n inputTimeScale: 90000,\n sequenceNumber: 0,\n samples: [],\n dropped: 0,\n duration: type === 'audio' ? duration : undefined\n };\n }\n\n /**\n * Initializes a new init segment on the demuxer/remuxer interface. Needed for discontinuities/track-switches (or at stream start)\n * Resets all internal track instances of the demuxer.\n */\n resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration) {\n this.pmtParsed = false;\n this._pmtId = -1;\n this._videoTrack = TSDemuxer.createTrack('video');\n this._audioTrack = TSDemuxer.createTrack('audio', trackDuration);\n this._id3Track = TSDemuxer.createTrack('id3');\n this._txtTrack = TSDemuxer.createTrack('text');\n this._audioTrack.segmentCodec = 'aac';\n\n // flush any partial content\n this.aacOverFlow = null;\n this.remainderData = null;\n this.audioCodec = audioCodec;\n this.videoCodec = videoCodec;\n this._duration = trackDuration;\n }\n resetTimeStamp() {}\n resetContiguity() {\n const {\n _audioTrack,\n _videoTrack,\n _id3Track\n } = this;\n if (_audioTrack) {\n _audioTrack.pesData = null;\n }\n if (_videoTrack) {\n _videoTrack.pesData = null;\n }\n if (_id3Track) {\n _id3Track.pesData = null;\n }\n this.aacOverFlow = null;\n this.remainderData = null;\n }\n demux(data, timeOffset, isSampleAes = false, flush = false) {\n if (!isSampleAes) {\n this.sampleAes = null;\n }\n let pes;\n const videoTrack = this._videoTrack;\n const audioTrack = this._audioTrack;\n const id3Track = this._id3Track;\n const textTrack = this._txtTrack;\n let videoPid = videoTrack.pid;\n let videoData = videoTrack.pesData;\n let audioPid = audioTrack.pid;\n let id3Pid = id3Track.pid;\n let audioData = audioTrack.pesData;\n let id3Data = id3Track.pesData;\n let unknownPID = null;\n let pmtParsed = this.pmtParsed;\n let pmtId = this._pmtId;\n let len = data.length;\n if (this.remainderData) {\n data = appendUint8Array(this.remainderData, data);\n len = data.length;\n this.remainderData = null;\n }\n if (len < PACKET_LENGTH && !flush) {\n this.remainderData = data;\n return {\n audioTrack,\n videoTrack,\n id3Track,\n textTrack\n };\n }\n const syncOffset = Math.max(0, TSDemuxer.syncOffset(data));\n len -= (len - syncOffset) % PACKET_LENGTH;\n if (len < data.byteLength && !flush) {\n this.remainderData = new Uint8Array(data.buffer, len, data.buffer.byteLength - len);\n }\n\n // loop through TS 
packets\n let tsPacketErrors = 0;\n for (let start = syncOffset; start < len; start += PACKET_LENGTH) {\n if (data[start] === 0x47) {\n const stt = !!(data[start + 1] & 0x40);\n const pid = parsePID(data, start);\n const atf = (data[start + 3] & 0x30) >> 4;\n\n // if an adaption field is present, its length is specified by the fifth byte of the TS packet header.\n let offset;\n if (atf > 1) {\n offset = start + 5 + data[start + 4];\n // continue if there is only adaptation field\n if (offset === start + PACKET_LENGTH) {\n continue;\n }\n } else {\n offset = start + 4;\n }\n switch (pid) {\n case videoPid:\n if (stt) {\n if (videoData && (pes = parsePES(videoData))) {\n this.videoParser.parseAVCPES(videoTrack, textTrack, pes, false, this._duration);\n }\n videoData = {\n data: [],\n size: 0\n };\n }\n if (videoData) {\n videoData.data.push(data.subarray(offset, start + PACKET_LENGTH));\n videoData.size += start + PACKET_LENGTH - offset;\n }\n break;\n case audioPid:\n if (stt) {\n if (audioData && (pes = parsePES(audioData))) {\n switch (audioTrack.segmentCodec) {\n case 'aac':\n this.parseAACPES(audioTrack, pes);\n break;\n case 'mp3':\n this.parseMPEGPES(audioTrack, pes);\n break;\n case 'ac3':\n {\n this.parseAC3PES(audioTrack, pes);\n }\n break;\n }\n }\n audioData = {\n data: [],\n size: 0\n };\n }\n if (audioData) {\n audioData.data.push(data.subarray(offset, start + PACKET_LENGTH));\n audioData.size += start + PACKET_LENGTH - offset;\n }\n break;\n case id3Pid:\n if (stt) {\n if (id3Data && (pes = parsePES(id3Data))) {\n this.parseID3PES(id3Track, pes);\n }\n id3Data = {\n data: [],\n size: 0\n };\n }\n if (id3Data) {\n id3Data.data.push(data.subarray(offset, start + PACKET_LENGTH));\n id3Data.size += start + PACKET_LENGTH - offset;\n }\n break;\n case 0:\n if (stt) {\n offset += data[offset] + 1;\n }\n pmtId = this._pmtId = parsePAT(data, offset);\n // logger.log('PMT PID:' + this._pmtId);\n break;\n case pmtId:\n {\n if (stt) {\n offset += data[offset] + 1;\n }\n const parsedPIDs = parsePMT(data, offset, this.typeSupported, isSampleAes, this.observer);\n\n // only update track id if track PID found while parsing PMT\n // this is to avoid resetting the PID to -1 in case\n // track PID transiently disappears from the stream\n // this could happen in case of transient missing audio samples for example\n // NOTE this is only the PID of the track as found in TS,\n // but we are not using this for MP4 track IDs.\n videoPid = parsedPIDs.videoPid;\n if (videoPid > 0) {\n videoTrack.pid = videoPid;\n videoTrack.segmentCodec = parsedPIDs.segmentVideoCodec;\n }\n audioPid = parsedPIDs.audioPid;\n if (audioPid > 0) {\n audioTrack.pid = audioPid;\n audioTrack.segmentCodec = parsedPIDs.segmentAudioCodec;\n }\n id3Pid = parsedPIDs.id3Pid;\n if (id3Pid > 0) {\n id3Track.pid = id3Pid;\n }\n if (unknownPID !== null && !pmtParsed) {\n logger.warn(`MPEG-TS PMT found at ${start} after unknown PID '${unknownPID}'. 
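// Editor's sketch of the four TS header bytes decoded at the top of the
// packet loop above (same masks and shifts, pulled out for readability).
function parseTsPacketHeader(data, start) {
  const adaptationFieldControl = (data[start + 3] & 0x30) >> 4; // 1=payload, 2=AF only, 3=AF+payload
  return {
    payloadUnitStart: !!(data[start + 1] & 0x40), // PUSI flag
    pid: ((data[start + 1] & 0x1f) << 8) | data[start + 2], // 13-bit PID
    payloadOffset: adaptationFieldControl > 1 ? start + 5 + data[start + 4] : start + 4
  };
}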
Backtracking to sync byte @${syncOffset} to parse all TS packets.`);\n unknownPID = null;\n // we set it to -188, the += 188 in the for loop will reset start to 0\n start = syncOffset - 188;\n }\n pmtParsed = this.pmtParsed = true;\n break;\n }\n case 0x11:\n case 0x1fff:\n break;\n default:\n unknownPID = pid;\n break;\n }\n } else {\n tsPacketErrors++;\n }\n }\n if (tsPacketErrors > 0) {\n emitParsingError(this.observer, new Error(`Found ${tsPacketErrors} TS packet/s that do not start with 0x47`));\n }\n videoTrack.pesData = videoData;\n audioTrack.pesData = audioData;\n id3Track.pesData = id3Data;\n const demuxResult = {\n audioTrack,\n videoTrack,\n id3Track,\n textTrack\n };\n if (flush) {\n this.extractRemainingSamples(demuxResult);\n }\n return demuxResult;\n }\n flush() {\n const {\n remainderData\n } = this;\n this.remainderData = null;\n let result;\n if (remainderData) {\n result = this.demux(remainderData, -1, false, true);\n } else {\n result = {\n videoTrack: this._videoTrack,\n audioTrack: this._audioTrack,\n id3Track: this._id3Track,\n textTrack: this._txtTrack\n };\n }\n this.extractRemainingSamples(result);\n if (this.sampleAes) {\n return this.decrypt(result, this.sampleAes);\n }\n return result;\n }\n extractRemainingSamples(demuxResult) {\n const {\n audioTrack,\n videoTrack,\n id3Track,\n textTrack\n } = demuxResult;\n const videoData = videoTrack.pesData;\n const audioData = audioTrack.pesData;\n const id3Data = id3Track.pesData;\n // try to parse last PES packets\n let pes;\n if (videoData && (pes = parsePES(videoData))) {\n this.videoParser.parseAVCPES(videoTrack, textTrack, pes, true, this._duration);\n videoTrack.pesData = null;\n } else {\n // either avcData null or PES truncated, keep it for next frag parsing\n videoTrack.pesData = videoData;\n }\n if (audioData && (pes = parsePES(audioData))) {\n switch (audioTrack.segmentCodec) {\n case 'aac':\n this.parseAACPES(audioTrack, pes);\n break;\n case 'mp3':\n this.parseMPEGPES(audioTrack, pes);\n break;\n case 'ac3':\n {\n this.parseAC3PES(audioTrack, pes);\n }\n break;\n }\n audioTrack.pesData = null;\n } else {\n if (audioData != null && audioData.size) {\n logger.log('last AAC PES packet truncated,might overlap between fragments');\n }\n\n // either audioData null or PES truncated, keep it for next frag parsing\n audioTrack.pesData = audioData;\n }\n if (id3Data && (pes = parsePES(id3Data))) {\n this.parseID3PES(id3Track, pes);\n id3Track.pesData = null;\n } else {\n // either id3Data null or PES truncated, keep it for next frag parsing\n id3Track.pesData = id3Data;\n }\n }\n demuxSampleAes(data, keyData, timeOffset) {\n const demuxResult = this.demux(data, timeOffset, true, !this.config.progressive);\n const sampleAes = this.sampleAes = new SampleAesDecrypter(this.observer, this.config, keyData);\n return this.decrypt(demuxResult, sampleAes);\n }\n decrypt(demuxResult, sampleAes) {\n return new Promise(resolve => {\n const {\n audioTrack,\n videoTrack\n } = demuxResult;\n if (audioTrack.samples && audioTrack.segmentCodec === 'aac') {\n sampleAes.decryptAacSamples(audioTrack.samples, 0, () => {\n if (videoTrack.samples) {\n sampleAes.decryptAvcSamples(videoTrack.samples, 0, 0, () => {\n resolve(demuxResult);\n });\n } else {\n resolve(demuxResult);\n }\n });\n } else if (videoTrack.samples) {\n sampleAes.decryptAvcSamples(videoTrack.samples, 0, 0, () => {\n resolve(demuxResult);\n });\n }\n });\n }\n destroy() {\n this._duration = 0;\n }\n parseAACPES(track, pes) {\n let startOffset = 0;\n const aacOverFlow = 
this.aacOverFlow;\n let data = pes.data;\n if (aacOverFlow) {\n this.aacOverFlow = null;\n const frameMissingBytes = aacOverFlow.missing;\n const sampleLength = aacOverFlow.sample.unit.byteLength;\n // logger.log(`AAC: append overflowing ${sampleLength} bytes to beginning of new PES`);\n if (frameMissingBytes === -1) {\n data = appendUint8Array(aacOverFlow.sample.unit, data);\n } else {\n const frameOverflowBytes = sampleLength - frameMissingBytes;\n aacOverFlow.sample.unit.set(data.subarray(0, frameMissingBytes), frameOverflowBytes);\n track.samples.push(aacOverFlow.sample);\n startOffset = aacOverFlow.missing;\n }\n }\n // look for ADTS header (0xFFFx)\n let offset;\n let len;\n for (offset = startOffset, len = data.length; offset < len - 1; offset++) {\n if (isHeader$1(data, offset)) {\n break;\n }\n }\n // if ADTS header does not start straight from the beginning of the PES payload, raise an error\n if (offset !== startOffset) {\n let reason;\n const recoverable = offset < len - 1;\n if (recoverable) {\n reason = `AAC PES did not start with ADTS header,offset:${offset}`;\n } else {\n reason = 'No ADTS header found in AAC PES';\n }\n emitParsingError(this.observer, new Error(reason), recoverable);\n if (!recoverable) {\n return;\n }\n }\n initTrackConfig(track, this.observer, data, offset, this.audioCodec);\n let pts;\n if (pes.pts !== undefined) {\n pts = pes.pts;\n } else if (aacOverFlow) {\n // if last AAC frame is overflowing, we should ensure timestamps are contiguous:\n // first sample PTS should be equal to last sample PTS + frameDuration\n const frameDuration = getFrameDuration(track.samplerate);\n pts = aacOverFlow.sample.pts + frameDuration;\n } else {\n logger.warn('[tsdemuxer]: AAC PES unknown PTS');\n return;\n }\n\n // scan for aac samples\n let frameIndex = 0;\n let frame;\n while (offset < len) {\n frame = appendFrame$2(track, data, offset, pts, frameIndex);\n offset += frame.length;\n if (!frame.missing) {\n frameIndex++;\n for (; offset < len - 1; offset++) {\n if (isHeader$1(data, offset)) {\n break;\n }\n }\n } else {\n this.aacOverFlow = frame;\n break;\n }\n }\n }\n parseMPEGPES(track, pes) {\n const data = pes.data;\n const length = data.length;\n let frameIndex = 0;\n let offset = 0;\n const pts = pes.pts;\n if (pts === undefined) {\n logger.warn('[tsdemuxer]: MPEG PES unknown PTS');\n return;\n }\n while (offset < length) {\n if (isHeader(data, offset)) {\n const frame = appendFrame$1(track, data, offset, pts, frameIndex);\n if (frame) {\n offset += frame.length;\n frameIndex++;\n } else {\n // logger.log('Unable to parse Mpeg audio frame');\n break;\n }\n } else {\n // nothing found, keep looking\n offset++;\n }\n }\n }\n parseAC3PES(track, pes) {\n {\n const data = pes.data;\n const pts = pes.pts;\n if (pts === undefined) {\n logger.warn('[tsdemuxer]: AC3 PES unknown PTS');\n return;\n }\n const length = data.length;\n let frameIndex = 0;\n let offset = 0;\n let parsed;\n while (offset < length && (parsed = appendFrame(track, data, offset, pts, frameIndex++)) > 0) {\n offset += parsed;\n }\n }\n }\n parseID3PES(id3Track, pes) {\n if (pes.pts === undefined) {\n logger.warn('[tsdemuxer]: ID3 PES unknown PTS');\n return;\n }\n const id3Sample = _extends({}, pes, {\n type: this._videoTrack ? 
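// Editor's sketch of the ADTS sync test performed by isHeader$1 (defined
// elsewhere in this bundle): twelve sync bits set and the layer bits zero.
function isAdtsSyncWord(data, offset) {
  return data[offset] === 0xff && (data[offset + 1] & 0xf6) === 0xf0;
}
console.log(isAdtsSyncWord(new Uint8Array([0xff, 0xf1, 0x50]), 0)); // true (MPEG-4 AAC-LC)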
MetadataSchema.emsg : MetadataSchema.audioId3,\n duration: Number.POSITIVE_INFINITY\n });\n id3Track.samples.push(id3Sample);\n }\n}\nfunction parsePID(data, offset) {\n // pid is a 13-bit field starting at the last bit of TS[1]\n return ((data[offset + 1] & 0x1f) << 8) + data[offset + 2];\n}\nfunction parsePAT(data, offset) {\n // skip the PSI header and parse the first PMT entry\n return (data[offset + 10] & 0x1f) << 8 | data[offset + 11];\n}\nfunction parsePMT(data, offset, typeSupported, isSampleAes, observer) {\n const result = {\n audioPid: -1,\n videoPid: -1,\n id3Pid: -1,\n segmentVideoCodec: 'avc',\n segmentAudioCodec: 'aac'\n };\n const sectionLength = (data[offset + 1] & 0x0f) << 8 | data[offset + 2];\n const tableEnd = offset + 3 + sectionLength - 4;\n // to determine where the table is, we have to figure out how\n // long the program info descriptors are\n const programInfoLength = (data[offset + 10] & 0x0f) << 8 | data[offset + 11];\n // advance the offset to the first entry in the mapping table\n offset += 12 + programInfoLength;\n while (offset < tableEnd) {\n const pid = parsePID(data, offset);\n const esInfoLength = (data[offset + 3] & 0x0f) << 8 | data[offset + 4];\n switch (data[offset]) {\n case 0xcf:\n // SAMPLE-AES AAC\n if (!isSampleAes) {\n logEncryptedSamplesFoundInUnencryptedStream('ADTS AAC');\n break;\n }\n /* falls through */\n case 0x0f:\n // ISO/IEC 13818-7 ADTS AAC (MPEG-2 lower bit-rate audio)\n // logger.log('AAC PID:' + pid);\n if (result.audioPid === -1) {\n result.audioPid = pid;\n }\n break;\n\n // Packetized metadata (ID3)\n case 0x15:\n // logger.log('ID3 PID:' + pid);\n if (result.id3Pid === -1) {\n result.id3Pid = pid;\n }\n break;\n case 0xdb:\n // SAMPLE-AES AVC\n if (!isSampleAes) {\n logEncryptedSamplesFoundInUnencryptedStream('H.264');\n break;\n }\n /* falls through */\n case 0x1b:\n // ITU-T Rec. H.264 and ISO/IEC 14496-10 (lower bit-rate video)\n // logger.log('AVC PID:' + pid);\n if (result.videoPid === -1) {\n result.videoPid = pid;\n result.segmentVideoCodec = 'avc';\n }\n break;\n\n // ISO/IEC 11172-3 (MPEG-1 audio)\n // or ISO/IEC 13818-3 (MPEG-2 halved sample rate audio)\n case 0x03:\n case 0x04:\n // logger.log('MPEG PID:' + pid);\n if (!typeSupported.mpeg && !typeSupported.mp3) {\n logger.log('MPEG audio found, not supported in this browser');\n } else if (result.audioPid === -1) {\n result.audioPid = pid;\n result.segmentAudioCodec = 'mp3';\n }\n break;\n case 0xc1:\n // SAMPLE-AES AC3\n if (!isSampleAes) {\n logEncryptedSamplesFoundInUnencryptedStream('AC-3');\n break;\n }\n /* falls through */\n case 0x81:\n {\n if (!typeSupported.ac3) {\n logger.log('AC-3 audio found, not supported in this browser');\n } else if (result.audioPid === -1) {\n result.audioPid = pid;\n result.segmentAudioCodec = 'ac3';\n }\n }\n break;\n case 0x06:\n // stream_type 6 can mean a lot of different things in case of DVB.\n // We need to look at the descriptors. 
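// Worked example of parsePID above: a packet starting 0x47 0x40 0x11 carries
// PID ((0x40 & 0x1f) << 8) + 0x11 = 17 (0x0011, the DVB SDT), which the demux
// loop deliberately skips via its `case 0x11` branch.
console.log(parsePID(new Uint8Array([0x47, 0x40, 0x11, 0x10]), 0)); // 17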
Right now, we're only interested\n // in AC-3 audio, so we do the descriptor parsing only when we don't have\n // an audio PID yet.\n if (result.audioPid === -1 && esInfoLength > 0) {\n let parsePos = offset + 5;\n let remaining = esInfoLength;\n while (remaining > 2) {\n const descriptorId = data[parsePos];\n switch (descriptorId) {\n case 0x6a:\n // DVB Descriptor for AC-3\n {\n if (typeSupported.ac3 !== true) {\n logger.log('AC-3 audio found, not supported in this browser for now');\n } else {\n result.audioPid = pid;\n result.segmentAudioCodec = 'ac3';\n }\n }\n break;\n }\n const descriptorLen = data[parsePos + 1] + 2;\n parsePos += descriptorLen;\n remaining -= descriptorLen;\n }\n }\n break;\n case 0xc2: // SAMPLE-AES EC3\n /* falls through */\n case 0x87:\n emitParsingError(observer, new Error('Unsupported EC-3 in M2TS found'));\n return result;\n case 0x24:\n emitParsingError(observer, new Error('Unsupported HEVC in M2TS found'));\n return result;\n }\n // move to the next table entry\n // skip past the elementary stream descriptors, if present\n offset += esInfoLength + 5;\n }\n return result;\n}\nfunction emitParsingError(observer, error, levelRetry) {\n logger.warn(`parsing error: ${error.message}`);\n observer.emit(Events.ERROR, Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.FRAG_PARSING_ERROR,\n fatal: false,\n levelRetry,\n error,\n reason: error.message\n });\n}\nfunction logEncryptedSamplesFoundInUnencryptedStream(type) {\n logger.log(`${type} with AES-128-CBC encryption found in unencrypted stream`);\n}\nfunction parsePES(stream) {\n let i = 0;\n let frag;\n let pesLen;\n let pesHdrLen;\n let pesPts;\n let pesDts;\n const data = stream.data;\n // safety check\n if (!stream || stream.size === 0) {\n return null;\n }\n\n // we might need up to 19 bytes to read PES header\n // if first chunk of data is less than 19 bytes, let's merge it with following ones until we get 19 bytes\n // usually only one merge is needed (and this is rare ...)\n while (data[0].length < 19 && data.length > 1) {\n data[0] = appendUint8Array(data[0], data[1]);\n data.splice(1, 1);\n }\n // retrieve PTS/DTS from first fragment\n frag = data[0];\n const pesPrefix = (frag[0] << 16) + (frag[1] << 8) + frag[2];\n if (pesPrefix === 1) {\n pesLen = (frag[4] << 8) + frag[5];\n // if PES parsed length is not zero and greater than total received length, stop parsing. 
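// Editor's condensed view of the stream_type dispatch in parsePMT above; the
// SAMPLE-AES variants resolve to the same codecs as their clear counterparts,
// and EC-3 (0xc2/0x87) and HEVC (0x24) abort with a parsing error.
const PMT_STREAM_TYPES = {
  0x0f: 'aac (ADTS)',   0xcf: 'aac (SAMPLE-AES)',
  0x1b: 'avc (H.264)',  0xdb: 'avc (SAMPLE-AES)',
  0x03: 'mp3 (MPEG-1)', 0x04: 'mp3 (MPEG-2)',
  0x81: 'ac3',          0xc1: 'ac3 (SAMPLE-AES)',
  0x15: 'id3 metadata', 0x06: 'DVB private (inspect descriptors)'
};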
PES might be truncated\n // minus 6 : PES header size\n if (pesLen && pesLen > stream.size - 6) {\n return null;\n }\n const pesFlags = frag[7];\n if (pesFlags & 0xc0) {\n /* PES header described here : http://dvd.sourceforge.net/dvdinfo/pes-hdr.html\n as PTS / DTS is 33 bit we cannot use bitwise operator in JS,\n as Bitwise operators treat their operands as a sequence of 32 bits */\n pesPts = (frag[9] & 0x0e) * 536870912 +\n // 1 << 29\n (frag[10] & 0xff) * 4194304 +\n // 1 << 22\n (frag[11] & 0xfe) * 16384 +\n // 1 << 14\n (frag[12] & 0xff) * 128 +\n // 1 << 7\n (frag[13] & 0xfe) / 2;\n if (pesFlags & 0x40) {\n pesDts = (frag[14] & 0x0e) * 536870912 +\n // 1 << 29\n (frag[15] & 0xff) * 4194304 +\n // 1 << 22\n (frag[16] & 0xfe) * 16384 +\n // 1 << 14\n (frag[17] & 0xff) * 128 +\n // 1 << 7\n (frag[18] & 0xfe) / 2;\n if (pesPts - pesDts > 60 * 90000) {\n logger.warn(`${Math.round((pesPts - pesDts) / 90000)}s delta between PTS and DTS, align them`);\n pesPts = pesDts;\n }\n } else {\n pesDts = pesPts;\n }\n }\n pesHdrLen = frag[8];\n // 9 bytes : 6 bytes for PES header + 3 bytes for PES extension\n let payloadStartOffset = pesHdrLen + 9;\n if (stream.size <= payloadStartOffset) {\n return null;\n }\n stream.size -= payloadStartOffset;\n // reassemble PES packet\n const pesData = new Uint8Array(stream.size);\n for (let j = 0, dataLen = data.length; j < dataLen; j++) {\n frag = data[j];\n let len = frag.byteLength;\n if (payloadStartOffset) {\n if (payloadStartOffset > len) {\n // trim full frag if PES header bigger than frag\n payloadStartOffset -= len;\n continue;\n } else {\n // trim partial frag if PES header smaller than frag\n frag = frag.subarray(payloadStartOffset);\n len -= payloadStartOffset;\n payloadStartOffset = 0;\n }\n }\n pesData.set(frag, i);\n i += len;\n }\n if (pesLen) {\n // payload size : remove PES header + PES extension\n pesLen -= pesHdrLen + 3;\n }\n return {\n data: pesData,\n pts: pesPts,\n dts: pesDts,\n len: pesLen\n };\n }\n return null;\n}\n\n/**\n * MP3 demuxer\n */\nclass MP3Demuxer extends BaseAudioDemuxer {\n resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration) {\n super.resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration);\n this._audioTrack = {\n container: 'audio/mpeg',\n type: 'audio',\n id: 2,\n pid: -1,\n sequenceNumber: 0,\n segmentCodec: 'mp3',\n samples: [],\n manifestCodec: audioCodec,\n duration: trackDuration,\n inputTimeScale: 90000,\n dropped: 0\n };\n }\n static probe(data) {\n if (!data) {\n return false;\n }\n\n // check if data contains ID3 timestamp and MPEG sync word\n // Look for MPEG header | 1111 1111 | 111X XYZX | where X can be either 0 or 1 and Y or Z should be 1\n // Layer bits (position 14 and 15) in header should be always different from 0 (Layer I or Layer II or Layer III)\n // More info http://www.mp3-tech.org/programmer/frame_header.html\n const id3Data = getID3Data(data, 0);\n let offset = (id3Data == null ? 
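// Editor's sketch of the 33-bit PTS arithmetic above: bitwise operators in JS
// work on 32 bits, so the five header bytes are combined with multiplications,
// and the trailing marker bit is dropped by the final division by two.
function readPts33(b0, b1, b2, b3, b4) {
  return (b0 & 0x0e) * 536870912 + // bits 32..30 (x 2^29)
         (b1 & 0xff) * 4194304 +   // bits 29..22 (x 2^22)
         (b2 & 0xfe) * 16384 +     // bits 21..15 (x 2^14)
         (b3 & 0xff) * 128 +       // bits 14..7  (x 2^7)
         (b4 & 0xfe) / 2;          // bits 6..0
}
console.log(readPts33(0x0e, 0xff, 0xfe, 0xff, 0xfe)); // 8589934591 === 2 ** 33 - 1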
void 0 : id3Data.length) || 0;\n\n // Check for ac-3|ec-3 sync bytes and return false if present\n if (id3Data && data[offset] === 0x0b && data[offset + 1] === 0x77 && getTimeStamp(id3Data) !== undefined &&\n // check the bsid to confirm ac-3 or ec-3 (not mp3)\n getAudioBSID(data, offset) <= 16) {\n return false;\n }\n for (let length = data.length; offset < length; offset++) {\n if (probe(data, offset)) {\n logger.log('MPEG Audio sync word found !');\n return true;\n }\n }\n return false;\n }\n canParse(data, offset) {\n return canParse(data, offset);\n }\n appendFrame(track, data, offset) {\n if (this.basePTS === null) {\n return;\n }\n return appendFrame$1(track, data, offset, this.basePTS, this.frameIndex);\n }\n}\n\n/**\n * AAC helper\n */\n\nclass AAC {\n static getSilentFrame(codec, channelCount) {\n switch (codec) {\n case 'mp4a.40.2':\n if (channelCount === 1) {\n return new Uint8Array([0x00, 0xc8, 0x00, 0x80, 0x23, 0x80]);\n } else if (channelCount === 2) {\n return new Uint8Array([0x21, 0x00, 0x49, 0x90, 0x02, 0x19, 0x00, 0x23, 0x80]);\n } else if (channelCount === 3) {\n return new Uint8Array([0x00, 0xc8, 0x00, 0x80, 0x20, 0x84, 0x01, 0x26, 0x40, 0x08, 0x64, 0x00, 0x8e]);\n } else if (channelCount === 4) {\n return new Uint8Array([0x00, 0xc8, 0x00, 0x80, 0x20, 0x84, 0x01, 0x26, 0x40, 0x08, 0x64, 0x00, 0x80, 0x2c, 0x80, 0x08, 0x02, 0x38]);\n } else if (channelCount === 5) {\n return new Uint8Array([0x00, 0xc8, 0x00, 0x80, 0x20, 0x84, 0x01, 0x26, 0x40, 0x08, 0x64, 0x00, 0x82, 0x30, 0x04, 0x99, 0x00, 0x21, 0x90, 0x02, 0x38]);\n } else if (channelCount === 6) {\n return new Uint8Array([0x00, 0xc8, 0x00, 0x80, 0x20, 0x84, 0x01, 0x26, 0x40, 0x08, 0x64, 0x00, 0x82, 0x30, 0x04, 0x99, 0x00, 0x21, 0x90, 0x02, 0x00, 0xb2, 0x00, 0x20, 0x08, 0xe0]);\n }\n break;\n // handle HE-AAC below (mp4a.40.5 / mp4a.40.29)\n default:\n if (channelCount === 1) {\n // ffmpeg -y -f lavfi -i \"aevalsrc=0:d=0.05\" -c:a libfdk_aac -profile:a aac_he -b:a 4k output.aac && hexdump -v -e '16/1 \"0x%x,\" \"\\n\"' -v output.aac\n return new Uint8Array([0x1, 0x40, 0x22, 0x80, 0xa3, 0x4e, 0xe6, 0x80, 0xba, 0x8, 0x0, 0x0, 0x0, 0x1c, 0x6, 0xf1, 0xc1, 0xa, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5e]);\n } else if (channelCount === 2) {\n // ffmpeg -y -f lavfi -i \"aevalsrc=0|0:d=0.05\" -c:a libfdk_aac -profile:a aac_he_v2 -b:a 4k output.aac && hexdump -v -e '16/1 \"0x%x,\" \"\\n\"' -v output.aac\n return new Uint8Array([0x1, 0x40, 0x22, 0x80, 0xa3, 0x5e, 0xe6, 0x80, 0xba, 0x8, 0x0, 0x0, 0x0, 0x0, 0x95, 0x0, 0x6, 0xf1, 0xa1, 0xa, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5e]);\n } else if (channelCount === 3) {\n // ffmpeg -y -f lavfi -i \"aevalsrc=0|0|0:d=0.05\" -c:a libfdk_aac -profile:a aac_he_v2 -b:a 4k output.aac && hexdump -v -e '16/1 \"0x%x,\" \"\\n\"' -v output.aac\n return new Uint8Array([0x1, 0x40, 0x22, 0x80, 0xa3, 0x5e, 0xe6, 0x80, 0xba, 0x8, 0x0, 0x0, 0x0, 0x0, 0x95, 0x0, 0x6, 0xf1, 0xa1, 0xa, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 
0x5e]);\n }\n break;\n }\n return undefined;\n }\n}\n\n/**\n * Generate MP4 Box\n */\n\nconst UINT32_MAX = Math.pow(2, 32) - 1;\nclass MP4 {\n static init() {\n MP4.types = {\n avc1: [],\n // codingname\n avcC: [],\n btrt: [],\n dinf: [],\n dref: [],\n esds: [],\n ftyp: [],\n hdlr: [],\n mdat: [],\n mdhd: [],\n mdia: [],\n mfhd: [],\n minf: [],\n moof: [],\n moov: [],\n mp4a: [],\n '.mp3': [],\n dac3: [],\n 'ac-3': [],\n mvex: [],\n mvhd: [],\n pasp: [],\n sdtp: [],\n stbl: [],\n stco: [],\n stsc: [],\n stsd: [],\n stsz: [],\n stts: [],\n tfdt: [],\n tfhd: [],\n traf: [],\n trak: [],\n trun: [],\n trex: [],\n tkhd: [],\n vmhd: [],\n smhd: []\n };\n let i;\n for (i in MP4.types) {\n if (MP4.types.hasOwnProperty(i)) {\n MP4.types[i] = [i.charCodeAt(0), i.charCodeAt(1), i.charCodeAt(2), i.charCodeAt(3)];\n }\n }\n const videoHdlr = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00,\n // pre_defined\n 0x76, 0x69, 0x64, 0x65,\n // handler_type: 'vide'\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x56, 0x69, 0x64, 0x65, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'VideoHandler'\n ]);\n const audioHdlr = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00,\n // pre_defined\n 0x73, 0x6f, 0x75, 0x6e,\n // handler_type: 'soun'\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x53, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'SoundHandler'\n ]);\n MP4.HDLR_TYPES = {\n video: videoHdlr,\n audio: audioHdlr\n };\n const dref = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x01,\n // entry_count\n 0x00, 0x00, 0x00, 0x0c,\n // entry_size\n 0x75, 0x72, 0x6c, 0x20,\n // 'url' type\n 0x00,\n // version 0\n 0x00, 0x00, 0x01 // entry_flags\n ]);\n const stco = new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00 // entry_count\n ]);\n MP4.STTS = MP4.STSC = MP4.STCO = stco;\n MP4.STSZ = new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00,\n // sample_size\n 0x00, 0x00, 0x00, 0x00 // sample_count\n ]);\n MP4.VMHD = new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x01,\n // flags\n 0x00, 0x00,\n // graphicsmode\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // opcolor\n ]);\n MP4.SMHD = new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00,\n // balance\n 0x00, 0x00 // reserved\n ]);\n MP4.STSD = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x01]); // entry_count\n\n const majorBrand = new Uint8Array([105, 115, 111, 109]); // isom\n const avc1Brand = new Uint8Array([97, 118, 99, 49]); // avc1\n const minorVersion = new Uint8Array([0, 0, 0, 1]);\n MP4.FTYP = MP4.box(MP4.types.ftyp, majorBrand, minorVersion, majorBrand, avc1Brand);\n MP4.DINF = MP4.box(MP4.types.dinf, MP4.box(MP4.types.dref, dref));\n }\n static box(type, ...payload) {\n let size = 8;\n let i = payload.length;\n const len = i;\n // calculate the total size we need to allocate\n while (i--) {\n size += payload[i].byteLength;\n }\n const result = new Uint8Array(size);\n result[0] = size >> 24 & 0xff;\n result[1] = size >> 16 & 0xff;\n result[2] = size >> 8 & 0xff;\n result[3] = size & 0xff;\n result.set(type, 4);\n // copy the payload into the result\n for (i = 0, size = 8; i < len; i++) {\n 
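// Usage sketch for getSilentFrame above: the remuxer splices these canned
// frames into audio gaps; for stereo AAC-LC the silent frame is 9 bytes.
const silentStereoLc = AAC.getSilentFrame('mp4a.40.2', 2);
console.log(silentStereoLc && silentStereoLc.byteLength); // 9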
// copy payload[i] array @ offset size\n result.set(payload[i], size);\n size += payload[i].byteLength;\n }\n return result;\n }\n static hdlr(type) {\n return MP4.box(MP4.types.hdlr, MP4.HDLR_TYPES[type]);\n }\n static mdat(data) {\n return MP4.box(MP4.types.mdat, data);\n }\n static mdhd(timescale, duration) {\n duration *= timescale;\n const upperWordDuration = Math.floor(duration / (UINT32_MAX + 1));\n const lowerWordDuration = Math.floor(duration % (UINT32_MAX + 1));\n return MP4.box(MP4.types.mdhd, new Uint8Array([0x01,\n // version 1\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,\n // creation_time\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,\n // modification_time\n timescale >> 24 & 0xff, timescale >> 16 & 0xff, timescale >> 8 & 0xff, timescale & 0xff,\n // timescale\n upperWordDuration >> 24, upperWordDuration >> 16 & 0xff, upperWordDuration >> 8 & 0xff, upperWordDuration & 0xff, lowerWordDuration >> 24, lowerWordDuration >> 16 & 0xff, lowerWordDuration >> 8 & 0xff, lowerWordDuration & 0xff, 0x55, 0xc4,\n // 'und' language (undetermined)\n 0x00, 0x00]));\n }\n static mdia(track) {\n return MP4.box(MP4.types.mdia, MP4.mdhd(track.timescale, track.duration), MP4.hdlr(track.type), MP4.minf(track));\n }\n static mfhd(sequenceNumber) {\n return MP4.box(MP4.types.mfhd, new Uint8Array([0x00, 0x00, 0x00, 0x00,\n // flags\n sequenceNumber >> 24, sequenceNumber >> 16 & 0xff, sequenceNumber >> 8 & 0xff, sequenceNumber & 0xff // sequence_number\n ]));\n }\n static minf(track) {\n if (track.type === 'audio') {\n return MP4.box(MP4.types.minf, MP4.box(MP4.types.smhd, MP4.SMHD), MP4.DINF, MP4.stbl(track));\n } else {\n return MP4.box(MP4.types.minf, MP4.box(MP4.types.vmhd, MP4.VMHD), MP4.DINF, MP4.stbl(track));\n }\n }\n static moof(sn, baseMediaDecodeTime, track) {\n return MP4.box(MP4.types.moof, MP4.mfhd(sn), MP4.traf(track, baseMediaDecodeTime));\n }\n static moov(tracks) {\n let i = tracks.length;\n const boxes = [];\n while (i--) {\n boxes[i] = MP4.trak(tracks[i]);\n }\n return MP4.box.apply(null, [MP4.types.moov, MP4.mvhd(tracks[0].timescale, tracks[0].duration)].concat(boxes).concat(MP4.mvex(tracks)));\n }\n static mvex(tracks) {\n let i = tracks.length;\n const boxes = [];\n while (i--) {\n boxes[i] = MP4.trex(tracks[i]);\n }\n return MP4.box.apply(null, [MP4.types.mvex, ...boxes]);\n }\n static mvhd(timescale, duration) {\n duration *= timescale;\n const upperWordDuration = Math.floor(duration / (UINT32_MAX + 1));\n const lowerWordDuration = Math.floor(duration % (UINT32_MAX + 1));\n const bytes = new Uint8Array([0x01,\n // version 1\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,\n // creation_time\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,\n // modification_time\n timescale >> 24 & 0xff, timescale >> 16 & 0xff, timescale >> 8 & 0xff, timescale & 0xff,\n // timescale\n upperWordDuration >> 24, upperWordDuration >> 16 & 0xff, upperWordDuration >> 8 & 0xff, upperWordDuration & 0xff, lowerWordDuration >> 24, lowerWordDuration >> 16 & 0xff, lowerWordDuration >> 8 & 0xff, lowerWordDuration & 0xff, 0x00, 0x01, 0x00, 0x00,\n // 1.0 rate\n 0x01, 0x00,\n // 1.0 volume\n 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,\n // transformation: unity 
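// Worked example of MP4.box above: a 32-bit big-endian size, a fourCC, then
// the payload. 'free' is not in MP4.types, but box() accepts any four char
// codes, which keeps this editor's sketch self-contained.
const freeBox = MP4.box([0x66, 0x72, 0x65, 0x65], new Uint8Array([1, 2, 3, 4])); // 'free'
console.log(freeBox.length); // 12 = 8-byte header + 4 payload bytes
console.log(String.fromCharCode(...freeBox.subarray(4, 8))); // 'free'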
matrix\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // pre_defined\n 0xff, 0xff, 0xff, 0xff // next_track_ID\n ]);\n return MP4.box(MP4.types.mvhd, bytes);\n }\n static sdtp(track) {\n const samples = track.samples || [];\n const bytes = new Uint8Array(4 + samples.length);\n let i;\n let flags;\n // leave the full box header (4 bytes) all zero\n // write the sample table\n for (i = 0; i < samples.length; i++) {\n flags = samples[i].flags;\n bytes[i + 4] = flags.dependsOn << 4 | flags.isDependedOn << 2 | flags.hasRedundancy;\n }\n return MP4.box(MP4.types.sdtp, bytes);\n }\n static stbl(track) {\n return MP4.box(MP4.types.stbl, MP4.stsd(track), MP4.box(MP4.types.stts, MP4.STTS), MP4.box(MP4.types.stsc, MP4.STSC), MP4.box(MP4.types.stsz, MP4.STSZ), MP4.box(MP4.types.stco, MP4.STCO));\n }\n static avc1(track) {\n let sps = [];\n let pps = [];\n let i;\n let data;\n let len;\n // assemble the SPSs\n\n for (i = 0; i < track.sps.length; i++) {\n data = track.sps[i];\n len = data.byteLength;\n sps.push(len >>> 8 & 0xff);\n sps.push(len & 0xff);\n\n // SPS\n sps = sps.concat(Array.prototype.slice.call(data));\n }\n\n // assemble the PPSs\n for (i = 0; i < track.pps.length; i++) {\n data = track.pps[i];\n len = data.byteLength;\n pps.push(len >>> 8 & 0xff);\n pps.push(len & 0xff);\n pps = pps.concat(Array.prototype.slice.call(data));\n }\n const avcc = MP4.box(MP4.types.avcC, new Uint8Array([0x01,\n // version\n sps[3],\n // profile\n sps[4],\n // profile compat\n sps[5],\n // level\n 0xfc | 3,\n // lengthSizeMinusOne, hard-coded to 4 bytes\n 0xe0 | track.sps.length // 3bit reserved (111) + numOfSequenceParameterSets\n ].concat(sps).concat([track.pps.length // numOfPictureParameterSets\n ]).concat(pps))); // \"PPS\"\n const width = track.width;\n const height = track.height;\n const hSpacing = track.pixelRatio[0];\n const vSpacing = track.pixelRatio[1];\n return MP4.box(MP4.types.avc1, new Uint8Array([0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x01,\n // data_reference_index\n 0x00, 0x00,\n // pre_defined\n 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // pre_defined\n width >> 8 & 0xff, width & 0xff,\n // width\n height >> 8 & 0xff, height & 0xff,\n // height\n 0x00, 0x48, 0x00, 0x00,\n // horizresolution\n 0x00, 0x48, 0x00, 0x00,\n // vertresolution\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x01,\n // frame_count\n 0x12, 0x64, 0x61, 0x69, 0x6c,\n // dailymotion/hls.js\n 0x79, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x68, 0x6c, 0x73, 0x2e, 0x6a, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // compressorname\n 0x00, 0x18,\n // depth = 24\n 0x11, 0x11]),\n // pre_defined = -1\n avcc, MP4.box(MP4.types.btrt, new Uint8Array([0x00, 0x1c, 0x9c, 0x80,\n // bufferSizeDB\n 0x00, 0x2d, 0xc6, 0xc0,\n // maxBitrate\n 0x00, 0x2d, 0xc6, 0xc0])),\n // avgBitrate\n MP4.box(MP4.types.pasp, new Uint8Array([hSpacing >> 24,\n // hSpacing\n hSpacing >> 16 & 0xff, hSpacing >> 8 & 0xff, hSpacing & 0xff, vSpacing >> 24,\n // vSpacing\n vSpacing >> 16 & 0xff, vSpacing >> 8 & 0xff, vSpacing & 0xff])));\n }\n static esds(track) {\n const configlen = track.config.length;\n return new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n\n 0x03,\n // descriptor_type\n 0x17 + configlen,\n // length\n 0x00, 0x01,\n // es_id\n 0x00,\n // stream_priority\n\n 0x04,\n // 
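// Editor's sketch: the avcC box above reads codec info out of the
// length-prefixed SPS array -- sps[3], sps[4] and sps[5] are profile_idc,
// profile_compatibility and level_idc, the same bytes that make up RFC 6381
// codec strings.
function avcCodecString(spsWithLengthPrefix) {
  return 'avc1.' + [3, 4, 5]
    .map(i => spsWithLengthPrefix[i].toString(16).padStart(2, '0'))
    .join('');
}
console.log(avcCodecString([0x00, 0x04, 0x67, 0x64, 0x00, 0x28])); // 'avc1.640028'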
descriptor_type\n 0x0f + configlen,\n // length\n 0x40,\n // codec : mpeg4_audio\n 0x15,\n // stream_type\n 0x00, 0x00, 0x00,\n // buffer_size\n 0x00, 0x00, 0x00, 0x00,\n // maxBitrate\n 0x00, 0x00, 0x00, 0x00,\n // avgBitrate\n\n 0x05 // descriptor_type\n ].concat([configlen]).concat(track.config).concat([0x06, 0x01, 0x02])); // GASpecificConfig)); // length + audio config descriptor\n }\n static audioStsd(track) {\n const samplerate = track.samplerate;\n return new Uint8Array([0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x01,\n // data_reference_index\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, track.channelCount,\n // channelcount\n 0x00, 0x10,\n // sampleSize:16bits\n 0x00, 0x00, 0x00, 0x00,\n // reserved2\n samplerate >> 8 & 0xff, samplerate & 0xff,\n //\n 0x00, 0x00]);\n }\n static mp4a(track) {\n return MP4.box(MP4.types.mp4a, MP4.audioStsd(track), MP4.box(MP4.types.esds, MP4.esds(track)));\n }\n static mp3(track) {\n return MP4.box(MP4.types['.mp3'], MP4.audioStsd(track));\n }\n static ac3(track) {\n return MP4.box(MP4.types['ac-3'], MP4.audioStsd(track), MP4.box(MP4.types.dac3, track.config));\n }\n static stsd(track) {\n if (track.type === 'audio') {\n if (track.segmentCodec === 'mp3' && track.codec === 'mp3') {\n return MP4.box(MP4.types.stsd, MP4.STSD, MP4.mp3(track));\n }\n if (track.segmentCodec === 'ac3') {\n return MP4.box(MP4.types.stsd, MP4.STSD, MP4.ac3(track));\n }\n return MP4.box(MP4.types.stsd, MP4.STSD, MP4.mp4a(track));\n } else {\n return MP4.box(MP4.types.stsd, MP4.STSD, MP4.avc1(track));\n }\n }\n static tkhd(track) {\n const id = track.id;\n const duration = track.duration * track.timescale;\n const width = track.width;\n const height = track.height;\n const upperWordDuration = Math.floor(duration / (UINT32_MAX + 1));\n const lowerWordDuration = Math.floor(duration % (UINT32_MAX + 1));\n return MP4.box(MP4.types.tkhd, new Uint8Array([0x01,\n // version 1\n 0x00, 0x00, 0x07,\n // flags\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,\n // creation_time\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,\n // modification_time\n id >> 24 & 0xff, id >> 16 & 0xff, id >> 8 & 0xff, id & 0xff,\n // track_ID\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n upperWordDuration >> 24, upperWordDuration >> 16 & 0xff, upperWordDuration >> 8 & 0xff, upperWordDuration & 0xff, lowerWordDuration >> 24, lowerWordDuration >> 16 & 0xff, lowerWordDuration >> 8 & 0xff, lowerWordDuration & 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00,\n // layer\n 0x00, 0x00,\n // alternate_group\n 0x00, 0x00,\n // non-audio track volume\n 0x00, 0x00,\n // reserved\n 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,\n // transformation: unity matrix\n width >> 8 & 0xff, width & 0xff, 0x00, 0x00,\n // width\n height >> 8 & 0xff, height & 0xff, 0x00, 0x00 // height\n ]));\n }\n static traf(track, baseMediaDecodeTime) {\n const sampleDependencyTable = MP4.sdtp(track);\n const id = track.id;\n const upperWordBaseMediaDecodeTime = Math.floor(baseMediaDecodeTime / (UINT32_MAX + 1));\n const lowerWordBaseMediaDecodeTime = Math.floor(baseMediaDecodeTime % (UINT32_MAX + 1));\n return MP4.box(MP4.types.traf, MP4.box(MP4.types.tfhd, new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n id >> 24, id >> 16 & 0xff, id >> 8 & 0xff, id & 0xff // 
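// Editor's sketch of the 32-bit word split used by mdhd/mvhd/tkhd/tfdt above:
// JS bitwise shifts cannot reach past 32 bits, so durations and decode times
// are divided into upper and lower words before being written byte by byte.
const WORD = UINT32_MAX + 1; // 2^32
function splitUint64(value) {
  return [Math.floor(value / WORD), Math.floor(value % WORD)]; // [upper, lower]
}
console.log(splitUint64(9e9)); // [2, 410065408] -- 9e9 = 2 * 2^32 + 410065408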
track_ID\n ])), MP4.box(MP4.types.tfdt, new Uint8Array([0x01,\n // version 1\n 0x00, 0x00, 0x00,\n // flags\n upperWordBaseMediaDecodeTime >> 24, upperWordBaseMediaDecodeTime >> 16 & 0xff, upperWordBaseMediaDecodeTime >> 8 & 0xff, upperWordBaseMediaDecodeTime & 0xff, lowerWordBaseMediaDecodeTime >> 24, lowerWordBaseMediaDecodeTime >> 16 & 0xff, lowerWordBaseMediaDecodeTime >> 8 & 0xff, lowerWordBaseMediaDecodeTime & 0xff])), MP4.trun(track, sampleDependencyTable.length + 16 +\n // tfhd\n 20 +\n // tfdt\n 8 +\n // traf header\n 16 +\n // mfhd\n 8 +\n // moof header\n 8),\n // mdat header\n sampleDependencyTable);\n }\n\n /**\n * Generate a track box.\n * @param track a track definition\n */\n static trak(track) {\n track.duration = track.duration || 0xffffffff;\n return MP4.box(MP4.types.trak, MP4.tkhd(track), MP4.mdia(track));\n }\n static trex(track) {\n const id = track.id;\n return MP4.box(MP4.types.trex, new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n id >> 24, id >> 16 & 0xff, id >> 8 & 0xff, id & 0xff,\n // track_ID\n 0x00, 0x00, 0x00, 0x01,\n // default_sample_description_index\n 0x00, 0x00, 0x00, 0x00,\n // default_sample_duration\n 0x00, 0x00, 0x00, 0x00,\n // default_sample_size\n 0x00, 0x01, 0x00, 0x01 // default_sample_flags\n ]));\n }\n static trun(track, offset) {\n const samples = track.samples || [];\n const len = samples.length;\n const arraylen = 12 + 16 * len;\n const array = new Uint8Array(arraylen);\n let i;\n let sample;\n let duration;\n let size;\n let flags;\n let cts;\n offset += 8 + arraylen;\n array.set([track.type === 'video' ? 0x01 : 0x00,\n // version 1 for video with signed-int sample_composition_time_offset\n 0x00, 0x0f, 0x01,\n // flags\n len >>> 24 & 0xff, len >>> 16 & 0xff, len >>> 8 & 0xff, len & 0xff,\n // sample_count\n offset >>> 24 & 0xff, offset >>> 16 & 0xff, offset >>> 8 & 0xff, offset & 0xff // data_offset\n ], 0);\n for (i = 0; i < len; i++) {\n sample = samples[i];\n duration = sample.duration;\n size = sample.size;\n flags = sample.flags;\n cts = sample.cts;\n array.set([duration >>> 24 & 0xff, duration >>> 16 & 0xff, duration >>> 8 & 0xff, duration & 0xff,\n // sample_duration\n size >>> 24 & 0xff, size >>> 16 & 0xff, size >>> 8 & 0xff, size & 0xff,\n // sample_size\n flags.isLeading << 2 | flags.dependsOn, flags.isDependedOn << 6 | flags.hasRedundancy << 4 | flags.paddingValue << 1 | flags.isNonSync, flags.degradPrio & 0xf0 << 8, flags.degradPrio & 0x0f,\n // sample_flags\n cts >>> 24 & 0xff, cts >>> 16 & 0xff, cts >>> 8 & 0xff, cts & 0xff // sample_composition_time_offset\n ], 12 + 16 * i);\n }\n return MP4.box(MP4.types.trun, array);\n }\n static initSegment(tracks) {\n if (!MP4.types) {\n MP4.init();\n }\n const movie = MP4.moov(tracks);\n const result = appendUint8Array(MP4.FTYP, movie);\n return result;\n }\n}\nMP4.types = void 0;\nMP4.HDLR_TYPES = void 0;\nMP4.STTS = void 0;\nMP4.STSC = void 0;\nMP4.STCO = void 0;\nMP4.STSZ = void 0;\nMP4.VMHD = void 0;\nMP4.SMHD = void 0;\nMP4.STSD = void 0;\nMP4.FTYP = void 0;\nMP4.DINF = void 0;\n\nconst MPEG_TS_CLOCK_FREQ_HZ = 90000;\nfunction toTimescaleFromBase(baseTime, destScale, srcBase = 1, round = false) {\n const result = baseTime * destScale * srcBase; // equivalent to `(value * scale) / (1 / base)`\n return round ? 
Math.round(result) : result;\n}\nfunction toTimescaleFromScale(baseTime, destScale, srcScale = 1, round = false) {\n return toTimescaleFromBase(baseTime, destScale, 1 / srcScale, round);\n}\nfunction toMsFromMpegTsClock(baseTime, round = false) {\n return toTimescaleFromBase(baseTime, 1000, 1 / MPEG_TS_CLOCK_FREQ_HZ, round);\n}\nfunction toMpegTsClockFromTimescale(baseTime, srcScale = 1) {\n return toTimescaleFromBase(baseTime, MPEG_TS_CLOCK_FREQ_HZ, 1 / srcScale);\n}\n\nconst MAX_SILENT_FRAME_DURATION = 10 * 1000; // 10 seconds\nconst AAC_SAMPLES_PER_FRAME = 1024;\nconst MPEG_AUDIO_SAMPLE_PER_FRAME = 1152;\nconst AC3_SAMPLES_PER_FRAME = 1536;\nlet chromeVersion = null;\nlet safariWebkitVersion = null;\nclass MP4Remuxer {\n constructor(observer, config, typeSupported, vendor = '') {\n this.observer = void 0;\n this.config = void 0;\n this.typeSupported = void 0;\n this.ISGenerated = false;\n this._initPTS = null;\n this._initDTS = null;\n this.nextAvcDts = null;\n this.nextAudioPts = null;\n this.videoSampleDuration = null;\n this.isAudioContiguous = false;\n this.isVideoContiguous = false;\n this.videoTrackConfig = void 0;\n this.observer = observer;\n this.config = config;\n this.typeSupported = typeSupported;\n this.ISGenerated = false;\n if (chromeVersion === null) {\n const userAgent = navigator.userAgent || '';\n const result = userAgent.match(/Chrome\\/(\\d+)/i);\n chromeVersion = result ? parseInt(result[1]) : 0;\n }\n if (safariWebkitVersion === null) {\n const result = navigator.userAgent.match(/Safari\\/(\\d+)/i);\n safariWebkitVersion = result ? parseInt(result[1]) : 0;\n }\n }\n destroy() {\n // @ts-ignore\n this.config = this.videoTrackConfig = this._initPTS = this._initDTS = null;\n }\n resetTimeStamp(defaultTimeStamp) {\n logger.log('[mp4-remuxer]: initPTS & initDTS reset');\n this._initPTS = this._initDTS = defaultTimeStamp;\n }\n resetNextTimestamp() {\n logger.log('[mp4-remuxer]: reset next timestamp');\n this.isVideoContiguous = false;\n this.isAudioContiguous = false;\n }\n resetInitSegment() {\n logger.log('[mp4-remuxer]: ISGenerated flag reset');\n this.ISGenerated = false;\n this.videoTrackConfig = undefined;\n }\n getVideoStartPts(videoSamples) {\n let rolloverDetected = false;\n const startPTS = videoSamples.reduce((minPTS, sample) => {\n const delta = sample.pts - minPTS;\n if (delta < -4294967296) {\n // 2^32, see PTSNormalize for reasoning, but we're hitting a rollover here, and we don't want that to impact the timeOffset calculation\n rolloverDetected = true;\n return normalizePts(minPTS, sample.pts);\n } else if (delta > 0) {\n return minPTS;\n } else {\n return sample.pts;\n }\n }, videoSamples[0].pts);\n if (rolloverDetected) {\n logger.debug('PTS rollover detected');\n }\n return startPTS;\n }\n remux(audioTrack, videoTrack, id3Track, textTrack, timeOffset, accurateTimeOffset, flush, playlistType) {\n let video;\n let audio;\n let initSegment;\n let text;\n let id3;\n let independent;\n let audioTimeOffset = timeOffset;\n let videoTimeOffset = timeOffset;\n\n // If we're remuxing audio and video progressively, wait until we've received enough samples for each track before proceeding.\n // This is done to synchronize the audio and video streams. We know if the current segment will have samples if the \"pid\"\n // parameter is greater than -1. 
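// Worked example of the clock helpers above: 450000 ticks of the 90 kHz
// MPEG-TS clock are 5000 ms, and 2.5 seconds map back to 225000 ticks.
console.log(toMsFromMpegTsClock(450000, true)); // 5000
console.log(toMpegTsClockFromTimescale(2.5));   // 225000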
The pid is set when the PMT is parsed, which contains the tracks list.\n // However, if the initSegment has already been generated, or we've reached the end of a segment (flush),\n // then we can remux one track without waiting for the other.\n const hasAudio = audioTrack.pid > -1;\n const hasVideo = videoTrack.pid > -1;\n const length = videoTrack.samples.length;\n const enoughAudioSamples = audioTrack.samples.length > 0;\n const enoughVideoSamples = flush && length > 0 || length > 1;\n const canRemuxAvc = (!hasAudio || enoughAudioSamples) && (!hasVideo || enoughVideoSamples) || this.ISGenerated || flush;\n if (canRemuxAvc) {\n if (this.ISGenerated) {\n var _videoTrack$pixelRati, _config$pixelRatio, _videoTrack$pixelRati2, _config$pixelRatio2;\n const config = this.videoTrackConfig;\n if (config && (videoTrack.width !== config.width || videoTrack.height !== config.height || ((_videoTrack$pixelRati = videoTrack.pixelRatio) == null ? void 0 : _videoTrack$pixelRati[0]) !== ((_config$pixelRatio = config.pixelRatio) == null ? void 0 : _config$pixelRatio[0]) || ((_videoTrack$pixelRati2 = videoTrack.pixelRatio) == null ? void 0 : _videoTrack$pixelRati2[1]) !== ((_config$pixelRatio2 = config.pixelRatio) == null ? void 0 : _config$pixelRatio2[1]))) {\n this.resetInitSegment();\n }\n } else {\n initSegment = this.generateIS(audioTrack, videoTrack, timeOffset, accurateTimeOffset);\n }\n const isVideoContiguous = this.isVideoContiguous;\n let firstKeyFrameIndex = -1;\n let firstKeyFramePTS;\n if (enoughVideoSamples) {\n firstKeyFrameIndex = findKeyframeIndex(videoTrack.samples);\n if (!isVideoContiguous && this.config.forceKeyFrameOnDiscontinuity) {\n independent = true;\n if (firstKeyFrameIndex > 0) {\n logger.warn(`[mp4-remuxer]: Dropped ${firstKeyFrameIndex} out of ${length} video samples due to a missing keyframe`);\n const startPTS = this.getVideoStartPts(videoTrack.samples);\n videoTrack.samples = videoTrack.samples.slice(firstKeyFrameIndex);\n videoTrack.dropped += firstKeyFrameIndex;\n videoTimeOffset += (videoTrack.samples[0].pts - startPTS) / videoTrack.inputTimeScale;\n firstKeyFramePTS = videoTimeOffset;\n } else if (firstKeyFrameIndex === -1) {\n logger.warn(`[mp4-remuxer]: No keyframe found out of ${length} video samples`);\n independent = false;\n }\n }\n }\n if (this.ISGenerated) {\n if (enoughAudioSamples && enoughVideoSamples) {\n // timeOffset is expected to be the offset of the first timestamp of this fragment (first DTS)\n // if first audio DTS is not aligned with first video DTS then we need to take that into account\n // when providing timeOffset to remuxAudio / remuxVideo. 
if we don't do that, there might be a permanent / small\n // drift between audio and video streams\n const startPTS = this.getVideoStartPts(videoTrack.samples);\n const tsDelta = normalizePts(audioTrack.samples[0].pts, startPTS) - startPTS;\n const audiovideoTimestampDelta = tsDelta / videoTrack.inputTimeScale;\n audioTimeOffset += Math.max(0, audiovideoTimestampDelta);\n videoTimeOffset += Math.max(0, -audiovideoTimestampDelta);\n }\n\n // Purposefully remuxing audio before video, so that remuxVideo can use nextAudioPts, which is calculated in remuxAudio.\n if (enoughAudioSamples) {\n // if initSegment was generated without audio samples, regenerate it again\n if (!audioTrack.samplerate) {\n logger.warn('[mp4-remuxer]: regenerate InitSegment as audio detected');\n initSegment = this.generateIS(audioTrack, videoTrack, timeOffset, accurateTimeOffset);\n }\n audio = this.remuxAudio(audioTrack, audioTimeOffset, this.isAudioContiguous, accurateTimeOffset, hasVideo || enoughVideoSamples || playlistType === PlaylistLevelType.AUDIO ? videoTimeOffset : undefined);\n if (enoughVideoSamples) {\n const audioTrackLength = audio ? audio.endPTS - audio.startPTS : 0;\n // if initSegment was generated without video samples, regenerate it again\n if (!videoTrack.inputTimeScale) {\n logger.warn('[mp4-remuxer]: regenerate InitSegment as video detected');\n initSegment = this.generateIS(audioTrack, videoTrack, timeOffset, accurateTimeOffset);\n }\n video = this.remuxVideo(videoTrack, videoTimeOffset, isVideoContiguous, audioTrackLength);\n }\n } else if (enoughVideoSamples) {\n video = this.remuxVideo(videoTrack, videoTimeOffset, isVideoContiguous, 0);\n }\n if (video) {\n video.firstKeyFrame = firstKeyFrameIndex;\n video.independent = firstKeyFrameIndex !== -1;\n video.firstKeyFramePTS = firstKeyFramePTS;\n }\n }\n }\n\n // Allow ID3 and text to remux, even if more audio/video samples are required\n if (this.ISGenerated && this._initPTS && this._initDTS) {\n if (id3Track.samples.length) {\n id3 = flushTextTrackMetadataCueSamples(id3Track, timeOffset, this._initPTS, this._initDTS);\n }\n if (textTrack.samples.length) {\n text = flushTextTrackUserdataCueSamples(textTrack, timeOffset, this._initPTS);\n }\n }\n return {\n audio,\n video,\n initSegment,\n independent,\n text,\n id3\n };\n }\n generateIS(audioTrack, videoTrack, timeOffset, accurateTimeOffset) {\n const audioSamples = audioTrack.samples;\n const videoSamples = videoTrack.samples;\n const typeSupported = this.typeSupported;\n const tracks = {};\n const _initPTS = this._initPTS;\n let computePTSDTS = !_initPTS || accurateTimeOffset;\n let container = 'audio/mp4';\n let initPTS;\n let initDTS;\n let timescale;\n if (computePTSDTS) {\n initPTS = initDTS = Infinity;\n }\n if (audioTrack.config && audioSamples.length) {\n // let's use audio sampling rate as MP4 time scale.\n // rationale is that there is a integer nb of audio frames per audio sample (1024 for AAC)\n // using audio sampling rate here helps having an integer MP4 frame duration\n // this avoids potential rounding issue and AV sync issue\n audioTrack.timescale = audioTrack.samplerate;\n switch (audioTrack.segmentCodec) {\n case 'mp3':\n if (typeSupported.mpeg) {\n // Chrome and Safari\n container = 'audio/mpeg';\n audioTrack.codec = '';\n } else if (typeSupported.mp3) {\n // Firefox\n audioTrack.codec = 'mp3';\n }\n break;\n case 'ac3':\n audioTrack.codec = 'ac-3';\n break;\n }\n tracks.audio = {\n id: 'audio',\n container: container,\n codec: audioTrack.codec,\n initSegment: 
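// Editor's sketch of the timescale rationale stated below in generateIS: with
// track.timescale = track.samplerate an AAC frame lasts exactly 1024 ticks,
// while the 90 kHz input timescale would give a fractional frame duration and
// accumulate rounding drift.
const frameSeconds = 1024 / 44100;   // one AAC frame at 44.1 kHz
console.log(frameSeconds * 44100);   // 1024 -- integral at timescale = samplerate
console.log(frameSeconds * 90000);   // 2089.795918... -- fractional at 90 kHz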
audioTrack.segmentCodec === 'mp3' && typeSupported.mpeg ? new Uint8Array(0) : MP4.initSegment([audioTrack]),\n metadata: {\n channelCount: audioTrack.channelCount\n }\n };\n if (computePTSDTS) {\n timescale = audioTrack.inputTimeScale;\n if (!_initPTS || timescale !== _initPTS.timescale) {\n // remember first PTS of this demuxing context. for audio, PTS = DTS\n initPTS = initDTS = audioSamples[0].pts - Math.round(timescale * timeOffset);\n } else {\n computePTSDTS = false;\n }\n }\n }\n if (videoTrack.sps && videoTrack.pps && videoSamples.length) {\n // let's use input time scale as MP4 video timescale\n // we use input time scale straight away to avoid rounding issues on frame duration / cts computation\n videoTrack.timescale = videoTrack.inputTimeScale;\n tracks.video = {\n id: 'main',\n container: 'video/mp4',\n codec: videoTrack.codec,\n initSegment: MP4.initSegment([videoTrack]),\n metadata: {\n width: videoTrack.width,\n height: videoTrack.height\n }\n };\n if (computePTSDTS) {\n timescale = videoTrack.inputTimeScale;\n if (!_initPTS || timescale !== _initPTS.timescale) {\n const startPTS = this.getVideoStartPts(videoSamples);\n const startOffset = Math.round(timescale * timeOffset);\n initDTS = Math.min(initDTS, normalizePts(videoSamples[0].dts, startPTS) - startOffset);\n initPTS = Math.min(initPTS, startPTS - startOffset);\n } else {\n computePTSDTS = false;\n }\n }\n this.videoTrackConfig = {\n width: videoTrack.width,\n height: videoTrack.height,\n pixelRatio: videoTrack.pixelRatio\n };\n }\n if (Object.keys(tracks).length) {\n this.ISGenerated = true;\n if (computePTSDTS) {\n this._initPTS = {\n baseTime: initPTS,\n timescale: timescale\n };\n this._initDTS = {\n baseTime: initDTS,\n timescale: timescale\n };\n } else {\n initPTS = timescale = undefined;\n }\n return {\n tracks,\n initPTS,\n timescale\n };\n }\n }\n remuxVideo(track, timeOffset, contiguous, audioTrackLength) {\n const timeScale = track.inputTimeScale;\n const inputSamples = track.samples;\n const outputSamples = [];\n const nbSamples = inputSamples.length;\n const initPTS = this._initPTS;\n let nextAvcDts = this.nextAvcDts;\n let offset = 8;\n let mp4SampleDuration = this.videoSampleDuration;\n let firstDTS;\n let lastDTS;\n let minPTS = Number.POSITIVE_INFINITY;\n let maxPTS = Number.NEGATIVE_INFINITY;\n let sortSamples = false;\n\n // if parsed fragment is contiguous with last one, let's use last DTS value as reference\n if (!contiguous || nextAvcDts === null) {\n const pts = timeOffset * timeScale;\n const cts = inputSamples[0].pts - normalizePts(inputSamples[0].dts, inputSamples[0].pts);\n if (chromeVersion && nextAvcDts !== null && Math.abs(pts - cts - nextAvcDts) < 15000) {\n // treat as contigous to adjust samples that would otherwise produce video buffer gaps in Chrome\n contiguous = true;\n } else {\n // if not contiguous, let's use target timeOffset\n nextAvcDts = pts - cts;\n }\n }\n\n // PTS is coded on 33bits, and can loop from -2^32 to 2^32\n // PTSNormalize will make PTS/DTS value monotonic, we use last known DTS value as reference value\n const initTime = initPTS.baseTime * timeScale / initPTS.timescale;\n for (let i = 0; i < nbSamples; i++) {\n const sample = inputSamples[i];\n sample.pts = normalizePts(sample.pts - initTime, nextAvcDts);\n sample.dts = normalizePts(sample.dts - initTime, nextAvcDts);\n if (sample.dts < inputSamples[i > 0 ? 
i - 1 : i].dts) {\n sortSamples = true;\n }\n }\n\n // sort video samples by DTS then PTS then demux id order\n if (sortSamples) {\n inputSamples.sort(function (a, b) {\n const deltadts = a.dts - b.dts;\n const deltapts = a.pts - b.pts;\n return deltadts || deltapts;\n });\n }\n\n // Get first/last DTS\n firstDTS = inputSamples[0].dts;\n lastDTS = inputSamples[inputSamples.length - 1].dts;\n\n // Sample duration (as expected by trun MP4 boxes), should be the delta between sample DTS\n // set this constant duration as being the avg delta between consecutive DTS.\n const inputDuration = lastDTS - firstDTS;\n const averageSampleDuration = inputDuration ? Math.round(inputDuration / (nbSamples - 1)) : mp4SampleDuration || track.inputTimeScale / 30;\n\n // if fragment are contiguous, detect hole/overlapping between fragments\n if (contiguous) {\n // check timestamp continuity across consecutive fragments (this is to remove inter-fragment gap/hole)\n const delta = firstDTS - nextAvcDts;\n const foundHole = delta > averageSampleDuration;\n const foundOverlap = delta < -1;\n if (foundHole || foundOverlap) {\n if (foundHole) {\n logger.warn(`AVC: ${toMsFromMpegTsClock(delta, true)} ms (${delta}dts) hole between fragments detected at ${timeOffset.toFixed(3)}`);\n } else {\n logger.warn(`AVC: ${toMsFromMpegTsClock(-delta, true)} ms (${delta}dts) overlapping between fragments detected at ${timeOffset.toFixed(3)}`);\n }\n if (!foundOverlap || nextAvcDts >= inputSamples[0].pts || chromeVersion) {\n firstDTS = nextAvcDts;\n const firstPTS = inputSamples[0].pts - delta;\n if (foundHole) {\n inputSamples[0].dts = firstDTS;\n inputSamples[0].pts = firstPTS;\n } else {\n for (let i = 0; i < inputSamples.length; i++) {\n if (inputSamples[i].dts > firstPTS) {\n break;\n }\n inputSamples[i].dts -= delta;\n inputSamples[i].pts -= delta;\n }\n }\n logger.log(`Video: Initial PTS/DTS adjusted: ${toMsFromMpegTsClock(firstPTS, true)}/${toMsFromMpegTsClock(firstDTS, true)}, delta: ${toMsFromMpegTsClock(delta, true)} ms`);\n }\n }\n }\n firstDTS = Math.max(0, firstDTS);\n let nbNalu = 0;\n let naluLen = 0;\n let dtsStep = firstDTS;\n for (let i = 0; i < nbSamples; i++) {\n // compute total/avc sample length and nb of NAL units\n const sample = inputSamples[i];\n const units = sample.units;\n const nbUnits = units.length;\n let sampleLen = 0;\n for (let j = 0; j < nbUnits; j++) {\n sampleLen += units[j].data.length;\n }\n naluLen += sampleLen;\n nbNalu += nbUnits;\n sample.length = sampleLen;\n\n // ensure sample monotonic DTS\n if (sample.dts < dtsStep) {\n sample.dts = dtsStep;\n dtsStep += averageSampleDuration / 4 | 0 || 1;\n } else {\n dtsStep = sample.dts;\n }\n minPTS = Math.min(sample.pts, minPTS);\n maxPTS = Math.max(sample.pts, maxPTS);\n }\n lastDTS = inputSamples[nbSamples - 1].dts;\n\n /* concatenate the video data and construct the mdat in place\n (need 8 more bytes to fill length and mpdat type) */\n const mdatSize = naluLen + 4 * nbNalu + 8;\n let mdat;\n try {\n mdat = new Uint8Array(mdatSize);\n } catch (err) {\n this.observer.emit(Events.ERROR, Events.ERROR, {\n type: ErrorTypes.MUX_ERROR,\n details: ErrorDetails.REMUX_ALLOC_ERROR,\n fatal: false,\n error: err,\n bytes: mdatSize,\n reason: `fail allocating video mdat ${mdatSize}`\n });\n return;\n }\n const view = new DataView(mdat.buffer);\n view.setUint32(0, mdatSize);\n mdat.set(MP4.types.mdat, 4);\n let stretchedLastFrame = false;\n let minDtsDelta = Number.POSITIVE_INFINITY;\n let minPtsDelta = Number.POSITIVE_INFINITY;\n let maxDtsDelta = 
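// Editor's sketch of the inter-fragment joint test above, in isolation: a gap
// larger than one average sample duration counts as a hole, and a negative
// gap beyond one tick as an overlap.
function classifyJoint(firstDTS, nextAvcDts, averageSampleDuration) {
  const delta = firstDTS - nextAvcDts;
  if (delta > averageSampleDuration) return 'hole';
  if (delta < -1) return 'overlap';
  return 'contiguous';
}
console.log(classifyJoint(9000, 0, 3000)); // 'hole' (100 ms at 90 kHz)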
Number.NEGATIVE_INFINITY;\n let maxPtsDelta = Number.NEGATIVE_INFINITY;\n for (let i = 0; i < nbSamples; i++) {\n const VideoSample = inputSamples[i];\n const VideoSampleUnits = VideoSample.units;\n let mp4SampleLength = 0;\n // convert NALU bitstream to MP4 format (prepend NALU with size field)\n for (let j = 0, nbUnits = VideoSampleUnits.length; j < nbUnits; j++) {\n const unit = VideoSampleUnits[j];\n const unitData = unit.data;\n const unitDataLen = unit.data.byteLength;\n view.setUint32(offset, unitDataLen);\n offset += 4;\n mdat.set(unitData, offset);\n offset += unitDataLen;\n mp4SampleLength += 4 + unitDataLen;\n }\n\n // expected sample duration is the Decoding Timestamp diff of consecutive samples\n let ptsDelta;\n if (i < nbSamples - 1) {\n mp4SampleDuration = inputSamples[i + 1].dts - VideoSample.dts;\n ptsDelta = inputSamples[i + 1].pts - VideoSample.pts;\n } else {\n const config = this.config;\n const lastFrameDuration = i > 0 ? VideoSample.dts - inputSamples[i - 1].dts : averageSampleDuration;\n ptsDelta = i > 0 ? VideoSample.pts - inputSamples[i - 1].pts : averageSampleDuration;\n if (config.stretchShortVideoTrack && this.nextAudioPts !== null) {\n // In some cases, a segment's audio track duration may exceed the video track duration.\n // Since we've already remuxed audio, and we know how long the audio track is, we look to\n // see if the delta to the next segment is longer than maxBufferHole.\n // If so, playback would potentially get stuck, so we artificially inflate\n // the duration of the last frame to minimize any potential gap between segments.\n const gapTolerance = Math.floor(config.maxBufferHole * timeScale);\n const deltaToFrameEnd = (audioTrackLength ? minPTS + audioTrackLength * timeScale : this.nextAudioPts) - VideoSample.pts;\n if (deltaToFrameEnd > gapTolerance) {\n // We subtract lastFrameDuration from deltaToFrameEnd to try to prevent any video\n // frame overlap. maxBufferHole should be >> lastFrameDuration anyway.\n mp4SampleDuration = deltaToFrameEnd - lastFrameDuration;\n if (mp4SampleDuration < 0) {\n mp4SampleDuration = lastFrameDuration;\n } else {\n stretchedLastFrame = true;\n }\n logger.log(`[mp4-remuxer]: It is approximately ${deltaToFrameEnd / 90} ms to the next segment; using duration ${mp4SampleDuration / 90} ms for the last video frame.`);\n } else {\n mp4SampleDuration = lastFrameDuration;\n }\n } else {\n mp4SampleDuration = lastFrameDuration;\n }\n }\n const compositionTimeOffset = Math.round(VideoSample.pts - VideoSample.dts);\n minDtsDelta = Math.min(minDtsDelta, mp4SampleDuration);\n maxDtsDelta = Math.max(maxDtsDelta, mp4SampleDuration);\n minPtsDelta = Math.min(minPtsDelta, ptsDelta);\n maxPtsDelta = Math.max(maxPtsDelta, ptsDelta);\n outputSamples.push(new Mp4Sample(VideoSample.key, mp4SampleDuration, mp4SampleLength, compositionTimeOffset));\n }\n if (outputSamples.length) {\n if (chromeVersion) {\n if (chromeVersion < 70) {\n // Chrome workaround, mark first sample as being a Random Access Point (keyframe) to avoid sourcebuffer append issue\n // https://code.google.com/p/chromium/issues/detail?id=229412\n const flags = outputSamples[0].flags;\n flags.dependsOn = 2;\n flags.isNonSync = 0;\n }\n } else if (safariWebkitVersion) {\n // Fix for \"CNN special report, with CC\" in test-streams (Safari browser only)\n // Ignore DTS when frame durations are irregular. 
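// Worked numbers for the stretchShortVideoTrack logic above: with
// maxBufferHole = 0.5 s the tolerance is 45000 ticks at 90 kHz, so a
// 60000-tick gap to the next segment stretches the last frame instead of
// leaving a buffer hole.
const gapTolerance = Math.floor(0.5 * 90000);
console.log(gapTolerance, 60000 > gapTolerance); // 45000 true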
Safari MSE does not handle this leading to gaps.\n if (maxPtsDelta - minPtsDelta < maxDtsDelta - minDtsDelta && averageSampleDuration / maxDtsDelta < 0.025 && outputSamples[0].cts === 0) {\n logger.warn('Found irregular gaps in sample duration. Using PTS instead of DTS to determine MP4 sample duration.');\n let dts = firstDTS;\n for (let i = 0, len = outputSamples.length; i < len; i++) {\n const nextDts = dts + outputSamples[i].duration;\n const pts = dts + outputSamples[i].cts;\n if (i < len - 1) {\n const nextPts = nextDts + outputSamples[i + 1].cts;\n outputSamples[i].duration = nextPts - pts;\n } else {\n outputSamples[i].duration = i ? outputSamples[i - 1].duration : averageSampleDuration;\n }\n outputSamples[i].cts = 0;\n dts = nextDts;\n }\n }\n }\n }\n // next AVC sample DTS should be equal to last sample DTS + last sample duration (in PES timescale)\n mp4SampleDuration = stretchedLastFrame || !mp4SampleDuration ? averageSampleDuration : mp4SampleDuration;\n this.nextAvcDts = nextAvcDts = lastDTS + mp4SampleDuration;\n this.videoSampleDuration = mp4SampleDuration;\n this.isVideoContiguous = true;\n const moof = MP4.moof(track.sequenceNumber++, firstDTS, _extends({}, track, {\n samples: outputSamples\n }));\n const type = 'video';\n const data = {\n data1: moof,\n data2: mdat,\n startPTS: minPTS / timeScale,\n endPTS: (maxPTS + mp4SampleDuration) / timeScale,\n startDTS: firstDTS / timeScale,\n endDTS: nextAvcDts / timeScale,\n type,\n hasAudio: false,\n hasVideo: true,\n nb: outputSamples.length,\n dropped: track.dropped\n };\n track.samples = [];\n track.dropped = 0;\n return data;\n }\n getSamplesPerFrame(track) {\n switch (track.segmentCodec) {\n case 'mp3':\n return MPEG_AUDIO_SAMPLE_PER_FRAME;\n case 'ac3':\n return AC3_SAMPLES_PER_FRAME;\n default:\n return AAC_SAMPLES_PER_FRAME;\n }\n }\n remuxAudio(track, timeOffset, contiguous, accurateTimeOffset, videoTimeOffset) {\n const inputTimeScale = track.inputTimeScale;\n const mp4timeScale = track.samplerate ? track.samplerate : inputTimeScale;\n const scaleFactor = inputTimeScale / mp4timeScale;\n const mp4SampleDuration = this.getSamplesPerFrame(track);\n const inputSampleDuration = mp4SampleDuration * scaleFactor;\n const initPTS = this._initPTS;\n const rawMPEG = track.segmentCodec === 'mp3' && this.typeSupported.mpeg;\n const outputSamples = [];\n const alignedWithVideo = videoTimeOffset !== undefined;\n let inputSamples = track.samples;\n let offset = rawMPEG ? 0 : 8;\n let nextAudioPts = this.nextAudioPts || -1;\n\n // window.audioSamples ? 
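// Worked example of the timescale arithmetic set up above (hypothetical
// numbers): MPEG-TS timestamps tick at 90 kHz, while the fmp4 audio track is
// written in the codec's sample rate. For 44.1 kHz AAC, one 1024-sample frame
// is 1024 ticks in the mp4 timescale but roughly 2090 ticks at 90 kHz:
const exampleInputTimeScale = 90000;
const exampleSampleRate = 44100;
const exampleScaleFactor = exampleInputTimeScale / exampleSampleRate; // ≈ 2.0408
const exampleFrameDuration = 1024 * exampleScaleFactor;               // ≈ 2090 ticks @ 90 kHz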
window.audioSamples.push(inputSamples.map(s => s.pts)) : (window.audioSamples = [inputSamples.map(s => s.pts)]);\n\n // for audio samples, also consider consecutive fragments as being contiguous (even if a level switch occurs),\n // for sake of clarity:\n // consecutive fragments are frags with\n // - less than 100ms gaps between new time offset (if accurate) and next expected PTS OR\n // - less than 20 audio frames distance\n // contiguous fragments are consecutive fragments from same quality level (same level, new SN = old SN + 1)\n // this helps ensuring audio continuity\n // and this also avoids audio glitches/cut when switching quality, or reporting wrong duration on first audio frame\n const timeOffsetMpegTS = timeOffset * inputTimeScale;\n const initTime = initPTS.baseTime * inputTimeScale / initPTS.timescale;\n this.isAudioContiguous = contiguous = contiguous || inputSamples.length && nextAudioPts > 0 && (accurateTimeOffset && Math.abs(timeOffsetMpegTS - nextAudioPts) < 9000 || Math.abs(normalizePts(inputSamples[0].pts - initTime, timeOffsetMpegTS) - nextAudioPts) < 20 * inputSampleDuration);\n\n // compute normalized PTS\n inputSamples.forEach(function (sample) {\n sample.pts = normalizePts(sample.pts - initTime, timeOffsetMpegTS);\n });\n if (!contiguous || nextAudioPts < 0) {\n // filter out sample with negative PTS that are not playable anyway\n // if we don't remove these negative samples, they will shift all audio samples forward.\n // leading to audio overlap between current / next fragment\n inputSamples = inputSamples.filter(sample => sample.pts >= 0);\n\n // in case all samples have negative PTS, and have been filtered out, return now\n if (!inputSamples.length) {\n return;\n }\n if (videoTimeOffset === 0) {\n // Set the start to 0 to match video so that start gaps larger than inputSampleDuration are filled with silence\n nextAudioPts = 0;\n } else if (accurateTimeOffset && !alignedWithVideo) {\n // When not seeking, not live, and LevelDetails.PTSKnown, use fragment start as predicted next audio PTS\n nextAudioPts = Math.max(0, timeOffsetMpegTS);\n } else {\n // if frags are not contiguous and if we cant trust time offset, let's use first sample PTS as next audio PTS\n nextAudioPts = inputSamples[0].pts;\n }\n }\n\n // If the audio track is missing samples, the frames seem to get \"left-shifted\" within the\n // resulting mp4 segment, causing sync issues and leaving gaps at the end of the audio segment.\n // In an effort to prevent this from happening, we inject frames here where there are gaps.\n // When possible, we inject a silent frame; when that's not possible, we duplicate the last\n // frame.\n\n if (track.segmentCodec === 'aac') {\n const maxAudioFramesDrift = this.config.maxAudioFramesDrift;\n for (let i = 0, nextPts = nextAudioPts; i < inputSamples.length; i++) {\n // First, let's see how far off this frame is from where we expect it to be\n const sample = inputSamples[i];\n const pts = sample.pts;\n const delta = pts - nextPts;\n const duration = Math.abs(1000 * delta / inputTimeScale);\n\n // When remuxing with video, if we're overlapping by more than a duration, drop this sample to stay in sync\n if (delta <= -maxAudioFramesDrift * inputSampleDuration && alignedWithVideo) {\n if (i === 0) {\n logger.warn(`Audio frame @ ${(pts / inputTimeScale).toFixed(3)}s overlaps nextAudioPts by ${Math.round(1000 * delta / inputTimeScale)} ms.`);\n this.nextAudioPts = nextAudioPts = nextPts = pts;\n }\n } // eslint-disable-line brace-style\n\n // Insert missing frames if:\n 
// 1: We're more than maxAudioFramesDrift frame away\n // 2: Not more than MAX_SILENT_FRAME_DURATION away\n // 3: currentTime (aka nextPtsNorm) is not 0\n // 4: remuxing with video (videoTimeOffset !== undefined)\n else if (delta >= maxAudioFramesDrift * inputSampleDuration && duration < MAX_SILENT_FRAME_DURATION && alignedWithVideo) {\n let missing = Math.round(delta / inputSampleDuration);\n // Adjust nextPts so that silent samples are aligned with media pts. This will prevent media samples from\n // later being shifted if nextPts is based on timeOffset and delta is not a multiple of inputSampleDuration.\n nextPts = pts - missing * inputSampleDuration;\n if (nextPts < 0) {\n missing--;\n nextPts += inputSampleDuration;\n }\n if (i === 0) {\n this.nextAudioPts = nextAudioPts = nextPts;\n }\n logger.warn(`[mp4-remuxer]: Injecting ${missing} audio frame @ ${(nextPts / inputTimeScale).toFixed(3)}s due to ${Math.round(1000 * delta / inputTimeScale)} ms gap.`);\n for (let j = 0; j < missing; j++) {\n const newStamp = Math.max(nextPts, 0);\n let fillFrame = AAC.getSilentFrame(track.manifestCodec || track.codec, track.channelCount);\n if (!fillFrame) {\n logger.log('[mp4-remuxer]: Unable to get silent frame for given audio codec; duplicating last frame instead.');\n fillFrame = sample.unit.subarray();\n }\n inputSamples.splice(i, 0, {\n unit: fillFrame,\n pts: newStamp\n });\n nextPts += inputSampleDuration;\n i++;\n }\n }\n sample.pts = nextPts;\n nextPts += inputSampleDuration;\n }\n }\n let firstPTS = null;\n let lastPTS = null;\n let mdat;\n let mdatSize = 0;\n let sampleLength = inputSamples.length;\n while (sampleLength--) {\n mdatSize += inputSamples[sampleLength].unit.byteLength;\n }\n for (let j = 0, _nbSamples = inputSamples.length; j < _nbSamples; j++) {\n const audioSample = inputSamples[j];\n const unit = audioSample.unit;\n let pts = audioSample.pts;\n if (lastPTS !== null) {\n // If we have more than one sample, set the duration of the sample to the \"real\" duration; the PTS diff with\n // the previous sample\n const prevSample = outputSamples[j - 1];\n prevSample.duration = Math.round((pts - lastPTS) / scaleFactor);\n } else {\n if (contiguous && track.segmentCodec === 'aac') {\n // set PTS/DTS to expected PTS/DTS\n pts = nextAudioPts;\n }\n // remember first PTS of our audioSamples\n firstPTS = pts;\n if (mdatSize > 0) {\n /* concatenate the audio data and construct the mdat in place\n (need 8 more bytes to fill length and mdat type) */\n mdatSize += offset;\n try {\n mdat = new Uint8Array(mdatSize);\n } catch (err) {\n this.observer.emit(Events.ERROR, Events.ERROR, {\n type: ErrorTypes.MUX_ERROR,\n details: ErrorDetails.REMUX_ALLOC_ERROR,\n fatal: false,\n error: err,\n bytes: mdatSize,\n reason: `fail allocating audio mdat ${mdatSize}`\n });\n return;\n }\n if (!rawMPEG) {\n const view = new DataView(mdat.buffer);\n view.setUint32(0, mdatSize);\n mdat.set(MP4.types.mdat, 4);\n }\n } else {\n // no audio samples\n return;\n }\n }\n mdat.set(unit, offset);\n const unitLen = unit.byteLength;\n offset += unitLen;\n // Default the sample's duration to the computed mp4SampleDuration, which will either be 1024 for AAC or 1152 for MPEG\n // In the case that we have 1 sample, this will be the duration. 
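// Sketch of the gap-fill arithmetic used above, with hypothetical numbers: a
// roughly 93 ms hole before an AAC frame (8360 ticks @ 90 kHz, frames of
// about 2090 ticks) rounds to four missing frames, and nextPts is pulled back
// so the injected silence stays frame-aligned with the real sample's pts:
const exampleGap = 8360;                  // delta to expected pts, 90 kHz ticks
const exampleFrame = 2090;                // one AAC frame @ 90 kHz
const missingFrames = Math.round(exampleGap / exampleFrame); // 4
const realSamplePts = 100000;             // pts of the frame after the hole
const firstSilentPts = realSamplePts - missingFrames * exampleFrame; // 91640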
If we have more than one sample, the duration\n // becomes the PTS diff with the previous sample\n outputSamples.push(new Mp4Sample(true, mp4SampleDuration, unitLen, 0));\n lastPTS = pts;\n }\n\n // We could end up with no audio samples if all input samples were overlapping with the previously remuxed ones\n const nbSamples = outputSamples.length;\n if (!nbSamples) {\n return;\n }\n\n // The next audio sample PTS should be equal to last sample PTS + duration\n const lastSample = outputSamples[outputSamples.length - 1];\n this.nextAudioPts = nextAudioPts = lastPTS + scaleFactor * lastSample.duration;\n\n // Set the track samples from inputSamples to outputSamples before remuxing\n const moof = rawMPEG ? new Uint8Array(0) : MP4.moof(track.sequenceNumber++, firstPTS / scaleFactor, _extends({}, track, {\n samples: outputSamples\n }));\n\n // Clear the track samples. This also clears the samples array in the demuxer, since the reference is shared\n track.samples = [];\n const start = firstPTS / inputTimeScale;\n const end = nextAudioPts / inputTimeScale;\n const type = 'audio';\n const audioData = {\n data1: moof,\n data2: mdat,\n startPTS: start,\n endPTS: end,\n startDTS: start,\n endDTS: end,\n type,\n hasAudio: true,\n hasVideo: false,\n nb: nbSamples\n };\n this.isAudioContiguous = true;\n return audioData;\n }\n remuxEmptyAudio(track, timeOffset, contiguous, videoData) {\n const inputTimeScale = track.inputTimeScale;\n const mp4timeScale = track.samplerate ? track.samplerate : inputTimeScale;\n const scaleFactor = inputTimeScale / mp4timeScale;\n const nextAudioPts = this.nextAudioPts;\n // sync with video's timestamp\n const initDTS = this._initDTS;\n const init90kHz = initDTS.baseTime * 90000 / initDTS.timescale;\n const startDTS = (nextAudioPts !== null ? nextAudioPts : videoData.startDTS * inputTimeScale) + init90kHz;\n const endDTS = videoData.endDTS * inputTimeScale + init90kHz;\n // one sample's duration value\n const frameDuration = scaleFactor * AAC_SAMPLES_PER_FRAME;\n // samples count of this segment's duration\n const nbSamples = Math.ceil((endDTS - startDTS) / frameDuration);\n // silent frame\n const silentFrame = AAC.getSilentFrame(track.manifestCodec || track.codec, track.channelCount);\n logger.warn('[mp4-remuxer]: remux empty Audio');\n // Can't remux if we can't generate a silent frame...\n if (!silentFrame) {\n logger.trace('[mp4-remuxer]: Unable to remuxEmptyAudio since we were unable to get a silent frame for given audio codec');\n return;\n }\n const samples = [];\n for (let i = 0; i < nbSamples; i++) {\n const stamp = startDTS + i * frameDuration;\n samples.push({\n unit: silentFrame,\n pts: stamp,\n dts: stamp\n });\n }\n track.samples = samples;\n return this.remuxAudio(track, timeOffset, contiguous, false);\n }\n}\nfunction normalizePts(value, reference) {\n let offset;\n if (reference === null) {\n return value;\n }\n if (reference < value) {\n // - 2^33\n offset = -8589934592;\n } else {\n // + 2^33\n offset = 8589934592;\n }\n /* PTS is 33bit (from 0 to 2^33 -1)\n if diff between value and reference is bigger than half of the amplitude (2^32) then it means that\n PTS looping occured. 
fill the gap */\n while (Math.abs(value - reference) > 4294967296) {\n value += offset;\n }\n return value;\n}\nfunction findKeyframeIndex(samples) {\n for (let i = 0; i < samples.length; i++) {\n if (samples[i].key) {\n return i;\n }\n }\n return -1;\n}\nfunction flushTextTrackMetadataCueSamples(track, timeOffset, initPTS, initDTS) {\n const length = track.samples.length;\n if (!length) {\n return;\n }\n const inputTimeScale = track.inputTimeScale;\n for (let index = 0; index < length; index++) {\n const sample = track.samples[index];\n // setting id3 pts, dts to relative time\n // using this._initPTS and this._initDTS to calculate relative time\n sample.pts = normalizePts(sample.pts - initPTS.baseTime * inputTimeScale / initPTS.timescale, timeOffset * inputTimeScale) / inputTimeScale;\n sample.dts = normalizePts(sample.dts - initDTS.baseTime * inputTimeScale / initDTS.timescale, timeOffset * inputTimeScale) / inputTimeScale;\n }\n const samples = track.samples;\n track.samples = [];\n return {\n samples\n };\n}\nfunction flushTextTrackUserdataCueSamples(track, timeOffset, initPTS) {\n const length = track.samples.length;\n if (!length) {\n return;\n }\n const inputTimeScale = track.inputTimeScale;\n for (let index = 0; index < length; index++) {\n const sample = track.samples[index];\n // setting text pts, dts to relative time\n // using this._initPTS and this._initDTS to calculate relative time\n sample.pts = normalizePts(sample.pts - initPTS.baseTime * inputTimeScale / initPTS.timescale, timeOffset * inputTimeScale) / inputTimeScale;\n }\n track.samples.sort((a, b) => a.pts - b.pts);\n const samples = track.samples;\n track.samples = [];\n return {\n samples\n };\n}\nclass Mp4Sample {\n constructor(isKeyframe, duration, size, cts) {\n this.size = void 0;\n this.duration = void 0;\n this.cts = void 0;\n this.flags = void 0;\n this.duration = duration;\n this.size = size;\n this.cts = cts;\n this.flags = {\n isLeading: 0,\n isDependedOn: 0,\n hasRedundancy: 0,\n degradPrio: 0,\n dependsOn: isKeyframe ? 2 : 1,\n isNonSync: isKeyframe ? 
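// Usage sketch for normalizePts above: MPEG-TS PTS values are 33-bit and wrap
// every 2^33 ticks (about 26.5 hours at 90 kHz). When a value and its
// reference differ by more than 2^32, a wrap is assumed and the value is
// shifted by ±2^33 until both sit on the same side. Hypothetical values:
const refNearWrap = 8589934592 - 90000; // one second before the 33-bit wrap
const ptsAfterWrap = 45000;             // half a second after the wrap
// normalizePts(ptsAfterWrap, refNearWrap) === 45000 + 8589934592, i.e. the
// post-wrap timestamp lands just after the reference instead of appearing
// ~26.5 hours in the past.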
0 : 1\n };\n }\n}\n\nclass PassThroughRemuxer {\n constructor() {\n this.emitInitSegment = false;\n this.audioCodec = void 0;\n this.videoCodec = void 0;\n this.initData = void 0;\n this.initPTS = null;\n this.initTracks = void 0;\n this.lastEndTime = null;\n }\n destroy() {}\n resetTimeStamp(defaultInitPTS) {\n this.initPTS = defaultInitPTS;\n this.lastEndTime = null;\n }\n resetNextTimestamp() {\n this.lastEndTime = null;\n }\n resetInitSegment(initSegment, audioCodec, videoCodec, decryptdata) {\n this.audioCodec = audioCodec;\n this.videoCodec = videoCodec;\n this.generateInitSegment(patchEncyptionData(initSegment, decryptdata));\n this.emitInitSegment = true;\n }\n generateInitSegment(initSegment) {\n let {\n audioCodec,\n videoCodec\n } = this;\n if (!(initSegment != null && initSegment.byteLength)) {\n this.initTracks = undefined;\n this.initData = undefined;\n return;\n }\n const initData = this.initData = parseInitSegment(initSegment);\n\n // Get codec from initSegment or fallback to default\n if (initData.audio) {\n audioCodec = getParsedTrackCodec(initData.audio, ElementaryStreamTypes.AUDIO);\n }\n if (initData.video) {\n videoCodec = getParsedTrackCodec(initData.video, ElementaryStreamTypes.VIDEO);\n }\n const tracks = {};\n if (initData.audio && initData.video) {\n tracks.audiovideo = {\n container: 'video/mp4',\n codec: audioCodec + ',' + videoCodec,\n initSegment,\n id: 'main'\n };\n } else if (initData.audio) {\n tracks.audio = {\n container: 'audio/mp4',\n codec: audioCodec,\n initSegment,\n id: 'audio'\n };\n } else if (initData.video) {\n tracks.video = {\n container: 'video/mp4',\n codec: videoCodec,\n initSegment,\n id: 'main'\n };\n } else {\n logger.warn('[passthrough-remuxer.ts]: initSegment does not contain moov or trak boxes.');\n }\n this.initTracks = tracks;\n }\n remux(audioTrack, videoTrack, id3Track, textTrack, timeOffset, accurateTimeOffset) {\n var _initData, _initData2;\n let {\n initPTS,\n lastEndTime\n } = this;\n const result = {\n audio: undefined,\n video: undefined,\n text: textTrack,\n id3: id3Track,\n initSegment: undefined\n };\n\n // If we haven't yet set a lastEndDTS, or it was reset, set it to the provided timeOffset. We want to use the\n // lastEndDTS over timeOffset whenever possible; during progressive playback, the media source will not update\n // the media duration (which is what timeOffset is provided as) before we need to process the next chunk.\n if (!isFiniteNumber(lastEndTime)) {\n lastEndTime = this.lastEndTime = timeOffset || 0;\n }\n\n // The binary segment data is added to the videoTrack in the mp4demuxer. We don't check to see if the data is only\n // audio or video (or both); adding it to video was an arbitrary choice.\n const data = videoTrack.samples;\n if (!(data != null && data.length)) {\n return result;\n }\n const initSegment = {\n initPTS: undefined,\n timescale: 1\n };\n let initData = this.initData;\n if (!((_initData = initData) != null && _initData.length)) {\n this.generateInitSegment(data);\n initData = this.initData;\n }\n if (!((_initData2 = initData) != null && _initData2.length)) {\n // We can't remux if the initSegment could not be generated\n logger.warn('[passthrough-remuxer.ts]: Failed to generate initSegment.');\n return result;\n }\n if (this.emitInitSegment) {\n initSegment.tracks = this.initTracks;\n this.emitInitSegment = false;\n }\n const duration = getDuration(data, initData);\n const startDTS = getStartDTS(initData, data);\n const decodeTime = startDTS === null ? 
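// Shape of the track map built by generateInitSegment above, shown for a
// muxed AVC + AAC init segment (codec strings hypothetical): muxed streams
// collapse into a single 'audiovideo' entry with a combined codecs list,
// while demuxed streams get separate 'audio'/'video' entries. The real
// entries also carry the initSegment bytes themselves:
const exampleTracks = {
  audiovideo: {
    container: 'video/mp4',
    codec: 'mp4a.40.2,avc1.64001f',
    id: 'main',
  },
};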
timeOffset : startDTS;\n if (isInvalidInitPts(initPTS, decodeTime, timeOffset, duration) || initSegment.timescale !== initPTS.timescale && accurateTimeOffset) {\n initSegment.initPTS = decodeTime - timeOffset;\n if (initPTS && initPTS.timescale === 1) {\n logger.warn(`Adjusting initPTS by ${initSegment.initPTS - initPTS.baseTime}`);\n }\n this.initPTS = initPTS = {\n baseTime: initSegment.initPTS,\n timescale: 1\n };\n }\n const startTime = audioTrack ? decodeTime - initPTS.baseTime / initPTS.timescale : lastEndTime;\n const endTime = startTime + duration;\n offsetStartDTS(initData, data, initPTS.baseTime / initPTS.timescale);\n if (duration > 0) {\n this.lastEndTime = endTime;\n } else {\n logger.warn('Duration parsed from mp4 should be greater than zero');\n this.resetNextTimestamp();\n }\n const hasAudio = !!initData.audio;\n const hasVideo = !!initData.video;\n let type = '';\n if (hasAudio) {\n type += 'audio';\n }\n if (hasVideo) {\n type += 'video';\n }\n const track = {\n data1: data,\n startPTS: startTime,\n startDTS: startTime,\n endPTS: endTime,\n endDTS: endTime,\n type,\n hasAudio,\n hasVideo,\n nb: 1,\n dropped: 0\n };\n result.audio = track.type === 'audio' ? track : undefined;\n result.video = track.type !== 'audio' ? track : undefined;\n result.initSegment = initSegment;\n result.id3 = flushTextTrackMetadataCueSamples(id3Track, timeOffset, initPTS, initPTS);\n if (textTrack.samples.length) {\n result.text = flushTextTrackUserdataCueSamples(textTrack, timeOffset, initPTS);\n }\n return result;\n }\n}\nfunction isInvalidInitPts(initPTS, startDTS, timeOffset, duration) {\n if (initPTS === null) {\n return true;\n }\n // InitPTS is invalid when distance from program would be more than segment duration or a minimum of one second\n const minDuration = Math.max(duration, 1);\n const startTime = startDTS - initPTS.baseTime / initPTS.timescale;\n return Math.abs(startTime - timeOffset) > minDuration;\n}\nfunction getParsedTrackCodec(track, type) {\n const parsedCodec = track == null ? void 0 : track.codec;\n if (parsedCodec && parsedCodec.length > 4) {\n return parsedCodec;\n }\n if (type === ElementaryStreamTypes.AUDIO) {\n if (parsedCodec === 'ec-3' || parsedCodec === 'ac-3' || parsedCodec === 'alac') {\n return parsedCodec;\n }\n if (parsedCodec === 'fLaC' || parsedCodec === 'Opus') {\n // Opting not to get `preferManagedMediaSource` from player config for isSupported() check for simplicity\n const preferManagedMediaSource = false;\n return getCodecCompatibleName(parsedCodec, preferManagedMediaSource);\n }\n const result = 'mp4a.40.5';\n logger.info(`Parsed audio codec \"${parsedCodec}\" or audio object type not handled. Using \"${result}\"`);\n return result;\n }\n // Provide defaults based on codec type\n // This allows for some playback of some fmp4 playlists without CODECS defined in manifest\n logger.warn(`Unhandled video codec \"${parsedCodec}\"`);\n if (parsedCodec === 'hvc1' || parsedCodec === 'hev1') {\n return 'hvc1.1.6.L120.90';\n }\n if (parsedCodec === 'av01') {\n return 'av01.0.04M.08';\n }\n return 'avc1.42e01e';\n}\n\nlet now;\n// performance.now() not available on WebWorker, at least on Safari Desktop\ntry {\n now = self.performance.now.bind(self.performance);\n} catch (err) {\n logger.debug('Unable to use Performance API on this environment');\n now = optionalSelf == null ? 
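// Worked example for isInvalidInitPts above (hypothetical numbers): a 6 s
// fmp4 segment whose first decode time is 600 s, loaded at a playlist
// timeOffset of 10 s, checked against an initPTS of 590 s:
const exampleInitPTS = { baseTime: 590, timescale: 1 };
const exampleStartTime = 600 - exampleInitPTS.baseTime / exampleInitPTS.timescale; // 10
const stillValid = Math.abs(exampleStartTime - 10) <= Math.max(6, 1);              // true
// Only when that distance exceeds one segment duration (minimum 1 s) is
// initPTS recomputed from decodeTime - timeOffset.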
void 0 : optionalSelf.Date.now;\n}\nconst muxConfig = [{\n demux: MP4Demuxer,\n remux: PassThroughRemuxer\n}, {\n demux: TSDemuxer,\n remux: MP4Remuxer\n}, {\n demux: AACDemuxer,\n remux: MP4Remuxer\n}, {\n demux: MP3Demuxer,\n remux: MP4Remuxer\n}];\n{\n muxConfig.splice(2, 0, {\n demux: AC3Demuxer,\n remux: MP4Remuxer\n });\n}\nclass Transmuxer {\n constructor(observer, typeSupported, config, vendor, id) {\n this.async = false;\n this.observer = void 0;\n this.typeSupported = void 0;\n this.config = void 0;\n this.vendor = void 0;\n this.id = void 0;\n this.demuxer = void 0;\n this.remuxer = void 0;\n this.decrypter = void 0;\n this.probe = void 0;\n this.decryptionPromise = null;\n this.transmuxConfig = void 0;\n this.currentTransmuxState = void 0;\n this.observer = observer;\n this.typeSupported = typeSupported;\n this.config = config;\n this.vendor = vendor;\n this.id = id;\n }\n configure(transmuxConfig) {\n this.transmuxConfig = transmuxConfig;\n if (this.decrypter) {\n this.decrypter.reset();\n }\n }\n push(data, decryptdata, chunkMeta, state) {\n const stats = chunkMeta.transmuxing;\n stats.executeStart = now();\n let uintData = new Uint8Array(data);\n const {\n currentTransmuxState,\n transmuxConfig\n } = this;\n if (state) {\n this.currentTransmuxState = state;\n }\n const {\n contiguous,\n discontinuity,\n trackSwitch,\n accurateTimeOffset,\n timeOffset,\n initSegmentChange\n } = state || currentTransmuxState;\n const {\n audioCodec,\n videoCodec,\n defaultInitPts,\n duration,\n initSegmentData\n } = transmuxConfig;\n const keyData = getEncryptionType(uintData, decryptdata);\n if (keyData && keyData.method === 'AES-128') {\n const decrypter = this.getDecrypter();\n // Software decryption is synchronous; webCrypto is not\n if (decrypter.isSync()) {\n // Software decryption is progressive. Progressive decryption may not return a result on each call. 
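// Sketch of the first-match probe dispatch that configureTransmuxer (below)
// runs over muxConfig: each demuxer exposes a static probe(data) that sniffs
// the leading bytes (a 0x47 TS sync byte, an ADTS/ID3 header, fmp4 boxes),
// and array order decides precedence: fmp4 passthrough, then TS, then AC-3
// (when enabled), AAC and MP3. Condensed form (names illustrative):
function probeDemuxer(data, configs) {
  for (const entry of configs) {
    if (entry.demux && entry.demux.probe(data)) {
      return entry; // matching { demux, remux } pair to instantiate
    }
  }
  return null; // caller reports 'Failed to find demuxer by probing fragment data'
}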
Any cached\n // data is handled in the flush() call\n let decryptedData = decrypter.softwareDecrypt(uintData, keyData.key.buffer, keyData.iv.buffer);\n // For Low-Latency HLS Parts, decrypt in place, since part parsing is expected on push progress\n const loadingParts = chunkMeta.part > -1;\n if (loadingParts) {\n decryptedData = decrypter.flush();\n }\n if (!decryptedData) {\n stats.executeEnd = now();\n return emptyResult(chunkMeta);\n }\n uintData = new Uint8Array(decryptedData);\n } else {\n this.decryptionPromise = decrypter.webCryptoDecrypt(uintData, keyData.key.buffer, keyData.iv.buffer).then(decryptedData => {\n // Calling push here is important; if flush() is called while this is still resolving, this ensures that\n // the decrypted data has been transmuxed\n const result = this.push(decryptedData, null, chunkMeta);\n this.decryptionPromise = null;\n return result;\n });\n return this.decryptionPromise;\n }\n }\n const resetMuxers = this.needsProbing(discontinuity, trackSwitch);\n if (resetMuxers) {\n const error = this.configureTransmuxer(uintData);\n if (error) {\n logger.warn(`[transmuxer] ${error.message}`);\n this.observer.emit(Events.ERROR, Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.FRAG_PARSING_ERROR,\n fatal: false,\n error,\n reason: error.message\n });\n stats.executeEnd = now();\n return emptyResult(chunkMeta);\n }\n }\n if (discontinuity || trackSwitch || initSegmentChange || resetMuxers) {\n this.resetInitSegment(initSegmentData, audioCodec, videoCodec, duration, decryptdata);\n }\n if (discontinuity || initSegmentChange || resetMuxers) {\n this.resetInitialTimestamp(defaultInitPts);\n }\n if (!contiguous) {\n this.resetContiguity();\n }\n const result = this.transmux(uintData, keyData, timeOffset, accurateTimeOffset, chunkMeta);\n const currentState = this.currentTransmuxState;\n currentState.contiguous = true;\n currentState.discontinuity = false;\n currentState.trackSwitch = false;\n stats.executeEnd = now();\n return result;\n }\n\n // Due to data caching, flush calls can produce more than one TransmuxerResult (hence the Array type)\n flush(chunkMeta) {\n const stats = chunkMeta.transmuxing;\n stats.executeStart = now();\n const {\n decrypter,\n currentTransmuxState,\n decryptionPromise\n } = this;\n if (decryptionPromise) {\n // Upon resolution, the decryption promise calls push() and returns its TransmuxerResult up the stack. Therefore\n // only flushing is required for async decryption\n return decryptionPromise.then(() => {\n return this.flush(chunkMeta);\n });\n }\n const transmuxResults = [];\n const {\n timeOffset\n } = currentTransmuxState;\n if (decrypter) {\n // The decrypter may have data cached, which needs to be demuxed. 
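// Sketch of the ordering guarantee described above: when a WebCrypto decrypt
// is still in flight, flush() re-queues itself behind the pending promise
// (which itself calls push() with the decrypted bytes), so no segment is
// flushed before its decrypted data has been transmuxed. Condensed,
// hypothetical form of that control flow:
async function flushAfterDecrypt(transmuxer, chunkMeta) {
  if (transmuxer.decryptionPromise) {
    await transmuxer.decryptionPromise; // push() runs inside this promise
  }
  return transmuxer.flush(chunkMeta);
}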
In this case we'll have two TransmuxResults\n // This happens in the case that we receive only 1 push call for a segment (either for non-progressive downloads,\n // or for progressive downloads with small segments)\n const decryptedData = decrypter.flush();\n if (decryptedData) {\n // Push always returns a TransmuxerResult if decryptdata is null\n transmuxResults.push(this.push(decryptedData, null, chunkMeta));\n }\n }\n const {\n demuxer,\n remuxer\n } = this;\n if (!demuxer || !remuxer) {\n // If probing failed, then Hls.js has been given content its not able to handle\n stats.executeEnd = now();\n return [emptyResult(chunkMeta)];\n }\n const demuxResultOrPromise = demuxer.flush(timeOffset);\n if (isPromise(demuxResultOrPromise)) {\n // Decrypt final SAMPLE-AES samples\n return demuxResultOrPromise.then(demuxResult => {\n this.flushRemux(transmuxResults, demuxResult, chunkMeta);\n return transmuxResults;\n });\n }\n this.flushRemux(transmuxResults, demuxResultOrPromise, chunkMeta);\n return transmuxResults;\n }\n flushRemux(transmuxResults, demuxResult, chunkMeta) {\n const {\n audioTrack,\n videoTrack,\n id3Track,\n textTrack\n } = demuxResult;\n const {\n accurateTimeOffset,\n timeOffset\n } = this.currentTransmuxState;\n logger.log(`[transmuxer.ts]: Flushed fragment ${chunkMeta.sn}${chunkMeta.part > -1 ? ' p: ' + chunkMeta.part : ''} of level ${chunkMeta.level}`);\n const remuxResult = this.remuxer.remux(audioTrack, videoTrack, id3Track, textTrack, timeOffset, accurateTimeOffset, true, this.id);\n transmuxResults.push({\n remuxResult,\n chunkMeta\n });\n chunkMeta.transmuxing.executeEnd = now();\n }\n resetInitialTimestamp(defaultInitPts) {\n const {\n demuxer,\n remuxer\n } = this;\n if (!demuxer || !remuxer) {\n return;\n }\n demuxer.resetTimeStamp(defaultInitPts);\n remuxer.resetTimeStamp(defaultInitPts);\n }\n resetContiguity() {\n const {\n demuxer,\n remuxer\n } = this;\n if (!demuxer || !remuxer) {\n return;\n }\n demuxer.resetContiguity();\n remuxer.resetNextTimestamp();\n }\n resetInitSegment(initSegmentData, audioCodec, videoCodec, trackDuration, decryptdata) {\n const {\n demuxer,\n remuxer\n } = this;\n if (!demuxer || !remuxer) {\n return;\n }\n demuxer.resetInitSegment(initSegmentData, audioCodec, videoCodec, trackDuration);\n remuxer.resetInitSegment(initSegmentData, audioCodec, videoCodec, decryptdata);\n }\n destroy() {\n if (this.demuxer) {\n this.demuxer.destroy();\n this.demuxer = undefined;\n }\n if (this.remuxer) {\n this.remuxer.destroy();\n this.remuxer = undefined;\n }\n }\n transmux(data, keyData, timeOffset, accurateTimeOffset, chunkMeta) {\n let result;\n if (keyData && keyData.method === 'SAMPLE-AES') {\n result = this.transmuxSampleAes(data, keyData, timeOffset, accurateTimeOffset, chunkMeta);\n } else {\n result = this.transmuxUnencrypted(data, timeOffset, accurateTimeOffset, chunkMeta);\n }\n return result;\n }\n transmuxUnencrypted(data, timeOffset, accurateTimeOffset, chunkMeta) {\n const {\n audioTrack,\n videoTrack,\n id3Track,\n textTrack\n } = this.demuxer.demux(data, timeOffset, false, !this.config.progressive);\n const remuxResult = this.remuxer.remux(audioTrack, videoTrack, id3Track, textTrack, timeOffset, accurateTimeOffset, false, this.id);\n return {\n remuxResult,\n chunkMeta\n };\n }\n transmuxSampleAes(data, decryptData, timeOffset, accurateTimeOffset, chunkMeta) {\n return this.demuxer.demuxSampleAes(data, decryptData, timeOffset).then(demuxResult => {\n const remuxResult = this.remuxer.remux(demuxResult.audioTrack, 
demuxResult.videoTrack, demuxResult.id3Track, demuxResult.textTrack, timeOffset, accurateTimeOffset, false, this.id);\n return {\n remuxResult,\n chunkMeta\n };\n });\n }\n configureTransmuxer(data) {\n const {\n config,\n observer,\n typeSupported,\n vendor\n } = this;\n // probe for content type\n let mux;\n for (let i = 0, len = muxConfig.length; i < len; i++) {\n var _muxConfig$i$demux;\n if ((_muxConfig$i$demux = muxConfig[i].demux) != null && _muxConfig$i$demux.probe(data)) {\n mux = muxConfig[i];\n break;\n }\n }\n if (!mux) {\n return new Error('Failed to find demuxer by probing fragment data');\n }\n // so let's check that current remuxer and demuxer are still valid\n const demuxer = this.demuxer;\n const remuxer = this.remuxer;\n const Remuxer = mux.remux;\n const Demuxer = mux.demux;\n if (!remuxer || !(remuxer instanceof Remuxer)) {\n this.remuxer = new Remuxer(observer, config, typeSupported, vendor);\n }\n if (!demuxer || !(demuxer instanceof Demuxer)) {\n this.demuxer = new Demuxer(observer, config, typeSupported);\n this.probe = Demuxer.probe;\n }\n }\n needsProbing(discontinuity, trackSwitch) {\n // in case of continuity change, or track switch\n // we might switch from content type (AAC container to TS container, or TS to fmp4 for example)\n return !this.demuxer || !this.remuxer || discontinuity || trackSwitch;\n }\n getDecrypter() {\n let decrypter = this.decrypter;\n if (!decrypter) {\n decrypter = this.decrypter = new Decrypter(this.config);\n }\n return decrypter;\n }\n}\nfunction getEncryptionType(data, decryptData) {\n let encryptionType = null;\n if (data.byteLength > 0 && (decryptData == null ? void 0 : decryptData.key) != null && decryptData.iv !== null && decryptData.method != null) {\n encryptionType = decryptData;\n }\n return encryptionType;\n}\nconst emptyResult = chunkMeta => ({\n remuxResult: {},\n chunkMeta\n});\nfunction isPromise(p) {\n return 'then' in p && p.then instanceof Function;\n}\nclass TransmuxConfig {\n constructor(audioCodec, videoCodec, initSegmentData, duration, defaultInitPts) {\n this.audioCodec = void 0;\n this.videoCodec = void 0;\n this.initSegmentData = void 0;\n this.duration = void 0;\n this.defaultInitPts = void 0;\n this.audioCodec = audioCodec;\n this.videoCodec = videoCodec;\n this.initSegmentData = initSegmentData;\n this.duration = duration;\n this.defaultInitPts = defaultInitPts || null;\n }\n}\nclass TransmuxState {\n constructor(discontinuity, contiguous, accurateTimeOffset, trackSwitch, timeOffset, initSegmentChange) {\n this.discontinuity = void 0;\n this.contiguous = void 0;\n this.accurateTimeOffset = void 0;\n this.trackSwitch = void 0;\n this.timeOffset = void 0;\n this.initSegmentChange = void 0;\n this.discontinuity = discontinuity;\n this.contiguous = contiguous;\n this.accurateTimeOffset = accurateTimeOffset;\n this.trackSwitch = trackSwitch;\n this.timeOffset = timeOffset;\n this.initSegmentChange = initSegmentChange;\n }\n}\n\nvar eventemitter3 = {exports: {}};\n\n(function (module) {\n\n\tvar has = Object.prototype.hasOwnProperty\n\t , prefix = '~';\n\n\t/**\n\t * Constructor to create a storage for our `EE` objects.\n\t * An `Events` instance is a plain object whose properties are event names.\n\t *\n\t * @constructor\n\t * @private\n\t */\n\tfunction Events() {}\n\n\t//\n\t// We try to not inherit from `Object.prototype`. 
In some engines creating an\n\t// instance in this way is faster than calling `Object.create(null)` directly.\n\t// If `Object.create(null)` is not supported we prefix the event names with a\n\t// character to make sure that the built-in object properties are not\n\t// overridden or used as an attack vector.\n\t//\n\tif (Object.create) {\n\t Events.prototype = Object.create(null);\n\n\t //\n\t // This hack is needed because the `__proto__` property is still inherited in\n\t // some old browsers like Android 4, iPhone 5.1, Opera 11 and Safari 5.\n\t //\n\t if (!new Events().__proto__) prefix = false;\n\t}\n\n\t/**\n\t * Representation of a single event listener.\n\t *\n\t * @param {Function} fn The listener function.\n\t * @param {*} context The context to invoke the listener with.\n\t * @param {Boolean} [once=false] Specify if the listener is a one-time listener.\n\t * @constructor\n\t * @private\n\t */\n\tfunction EE(fn, context, once) {\n\t this.fn = fn;\n\t this.context = context;\n\t this.once = once || false;\n\t}\n\n\t/**\n\t * Add a listener for a given event.\n\t *\n\t * @param {EventEmitter} emitter Reference to the `EventEmitter` instance.\n\t * @param {(String|Symbol)} event The event name.\n\t * @param {Function} fn The listener function.\n\t * @param {*} context The context to invoke the listener with.\n\t * @param {Boolean} once Specify if the listener is a one-time listener.\n\t * @returns {EventEmitter}\n\t * @private\n\t */\n\tfunction addListener(emitter, event, fn, context, once) {\n\t if (typeof fn !== 'function') {\n\t throw new TypeError('The listener must be a function');\n\t }\n\n\t var listener = new EE(fn, context || emitter, once)\n\t , evt = prefix ? prefix + event : event;\n\n\t if (!emitter._events[evt]) emitter._events[evt] = listener, emitter._eventsCount++;\n\t else if (!emitter._events[evt].fn) emitter._events[evt].push(listener);\n\t else emitter._events[evt] = [emitter._events[evt], listener];\n\n\t return emitter;\n\t}\n\n\t/**\n\t * Clear event by name.\n\t *\n\t * @param {EventEmitter} emitter Reference to the `EventEmitter` instance.\n\t * @param {(String|Symbol)} evt The Event name.\n\t * @private\n\t */\n\tfunction clearEvent(emitter, evt) {\n\t if (--emitter._eventsCount === 0) emitter._events = new Events();\n\t else delete emitter._events[evt];\n\t}\n\n\t/**\n\t * Minimal `EventEmitter` interface that is molded against the Node.js\n\t * `EventEmitter` interface.\n\t *\n\t * @constructor\n\t * @public\n\t */\n\tfunction EventEmitter() {\n\t this._events = new Events();\n\t this._eventsCount = 0;\n\t}\n\n\t/**\n\t * Return an array listing the events for which the emitter has registered\n\t * listeners.\n\t *\n\t * @returns {Array}\n\t * @public\n\t */\n\tEventEmitter.prototype.eventNames = function eventNames() {\n\t var names = []\n\t , events\n\t , name;\n\n\t if (this._eventsCount === 0) return names;\n\n\t for (name in (events = this._events)) {\n\t if (has.call(events, name)) names.push(prefix ? name.slice(1) : name);\n\t }\n\n\t if (Object.getOwnPropertySymbols) {\n\t return names.concat(Object.getOwnPropertySymbols(events));\n\t }\n\n\t return names;\n\t};\n\n\t/**\n\t * Return the listeners registered for a given event.\n\t *\n\t * @param {(String|Symbol)} event The event name.\n\t * @returns {Array} The registered listeners.\n\t * @public\n\t */\n\tEventEmitter.prototype.listeners = function listeners(event) {\n\t var evt = prefix ? 
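// The branching above implements eventemitter3's small-size optimization:
// _events[evt] holds a single EE record directly in the common one-listener
// case and is only promoted to an array on the second subscription, which is
// why readers such as listeners(), listenerCount() and emit() below all test
// `handlers.fn` first. The same idea in miniature (illustrative only):
function addHandler(map, evt, handler) {
  const existing = map[evt];
  if (!existing) map[evt] = handler;                 // first listener: stored bare
  else if (typeof existing === 'function') map[evt] = [existing, handler];
  else existing.push(handler);                       // third and later: append
}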
prefix + event : event\n\t , handlers = this._events[evt];\n\n\t if (!handlers) return [];\n\t if (handlers.fn) return [handlers.fn];\n\n\t for (var i = 0, l = handlers.length, ee = new Array(l); i < l; i++) {\n\t ee[i] = handlers[i].fn;\n\t }\n\n\t return ee;\n\t};\n\n\t/**\n\t * Return the number of listeners listening to a given event.\n\t *\n\t * @param {(String|Symbol)} event The event name.\n\t * @returns {Number} The number of listeners.\n\t * @public\n\t */\n\tEventEmitter.prototype.listenerCount = function listenerCount(event) {\n\t var evt = prefix ? prefix + event : event\n\t , listeners = this._events[evt];\n\n\t if (!listeners) return 0;\n\t if (listeners.fn) return 1;\n\t return listeners.length;\n\t};\n\n\t/**\n\t * Calls each of the listeners registered for a given event.\n\t *\n\t * @param {(String|Symbol)} event The event name.\n\t * @returns {Boolean} `true` if the event had listeners, else `false`.\n\t * @public\n\t */\n\tEventEmitter.prototype.emit = function emit(event, a1, a2, a3, a4, a5) {\n\t var evt = prefix ? prefix + event : event;\n\n\t if (!this._events[evt]) return false;\n\n\t var listeners = this._events[evt]\n\t , len = arguments.length\n\t , args\n\t , i;\n\n\t if (listeners.fn) {\n\t if (listeners.once) this.removeListener(event, listeners.fn, undefined, true);\n\n\t switch (len) {\n\t case 1: return listeners.fn.call(listeners.context), true;\n\t case 2: return listeners.fn.call(listeners.context, a1), true;\n\t case 3: return listeners.fn.call(listeners.context, a1, a2), true;\n\t case 4: return listeners.fn.call(listeners.context, a1, a2, a3), true;\n\t case 5: return listeners.fn.call(listeners.context, a1, a2, a3, a4), true;\n\t case 6: return listeners.fn.call(listeners.context, a1, a2, a3, a4, a5), true;\n\t }\n\n\t for (i = 1, args = new Array(len -1); i < len; i++) {\n\t args[i - 1] = arguments[i];\n\t }\n\n\t listeners.fn.apply(listeners.context, args);\n\t } else {\n\t var length = listeners.length\n\t , j;\n\n\t for (i = 0; i < length; i++) {\n\t if (listeners[i].once) this.removeListener(event, listeners[i].fn, undefined, true);\n\n\t switch (len) {\n\t case 1: listeners[i].fn.call(listeners[i].context); break;\n\t case 2: listeners[i].fn.call(listeners[i].context, a1); break;\n\t case 3: listeners[i].fn.call(listeners[i].context, a1, a2); break;\n\t case 4: listeners[i].fn.call(listeners[i].context, a1, a2, a3); break;\n\t default:\n\t if (!args) for (j = 1, args = new Array(len -1); j < len; j++) {\n\t args[j - 1] = arguments[j];\n\t }\n\n\t listeners[i].fn.apply(listeners[i].context, args);\n\t }\n\t }\n\t }\n\n\t return true;\n\t};\n\n\t/**\n\t * Add a listener for a given event.\n\t *\n\t * @param {(String|Symbol)} event The event name.\n\t * @param {Function} fn The listener function.\n\t * @param {*} [context=this] The context to invoke the listener with.\n\t * @returns {EventEmitter} `this`.\n\t * @public\n\t */\n\tEventEmitter.prototype.on = function on(event, fn, context) {\n\t return addListener(this, event, fn, context, false);\n\t};\n\n\t/**\n\t * Add a one-time listener for a given event.\n\t *\n\t * @param {(String|Symbol)} event The event name.\n\t * @param {Function} fn The listener function.\n\t * @param {*} [context=this] The context to invoke the listener with.\n\t * @returns {EventEmitter} `this`.\n\t * @public\n\t */\n\tEventEmitter.prototype.once = function once(event, fn, context) {\n\t return addListener(this, event, fn, context, true);\n\t};\n\n\t/**\n\t * Remove the listeners of a given event.\n\t *\n\t * @param 
{(String|Symbol)} event The event name.\n\t * @param {Function} fn Only remove the listeners that match this function.\n\t * @param {*} context Only remove the listeners that have this context.\n\t * @param {Boolean} once Only remove one-time listeners.\n\t * @returns {EventEmitter} `this`.\n\t * @public\n\t */\n\tEventEmitter.prototype.removeListener = function removeListener(event, fn, context, once) {\n\t var evt = prefix ? prefix + event : event;\n\n\t if (!this._events[evt]) return this;\n\t if (!fn) {\n\t clearEvent(this, evt);\n\t return this;\n\t }\n\n\t var listeners = this._events[evt];\n\n\t if (listeners.fn) {\n\t if (\n\t listeners.fn === fn &&\n\t (!once || listeners.once) &&\n\t (!context || listeners.context === context)\n\t ) {\n\t clearEvent(this, evt);\n\t }\n\t } else {\n\t for (var i = 0, events = [], length = listeners.length; i < length; i++) {\n\t if (\n\t listeners[i].fn !== fn ||\n\t (once && !listeners[i].once) ||\n\t (context && listeners[i].context !== context)\n\t ) {\n\t events.push(listeners[i]);\n\t }\n\t }\n\n\t //\n\t // Reset the array, or remove it completely if we have no more listeners.\n\t //\n\t if (events.length) this._events[evt] = events.length === 1 ? events[0] : events;\n\t else clearEvent(this, evt);\n\t }\n\n\t return this;\n\t};\n\n\t/**\n\t * Remove all listeners, or those of the specified event.\n\t *\n\t * @param {(String|Symbol)} [event] The event name.\n\t * @returns {EventEmitter} `this`.\n\t * @public\n\t */\n\tEventEmitter.prototype.removeAllListeners = function removeAllListeners(event) {\n\t var evt;\n\n\t if (event) {\n\t evt = prefix ? prefix + event : event;\n\t if (this._events[evt]) clearEvent(this, evt);\n\t } else {\n\t this._events = new Events();\n\t this._eventsCount = 0;\n\t }\n\n\t return this;\n\t};\n\n\t//\n\t// Alias methods names because people roll like that.\n\t//\n\tEventEmitter.prototype.off = EventEmitter.prototype.removeListener;\n\tEventEmitter.prototype.addListener = EventEmitter.prototype.on;\n\n\t//\n\t// Expose the prefix.\n\t//\n\tEventEmitter.prefixed = prefix;\n\n\t//\n\t// Allow `EventEmitter` to be imported as module namespace.\n\t//\n\tEventEmitter.EventEmitter = EventEmitter;\n\n\t//\n\t// Expose the module.\n\t//\n\t{\n\t module.exports = EventEmitter;\n\t} \n} (eventemitter3));\n\nvar eventemitter3Exports = eventemitter3.exports;\nvar EventEmitter = /*@__PURE__*/getDefaultExportFromCjs(eventemitter3Exports);\n\nclass TransmuxerInterface {\n constructor(hls, id, onTransmuxComplete, onFlush) {\n this.error = null;\n this.hls = void 0;\n this.id = void 0;\n this.observer = void 0;\n this.frag = null;\n this.part = null;\n this.useWorker = void 0;\n this.workerContext = null;\n this.onwmsg = void 0;\n this.transmuxer = null;\n this.onTransmuxComplete = void 0;\n this.onFlush = void 0;\n const config = hls.config;\n this.hls = hls;\n this.id = id;\n this.useWorker = !!config.enableWorker;\n this.onTransmuxComplete = onTransmuxComplete;\n this.onFlush = onFlush;\n const forwardMessage = (ev, data) => {\n data = data || {};\n data.frag = this.frag;\n data.id = this.id;\n if (ev === Events.ERROR) {\n this.error = data.error;\n }\n this.hls.trigger(ev, data);\n };\n\n // forward events to main thread\n this.observer = new EventEmitter();\n this.observer.on(Events.FRAG_DECRYPTED, forwardMessage);\n this.observer.on(Events.ERROR, forwardMessage);\n const MediaSource = getMediaSource(config.preferManagedMediaSource) || {\n isTypeSupported: () => false\n };\n const m2tsTypeSupported = {\n mpeg: 
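// Minimal usage sketch for the EventEmitter bundled above, exercising the
// same API surface TransmuxerInterface relies on (on/once/emit/off):
const exampleBus = new EventEmitter();
const onDemuxError = (data) => console.log('error:', data.reason);
exampleBus.on('error', onDemuxError);
exampleBus.once('init', () => console.log('fires exactly once'));
exampleBus.emit('init');                // logs
exampleBus.emit('init');                // one-time listener already removed
exampleBus.off('error', onDemuxError);  // alias of removeListener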
MediaSource.isTypeSupported('audio/mpeg'),\n mp3: MediaSource.isTypeSupported('audio/mp4; codecs=\"mp3\"'),\n ac3: MediaSource.isTypeSupported('audio/mp4; codecs=\"ac-3\"') \n };\n if (this.useWorker && typeof Worker !== 'undefined') {\n const canCreateWorker = config.workerPath || hasUMDWorker();\n if (canCreateWorker) {\n try {\n if (config.workerPath) {\n logger.log(`loading Web Worker ${config.workerPath} for \"${id}\"`);\n this.workerContext = loadWorker(config.workerPath);\n } else {\n logger.log(`injecting Web Worker for \"${id}\"`);\n this.workerContext = injectWorker();\n }\n this.onwmsg = event => this.onWorkerMessage(event);\n const {\n worker\n } = this.workerContext;\n worker.addEventListener('message', this.onwmsg);\n worker.onerror = event => {\n const error = new Error(`${event.message} (${event.filename}:${event.lineno})`);\n config.enableWorker = false;\n logger.warn(`Error in \"${id}\" Web Worker, fallback to inline`);\n this.hls.trigger(Events.ERROR, {\n type: ErrorTypes.OTHER_ERROR,\n details: ErrorDetails.INTERNAL_EXCEPTION,\n fatal: false,\n event: 'demuxerWorker',\n error\n });\n };\n worker.postMessage({\n cmd: 'init',\n typeSupported: m2tsTypeSupported,\n vendor: '',\n id: id,\n config: JSON.stringify(config)\n });\n } catch (err) {\n logger.warn(`Error setting up \"${id}\" Web Worker, fallback to inline`, err);\n this.resetWorker();\n this.error = null;\n this.transmuxer = new Transmuxer(this.observer, m2tsTypeSupported, config, '', id);\n }\n return;\n }\n }\n this.transmuxer = new Transmuxer(this.observer, m2tsTypeSupported, config, '', id);\n }\n resetWorker() {\n if (this.workerContext) {\n const {\n worker,\n objectURL\n } = this.workerContext;\n if (objectURL) {\n // revoke the Object URL that was used to create transmuxer worker, so as not to leak it\n self.URL.revokeObjectURL(objectURL);\n }\n worker.removeEventListener('message', this.onwmsg);\n worker.onerror = null;\n worker.terminate();\n this.workerContext = null;\n }\n }\n destroy() {\n if (this.workerContext) {\n this.resetWorker();\n this.onwmsg = undefined;\n } else {\n const transmuxer = this.transmuxer;\n if (transmuxer) {\n transmuxer.destroy();\n this.transmuxer = null;\n }\n }\n const observer = this.observer;\n if (observer) {\n observer.removeAllListeners();\n }\n this.frag = null;\n // @ts-ignore\n this.observer = null;\n // @ts-ignore\n this.hls = null;\n }\n push(data, initSegmentData, audioCodec, videoCodec, frag, part, duration, accurateTimeOffset, chunkMeta, defaultInitPTS) {\n var _frag$initSegment, _lastFrag$initSegment;\n chunkMeta.transmuxing.start = self.performance.now();\n const {\n transmuxer\n } = this;\n const timeOffset = part ? part.start : frag.start;\n // TODO: push \"clear-lead\" decrypt data for unencrypted fragments in streams with encrypted ones\n const decryptdata = frag.decryptdata;\n const lastFrag = this.frag;\n const discontinuity = !(lastFrag && frag.cc === lastFrag.cc);\n const trackSwitch = !(lastFrag && chunkMeta.level === lastFrag.level);\n const snDiff = lastFrag ? chunkMeta.sn - lastFrag.sn : -1;\n const partDiff = this.part ? chunkMeta.part - this.part.index : -1;\n const progressive = snDiff === 0 && chunkMeta.id > 1 && chunkMeta.id === (lastFrag == null ? 
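// Sketch of the degrade-gracefully pattern above: if the Worker cannot be
// created (or later raises onerror), enableWorker is switched off and a
// main-thread Transmuxer takes over with the same observer and config, so
// playback continues without web workers. Condensed, hypothetical form
// (setupWorkerContext stands in for the loadWorker/injectWorker paths):
function createTransmuxer(observer, typeSupported, config, id) {
  if (config.enableWorker && typeof Worker !== 'undefined') {
    try {
      return setupWorkerContext(config, id); // hypothetical helper
    } catch (err) {
      config.enableWorker = false;           // fall through to inline below
    }
  }
  return new Transmuxer(observer, typeSupported, config, '', id);
}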
void 0 : lastFrag.stats.chunkCount);\n const contiguous = !trackSwitch && (snDiff === 1 || snDiff === 0 && (partDiff === 1 || progressive && partDiff <= 0));\n const now = self.performance.now();\n if (trackSwitch || snDiff || frag.stats.parsing.start === 0) {\n frag.stats.parsing.start = now;\n }\n if (part && (partDiff || !contiguous)) {\n part.stats.parsing.start = now;\n }\n const initSegmentChange = !(lastFrag && ((_frag$initSegment = frag.initSegment) == null ? void 0 : _frag$initSegment.url) === ((_lastFrag$initSegment = lastFrag.initSegment) == null ? void 0 : _lastFrag$initSegment.url));\n const state = new TransmuxState(discontinuity, contiguous, accurateTimeOffset, trackSwitch, timeOffset, initSegmentChange);\n if (!contiguous || discontinuity || initSegmentChange) {\n logger.log(`[transmuxer-interface, ${frag.type}]: Starting new transmux session for sn: ${chunkMeta.sn} p: ${chunkMeta.part} level: ${chunkMeta.level} id: ${chunkMeta.id}\n discontinuity: ${discontinuity}\n trackSwitch: ${trackSwitch}\n contiguous: ${contiguous}\n accurateTimeOffset: ${accurateTimeOffset}\n timeOffset: ${timeOffset}\n initSegmentChange: ${initSegmentChange}`);\n const config = new TransmuxConfig(audioCodec, videoCodec, initSegmentData, duration, defaultInitPTS);\n this.configureTransmuxer(config);\n }\n this.frag = frag;\n this.part = part;\n\n // Frags with sn of 'initSegment' are not transmuxed\n if (this.workerContext) {\n // post fragment payload as transferable objects for ArrayBuffer (no copy)\n this.workerContext.worker.postMessage({\n cmd: 'demux',\n data,\n decryptdata,\n chunkMeta,\n state\n }, data instanceof ArrayBuffer ? [data] : []);\n } else if (transmuxer) {\n const transmuxResult = transmuxer.push(data, decryptdata, chunkMeta, state);\n if (isPromise(transmuxResult)) {\n transmuxer.async = true;\n transmuxResult.then(data => {\n this.handleTransmuxComplete(data);\n }).catch(error => {\n this.transmuxerError(error, chunkMeta, 'transmuxer-interface push error');\n });\n } else {\n transmuxer.async = false;\n this.handleTransmuxComplete(transmuxResult);\n }\n }\n }\n flush(chunkMeta) {\n chunkMeta.transmuxing.start = self.performance.now();\n const {\n transmuxer\n } = this;\n if (this.workerContext) {\n this.workerContext.worker.postMessage({\n cmd: 'flush',\n chunkMeta\n });\n } else if (transmuxer) {\n let transmuxResult = transmuxer.flush(chunkMeta);\n const asyncFlush = isPromise(transmuxResult);\n if (asyncFlush || transmuxer.async) {\n if (!isPromise(transmuxResult)) {\n transmuxResult = Promise.resolve(transmuxResult);\n }\n transmuxResult.then(data => {\n this.handleFlushResult(data, chunkMeta);\n }).catch(error => {\n this.transmuxerError(error, chunkMeta, 'transmuxer-interface flush error');\n });\n } else {\n this.handleFlushResult(transmuxResult, chunkMeta);\n }\n }\n }\n transmuxerError(error, chunkMeta, reason) {\n if (!this.hls) {\n return;\n }\n this.error = error;\n this.hls.trigger(Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.FRAG_PARSING_ERROR,\n chunkMeta,\n frag: this.frag || undefined,\n fatal: false,\n error,\n err: error,\n reason\n });\n }\n handleFlushResult(results, chunkMeta) {\n results.forEach(result => {\n this.handleTransmuxComplete(result);\n });\n this.onFlush(chunkMeta);\n }\n onWorkerMessage(event) {\n const data = event.data;\n if (!(data != null && data.event)) {\n logger.warn(`worker message received with no ${data ? 
'event name' : 'data'}`);\n return;\n }\n const hls = this.hls;\n if (!this.hls) {\n return;\n }\n switch (data.event) {\n case 'init':\n {\n var _this$workerContext;\n const objectURL = (_this$workerContext = this.workerContext) == null ? void 0 : _this$workerContext.objectURL;\n if (objectURL) {\n // revoke the Object URL that was used to create transmuxer worker, so as not to leak it\n self.URL.revokeObjectURL(objectURL);\n }\n break;\n }\n case 'transmuxComplete':\n {\n this.handleTransmuxComplete(data.data);\n break;\n }\n case 'flush':\n {\n this.onFlush(data.data);\n break;\n }\n\n // pass logs from the worker thread to the main logger\n case 'workerLog':\n if (logger[data.data.logType]) {\n logger[data.data.logType](data.data.message);\n }\n break;\n default:\n {\n data.data = data.data || {};\n data.data.frag = this.frag;\n data.data.id = this.id;\n hls.trigger(data.event, data.data);\n break;\n }\n }\n }\n configureTransmuxer(config) {\n const {\n transmuxer\n } = this;\n if (this.workerContext) {\n this.workerContext.worker.postMessage({\n cmd: 'configure',\n config\n });\n } else if (transmuxer) {\n transmuxer.configure(config);\n }\n }\n handleTransmuxComplete(result) {\n result.chunkMeta.transmuxing.end = self.performance.now();\n this.onTransmuxComplete(result);\n }\n}\n\nfunction subtitleOptionsIdentical(trackList1, trackList2) {\n if (trackList1.length !== trackList2.length) {\n return false;\n }\n for (let i = 0; i < trackList1.length; i++) {\n if (!mediaAttributesIdentical(trackList1[i].attrs, trackList2[i].attrs)) {\n return false;\n }\n }\n return true;\n}\nfunction mediaAttributesIdentical(attrs1, attrs2, customAttributes) {\n // Media options with the same rendition ID must be bit identical\n const stableRenditionId = attrs1['STABLE-RENDITION-ID'];\n if (stableRenditionId && !customAttributes) {\n return stableRenditionId === attrs2['STABLE-RENDITION-ID'];\n }\n // When rendition ID is not present, compare attributes\n return !(customAttributes || ['LANGUAGE', 'NAME', 'CHARACTERISTICS', 'AUTOSELECT', 'DEFAULT', 'FORCED', 'ASSOC-LANGUAGE']).some(subtitleAttribute => attrs1[subtitleAttribute] !== attrs2[subtitleAttribute]);\n}\nfunction subtitleTrackMatchesTextTrack(subtitleTrack, textTrack) {\n return textTrack.label.toLowerCase() === subtitleTrack.name.toLowerCase() && (!textTrack.language || textTrack.language.toLowerCase() === (subtitleTrack.lang || '').toLowerCase());\n}\n\nconst TICK_INTERVAL$2 = 100; // how often to tick in ms\n\nclass AudioStreamController extends BaseStreamController {\n constructor(hls, fragmentTracker, keyLoader) {\n super(hls, fragmentTracker, keyLoader, '[audio-stream-controller]', PlaylistLevelType.AUDIO);\n this.videoBuffer = null;\n this.videoTrackCC = -1;\n this.waitingVideoCC = -1;\n this.bufferedTrack = null;\n this.switchingTrack = null;\n this.trackId = -1;\n this.waitingData = null;\n this.mainDetails = null;\n this.flushing = false;\n this.bufferFlushed = false;\n this.cachedTrackLoadedData = null;\n this._registerListeners();\n }\n onHandlerDestroying() {\n this._unregisterListeners();\n super.onHandlerDestroying();\n this.mainDetails = null;\n this.bufferedTrack = null;\n this.switchingTrack = null;\n }\n _registerListeners() {\n const {\n hls\n } = this;\n hls.on(Events.MEDIA_ATTACHED, this.onMediaAttached, this);\n hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.on(Events.LEVEL_LOADED, this.onLevelLoaded, this);\n 
hls.on(Events.AUDIO_TRACKS_UPDATED, this.onAudioTracksUpdated, this);\n hls.on(Events.AUDIO_TRACK_SWITCHING, this.onAudioTrackSwitching, this);\n hls.on(Events.AUDIO_TRACK_LOADED, this.onAudioTrackLoaded, this);\n hls.on(Events.ERROR, this.onError, this);\n hls.on(Events.BUFFER_RESET, this.onBufferReset, this);\n hls.on(Events.BUFFER_CREATED, this.onBufferCreated, this);\n hls.on(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);\n hls.on(Events.BUFFER_FLUSHED, this.onBufferFlushed, this);\n hls.on(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);\n hls.on(Events.FRAG_BUFFERED, this.onFragBuffered, this);\n }\n _unregisterListeners() {\n const {\n hls\n } = this;\n hls.off(Events.MEDIA_ATTACHED, this.onMediaAttached, this);\n hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.off(Events.LEVEL_LOADED, this.onLevelLoaded, this);\n hls.off(Events.AUDIO_TRACKS_UPDATED, this.onAudioTracksUpdated, this);\n hls.off(Events.AUDIO_TRACK_SWITCHING, this.onAudioTrackSwitching, this);\n hls.off(Events.AUDIO_TRACK_LOADED, this.onAudioTrackLoaded, this);\n hls.off(Events.ERROR, this.onError, this);\n hls.off(Events.BUFFER_RESET, this.onBufferReset, this);\n hls.off(Events.BUFFER_CREATED, this.onBufferCreated, this);\n hls.off(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);\n hls.off(Events.BUFFER_FLUSHED, this.onBufferFlushed, this);\n hls.off(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);\n hls.off(Events.FRAG_BUFFERED, this.onFragBuffered, this);\n }\n\n // INIT_PTS_FOUND is triggered when the video track parsed in the stream-controller has a new PTS value\n onInitPtsFound(event, {\n frag,\n id,\n initPTS,\n timescale\n }) {\n // Always update the new INIT PTS\n // Can change due level switch\n if (id === 'main') {\n const cc = frag.cc;\n this.initPTS[frag.cc] = {\n baseTime: initPTS,\n timescale\n };\n this.log(`InitPTS for cc: ${cc} found from main: ${initPTS}`);\n this.videoTrackCC = cc;\n // If we are waiting, tick immediately to unblock audio fragment transmuxing\n if (this.state === State.WAITING_INIT_PTS) {\n this.tick();\n }\n }\n }\n startLoad(startPosition) {\n if (!this.levels) {\n this.startPosition = startPosition;\n this.state = State.STOPPED;\n return;\n }\n const lastCurrentTime = this.lastCurrentTime;\n this.stopLoad();\n this.setInterval(TICK_INTERVAL$2);\n if (lastCurrentTime > 0 && startPosition === -1) {\n this.log(`Override startPosition with lastCurrentTime @${lastCurrentTime.toFixed(3)}`);\n startPosition = lastCurrentTime;\n this.state = State.IDLE;\n } else {\n this.loadedmetadata = false;\n this.state = State.WAITING_TRACK;\n }\n this.nextLoadPosition = this.startPosition = this.lastCurrentTime = startPosition;\n this.tick();\n }\n doTick() {\n switch (this.state) {\n case State.IDLE:\n this.doTickIdle();\n break;\n case State.WAITING_TRACK:\n {\n var _levels$trackId;\n const {\n levels,\n trackId\n } = this;\n const details = levels == null ? void 0 : (_levels$trackId = levels[trackId]) == null ? 
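// Sketch of the cross-controller handshake implemented by onInitPtsFound
// above: the main stream controller derives initPTS from the video track and
// broadcasts it per discontinuity counter (cc); this controller caches it so
// audio fragments of the same cc remux against the same time origin, and a
// tick is forced to unblock any fragment parked in WAITING_INIT_PTS.
const initPTSByCC = {}; // hypothetical stand-in for this.initPTS
function cacheInitPts({ frag, initPTS, timescale }) {
  initPTSByCC[frag.cc] = { baseTime: initPTS, timescale };
}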
void 0 : _levels$trackId.details;\n if (details) {\n if (this.waitForCdnTuneIn(details)) {\n break;\n }\n this.state = State.WAITING_INIT_PTS;\n }\n break;\n }\n case State.FRAG_LOADING_WAITING_RETRY:\n {\n var _this$media;\n const now = performance.now();\n const retryDate = this.retryDate;\n // if current time is gt than retryDate, or if media seeking let's switch to IDLE state to retry loading\n if (!retryDate || now >= retryDate || (_this$media = this.media) != null && _this$media.seeking) {\n const {\n levels,\n trackId\n } = this;\n this.log('RetryDate reached, switch back to IDLE state');\n this.resetStartWhenNotLoaded((levels == null ? void 0 : levels[trackId]) || null);\n this.state = State.IDLE;\n }\n break;\n }\n case State.WAITING_INIT_PTS:\n {\n // Ensure we don't get stuck in the WAITING_INIT_PTS state if the waiting frag CC doesn't match any initPTS\n const waitingData = this.waitingData;\n if (waitingData) {\n const {\n frag,\n part,\n cache,\n complete\n } = waitingData;\n if (this.initPTS[frag.cc] !== undefined) {\n this.waitingData = null;\n this.waitingVideoCC = -1;\n this.state = State.FRAG_LOADING;\n const payload = cache.flush();\n const data = {\n frag,\n part,\n payload,\n networkDetails: null\n };\n this._handleFragmentLoadProgress(data);\n if (complete) {\n super._handleFragmentLoadComplete(data);\n }\n } else if (this.videoTrackCC !== this.waitingVideoCC) {\n // Drop waiting fragment if videoTrackCC has changed since waitingFragment was set and initPTS was not found\n this.log(`Waiting fragment cc (${frag.cc}) cancelled because video is at cc ${this.videoTrackCC}`);\n this.clearWaitingFragment();\n } else {\n // Drop waiting fragment if an earlier fragment is needed\n const pos = this.getLoadPosition();\n const bufferInfo = BufferHelper.bufferInfo(this.mediaBuffer, pos, this.config.maxBufferHole);\n const waitingFragmentAtPosition = fragmentWithinToleranceTest(bufferInfo.end, this.config.maxFragLookUpTolerance, frag);\n if (waitingFragmentAtPosition < 0) {\n this.log(`Waiting fragment cc (${frag.cc}) @ ${frag.start} cancelled because another fragment at ${bufferInfo.end} is needed`);\n this.clearWaitingFragment();\n }\n }\n } else {\n this.state = State.IDLE;\n }\n }\n }\n this.onTickEnd();\n }\n clearWaitingFragment() {\n const waitingData = this.waitingData;\n if (waitingData) {\n this.fragmentTracker.removeFragment(waitingData.frag);\n this.waitingData = null;\n this.waitingVideoCC = -1;\n this.state = State.IDLE;\n }\n }\n resetLoadingState() {\n this.clearWaitingFragment();\n super.resetLoadingState();\n }\n onTickEnd() {\n const {\n media\n } = this;\n if (!(media != null && media.readyState)) {\n // Exit early if we don't have media or if the media hasn't buffered anything yet (readyState 0)\n return;\n }\n this.lastCurrentTime = media.currentTime;\n }\n doTickIdle() {\n const {\n hls,\n levels,\n media,\n trackId\n } = this;\n const config = hls.config;\n\n // 1. if video not attached AND\n // start fragment already requested OR start frag prefetch not enabled\n // 2. 
if tracks or track not loaded and selected\n // then exit loop\n // => if media not attached but start frag prefetch is enabled and start frag not requested yet, we will not exit loop\n if (!media && (this.startFragRequested || !config.startFragPrefetch) || !(levels != null && levels[trackId])) {\n return;\n }\n const levelInfo = levels[trackId];\n const trackDetails = levelInfo.details;\n if (!trackDetails || trackDetails.live && this.levelLastLoaded !== levelInfo || this.waitForCdnTuneIn(trackDetails)) {\n this.state = State.WAITING_TRACK;\n return;\n }\n const bufferable = this.mediaBuffer ? this.mediaBuffer : this.media;\n if (this.bufferFlushed && bufferable) {\n this.bufferFlushed = false;\n this.afterBufferFlushed(bufferable, ElementaryStreamTypes.AUDIO, PlaylistLevelType.AUDIO);\n }\n const bufferInfo = this.getFwdBufferInfo(bufferable, PlaylistLevelType.AUDIO);\n if (bufferInfo === null) {\n return;\n }\n const {\n bufferedTrack,\n switchingTrack\n } = this;\n if (!switchingTrack && this._streamEnded(bufferInfo, trackDetails)) {\n hls.trigger(Events.BUFFER_EOS, {\n type: 'audio'\n });\n this.state = State.ENDED;\n return;\n }\n const mainBufferInfo = this.getFwdBufferInfo(this.videoBuffer ? this.videoBuffer : this.media, PlaylistLevelType.MAIN);\n const bufferLen = bufferInfo.len;\n const maxBufLen = this.getMaxBufferLength(mainBufferInfo == null ? void 0 : mainBufferInfo.len);\n const fragments = trackDetails.fragments;\n const start = fragments[0].start;\n let targetBufferTime = this.flushing ? this.getLoadPosition() : bufferInfo.end;\n if (switchingTrack && media) {\n const pos = this.getLoadPosition();\n // STABLE\n if (bufferedTrack && !mediaAttributesIdentical(switchingTrack.attrs, bufferedTrack.attrs)) {\n targetBufferTime = pos;\n }\n // if currentTime (pos) is less than alt audio playlist start time, it means that alt audio is ahead of currentTime\n if (trackDetails.PTSKnown && pos < start) {\n // if everything is buffered from pos to start, or if audio is buffered ahead, seek to start\n if (bufferInfo.end > start || bufferInfo.nextStart) {\n this.log('Alt audio track ahead of main track, seek to start of alt audio track');\n media.currentTime = start + 0.05;\n }\n }\n }\n\n // if buffer length is less than maxBufLen, or near the end, find a fragment to load\n if (bufferLen >= maxBufLen && !switchingTrack && targetBufferTime < fragments[fragments.length - 1].start) {\n return;\n }\n let frag = this.getNextFragment(targetBufferTime, trackDetails);\n let atGap = false;\n // Avoid loop loading by using nextLoadPosition set for backtracking and skipping consecutive GAP tags\n if (frag && this.isLoopLoading(frag, targetBufferTime)) {\n atGap = !!frag.gap;\n frag = this.getNextFragmentLoopLoading(frag, trackDetails, bufferInfo, PlaylistLevelType.MAIN, maxBufLen);\n }\n if (!frag) {\n this.bufferFlushed = true;\n return;\n }\n\n // Buffer audio up to one target duration ahead of main buffer\n const atBufferSyncLimit = mainBufferInfo && frag.start > mainBufferInfo.end + trackDetails.targetduration;\n if (atBufferSyncLimit ||\n // Or wait for main buffer after buffering some audio\n !(mainBufferInfo != null && mainBufferInfo.len) && bufferInfo.len) {\n // Check fragment-tracker for main fragments since GAP segments do not show up in bufferInfo\n const mainFrag = this.getAppendedFrag(frag.start, PlaylistLevelType.MAIN);\n if (mainFrag === null) {\n return;\n }\n // Bridge gaps in main buffer\n atGap || (atGap = !!mainFrag.gap || !!atBufferSyncLimit && mainBufferInfo.len === 0);\n 
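// Defer loading here while syncing with the main buffer: bail out when audio is already a full target\n // duration ahead of main and not bridging a gap, or when bridging a gap that already has a buffered\n // audio range starting before the end of the main fragment.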
if (atBufferSyncLimit && !atGap || atGap && bufferInfo.nextStart && bufferInfo.nextStart < mainFrag.end) {\n return;\n }\n }\n this.loadFragment(frag, levelInfo, targetBufferTime);\n }\n getMaxBufferLength(mainBufferLength) {\n const maxConfigBuffer = super.getMaxBufferLength();\n if (!mainBufferLength) {\n return maxConfigBuffer;\n }\n return Math.min(Math.max(maxConfigBuffer, mainBufferLength), this.config.maxMaxBufferLength);\n }\n onMediaDetaching() {\n this.videoBuffer = null;\n this.bufferFlushed = this.flushing = false;\n super.onMediaDetaching();\n }\n onAudioTracksUpdated(event, {\n audioTracks\n }) {\n // Resetting the transmuxer is essential for large context switches (Content Steering)\n this.resetTransmuxer();\n this.levels = audioTracks.map(mediaPlaylist => new Level(mediaPlaylist));\n }\n onAudioTrackSwitching(event, data) {\n // if a URL is found on the new audio track, it is an alternate audio track\n const altAudio = !!data.url;\n this.trackId = data.id;\n const {\n fragCurrent\n } = this;\n if (fragCurrent) {\n fragCurrent.abortRequests();\n this.removeUnbufferedFrags(fragCurrent.start);\n }\n this.resetLoadingState();\n // destroy the now-unused transmuxer when switching audio back to main\n if (!altAudio) {\n this.resetTransmuxer();\n } else {\n // switching to an alt audio track, start the timer if not already started\n this.setInterval(TICK_INTERVAL$2);\n }\n\n // should we switch tracks?\n if (altAudio) {\n this.switchingTrack = data;\n // the main audio track is handled by the stream-controller; only act here when switching to an alt audio track\n this.state = State.IDLE;\n this.flushAudioIfNeeded(data);\n } else {\n this.switchingTrack = null;\n this.bufferedTrack = data;\n this.state = State.STOPPED;\n }\n this.tick();\n }\n onManifestLoading() {\n this.fragmentTracker.removeAllFragments();\n this.startPosition = this.lastCurrentTime = 0;\n this.bufferFlushed = this.flushing = false;\n this.levels = this.mainDetails = this.waitingData = this.bufferedTrack = this.cachedTrackLoadedData = this.switchingTrack = null;\n this.startFragRequested = false;\n this.trackId = this.videoTrackCC = this.waitingVideoCC = -1;\n }\n onLevelLoaded(event, data) {\n this.mainDetails = data.details;\n if (this.cachedTrackLoadedData !== null) {\n this.hls.trigger(Events.AUDIO_TRACK_LOADED, this.cachedTrackLoadedData);\n this.cachedTrackLoadedData = null;\n }\n }\n onAudioTrackLoaded(event, data) {\n var _track$details;\n if (this.mainDetails == null) {\n this.cachedTrackLoadedData = data;\n return;\n }\n const {\n levels\n } = this;\n const {\n details: newDetails,\n id: trackId\n } = data;\n if (!levels) {\n this.warn(`Audio tracks were reset while loading level ${trackId}`);\n return;\n }\n this.log(`Audio track ${trackId} loaded [${newDetails.startSN},${newDetails.endSN}]${newDetails.lastPartSn ? 
`[part-${newDetails.lastPartSn}-${newDetails.lastPartIndex}]` : ''},duration:${newDetails.totalduration}`);\n const track = levels[trackId];\n let sliding = 0;\n if (newDetails.live || (_track$details = track.details) != null && _track$details.live) {\n this.checkLiveUpdate(newDetails);\n const mainDetails = this.mainDetails;\n if (newDetails.deltaUpdateFailed || !mainDetails) {\n return;\n }\n if (!track.details && newDetails.hasProgramDateTime && mainDetails.hasProgramDateTime) {\n // Make sure our audio rendition is aligned with the \"main\" rendition, using\n // pdt as our reference times.\n alignMediaPlaylistByPDT(newDetails, mainDetails);\n sliding = newDetails.fragments[0].start;\n } else {\n var _this$levelLastLoaded;\n sliding = this.alignPlaylists(newDetails, track.details, (_this$levelLastLoaded = this.levelLastLoaded) == null ? void 0 : _this$levelLastLoaded.details);\n }\n }\n track.details = newDetails;\n this.levelLastLoaded = track;\n\n // compute start position if we are aligned with the main playlist\n if (!this.startFragRequested && (this.mainDetails || !newDetails.live)) {\n this.setStartPosition(this.mainDetails || newDetails, sliding);\n }\n // only switch back to IDLE state if we were waiting for track to start downloading a new fragment\n if (this.state === State.WAITING_TRACK && !this.waitForCdnTuneIn(newDetails)) {\n this.state = State.IDLE;\n }\n\n // trigger handler right now\n this.tick();\n }\n _handleFragmentLoadProgress(data) {\n var _frag$initSegment;\n const {\n frag,\n part,\n payload\n } = data;\n const {\n config,\n trackId,\n levels\n } = this;\n if (!levels) {\n this.warn(`Audio tracks were reset while fragment load was in progress. Fragment ${frag.sn} of level ${frag.level} will not be buffered`);\n return;\n }\n const track = levels[trackId];\n if (!track) {\n this.warn('Audio track is undefined on fragment load progress');\n return;\n }\n const details = track.details;\n if (!details) {\n this.warn('Audio track details undefined on fragment load progress');\n this.removeUnbufferedFrags(frag.start);\n return;\n }\n const audioCodec = config.defaultAudioCodec || track.audioCodec || 'mp4a.40.2';\n let transmuxer = this.transmuxer;\n if (!transmuxer) {\n transmuxer = this.transmuxer = new TransmuxerInterface(this.hls, PlaylistLevelType.AUDIO, this._handleTransmuxComplete.bind(this), this._handleTransmuxerFlush.bind(this));\n }\n\n // Check if we have video initPTS\n // If not we need to wait for it\n const initPTS = this.initPTS[frag.cc];\n const initSegmentData = (_frag$initSegment = frag.initSegment) == null ? void 0 : _frag$initSegment.data;\n if (initPTS !== undefined) {\n // this.log(`Transmuxing ${sn} of [${details.startSN} ,${details.endSN}],track ${trackId}`);\n // time Offset is accurate if level PTS is known, or if playlist is not sliding (not live)\n const accurateTimeOffset = false; // details.PTSKnown || !details.live;\n const partIndex = part ? 
part.index : -1;\n const partial = partIndex !== -1;\n const chunkMeta = new ChunkMetadata(frag.level, frag.sn, frag.stats.chunkCount, payload.byteLength, partIndex, partial);\n transmuxer.push(payload, initSegmentData, audioCodec, '', frag, part, details.totalduration, accurateTimeOffset, chunkMeta, initPTS);\n } else {\n this.log(`Unknown video PTS for cc ${frag.cc}, waiting for video PTS before demuxing audio frag ${frag.sn} of [${details.startSN} ,${details.endSN}],track ${trackId}`);\n const {\n cache\n } = this.waitingData = this.waitingData || {\n frag,\n part,\n cache: new ChunkCache(),\n complete: false\n };\n cache.push(new Uint8Array(payload));\n this.waitingVideoCC = this.videoTrackCC;\n this.state = State.WAITING_INIT_PTS;\n }\n }\n _handleFragmentLoadComplete(fragLoadedData) {\n if (this.waitingData) {\n this.waitingData.complete = true;\n return;\n }\n super._handleFragmentLoadComplete(fragLoadedData);\n }\n onBufferReset( /* event: Events.BUFFER_RESET */\n ) {\n // reset reference to sourcebuffers\n this.mediaBuffer = this.videoBuffer = null;\n this.loadedmetadata = false;\n }\n onBufferCreated(event, data) {\n const audioTrack = data.tracks.audio;\n if (audioTrack) {\n this.mediaBuffer = audioTrack.buffer || null;\n }\n if (data.tracks.video) {\n this.videoBuffer = data.tracks.video.buffer || null;\n }\n }\n onFragBuffered(event, data) {\n const {\n frag,\n part\n } = data;\n if (frag.type !== PlaylistLevelType.AUDIO) {\n if (!this.loadedmetadata && frag.type === PlaylistLevelType.MAIN) {\n const bufferable = this.videoBuffer || this.media;\n if (bufferable) {\n const bufferedTimeRanges = BufferHelper.getBuffered(bufferable);\n if (bufferedTimeRanges.length) {\n this.loadedmetadata = true;\n }\n }\n }\n return;\n }\n if (this.fragContextChanged(frag)) {\n // If a level switch was requested while a fragment was buffering, it will emit the FRAG_BUFFERED event upon completion\n // Avoid setting state back to IDLE or concluding the audio switch; otherwise, the switched-to track will not buffer\n this.warn(`Fragment ${frag.sn}${part ? ' p: ' + part.index : ''} of level ${frag.level} finished buffering, but was aborted. state: ${this.state}, audioSwitch: ${this.switchingTrack ? this.switchingTrack.name : 'false'}`);\n return;\n }\n if (frag.sn !== 'initSegment') {\n this.fragPrevious = frag;\n const track = this.switchingTrack;\n if (track) {\n this.bufferedTrack = track;\n this.switchingTrack = null;\n this.hls.trigger(Events.AUDIO_TRACK_SWITCHED, _objectSpread2({}, track));\n }\n }\n this.fragBufferedComplete(frag, part);\n }\n onError(event, data) {\n var _data$context;\n if (data.fatal) {\n this.state = State.ERROR;\n return;\n }\n switch (data.details) {\n case ErrorDetails.FRAG_GAP:\n case ErrorDetails.FRAG_PARSING_ERROR:\n case ErrorDetails.FRAG_DECRYPT_ERROR:\n case ErrorDetails.FRAG_LOAD_ERROR:\n case ErrorDetails.FRAG_LOAD_TIMEOUT:\n case ErrorDetails.KEY_LOAD_ERROR:\n case ErrorDetails.KEY_LOAD_TIMEOUT:\n this.onFragmentOrKeyLoadError(PlaylistLevelType.AUDIO, data);\n break;\n case ErrorDetails.AUDIO_TRACK_LOAD_ERROR:\n case ErrorDetails.AUDIO_TRACK_LOAD_TIMEOUT:\n case ErrorDetails.LEVEL_PARSING_ERROR:\n // in case of non fatal error while loading track, if not retrying to load track, switch back to IDLE\n if (!data.levelRetry && this.state === State.WAITING_TRACK && ((_data$context = data.context) == null ? 
void 0 : _data$context.type) === PlaylistContextType.AUDIO_TRACK) {\n this.state = State.IDLE;\n }\n break;\n case ErrorDetails.BUFFER_APPEND_ERROR:\n case ErrorDetails.BUFFER_FULL_ERROR:\n if (!data.parent || data.parent !== 'audio') {\n return;\n }\n if (data.details === ErrorDetails.BUFFER_APPEND_ERROR) {\n this.resetLoadingState();\n return;\n }\n if (this.reduceLengthAndFlushBuffer(data)) {\n this.bufferedTrack = null;\n super.flushMainBuffer(0, Number.POSITIVE_INFINITY, 'audio');\n }\n break;\n case ErrorDetails.INTERNAL_EXCEPTION:\n this.recoverWorkerError(data);\n break;\n }\n }\n onBufferFlushing(event, {\n type\n }) {\n if (type !== ElementaryStreamTypes.VIDEO) {\n this.flushing = true;\n }\n }\n onBufferFlushed(event, {\n type\n }) {\n if (type !== ElementaryStreamTypes.VIDEO) {\n this.flushing = false;\n this.bufferFlushed = true;\n if (this.state === State.ENDED) {\n this.state = State.IDLE;\n }\n const mediaBuffer = this.mediaBuffer || this.media;\n if (mediaBuffer) {\n this.afterBufferFlushed(mediaBuffer, type, PlaylistLevelType.AUDIO);\n this.tick();\n }\n }\n }\n _handleTransmuxComplete(transmuxResult) {\n var _id3$samples;\n const id = 'audio';\n const {\n hls\n } = this;\n const {\n remuxResult,\n chunkMeta\n } = transmuxResult;\n const context = this.getCurrentContext(chunkMeta);\n if (!context) {\n this.resetWhenMissingContext(chunkMeta);\n return;\n }\n const {\n frag,\n part,\n level\n } = context;\n const {\n details\n } = level;\n const {\n audio,\n text,\n id3,\n initSegment\n } = remuxResult;\n\n // Check if the current fragment has been aborted. We check this by first seeing if we're still playing the current level.\n // If we are, subsequently check if the currently loading fragment (fragCurrent) has changed.\n if (this.fragContextChanged(frag) || !details) {\n this.fragmentTracker.removeFragment(frag);\n return;\n }\n this.state = State.PARSING;\n if (this.switchingTrack && audio) {\n this.completeAudioSwitch(this.switchingTrack);\n }\n if (initSegment != null && initSegment.tracks) {\n const mapFragment = frag.initSegment || frag;\n this._bufferInitSegment(level, initSegment.tracks, mapFragment, chunkMeta);\n hls.trigger(Events.FRAG_PARSING_INIT_SEGMENT, {\n frag: mapFragment,\n id,\n tracks: initSegment.tracks\n });\n // Only flush audio from old audio tracks when PTS is known on new audio track\n }\n if (audio) {\n const {\n startPTS,\n endPTS,\n startDTS,\n endDTS\n } = audio;\n if (part) {\n part.elementaryStreams[ElementaryStreamTypes.AUDIO] = {\n startPTS,\n endPTS,\n startDTS,\n endDTS\n };\n }\n frag.setElementaryStreamInfo(ElementaryStreamTypes.AUDIO, startPTS, endPTS, startDTS, endDTS);\n this.bufferFragmentData(audio, frag, part, chunkMeta);\n }\n if (id3 != null && (_id3$samples = id3.samples) != null && _id3$samples.length) {\n const emittedID3 = _extends({\n id,\n frag,\n details\n }, id3);\n hls.trigger(Events.FRAG_PARSING_METADATA, emittedID3);\n }\n if (text) {\n const emittedText = _extends({\n id,\n frag,\n details\n }, text);\n hls.trigger(Events.FRAG_PARSING_USERDATA, emittedText);\n }\n }\n _bufferInitSegment(currentLevel, tracks, frag, chunkMeta) {\n if (this.state !== State.PARSING) {\n return;\n }\n // delete any video track found on audio transmuxer\n if (tracks.video) {\n delete tracks.video;\n }\n\n // include levelCodec in audio and video tracks\n const track = tracks.audio;\n if (!track) {\n return;\n }\n track.id = 'audio';\n const variantAudioCodecs = currentLevel.audioCodec;\n this.log(`Init audio buffer, 
container:${track.container}, codecs[level/parsed]=[${variantAudioCodecs}/${track.codec}]`);\n // SourceBuffer will use track.levelCodec if defined\n if (variantAudioCodecs && variantAudioCodecs.split(',').length === 1) {\n track.levelCodec = variantAudioCodecs;\n }\n this.hls.trigger(Events.BUFFER_CODECS, tracks);\n const initSegment = track.initSegment;\n if (initSegment != null && initSegment.byteLength) {\n const segment = {\n type: 'audio',\n frag,\n part: null,\n chunkMeta,\n parent: frag.type,\n data: initSegment\n };\n this.hls.trigger(Events.BUFFER_APPENDING, segment);\n }\n // trigger handler right now\n this.tickImmediate();\n }\n loadFragment(frag, track, targetBufferTime) {\n // only load if fragment is not loaded or if in audio switch\n const fragState = this.fragmentTracker.getState(frag);\n this.fragCurrent = frag;\n\n // we force a frag loading in audio switch as fragment tracker might not have evicted previous frags in case of quick audio switch\n if (this.switchingTrack || fragState === FragmentState.NOT_LOADED || fragState === FragmentState.PARTIAL) {\n var _track$details2;\n if (frag.sn === 'initSegment') {\n this._loadInitSegment(frag, track);\n } else if ((_track$details2 = track.details) != null && _track$details2.live && !this.initPTS[frag.cc]) {\n this.log(`Waiting for video PTS in continuity counter ${frag.cc} of live stream before loading audio fragment ${frag.sn} of level ${this.trackId}`);\n this.state = State.WAITING_INIT_PTS;\n const mainDetails = this.mainDetails;\n if (mainDetails && mainDetails.fragments[0].start !== track.details.fragments[0].start) {\n alignMediaPlaylistByPDT(track.details, mainDetails);\n }\n } else {\n this.startFragRequested = true;\n super.loadFragment(frag, track, targetBufferTime);\n }\n } else {\n this.clearTrackerIfNeeded(frag);\n }\n }\n flushAudioIfNeeded(switchingTrack) {\n const {\n media,\n bufferedTrack\n } = this;\n const bufferedAttributes = bufferedTrack == null ? 
void 0 : bufferedTrack.attrs;\n const switchAttributes = switchingTrack.attrs;\n if (media && bufferedAttributes && (bufferedAttributes.CHANNELS !== switchAttributes.CHANNELS || bufferedTrack.name !== switchingTrack.name || bufferedTrack.lang !== switchingTrack.lang)) {\n this.log('Switching audio track : flushing all audio');\n super.flushMainBuffer(0, Number.POSITIVE_INFINITY, 'audio');\n this.bufferedTrack = null;\n }\n }\n completeAudioSwitch(switchingTrack) {\n const {\n hls\n } = this;\n this.flushAudioIfNeeded(switchingTrack);\n this.bufferedTrack = switchingTrack;\n this.switchingTrack = null;\n hls.trigger(Events.AUDIO_TRACK_SWITCHED, _objectSpread2({}, switchingTrack));\n }\n}\n\nclass AudioTrackController extends BasePlaylistController {\n constructor(hls) {\n super(hls, '[audio-track-controller]');\n this.tracks = [];\n this.groupIds = null;\n this.tracksInGroup = [];\n this.trackId = -1;\n this.currentTrack = null;\n this.selectDefaultTrack = true;\n this.registerListeners();\n }\n registerListeners() {\n const {\n hls\n } = this;\n hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.on(Events.MANIFEST_PARSED, this.onManifestParsed, this);\n hls.on(Events.LEVEL_LOADING, this.onLevelLoading, this);\n hls.on(Events.LEVEL_SWITCHING, this.onLevelSwitching, this);\n hls.on(Events.AUDIO_TRACK_LOADED, this.onAudioTrackLoaded, this);\n hls.on(Events.ERROR, this.onError, this);\n }\n unregisterListeners() {\n const {\n hls\n } = this;\n hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.off(Events.MANIFEST_PARSED, this.onManifestParsed, this);\n hls.off(Events.LEVEL_LOADING, this.onLevelLoading, this);\n hls.off(Events.LEVEL_SWITCHING, this.onLevelSwitching, this);\n hls.off(Events.AUDIO_TRACK_LOADED, this.onAudioTrackLoaded, this);\n hls.off(Events.ERROR, this.onError, this);\n }\n destroy() {\n this.unregisterListeners();\n this.tracks.length = 0;\n this.tracksInGroup.length = 0;\n this.currentTrack = null;\n super.destroy();\n }\n onManifestLoading() {\n this.tracks = [];\n this.tracksInGroup = [];\n this.groupIds = null;\n this.currentTrack = null;\n this.trackId = -1;\n this.selectDefaultTrack = true;\n }\n onManifestParsed(event, data) {\n this.tracks = data.audioTracks || [];\n }\n onAudioTrackLoaded(event, data) {\n const {\n id,\n groupId,\n details\n } = data;\n const trackInActiveGroup = this.tracksInGroup[id];\n if (!trackInActiveGroup || trackInActiveGroup.groupId !== groupId) {\n this.warn(`Audio track with id:${id} and group:${groupId} not found in active group ${trackInActiveGroup == null ? void 0 : trackInActiveGroup.groupId}`);\n return;\n }\n const curDetails = trackInActiveGroup.details;\n trackInActiveGroup.details = data.details;\n this.log(`Audio track ${id} \"${trackInActiveGroup.name}\" lang:${trackInActiveGroup.lang} group:${groupId} loaded [${details.startSN}-${details.endSN}]`);\n if (id === this.trackId) {\n this.playlistLoaded(id, data, curDetails);\n }\n }\n onLevelLoading(event, data) {\n this.switchLevel(data.level);\n }\n onLevelSwitching(event, data) {\n this.switchLevel(data.level);\n }\n switchLevel(levelIndex) {\n const levelInfo = this.hls.levels[levelIndex];\n if (!levelInfo) {\n return;\n }\n const audioGroups = levelInfo.audioGroups || null;\n const currentGroups = this.groupIds;\n let currentTrack = this.currentTrack;\n if (!audioGroups || (currentGroups == null ? void 0 : currentGroups.length) !== (audioGroups == null ? 
void 0 : audioGroups.length) || audioGroups != null && audioGroups.some(groupId => (currentGroups == null ? void 0 : currentGroups.indexOf(groupId)) === -1)) {\n this.groupIds = audioGroups;\n this.trackId = -1;\n this.currentTrack = null;\n const audioTracks = this.tracks.filter(track => !audioGroups || audioGroups.indexOf(track.groupId) !== -1);\n if (audioTracks.length) {\n // Disable selectDefaultTrack if there are no default tracks\n if (this.selectDefaultTrack && !audioTracks.some(track => track.default)) {\n this.selectDefaultTrack = false;\n }\n // track.id should match hls.audioTracks index\n audioTracks.forEach((track, i) => {\n track.id = i;\n });\n } else if (!currentTrack && !this.tracksInGroup.length) {\n // Do not dispatch AUDIO_TRACKS_UPDATED when there were and are no tracks\n return;\n }\n this.tracksInGroup = audioTracks;\n\n // Find preferred track\n const audioPreference = this.hls.config.audioPreference;\n if (!currentTrack && audioPreference) {\n const groupIndex = findMatchingOption(audioPreference, audioTracks, audioMatchPredicate);\n if (groupIndex > -1) {\n currentTrack = audioTracks[groupIndex];\n } else {\n const allIndex = findMatchingOption(audioPreference, this.tracks);\n currentTrack = this.tracks[allIndex];\n }\n }\n\n // Select initial track\n let trackId = this.findTrackId(currentTrack);\n if (trackId === -1 && currentTrack) {\n trackId = this.findTrackId(null);\n }\n\n // Dispatch events and load track if needed\n const audioTracksUpdated = {\n audioTracks\n };\n this.log(`Updating audio tracks, ${audioTracks.length} track(s) found in group(s): ${audioGroups == null ? void 0 : audioGroups.join(',')}`);\n this.hls.trigger(Events.AUDIO_TRACKS_UPDATED, audioTracksUpdated);\n const selectedTrackId = this.trackId;\n if (trackId !== -1 && selectedTrackId === -1) {\n this.setAudioTrack(trackId);\n } else if (audioTracks.length && selectedTrackId === -1) {\n var _this$groupIds;\n const error = new Error(`No audio track selected for current audio group-ID(s): ${(_this$groupIds = this.groupIds) == null ? 
void 0 : _this$groupIds.join(',')} track count: ${audioTracks.length}`);\n this.warn(error.message);\n this.hls.trigger(Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.AUDIO_TRACK_LOAD_ERROR,\n fatal: true,\n error\n });\n }\n } else if (this.shouldReloadPlaylist(currentTrack)) {\n // Retry playlist loading if no playlist is or has been loaded yet\n this.setAudioTrack(this.trackId);\n }\n }\n onError(event, data) {\n if (data.fatal || !data.context) {\n return;\n }\n if (data.context.type === PlaylistContextType.AUDIO_TRACK && data.context.id === this.trackId && (!this.groupIds || this.groupIds.indexOf(data.context.groupId) !== -1)) {\n this.requestScheduled = -1;\n this.checkRetry(data);\n }\n }\n get allAudioTracks() {\n return this.tracks;\n }\n get audioTracks() {\n return this.tracksInGroup;\n }\n get audioTrack() {\n return this.trackId;\n }\n set audioTrack(newId) {\n // If the audio track is selected via the API, don't choose the manifest default track\n this.selectDefaultTrack = false;\n this.setAudioTrack(newId);\n }\n setAudioOption(audioOption) {\n const hls = this.hls;\n hls.config.audioPreference = audioOption;\n if (audioOption) {\n const allAudioTracks = this.allAudioTracks;\n this.selectDefaultTrack = false;\n if (allAudioTracks.length) {\n // First see if current option matches (no switch op)\n const currentTrack = this.currentTrack;\n if (currentTrack && matchesOption(audioOption, currentTrack, audioMatchPredicate)) {\n return currentTrack;\n }\n // Find option in available tracks (tracksInGroup)\n const groupIndex = findMatchingOption(audioOption, this.tracksInGroup, audioMatchPredicate);\n if (groupIndex > -1) {\n const track = this.tracksInGroup[groupIndex];\n this.setAudioTrack(groupIndex);\n return track;\n } else if (currentTrack) {\n // Find option in nearest level audio group\n let searchIndex = hls.loadLevel;\n if (searchIndex === -1) {\n searchIndex = hls.firstAutoLevel;\n }\n const switchIndex = findClosestLevelWithAudioGroup(audioOption, hls.levels, allAudioTracks, searchIndex, audioMatchPredicate);\n if (switchIndex === -1) {\n // could not find matching variant\n return null;\n }\n // and switch level to achieve the audio group switch\n hls.nextLoadLevel = switchIndex;\n }\n if (audioOption.channels || audioOption.audioCodec) {\n // Could not find a match with codec / channels predicate\n // Find a match without channels or codec\n const withoutCodecAndChannelsMatch = findMatchingOption(audioOption, allAudioTracks);\n if (withoutCodecAndChannelsMatch > -1) {\n return allAudioTracks[withoutCodecAndChannelsMatch];\n }\n }\n }\n }\n return null;\n }\n setAudioTrack(newId) {\n const tracks = this.tracksInGroup;\n\n // check if the track index is valid\n if (newId < 0 || newId >= tracks.length) {\n this.warn(`Invalid audio track id: ${newId}`);\n return;\n }\n\n // stop the live reloading timer, if any\n this.clearTimer();\n this.selectDefaultTrack = false;\n const lastTrack = this.currentTrack;\n const track = tracks[newId];\n const trackLoaded = track.details && !track.details.live;\n if (newId === this.trackId && track === lastTrack && trackLoaded) {\n return;\n }\n this.log(`Switching to audio-track ${newId} \"${track.name}\" lang:${track.lang} group:${track.groupId} channels:${track.channels}`);\n this.trackId = newId;\n this.currentTrack = track;\n this.hls.trigger(Events.AUDIO_TRACK_SWITCHING, _objectSpread2({}, track));\n // Do not reload track unless live\n if (trackLoaded) {\n return;\n }\n const hlsUrlParameters = 
this.switchParams(track.url, lastTrack == null ? void 0 : lastTrack.details, track.details);\n this.loadPlaylist(hlsUrlParameters);\n }\n findTrackId(currentTrack) {\n const audioTracks = this.tracksInGroup;\n for (let i = 0; i < audioTracks.length; i++) {\n const track = audioTracks[i];\n if (this.selectDefaultTrack && !track.default) {\n continue;\n }\n if (!currentTrack || matchesOption(currentTrack, track, audioMatchPredicate)) {\n return i;\n }\n }\n if (currentTrack) {\n const {\n name,\n lang,\n assocLang,\n characteristics,\n audioCodec,\n channels\n } = currentTrack;\n for (let i = 0; i < audioTracks.length; i++) {\n const track = audioTracks[i];\n if (matchesOption({\n name,\n lang,\n assocLang,\n characteristics,\n audioCodec,\n channels\n }, track, audioMatchPredicate)) {\n return i;\n }\n }\n for (let i = 0; i < audioTracks.length; i++) {\n const track = audioTracks[i];\n if (mediaAttributesIdentical(currentTrack.attrs, track.attrs, ['LANGUAGE', 'ASSOC-LANGUAGE', 'CHARACTERISTICS'])) {\n return i;\n }\n }\n for (let i = 0; i < audioTracks.length; i++) {\n const track = audioTracks[i];\n if (mediaAttributesIdentical(currentTrack.attrs, track.attrs, ['LANGUAGE'])) {\n return i;\n }\n }\n }\n return -1;\n }\n loadPlaylist(hlsUrlParameters) {\n const audioTrack = this.currentTrack;\n if (this.shouldLoadPlaylist(audioTrack) && audioTrack) {\n super.loadPlaylist();\n const id = audioTrack.id;\n const groupId = audioTrack.groupId;\n let url = audioTrack.url;\n if (hlsUrlParameters) {\n try {\n url = hlsUrlParameters.addDirectives(url);\n } catch (error) {\n this.warn(`Could not construct new URL with HLS Delivery Directives: ${error}`);\n }\n }\n // track not retrieved yet, or live playlist we need to (re)load it\n this.log(`loading audio-track playlist ${id} \"${audioTrack.name}\" lang:${audioTrack.lang} group:${groupId}`);\n this.clearTimer();\n this.hls.trigger(Events.AUDIO_TRACK_LOADING, {\n url,\n id,\n groupId,\n deliveryDirectives: hlsUrlParameters || null\n });\n }\n }\n}\n\nconst TICK_INTERVAL$1 = 500; // how often to tick in ms\n\nclass SubtitleStreamController extends BaseStreamController {\n constructor(hls, fragmentTracker, keyLoader) {\n super(hls, fragmentTracker, keyLoader, '[subtitle-stream-controller]', PlaylistLevelType.SUBTITLE);\n this.currentTrackId = -1;\n this.tracksBuffered = [];\n this.mainDetails = null;\n this._registerListeners();\n }\n onHandlerDestroying() {\n this._unregisterListeners();\n super.onHandlerDestroying();\n this.mainDetails = null;\n }\n _registerListeners() {\n const {\n hls\n } = this;\n hls.on(Events.MEDIA_ATTACHED, this.onMediaAttached, this);\n hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.on(Events.LEVEL_LOADED, this.onLevelLoaded, this);\n hls.on(Events.ERROR, this.onError, this);\n hls.on(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);\n hls.on(Events.SUBTITLE_TRACK_SWITCH, this.onSubtitleTrackSwitch, this);\n hls.on(Events.SUBTITLE_TRACK_LOADED, this.onSubtitleTrackLoaded, this);\n hls.on(Events.SUBTITLE_FRAG_PROCESSED, this.onSubtitleFragProcessed, this);\n hls.on(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);\n hls.on(Events.FRAG_BUFFERED, this.onFragBuffered, this);\n }\n _unregisterListeners() {\n const {\n hls\n } = this;\n hls.off(Events.MEDIA_ATTACHED, this.onMediaAttached, this);\n hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n 
hls.off(Events.LEVEL_LOADED, this.onLevelLoaded, this);\n hls.off(Events.ERROR, this.onError, this);\n hls.off(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);\n hls.off(Events.SUBTITLE_TRACK_SWITCH, this.onSubtitleTrackSwitch, this);\n hls.off(Events.SUBTITLE_TRACK_LOADED, this.onSubtitleTrackLoaded, this);\n hls.off(Events.SUBTITLE_FRAG_PROCESSED, this.onSubtitleFragProcessed, this);\n hls.off(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);\n hls.off(Events.FRAG_BUFFERED, this.onFragBuffered, this);\n }\n startLoad(startPosition) {\n this.stopLoad();\n this.state = State.IDLE;\n this.setInterval(TICK_INTERVAL$1);\n this.nextLoadPosition = this.startPosition = this.lastCurrentTime = startPosition;\n this.tick();\n }\n onManifestLoading() {\n this.mainDetails = null;\n this.fragmentTracker.removeAllFragments();\n }\n onMediaDetaching() {\n this.tracksBuffered = [];\n super.onMediaDetaching();\n }\n onLevelLoaded(event, data) {\n this.mainDetails = data.details;\n }\n onSubtitleFragProcessed(event, data) {\n const {\n frag,\n success\n } = data;\n this.fragPrevious = frag;\n this.state = State.IDLE;\n if (!success) {\n return;\n }\n const buffered = this.tracksBuffered[this.currentTrackId];\n if (!buffered) {\n return;\n }\n\n // Create/update a buffered array matching the interface used by BufferHelper.bufferedInfo\n // so we can re-use the logic used to detect how much has been buffered\n let timeRange;\n const fragStart = frag.start;\n for (let i = 0; i < buffered.length; i++) {\n if (fragStart >= buffered[i].start && fragStart <= buffered[i].end) {\n timeRange = buffered[i];\n break;\n }\n }\n const fragEnd = frag.start + frag.duration;\n if (timeRange) {\n timeRange.end = fragEnd;\n } else {\n timeRange = {\n start: fragStart,\n end: fragEnd\n };\n buffered.push(timeRange);\n }\n this.fragmentTracker.fragBuffered(frag);\n this.fragBufferedComplete(frag, null);\n }\n onBufferFlushing(event, data) {\n const {\n startOffset,\n endOffset\n } = data;\n if (startOffset === 0 && endOffset !== Number.POSITIVE_INFINITY) {\n const endOffsetSubtitles = endOffset - 1;\n if (endOffsetSubtitles <= 0) {\n return;\n }\n data.endOffsetSubtitles = Math.max(0, endOffsetSubtitles);\n this.tracksBuffered.forEach(buffered => {\n for (let i = 0; i < buffered.length;) {\n if (buffered[i].end <= endOffsetSubtitles) {\n buffered.shift();\n continue;\n } else if (buffered[i].start < endOffsetSubtitles) {\n buffered[i].start = endOffsetSubtitles;\n } else {\n break;\n }\n i++;\n }\n });\n this.fragmentTracker.removeFragmentsInRange(startOffset, endOffsetSubtitles, PlaylistLevelType.SUBTITLE);\n }\n }\n onFragBuffered(event, data) {\n if (!this.loadedmetadata && data.frag.type === PlaylistLevelType.MAIN) {\n var _this$media;\n if ((_this$media = this.media) != null && _this$media.buffered.length) {\n this.loadedmetadata = true;\n }\n }\n }\n\n // If something goes wrong, proceed to next frag, if we were processing one.\n onError(event, data) {\n const frag = data.frag;\n if ((frag == null ? 
void 0 : frag.type) === PlaylistLevelType.SUBTITLE) {\n if (data.details === ErrorDetails.FRAG_GAP) {\n this.fragmentTracker.fragBuffered(frag, true);\n }\n if (this.fragCurrent) {\n this.fragCurrent.abortRequests();\n }\n if (this.state !== State.STOPPED) {\n this.state = State.IDLE;\n }\n }\n }\n\n // Got all new subtitle levels.\n onSubtitleTracksUpdated(event, {\n subtitleTracks\n }) {\n if (this.levels && subtitleOptionsIdentical(this.levels, subtitleTracks)) {\n this.levels = subtitleTracks.map(mediaPlaylist => new Level(mediaPlaylist));\n return;\n }\n this.tracksBuffered = [];\n this.levels = subtitleTracks.map(mediaPlaylist => {\n const level = new Level(mediaPlaylist);\n this.tracksBuffered[level.id] = [];\n return level;\n });\n this.fragmentTracker.removeFragmentsInRange(0, Number.POSITIVE_INFINITY, PlaylistLevelType.SUBTITLE);\n this.fragPrevious = null;\n this.mediaBuffer = null;\n }\n onSubtitleTrackSwitch(event, data) {\n var _this$levels;\n this.currentTrackId = data.id;\n if (!((_this$levels = this.levels) != null && _this$levels.length) || this.currentTrackId === -1) {\n this.clearInterval();\n return;\n }\n\n // Check if track has the necessary details to load fragments\n const currentTrack = this.levels[this.currentTrackId];\n if (currentTrack != null && currentTrack.details) {\n this.mediaBuffer = this.mediaBufferTimeRanges;\n } else {\n this.mediaBuffer = null;\n }\n if (currentTrack) {\n this.setInterval(TICK_INTERVAL$1);\n }\n }\n\n // Got a new set of subtitle fragments.\n onSubtitleTrackLoaded(event, data) {\n var _track$details;\n const {\n currentTrackId,\n levels\n } = this;\n const {\n details: newDetails,\n id: trackId\n } = data;\n if (!levels) {\n this.warn(`Subtitle tracks were reset while loading level ${trackId}`);\n return;\n }\n const track = levels[trackId];\n if (trackId >= levels.length || !track) {\n return;\n }\n this.log(`Subtitle track ${trackId} loaded [${newDetails.startSN},${newDetails.endSN}]${newDetails.lastPartSn ? `[part-${newDetails.lastPartSn}-${newDetails.lastPartIndex}]` : ''},duration:${newDetails.totalduration}`);\n this.mediaBuffer = this.mediaBufferTimeRanges;\n let sliding = 0;\n if (newDetails.live || (_track$details = track.details) != null && _track$details.live) {\n const mainDetails = this.mainDetails;\n if (newDetails.deltaUpdateFailed || !mainDetails) {\n return;\n }\n const mainSlidingStartFragment = mainDetails.fragments[0];\n if (!track.details) {\n if (newDetails.hasProgramDateTime && mainDetails.hasProgramDateTime) {\n alignMediaPlaylistByPDT(newDetails, mainDetails);\n sliding = newDetails.fragments[0].start;\n } else if (mainSlidingStartFragment) {\n // line up live playlist with main so that fragments in range are loaded\n sliding = mainSlidingStartFragment.start;\n addSliding(newDetails, sliding);\n }\n } else {\n var _this$levelLastLoaded;\n sliding = this.alignPlaylists(newDetails, track.details, (_this$levelLastLoaded = this.levelLastLoaded) == null ? 
void 0 : _this$levelLastLoaded.details);\n if (sliding === 0 && mainSlidingStartFragment) {\n // realign with main when there is no overlap with last refresh\n sliding = mainSlidingStartFragment.start;\n addSliding(newDetails, sliding);\n }\n }\n }\n track.details = newDetails;\n this.levelLastLoaded = track;\n if (trackId !== currentTrackId) {\n return;\n }\n if (!this.startFragRequested && (this.mainDetails || !newDetails.live)) {\n this.setStartPosition(this.mainDetails || newDetails, sliding);\n }\n\n // trigger handler right now\n this.tick();\n\n // If playlist is misaligned because of bad PDT or drift, delete details to resync with main on reload\n if (newDetails.live && !this.fragCurrent && this.media && this.state === State.IDLE) {\n const foundFrag = findFragmentByPTS(null, newDetails.fragments, this.media.currentTime, 0);\n if (!foundFrag) {\n this.warn('Subtitle playlist not aligned with playback');\n track.details = undefined;\n }\n }\n }\n _handleFragmentLoadComplete(fragLoadedData) {\n const {\n frag,\n payload\n } = fragLoadedData;\n const decryptData = frag.decryptdata;\n const hls = this.hls;\n if (this.fragContextChanged(frag)) {\n return;\n }\n // check to see if the payload needs to be decrypted\n if (payload && payload.byteLength > 0 && decryptData != null && decryptData.key && decryptData.iv && decryptData.method === 'AES-128') {\n const startTime = performance.now();\n // decrypt the subtitles\n this.decrypter.decrypt(new Uint8Array(payload), decryptData.key.buffer, decryptData.iv.buffer).catch(err => {\n hls.trigger(Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.FRAG_DECRYPT_ERROR,\n fatal: false,\n error: err,\n reason: err.message,\n frag\n });\n throw err;\n }).then(decryptedData => {\n const endTime = performance.now();\n hls.trigger(Events.FRAG_DECRYPTED, {\n frag,\n payload: decryptedData,\n stats: {\n tstart: startTime,\n tdecrypt: endTime\n }\n });\n }).catch(err => {\n this.warn(`${err.name}: ${err.message}`);\n this.state = State.IDLE;\n });\n }\n }\n doTick() {\n if (!this.media) {\n this.state = State.IDLE;\n return;\n }\n if (this.state === State.IDLE) {\n const {\n currentTrackId,\n levels\n } = this;\n const track = levels == null ? void 0 : levels[currentTrackId];\n if (!track || !levels.length || !track.details) {\n return;\n }\n const {\n config\n } = this;\n const currentTime = this.getLoadPosition();\n const bufferedInfo = BufferHelper.bufferedInfo(this.tracksBuffered[this.currentTrackId] || [], currentTime, config.maxBufferHole);\n const {\n end: targetBufferTime,\n len: bufferLen\n } = bufferedInfo;\n const mainBufferInfo = this.getFwdBufferInfo(this.media, PlaylistLevelType.MAIN);\n const trackDetails = track.details;\n const maxBufLen = this.getMaxBufferLength(mainBufferInfo == null ? void 0 : mainBufferInfo.len) + trackDetails.levelTargetDuration;\n if (bufferLen > maxBufLen) {\n return;\n }\n const fragments = trackDetails.fragments;\n const fragLen = fragments.length;\n const end = trackDetails.edge;\n let foundFrag = null;\n const fragPrevious = this.fragPrevious;\n if (targetBufferTime < end) {\n const tolerance = config.maxFragLookUpTolerance;\n const lookupTolerance = targetBufferTime > end - tolerance ? 
0 : tolerance;\n foundFrag = findFragmentByPTS(fragPrevious, fragments, Math.max(fragments[0].start, targetBufferTime), lookupTolerance);\n if (!foundFrag && fragPrevious && fragPrevious.start < fragments[0].start) {\n foundFrag = fragments[0];\n }\n } else {\n foundFrag = fragments[fragLen - 1];\n }\n if (!foundFrag) {\n return;\n }\n foundFrag = this.mapToInitFragWhenRequired(foundFrag);\n if (foundFrag.sn !== 'initSegment') {\n // Load earlier fragment in same discontinuity to make up for misaligned playlists and cues that extend beyond end of segment\n const curSNIdx = foundFrag.sn - trackDetails.startSN;\n const prevFrag = fragments[curSNIdx - 1];\n if (prevFrag && prevFrag.cc === foundFrag.cc && this.fragmentTracker.getState(prevFrag) === FragmentState.NOT_LOADED) {\n foundFrag = prevFrag;\n }\n }\n if (this.fragmentTracker.getState(foundFrag) === FragmentState.NOT_LOADED) {\n // only load if fragment is not loaded\n this.loadFragment(foundFrag, track, targetBufferTime);\n }\n }\n }\n getMaxBufferLength(mainBufferLength) {\n const maxConfigBuffer = super.getMaxBufferLength();\n if (!mainBufferLength) {\n return maxConfigBuffer;\n }\n return Math.max(maxConfigBuffer, mainBufferLength);\n }\n loadFragment(frag, level, targetBufferTime) {\n this.fragCurrent = frag;\n if (frag.sn === 'initSegment') {\n this._loadInitSegment(frag, level);\n } else {\n this.startFragRequested = true;\n super.loadFragment(frag, level, targetBufferTime);\n }\n }\n get mediaBufferTimeRanges() {\n return new BufferableInstance(this.tracksBuffered[this.currentTrackId] || []);\n }\n}\nclass BufferableInstance {\n constructor(timeranges) {\n this.buffered = void 0;\n const getRange = (name, index, length) => {\n index = index >>> 0;\n if (index > length - 1) {\n throw new DOMException(`Failed to execute '${name}' on 'TimeRanges': The index provided (${index}) is greater than the maximum bound (${length})`);\n }\n return timeranges[index][name];\n };\n this.buffered = {\n get length() {\n return timeranges.length;\n },\n end(index) {\n return getRange('end', index, timeranges.length);\n },\n start(index) {\n return getRange('start', index, timeranges.length);\n }\n };\n }\n}\n\nclass SubtitleTrackController extends BasePlaylistController {\n constructor(hls) {\n super(hls, '[subtitle-track-controller]');\n this.media = null;\n this.tracks = [];\n this.groupIds = null;\n this.tracksInGroup = [];\n this.trackId = -1;\n this.currentTrack = null;\n this.selectDefaultTrack = true;\n this.queuedDefaultTrack = -1;\n this.asyncPollTrackChange = () => this.pollTrackChange(0);\n this.useTextTrackPolling = false;\n this.subtitlePollingInterval = -1;\n this._subtitleDisplay = true;\n this.onTextTracksChanged = () => {\n if (!this.useTextTrackPolling) {\n self.clearInterval(this.subtitlePollingInterval);\n }\n // Media is undefined when switching streams via loadSource()\n if (!this.media || !this.hls.config.renderTextTracksNatively) {\n return;\n }\n let textTrack = null;\n const tracks = filterSubtitleTracks(this.media.textTracks);\n for (let i = 0; i < tracks.length; i++) {\n if (tracks[i].mode === 'hidden') {\n // Do not break in case there is a following track with showing.\n textTrack = tracks[i];\n } else if (tracks[i].mode === 'showing') {\n textTrack = tracks[i];\n break;\n }\n }\n\n // Find internal track index for TextTrack\n const trackId = this.findTrackForTextTrack(textTrack);\n if (this.subtitleTrack !== trackId) {\n this.setSubtitleTrack(trackId);\n }\n };\n this.registerListeners();\n }\n destroy() {\n 
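// Unregister hls.js event listeners and drop track references so the controller can be garbage\n // collected; media textTrack listeners are removed separately in onMediaDetaching.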
this.unregisterListeners();\n this.tracks.length = 0;\n this.tracksInGroup.length = 0;\n this.currentTrack = null;\n this.onTextTracksChanged = this.asyncPollTrackChange = null;\n super.destroy();\n }\n get subtitleDisplay() {\n return this._subtitleDisplay;\n }\n set subtitleDisplay(value) {\n this._subtitleDisplay = value;\n if (this.trackId > -1) {\n this.toggleTrackModes();\n }\n }\n registerListeners() {\n const {\n hls\n } = this;\n hls.on(Events.MEDIA_ATTACHED, this.onMediaAttached, this);\n hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.on(Events.MANIFEST_PARSED, this.onManifestParsed, this);\n hls.on(Events.LEVEL_LOADING, this.onLevelLoading, this);\n hls.on(Events.LEVEL_SWITCHING, this.onLevelSwitching, this);\n hls.on(Events.SUBTITLE_TRACK_LOADED, this.onSubtitleTrackLoaded, this);\n hls.on(Events.ERROR, this.onError, this);\n }\n unregisterListeners() {\n const {\n hls\n } = this;\n hls.off(Events.MEDIA_ATTACHED, this.onMediaAttached, this);\n hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.off(Events.MANIFEST_PARSED, this.onManifestParsed, this);\n hls.off(Events.LEVEL_LOADING, this.onLevelLoading, this);\n hls.off(Events.LEVEL_SWITCHING, this.onLevelSwitching, this);\n hls.off(Events.SUBTITLE_TRACK_LOADED, this.onSubtitleTrackLoaded, this);\n hls.off(Events.ERROR, this.onError, this);\n }\n\n // Listen for subtitle track change, then extract the current track ID.\n onMediaAttached(event, data) {\n this.media = data.media;\n if (!this.media) {\n return;\n }\n if (this.queuedDefaultTrack > -1) {\n this.subtitleTrack = this.queuedDefaultTrack;\n this.queuedDefaultTrack = -1;\n }\n this.useTextTrackPolling = !(this.media.textTracks && 'onchange' in this.media.textTracks);\n if (this.useTextTrackPolling) {\n this.pollTrackChange(500);\n } else {\n this.media.textTracks.addEventListener('change', this.asyncPollTrackChange);\n }\n }\n pollTrackChange(timeout) {\n self.clearInterval(this.subtitlePollingInterval);\n this.subtitlePollingInterval = self.setInterval(this.onTextTracksChanged, timeout);\n }\n onMediaDetaching() {\n if (!this.media) {\n return;\n }\n self.clearInterval(this.subtitlePollingInterval);\n if (!this.useTextTrackPolling) {\n this.media.textTracks.removeEventListener('change', this.asyncPollTrackChange);\n }\n if (this.trackId > -1) {\n this.queuedDefaultTrack = this.trackId;\n }\n const textTracks = filterSubtitleTracks(this.media.textTracks);\n // Clear loaded cues on media detachment from tracks\n textTracks.forEach(track => {\n clearCurrentCues(track);\n });\n // Disable all subtitle tracks before detachment so when reattached only tracks in that content are enabled.\n this.subtitleTrack = -1;\n this.media = null;\n }\n onManifestLoading() {\n this.tracks = [];\n this.groupIds = null;\n this.tracksInGroup = [];\n this.trackId = -1;\n this.currentTrack = null;\n this.selectDefaultTrack = true;\n }\n\n // Fired whenever a new manifest is loaded.\n onManifestParsed(event, data) {\n this.tracks = data.subtitleTracks;\n }\n onSubtitleTrackLoaded(event, data) {\n const {\n id,\n groupId,\n details\n } = data;\n const trackInActiveGroup = this.tracksInGroup[id];\n if (!trackInActiveGroup || trackInActiveGroup.groupId !== groupId) {\n this.warn(`Subtitle track with id:${id} and group:${groupId} not found in active group ${trackInActiveGroup == null ? 
void 0 : trackInActiveGroup.groupId}`);\n return;\n }\n const curDetails = trackInActiveGroup.details;\n trackInActiveGroup.details = data.details;\n this.log(`Subtitle track ${id} \"${trackInActiveGroup.name}\" lang:${trackInActiveGroup.lang} group:${groupId} loaded [${details.startSN}-${details.endSN}]`);\n if (id === this.trackId) {\n this.playlistLoaded(id, data, curDetails);\n }\n }\n onLevelLoading(event, data) {\n this.switchLevel(data.level);\n }\n onLevelSwitching(event, data) {\n this.switchLevel(data.level);\n }\n switchLevel(levelIndex) {\n const levelInfo = this.hls.levels[levelIndex];\n if (!levelInfo) {\n return;\n }\n const subtitleGroups = levelInfo.subtitleGroups || null;\n const currentGroups = this.groupIds;\n let currentTrack = this.currentTrack;\n if (!subtitleGroups || (currentGroups == null ? void 0 : currentGroups.length) !== (subtitleGroups == null ? void 0 : subtitleGroups.length) || subtitleGroups != null && subtitleGroups.some(groupId => (currentGroups == null ? void 0 : currentGroups.indexOf(groupId)) === -1)) {\n this.groupIds = subtitleGroups;\n this.trackId = -1;\n this.currentTrack = null;\n const subtitleTracks = this.tracks.filter(track => !subtitleGroups || subtitleGroups.indexOf(track.groupId) !== -1);\n if (subtitleTracks.length) {\n // Disable selectDefaultTrack if there are no default tracks\n if (this.selectDefaultTrack && !subtitleTracks.some(track => track.default)) {\n this.selectDefaultTrack = false;\n }\n // track.id should match hls.subtitleTracks index\n subtitleTracks.forEach((track, i) => {\n track.id = i;\n });\n } else if (!currentTrack && !this.tracksInGroup.length) {\n // Do not dispatch SUBTITLE_TRACKS_UPDATED when there were and are no tracks\n return;\n }\n this.tracksInGroup = subtitleTracks;\n\n // Find preferred track\n const subtitlePreference = this.hls.config.subtitlePreference;\n if (!currentTrack && subtitlePreference) {\n this.selectDefaultTrack = false;\n const groupIndex = findMatchingOption(subtitlePreference, subtitleTracks);\n if (groupIndex > -1) {\n currentTrack = subtitleTracks[groupIndex];\n } else {\n const allIndex = findMatchingOption(subtitlePreference, this.tracks);\n currentTrack = this.tracks[allIndex];\n }\n }\n\n // Select initial track\n let trackId = this.findTrackId(currentTrack);\n if (trackId === -1 && currentTrack) {\n trackId = this.findTrackId(null);\n }\n\n // Dispatch events and load track if needed\n const subtitleTracksUpdated = {\n subtitleTracks\n };\n this.log(`Updating subtitle tracks, ${subtitleTracks.length} track(s) found in \"${subtitleGroups == null ? 
void 0 : subtitleGroups.join(',')}\" group-id`);\n this.hls.trigger(Events.SUBTITLE_TRACKS_UPDATED, subtitleTracksUpdated);\n if (trackId !== -1 && this.trackId === -1) {\n this.setSubtitleTrack(trackId);\n }\n } else if (this.shouldReloadPlaylist(currentTrack)) {\n // Retry playlist loading if no playlist is or has been loaded yet\n this.setSubtitleTrack(this.trackId);\n }\n }\n findTrackId(currentTrack) {\n const tracks = this.tracksInGroup;\n const selectDefault = this.selectDefaultTrack;\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n if (selectDefault && !track.default || !selectDefault && !currentTrack) {\n continue;\n }\n if (!currentTrack || matchesOption(track, currentTrack)) {\n return i;\n }\n }\n if (currentTrack) {\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n if (mediaAttributesIdentical(currentTrack.attrs, track.attrs, ['LANGUAGE', 'ASSOC-LANGUAGE', 'CHARACTERISTICS'])) {\n return i;\n }\n }\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n if (mediaAttributesIdentical(currentTrack.attrs, track.attrs, ['LANGUAGE'])) {\n return i;\n }\n }\n }\n return -1;\n }\n findTrackForTextTrack(textTrack) {\n if (textTrack) {\n const tracks = this.tracksInGroup;\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n if (subtitleTrackMatchesTextTrack(track, textTrack)) {\n return i;\n }\n }\n }\n return -1;\n }\n onError(event, data) {\n if (data.fatal || !data.context) {\n return;\n }\n if (data.context.type === PlaylistContextType.SUBTITLE_TRACK && data.context.id === this.trackId && (!this.groupIds || this.groupIds.indexOf(data.context.groupId) !== -1)) {\n this.checkRetry(data);\n }\n }\n get allSubtitleTracks() {\n return this.tracks;\n }\n\n /** get alternate subtitle tracks list from playlist **/\n get subtitleTracks() {\n return this.tracksInGroup;\n }\n\n /** get/set index of the selected subtitle track (based on index in subtitle track lists) **/\n get subtitleTrack() {\n return this.trackId;\n }\n set subtitleTrack(newId) {\n this.selectDefaultTrack = false;\n this.setSubtitleTrack(newId);\n }\n setSubtitleOption(subtitleOption) {\n this.hls.config.subtitlePreference = subtitleOption;\n if (subtitleOption) {\n const allSubtitleTracks = this.allSubtitleTracks;\n this.selectDefaultTrack = false;\n if (allSubtitleTracks.length) {\n // First see if current option matches (no switch op)\n const currentTrack = this.currentTrack;\n if (currentTrack && matchesOption(subtitleOption, currentTrack)) {\n return currentTrack;\n }\n // Find option in current group\n const groupIndex = findMatchingOption(subtitleOption, this.tracksInGroup);\n if (groupIndex > -1) {\n const track = this.tracksInGroup[groupIndex];\n this.setSubtitleTrack(groupIndex);\n return track;\n } else if (currentTrack) {\n // If this is not the initial selection return null\n // option should have matched one in active group\n return null;\n } else {\n // Find the option in all tracks for initial selection\n const allIndex = findMatchingOption(subtitleOption, allSubtitleTracks);\n if (allIndex > -1) {\n return allSubtitleTracks[allIndex];\n }\n }\n }\n }\n return null;\n }\n loadPlaylist(hlsUrlParameters) {\n super.loadPlaylist();\n const currentTrack = this.currentTrack;\n if (this.shouldLoadPlaylist(currentTrack) && currentTrack) {\n const id = currentTrack.id;\n const groupId = currentTrack.groupId;\n let url = currentTrack.url;\n if (hlsUrlParameters) {\n try {\n url = hlsUrlParameters.addDirectives(url);\n } catch (error) 
{\n this.warn(`Could not construct new URL with HLS Delivery Directives: ${error}`);\n }\n }\n this.log(`Loading subtitle playlist for id ${id}`);\n this.hls.trigger(Events.SUBTITLE_TRACK_LOADING, {\n url,\n id,\n groupId,\n deliveryDirectives: hlsUrlParameters || null\n });\n }\n }\n\n /**\n * Disables the old subtitleTrack and sets current mode on the next subtitleTrack.\n * This operates on the DOM textTracks.\n * A value of -1 will disable all subtitle tracks.\n */\n toggleTrackModes() {\n const {\n media\n } = this;\n if (!media) {\n return;\n }\n const textTracks = filterSubtitleTracks(media.textTracks);\n const currentTrack = this.currentTrack;\n let nextTrack;\n if (currentTrack) {\n nextTrack = textTracks.filter(textTrack => subtitleTrackMatchesTextTrack(currentTrack, textTrack))[0];\n if (!nextTrack) {\n this.warn(`Unable to find subtitle TextTrack with name \"${currentTrack.name}\" and language \"${currentTrack.lang}\"`);\n }\n }\n [].slice.call(textTracks).forEach(track => {\n if (track.mode !== 'disabled' && track !== nextTrack) {\n track.mode = 'disabled';\n }\n });\n if (nextTrack) {\n const mode = this.subtitleDisplay ? 'showing' : 'hidden';\n if (nextTrack.mode !== mode) {\n nextTrack.mode = mode;\n }\n }\n }\n\n /**\n * This method is responsible for validating the subtitle index and periodically reloading if live.\n * Dispatches the SUBTITLE_TRACK_SWITCH event, which instructs the subtitle-stream-controller to load the selected track.\n */\n setSubtitleTrack(newId) {\n const tracks = this.tracksInGroup;\n\n // setting this.subtitleTrack will trigger internal logic\n // if media has not been attached yet, it would fail, so we keep a reference to the\n // default track id and set subtitleTrack once onMediaAttached is triggered\n if (!this.media) {\n this.queuedDefaultTrack = newId;\n return;\n }\n\n // exit if the track id is already set or invalid\n if (newId < -1 || newId >= tracks.length || !isFiniteNumber(newId)) {\n this.warn(`Invalid subtitle track id: ${newId}`);\n return;\n }\n\n // stop the live reloading timer, if any\n this.clearTimer();\n this.selectDefaultTrack = false;\n const lastTrack = this.currentTrack;\n const track = tracks[newId] || null;\n this.trackId = newId;\n this.currentTrack = track;\n this.toggleTrackModes();\n if (!track) {\n // switch to -1\n this.hls.trigger(Events.SUBTITLE_TRACK_SWITCH, {\n id: newId\n });\n return;\n }\n const trackLoaded = !!track.details && !track.details.live;\n if (newId === this.trackId && track === lastTrack && trackLoaded) {\n return;\n }\n this.log(`Switching to subtitle-track ${newId}` + (track ? ` \"${track.name}\" lang:${track.lang} group:${track.groupId}` : ''));\n const {\n id,\n groupId = '',\n name,\n type,\n url\n } = track;\n this.hls.trigger(Events.SUBTITLE_TRACK_SWITCH, {\n id,\n groupId,\n name,\n type,\n url\n });\n const hlsUrlParameters = this.switchParams(track.url, lastTrack == null ? 
void 0 : lastTrack.details, track.details);\n this.loadPlaylist(hlsUrlParameters);\n }\n}\n\nclass BufferOperationQueue {\n constructor(sourceBufferReference) {\n this.buffers = void 0;\n this.queues = {\n video: [],\n audio: [],\n audiovideo: []\n };\n this.buffers = sourceBufferReference;\n }\n append(operation, type, pending) {\n const queue = this.queues[type];\n queue.push(operation);\n if (queue.length === 1 && !pending) {\n this.executeNext(type);\n }\n }\n insertAbort(operation, type) {\n const queue = this.queues[type];\n queue.unshift(operation);\n this.executeNext(type);\n }\n appendBlocker(type) {\n let execute;\n const promise = new Promise(resolve => {\n execute = resolve;\n });\n const operation = {\n execute,\n onStart: () => {},\n onComplete: () => {},\n onError: () => {}\n };\n this.append(operation, type);\n return promise;\n }\n executeNext(type) {\n const queue = this.queues[type];\n if (queue.length) {\n const operation = queue[0];\n try {\n // Operations are expected to result in an 'updateend' event being fired. If not, the queue will lock. Operations\n // which do not end with this event must call _onSBUpdateEnd manually\n operation.execute();\n } catch (error) {\n logger.warn(`[buffer-operation-queue]: Exception executing \"${type}\" SourceBuffer operation: ${error}`);\n operation.onError(error);\n\n // Only shift the current operation off, otherwise the updateend handler will do this for us\n const sb = this.buffers[type];\n if (!(sb != null && sb.updating)) {\n this.shiftAndExecuteNext(type);\n }\n }\n }\n }\n shiftAndExecuteNext(type) {\n this.queues[type].shift();\n this.executeNext(type);\n }\n current(type) {\n return this.queues[type][0];\n }\n}\n\nconst VIDEO_CODEC_PROFILE_REPLACE = /(avc[1234]|hvc1|hev1|dvh[1e]|vp09|av01)(?:\\.[^.,]+)+/;\nclass BufferController {\n constructor(hls) {\n // The level details used to determine duration, target-duration and live\n this.details = null;\n // cache the self generated object url to detect hijack of video tag\n this._objectUrl = null;\n // A queue of buffer operations which require the SourceBuffer to not be updating upon execution\n this.operationQueue = void 0;\n // References to event listeners for each SourceBuffer, so that they can be referenced for event removal\n this.listeners = void 0;\n this.hls = void 0;\n // The number of BUFFER_CODEC events received before any sourceBuffers are created\n this.bufferCodecEventsExpected = 0;\n // The total number of BUFFER_CODEC events received\n this._bufferCodecEventsTotal = 0;\n // A reference to the attached media element\n this.media = null;\n // A reference to the active media source\n this.mediaSource = null;\n // Last MP3 audio chunk appended\n this.lastMpegAudioChunk = null;\n this.appendSource = void 0;\n // counters\n this.appendErrors = {\n audio: 0,\n video: 0,\n audiovideo: 0\n };\n this.tracks = {};\n this.pendingTracks = {};\n this.sourceBuffer = void 0;\n this.log = void 0;\n this.warn = void 0;\n this.error = void 0;\n this._onEndStreaming = event => {\n if (!this.hls) {\n return;\n }\n this.hls.pauseBuffering();\n };\n this._onStartStreaming = event => {\n if (!this.hls) {\n return;\n }\n this.hls.resumeBuffering();\n };\n // Keep as arrow functions so that we can directly reference these functions directly as event listeners\n this._onMediaSourceOpen = () => {\n const {\n media,\n mediaSource\n } = this;\n this.log('Media source opened');\n if (media) {\n media.removeEventListener('emptied', this._onMediaEmptied);\n 
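// 'sourceopen' has fired: the 'emptied' src-hijack watcher is no longer needed; sync the media element\n // duration and announce MEDIA_ATTACHED so the stream controllers can start loading fragments.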
this.updateMediaElementDuration();\n this.hls.trigger(Events.MEDIA_ATTACHED, {\n media,\n mediaSource: mediaSource\n });\n }\n if (mediaSource) {\n // once received, don't listen anymore to sourceopen event\n mediaSource.removeEventListener('sourceopen', this._onMediaSourceOpen);\n }\n this.checkPendingTracks();\n };\n this._onMediaSourceClose = () => {\n this.log('Media source closed');\n };\n this._onMediaSourceEnded = () => {\n this.log('Media source ended');\n };\n this._onMediaEmptied = () => {\n const {\n mediaSrc,\n _objectUrl\n } = this;\n if (mediaSrc !== _objectUrl) {\n logger.error(`Media element src was set while attaching MediaSource (${_objectUrl} > ${mediaSrc})`);\n }\n };\n this.hls = hls;\n const logPrefix = '[buffer-controller]';\n this.appendSource = isManagedMediaSource(getMediaSource(hls.config.preferManagedMediaSource));\n this.log = logger.log.bind(logger, logPrefix);\n this.warn = logger.warn.bind(logger, logPrefix);\n this.error = logger.error.bind(logger, logPrefix);\n this._initSourceBuffer();\n this.registerListeners();\n }\n hasSourceTypes() {\n return this.getSourceBufferTypes().length > 0 || Object.keys(this.pendingTracks).length > 0;\n }\n destroy() {\n this.unregisterListeners();\n this.details = null;\n this.lastMpegAudioChunk = null;\n // @ts-ignore\n this.hls = null;\n }\n registerListeners() {\n const {\n hls\n } = this;\n hls.on(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);\n hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.on(Events.MANIFEST_PARSED, this.onManifestParsed, this);\n hls.on(Events.BUFFER_RESET, this.onBufferReset, this);\n hls.on(Events.BUFFER_APPENDING, this.onBufferAppending, this);\n hls.on(Events.BUFFER_CODECS, this.onBufferCodecs, this);\n hls.on(Events.BUFFER_EOS, this.onBufferEos, this);\n hls.on(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);\n hls.on(Events.LEVEL_UPDATED, this.onLevelUpdated, this);\n hls.on(Events.FRAG_PARSED, this.onFragParsed, this);\n hls.on(Events.FRAG_CHANGED, this.onFragChanged, this);\n }\n unregisterListeners() {\n const {\n hls\n } = this;\n hls.off(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);\n hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.off(Events.MANIFEST_PARSED, this.onManifestParsed, this);\n hls.off(Events.BUFFER_RESET, this.onBufferReset, this);\n hls.off(Events.BUFFER_APPENDING, this.onBufferAppending, this);\n hls.off(Events.BUFFER_CODECS, this.onBufferCodecs, this);\n hls.off(Events.BUFFER_EOS, this.onBufferEos, this);\n hls.off(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);\n hls.off(Events.LEVEL_UPDATED, this.onLevelUpdated, this);\n hls.off(Events.FRAG_PARSED, this.onFragParsed, this);\n hls.off(Events.FRAG_CHANGED, this.onFragChanged, this);\n }\n _initSourceBuffer() {\n this.sourceBuffer = {};\n this.operationQueue = new BufferOperationQueue(this.sourceBuffer);\n this.listeners = {\n audio: [],\n video: [],\n audiovideo: []\n };\n this.appendErrors = {\n audio: 0,\n video: 0,\n audiovideo: 0\n };\n this.lastMpegAudioChunk = null;\n }\n onManifestLoading() {\n this.bufferCodecEventsExpected = this._bufferCodecEventsTotal = 0;\n this.details = null;\n }\n onManifestParsed(event, data) {\n // in case of alt audio 2 BUFFER_CODECS events will be triggered, one per stream controller\n // sourcebuffers will be created all at once when the expected nb of tracks will be reached\n // in case alt 
audio is not used, only one BUFFER_CODEC event will be fired from the main stream controller
// it will contain the expected number of source buffers, so there is no need to compute it
let codecEvents = 2;
if (data.audio && !data.video || !data.altAudio) {
  codecEvents = 1;
}
this.bufferCodecEventsExpected = this._bufferCodecEventsTotal = codecEvents;
this.log(`${this.bufferCodecEventsExpected} bufferCodec event(s) expected`);
}
onMediaAttaching(event, data) {
  const media = this.media = data.media;
  const MediaSource = getMediaSource(this.appendSource);
  if (media && MediaSource) {
    var _ms$constructor;
    const ms = this.mediaSource = new MediaSource();
    this.log(`created media source: ${(_ms$constructor = ms.constructor) == null ? void 0 : _ms$constructor.name}`);
    // MediaSource listeners are arrow functions with a lexical scope, and do not need to be bound
    ms.addEventListener('sourceopen', this._onMediaSourceOpen);
    ms.addEventListener('sourceended', this._onMediaSourceEnded);
    ms.addEventListener('sourceclose', this._onMediaSourceClose);
    if (this.appendSource) {
      ms.addEventListener('startstreaming', this._onStartStreaming);
      ms.addEventListener('endstreaming', this._onEndStreaming);
    }

    // cache the locally generated object url
    const objectUrl = this._objectUrl = self.URL.createObjectURL(ms);
    // link the video element and the MediaSource
    if (this.appendSource) {
      try {
        media.removeAttribute('src');
        // ManagedMediaSource will not open without disableRemotePlayback set to false or source alternatives
        const MMS = self.ManagedMediaSource;
        media.disableRemotePlayback = media.disableRemotePlayback || MMS && ms instanceof MMS;
        removeSourceChildren(media);
        addSource(media, objectUrl);
        media.load();
      } catch (error) {
        media.src = objectUrl;
      }
    } else {
      media.src = objectUrl;
    }
    media.addEventListener('emptied', this._onMediaEmptied);
  }
}
onMediaDetaching() {
  const {
    media,
    mediaSource,
    _objectUrl
  } = this;
  if (mediaSource) {
    this.log('media source detaching');
    if (mediaSource.readyState === 'open') {
      try {
        // endOfStream could throw an exception if any SourceBuffer is in the updating state;
        // we don't really care about checking SourceBuffer state here,
        // as we are detaching the MediaSource anyway,
        // so let's just keep this exception from propagating
        mediaSource.endOfStream();
      } catch (err) {
        this.warn(`onMediaDetaching: ${err.message} while calling endOfStream`);
      }
    }
    // Clean up the SourceBuffers by invoking onBufferReset
    this.onBufferReset();
    mediaSource.removeEventListener('sourceopen', this._onMediaSourceOpen);
    mediaSource.removeEventListener('sourceended', this._onMediaSourceEnded);
    mediaSource.removeEventListener('sourceclose', this._onMediaSourceClose);
    if (this.appendSource) {
      mediaSource.removeEventListener('startstreaming', this._onStartStreaming);
      mediaSource.removeEventListener('endstreaming', this._onEndStreaming);
    }

    // Properly detach the MediaSource from the HTMLMediaElement as
    // suggested in https://github.com/w3c/media-source/issues/53.
    if (media) {
      media.removeEventListener('emptied', this._onMediaEmptied);
      if (_objectUrl) {
        self.URL.revokeObjectURL(_objectUrl);
      }

      // clean up video tag src only if it's our own url.
some external libraries might\n // hijack the video tag and change its 'src' without destroying the Hls instance first\n if (this.mediaSrc === _objectUrl) {\n media.removeAttribute('src');\n if (this.appendSource) {\n removeSourceChildren(media);\n }\n media.load();\n } else {\n this.warn('media|source.src was changed by a third party - skip cleanup');\n }\n }\n this.mediaSource = null;\n this.media = null;\n this._objectUrl = null;\n this.bufferCodecEventsExpected = this._bufferCodecEventsTotal;\n this.pendingTracks = {};\n this.tracks = {};\n }\n this.hls.trigger(Events.MEDIA_DETACHED, undefined);\n }\n onBufferReset() {\n this.getSourceBufferTypes().forEach(type => {\n this.resetBuffer(type);\n });\n this._initSourceBuffer();\n }\n resetBuffer(type) {\n const sb = this.sourceBuffer[type];\n try {\n if (sb) {\n var _this$mediaSource;\n this.removeBufferListeners(type);\n // Synchronously remove the SB from the map before the next call in order to prevent an async function from\n // accessing it\n this.sourceBuffer[type] = undefined;\n if ((_this$mediaSource = this.mediaSource) != null && _this$mediaSource.sourceBuffers.length) {\n this.mediaSource.removeSourceBuffer(sb);\n }\n }\n } catch (err) {\n this.warn(`onBufferReset ${type}`, err);\n }\n }\n onBufferCodecs(event, data) {\n const sourceBufferCount = this.getSourceBufferTypes().length;\n const trackNames = Object.keys(data);\n trackNames.forEach(trackName => {\n if (sourceBufferCount) {\n // check if SourceBuffer codec needs to change\n const track = this.tracks[trackName];\n if (track && typeof track.buffer.changeType === 'function') {\n var _trackCodec;\n const {\n id,\n codec,\n levelCodec,\n container,\n metadata\n } = data[trackName];\n const currentCodecFull = pickMostCompleteCodecName(track.codec, track.levelCodec);\n const currentCodec = currentCodecFull == null ? void 0 : currentCodecFull.replace(VIDEO_CODEC_PROFILE_REPLACE, '$1');\n let trackCodec = pickMostCompleteCodecName(codec, levelCodec);\n const nextCodec = (_trackCodec = trackCodec) == null ? 
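/*
 * Worked example (illustrative): with a video track registered as
 * 'avc1.64001f' and a BUFFER_CODECS event carrying 'hvc1.1.6.L93.B0',
 * VIDEO_CODEC_PROFILE_REPLACE strips both to 'avc1' and 'hvc1'; they
 * differ, so (assuming container 'video/mp4') an appendChangeType('video',
 * 'video/mp4;codecs=hvc1.1.6.L93.B0') operation is queued before any
 * further appends.
 */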
void 0 : _trackCodec.replace(VIDEO_CODEC_PROFILE_REPLACE, '$1');\n if (trackCodec && currentCodec !== nextCodec) {\n if (trackName.slice(0, 5) === 'audio') {\n trackCodec = getCodecCompatibleName(trackCodec, this.appendSource);\n }\n const mimeType = `${container};codecs=${trackCodec}`;\n this.appendChangeType(trackName, mimeType);\n this.log(`switching codec ${currentCodecFull} to ${trackCodec}`);\n this.tracks[trackName] = {\n buffer: track.buffer,\n codec,\n container,\n levelCodec,\n metadata,\n id\n };\n }\n }\n } else {\n // if source buffer(s) not created yet, appended buffer tracks in this.pendingTracks\n this.pendingTracks[trackName] = data[trackName];\n }\n });\n\n // if sourcebuffers already created, do nothing ...\n if (sourceBufferCount) {\n return;\n }\n const bufferCodecEventsExpected = Math.max(this.bufferCodecEventsExpected - 1, 0);\n if (this.bufferCodecEventsExpected !== bufferCodecEventsExpected) {\n this.log(`${bufferCodecEventsExpected} bufferCodec event(s) expected ${trackNames.join(',')}`);\n this.bufferCodecEventsExpected = bufferCodecEventsExpected;\n }\n if (this.mediaSource && this.mediaSource.readyState === 'open') {\n this.checkPendingTracks();\n }\n }\n appendChangeType(type, mimeType) {\n const {\n operationQueue\n } = this;\n const operation = {\n execute: () => {\n const sb = this.sourceBuffer[type];\n if (sb) {\n this.log(`changing ${type} sourceBuffer type to ${mimeType}`);\n sb.changeType(mimeType);\n }\n operationQueue.shiftAndExecuteNext(type);\n },\n onStart: () => {},\n onComplete: () => {},\n onError: error => {\n this.warn(`Failed to change ${type} SourceBuffer type`, error);\n }\n };\n operationQueue.append(operation, type, !!this.pendingTracks[type]);\n }\n onBufferAppending(event, eventData) {\n const {\n hls,\n operationQueue,\n tracks\n } = this;\n const {\n data,\n type,\n frag,\n part,\n chunkMeta\n } = eventData;\n const chunkStats = chunkMeta.buffering[type];\n const bufferAppendingStart = self.performance.now();\n chunkStats.start = bufferAppendingStart;\n const fragBuffering = frag.stats.buffering;\n const partBuffering = part ? part.stats.buffering : null;\n if (fragBuffering.start === 0) {\n fragBuffering.start = bufferAppendingStart;\n }\n if (partBuffering && partBuffering.start === 0) {\n partBuffering.start = bufferAppendingStart;\n }\n\n // TODO: Only update timestampOffset when audio/mpeg fragment or part is not contiguous with previously appended\n // Adjusting `SourceBuffer.timestampOffset` (desired point in the timeline where the next frames should be appended)\n // in Chrome browser when we detect MPEG audio container and time delta between level PTS and `SourceBuffer.timestampOffset`\n // is greater than 100ms (this is enough to handle seek for VOD or level change for LIVE videos).\n // More info here: https://github.com/video-dev/hls.js/issues/332#issuecomment-257986486\n const audioTrack = tracks.audio;\n let checkTimestampOffset = false;\n if (type === 'audio' && (audioTrack == null ? 
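/*
 * Worked example (illustrative): for a packed audio/mpeg fragment starting
 * at t = 60.00 while sb.timestampOffset is still 0, delta = 60.00 and
 * |delta| >= 0.1, so timestampOffset is moved to 60.00 before the append.
 * Only the first chunk of a new fragment (chunkMeta.id === 1 or a new sn)
 * re-runs this check; later chunks leave the offset alone.
 */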
void 0 : audioTrack.container) === 'audio/mpeg') {\n checkTimestampOffset = !this.lastMpegAudioChunk || chunkMeta.id === 1 || this.lastMpegAudioChunk.sn !== chunkMeta.sn;\n this.lastMpegAudioChunk = chunkMeta;\n }\n const fragStart = frag.start;\n const operation = {\n execute: () => {\n chunkStats.executeStart = self.performance.now();\n if (checkTimestampOffset) {\n const sb = this.sourceBuffer[type];\n if (sb) {\n const delta = fragStart - sb.timestampOffset;\n if (Math.abs(delta) >= 0.1) {\n this.log(`Updating audio SourceBuffer timestampOffset to ${fragStart} (delta: ${delta}) sn: ${frag.sn})`);\n sb.timestampOffset = fragStart;\n }\n }\n }\n this.appendExecutor(data, type);\n },\n onStart: () => {\n // logger.debug(`[buffer-controller]: ${type} SourceBuffer updatestart`);\n },\n onComplete: () => {\n // logger.debug(`[buffer-controller]: ${type} SourceBuffer updateend`);\n const end = self.performance.now();\n chunkStats.executeEnd = chunkStats.end = end;\n if (fragBuffering.first === 0) {\n fragBuffering.first = end;\n }\n if (partBuffering && partBuffering.first === 0) {\n partBuffering.first = end;\n }\n const {\n sourceBuffer\n } = this;\n const timeRanges = {};\n for (const type in sourceBuffer) {\n timeRanges[type] = BufferHelper.getBuffered(sourceBuffer[type]);\n }\n this.appendErrors[type] = 0;\n if (type === 'audio' || type === 'video') {\n this.appendErrors.audiovideo = 0;\n } else {\n this.appendErrors.audio = 0;\n this.appendErrors.video = 0;\n }\n this.hls.trigger(Events.BUFFER_APPENDED, {\n type,\n frag,\n part,\n chunkMeta,\n parent: frag.type,\n timeRanges\n });\n },\n onError: error => {\n // in case any error occured while appending, put back segment in segments table\n const event = {\n type: ErrorTypes.MEDIA_ERROR,\n parent: frag.type,\n details: ErrorDetails.BUFFER_APPEND_ERROR,\n sourceBufferName: type,\n frag,\n part,\n chunkMeta,\n error,\n err: error,\n fatal: false\n };\n if (error.code === DOMException.QUOTA_EXCEEDED_ERR) {\n // QuotaExceededError: http://www.w3.org/TR/html5/infrastructure.html#quotaexceedederror\n // let's stop appending any segments, and report BUFFER_FULL_ERROR error\n event.details = ErrorDetails.BUFFER_FULL_ERROR;\n } else {\n const appendErrorCount = ++this.appendErrors[type];\n event.details = ErrorDetails.BUFFER_APPEND_ERROR;\n /* with UHD content, we could get loop of quota exceeded error until\n browser is able to evict some data from sourcebuffer. 
Retrying can help recover.\n */\n this.warn(`Failed ${appendErrorCount}/${hls.config.appendErrorMaxRetry} times to append segment in \"${type}\" sourceBuffer`);\n if (appendErrorCount >= hls.config.appendErrorMaxRetry) {\n event.fatal = true;\n }\n }\n hls.trigger(Events.ERROR, event);\n }\n };\n operationQueue.append(operation, type, !!this.pendingTracks[type]);\n }\n onBufferFlushing(event, data) {\n const {\n operationQueue\n } = this;\n const flushOperation = type => ({\n execute: this.removeExecutor.bind(this, type, data.startOffset, data.endOffset),\n onStart: () => {\n // logger.debug(`[buffer-controller]: Started flushing ${data.startOffset} -> ${data.endOffset} for ${type} Source Buffer`);\n },\n onComplete: () => {\n // logger.debug(`[buffer-controller]: Finished flushing ${data.startOffset} -> ${data.endOffset} for ${type} Source Buffer`);\n this.hls.trigger(Events.BUFFER_FLUSHED, {\n type\n });\n },\n onError: error => {\n this.warn(`Failed to remove from ${type} SourceBuffer`, error);\n }\n });\n if (data.type) {\n operationQueue.append(flushOperation(data.type), data.type);\n } else {\n this.getSourceBufferTypes().forEach(type => {\n operationQueue.append(flushOperation(type), type);\n });\n }\n }\n onFragParsed(event, data) {\n const {\n frag,\n part\n } = data;\n const buffersAppendedTo = [];\n const elementaryStreams = part ? part.elementaryStreams : frag.elementaryStreams;\n if (elementaryStreams[ElementaryStreamTypes.AUDIOVIDEO]) {\n buffersAppendedTo.push('audiovideo');\n } else {\n if (elementaryStreams[ElementaryStreamTypes.AUDIO]) {\n buffersAppendedTo.push('audio');\n }\n if (elementaryStreams[ElementaryStreamTypes.VIDEO]) {\n buffersAppendedTo.push('video');\n }\n }\n const onUnblocked = () => {\n const now = self.performance.now();\n frag.stats.buffering.end = now;\n if (part) {\n part.stats.buffering.end = now;\n }\n const stats = part ? part.stats : frag.stats;\n this.hls.trigger(Events.FRAG_BUFFERED, {\n frag,\n part,\n stats,\n id: frag.type\n });\n };\n if (buffersAppendedTo.length === 0) {\n this.warn(`Fragments must have at least one ElementaryStreamType set. type: ${frag.type} level: ${frag.level} sn: ${frag.sn}`);\n }\n this.blockBuffers(onUnblocked, buffersAppendedTo);\n }\n onFragChanged(event, data) {\n this.trimBuffers();\n }\n\n // on BUFFER_EOS mark matching sourcebuffer(s) as ended and trigger checkEos()\n // an undefined data.type will mark all buffers as EOS.\n onBufferEos(event, data) {\n const ended = this.getSourceBufferTypes().reduce((acc, type) => {\n const sb = this.sourceBuffer[type];\n if (sb && (!data.type || data.type === type)) {\n sb.ending = true;\n if (!sb.ended) {\n sb.ended = true;\n this.log(`${type} sourceBuffer now EOS`);\n }\n }\n return acc && !!(!sb || sb.ended);\n }, true);\n if (ended) {\n this.log(`Queueing mediaSource.endOfStream()`);\n this.blockBuffers(() => {\n this.getSourceBufferTypes().forEach(type => {\n const sb = this.sourceBuffer[type];\n if (sb) {\n sb.ending = false;\n }\n });\n const {\n mediaSource\n } = this;\n if (!mediaSource || mediaSource.readyState !== 'open') {\n if (mediaSource) {\n this.log(`Could not call mediaSource.endOfStream(). 
mediaSource.readyState: ${mediaSource.readyState}`);\n }\n return;\n }\n this.log(`Calling mediaSource.endOfStream()`);\n // Allow this to throw and be caught by the enqueueing function\n mediaSource.endOfStream();\n });\n }\n }\n onLevelUpdated(event, {\n details\n }) {\n if (!details.fragments.length) {\n return;\n }\n this.details = details;\n if (this.getSourceBufferTypes().length) {\n this.blockBuffers(this.updateMediaElementDuration.bind(this));\n } else {\n this.updateMediaElementDuration();\n }\n }\n trimBuffers() {\n const {\n hls,\n details,\n media\n } = this;\n if (!media || details === null) {\n return;\n }\n const sourceBufferTypes = this.getSourceBufferTypes();\n if (!sourceBufferTypes.length) {\n return;\n }\n const config = hls.config;\n const currentTime = media.currentTime;\n const targetDuration = details.levelTargetDuration;\n\n // Support for deprecated liveBackBufferLength\n const backBufferLength = details.live && config.liveBackBufferLength !== null ? config.liveBackBufferLength : config.backBufferLength;\n if (isFiniteNumber(backBufferLength) && backBufferLength > 0) {\n const maxBackBufferLength = Math.max(backBufferLength, targetDuration);\n const targetBackBufferPosition = Math.floor(currentTime / targetDuration) * targetDuration - maxBackBufferLength;\n this.flushBackBuffer(currentTime, targetDuration, targetBackBufferPosition);\n }\n if (isFiniteNumber(config.frontBufferFlushThreshold) && config.frontBufferFlushThreshold > 0) {\n const frontBufferLength = Math.max(config.maxBufferLength, config.frontBufferFlushThreshold);\n const maxFrontBufferLength = Math.max(frontBufferLength, targetDuration);\n const targetFrontBufferPosition = Math.floor(currentTime / targetDuration) * targetDuration + maxFrontBufferLength;\n this.flushFrontBuffer(currentTime, targetDuration, targetFrontBufferPosition);\n }\n }\n flushBackBuffer(currentTime, targetDuration, targetBackBufferPosition) {\n const {\n details,\n sourceBuffer\n } = this;\n const sourceBufferTypes = this.getSourceBufferTypes();\n sourceBufferTypes.forEach(type => {\n const sb = sourceBuffer[type];\n if (sb) {\n const buffered = BufferHelper.getBuffered(sb);\n // when target buffer start exceeds actual buffer start\n if (buffered.length > 0 && targetBackBufferPosition > buffered.start(0)) {\n this.hls.trigger(Events.BACK_BUFFER_REACHED, {\n bufferEnd: targetBackBufferPosition\n });\n\n // Support for deprecated event:\n if (details != null && details.live) {\n this.hls.trigger(Events.LIVE_BACK_BUFFER_REACHED, {\n bufferEnd: targetBackBufferPosition\n });\n } else if (sb.ended && buffered.end(buffered.length - 1) - currentTime < targetDuration * 2) {\n this.log(`Cannot flush ${type} back buffer while SourceBuffer is in ended state`);\n return;\n }\n this.hls.trigger(Events.BUFFER_FLUSHING, {\n startOffset: 0,\n endOffset: targetBackBufferPosition,\n type\n });\n }\n }\n });\n }\n flushFrontBuffer(currentTime, targetDuration, targetFrontBufferPosition) {\n const {\n sourceBuffer\n } = this;\n const sourceBufferTypes = this.getSourceBufferTypes();\n sourceBufferTypes.forEach(type => {\n const sb = sourceBuffer[type];\n if (sb) {\n const buffered = BufferHelper.getBuffered(sb);\n const numBufferedRanges = buffered.length;\n // The buffer is either empty or contiguous\n if (numBufferedRanges < 2) {\n return;\n }\n const bufferStart = buffered.start(numBufferedRanges - 1);\n const bufferEnd = buffered.end(numBufferedRanges - 1);\n // No flush if we can tolerate the current buffer length or the current buffer range we 
would flush is contiguous with current position\n if (targetFrontBufferPosition > bufferStart || currentTime >= bufferStart && currentTime <= bufferEnd) {\n return;\n } else if (sb.ended && currentTime - bufferEnd < 2 * targetDuration) {\n this.log(`Cannot flush ${type} front buffer while SourceBuffer is in ended state`);\n return;\n }\n this.hls.trigger(Events.BUFFER_FLUSHING, {\n startOffset: bufferStart,\n endOffset: Infinity,\n type\n });\n }\n });\n }\n\n /**\n * Update Media Source duration to current level duration or override to Infinity if configuration parameter\n * 'liveDurationInfinity` is set to `true`\n * More details: https://github.com/video-dev/hls.js/issues/355\n */\n updateMediaElementDuration() {\n if (!this.details || !this.media || !this.mediaSource || this.mediaSource.readyState !== 'open') {\n return;\n }\n const {\n details,\n hls,\n media,\n mediaSource\n } = this;\n const levelDuration = details.fragments[0].start + details.totalduration;\n const mediaDuration = media.duration;\n const msDuration = isFiniteNumber(mediaSource.duration) ? mediaSource.duration : 0;\n if (details.live && hls.config.liveDurationInfinity) {\n // Override duration to Infinity\n mediaSource.duration = Infinity;\n this.updateSeekableRange(details);\n } else if (levelDuration > msDuration && levelDuration > mediaDuration || !isFiniteNumber(mediaDuration)) {\n // levelDuration was the last value we set.\n // not using mediaSource.duration as the browser may tweak this value\n // only update Media Source duration if its value increase, this is to avoid\n // flushing already buffered portion when switching between quality level\n this.log(`Updating Media Source duration to ${levelDuration.toFixed(3)}`);\n mediaSource.duration = levelDuration;\n }\n }\n updateSeekableRange(levelDetails) {\n const mediaSource = this.mediaSource;\n const fragments = levelDetails.fragments;\n const len = fragments.length;\n if (len && levelDetails.live && mediaSource != null && mediaSource.setLiveSeekableRange) {\n const start = Math.max(0, fragments[0].start);\n const end = Math.max(start, start + levelDetails.totalduration);\n this.log(`Media Source duration is set to ${mediaSource.duration}. Setting seekable range to ${start}-${end}.`);\n mediaSource.setLiveSeekableRange(start, end);\n }\n }\n checkPendingTracks() {\n const {\n bufferCodecEventsExpected,\n operationQueue,\n pendingTracks\n } = this;\n\n // Check if we've received all of the expected bufferCodec events. When none remain, create all the sourceBuffers at once.\n // This is important because the MSE spec allows implementations to throw QuotaExceededErrors if creating new sourceBuffers after\n // data has been appended to existing ones.\n // 2 tracks is the max (one for audio, one for video). 
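/*
 * Illustrative walk-through: a manifest with alt audio yields two
 * BUFFER_CODECS events; the first stores pendingTracks.video, the second
 * pendingTracks.audio, and only then (pendingTracksCount === 2, no codec
 * events left) are both SourceBuffers created in one pass. A muxed stream
 * short-circuits via 'audiovideo' in pendingTracks.
 */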
If we've reach this max go ahead and create the buffers.\n const pendingTracksCount = Object.keys(pendingTracks).length;\n if (pendingTracksCount && (!bufferCodecEventsExpected || pendingTracksCount === 2 || 'audiovideo' in pendingTracks)) {\n // ok, let's create them now !\n this.createSourceBuffers(pendingTracks);\n this.pendingTracks = {};\n // append any pending segments now !\n const buffers = this.getSourceBufferTypes();\n if (buffers.length) {\n this.hls.trigger(Events.BUFFER_CREATED, {\n tracks: this.tracks\n });\n buffers.forEach(type => {\n operationQueue.executeNext(type);\n });\n } else {\n const error = new Error('could not create source buffer for media codec(s)');\n this.hls.trigger(Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.BUFFER_INCOMPATIBLE_CODECS_ERROR,\n fatal: true,\n error,\n reason: error.message\n });\n }\n }\n }\n createSourceBuffers(tracks) {\n const {\n sourceBuffer,\n mediaSource\n } = this;\n if (!mediaSource) {\n throw Error('createSourceBuffers called when mediaSource was null');\n }\n for (const trackName in tracks) {\n if (!sourceBuffer[trackName]) {\n var _track$levelCodec;\n const track = tracks[trackName];\n if (!track) {\n throw Error(`source buffer exists for track ${trackName}, however track does not`);\n }\n // use levelCodec as first priority unless it contains multiple comma-separated codec values\n let codec = ((_track$levelCodec = track.levelCodec) == null ? void 0 : _track$levelCodec.indexOf(',')) === -1 ? track.levelCodec : track.codec;\n if (codec) {\n if (trackName.slice(0, 5) === 'audio') {\n codec = getCodecCompatibleName(codec, this.appendSource);\n }\n }\n const mimeType = `${track.container};codecs=${codec}`;\n this.log(`creating sourceBuffer(${mimeType})`);\n try {\n const sb = sourceBuffer[trackName] = mediaSource.addSourceBuffer(mimeType);\n const sbName = trackName;\n this.addBufferListener(sbName, 'updatestart', this._onSBUpdateStart);\n this.addBufferListener(sbName, 'updateend', this._onSBUpdateEnd);\n this.addBufferListener(sbName, 'error', this._onSBUpdateError);\n // ManagedSourceBuffer bufferedchange event\n if (this.appendSource) {\n this.addBufferListener(sbName, 'bufferedchange', (type, event) => {\n // If media was ejected check for a change. Added ranges are redundant with changes on 'updateend' event.\n const removedRanges = event.removedRanges;\n if (removedRanges != null && removedRanges.length) {\n this.hls.trigger(Events.BUFFER_FLUSHED, {\n type: trackName\n });\n }\n });\n }\n this.tracks[trackName] = {\n buffer: sb,\n codec: codec,\n container: track.container,\n levelCodec: track.levelCodec,\n metadata: track.metadata,\n id: track.id\n };\n } catch (err) {\n this.error(`error while trying to add sourceBuffer: ${err.message}`);\n this.hls.trigger(Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.BUFFER_ADD_CODEC_ERROR,\n fatal: false,\n error: err,\n sourceBufferName: trackName,\n mimeType: mimeType\n });\n }\n }\n }\n }\n get mediaSrc() {\n var _this$media;\n const media = ((_this$media = this.media) == null ? void 0 : _this$media.firstChild) || this.media;\n return media == null ? void 0 : media.src;\n }\n _onSBUpdateStart(type) {\n const {\n operationQueue\n } = this;\n const operation = operationQueue.current(type);\n operation.onStart();\n }\n _onSBUpdateEnd(type) {\n var _this$mediaSource2;\n if (((_this$mediaSource2 = this.mediaSource) == null ? 
void 0 : _this$mediaSource2.readyState) === 'closed') {\n this.resetBuffer(type);\n return;\n }\n const {\n operationQueue\n } = this;\n const operation = operationQueue.current(type);\n operation.onComplete();\n operationQueue.shiftAndExecuteNext(type);\n }\n _onSBUpdateError(type, event) {\n var _this$mediaSource3;\n const error = new Error(`${type} SourceBuffer error. MediaSource readyState: ${(_this$mediaSource3 = this.mediaSource) == null ? void 0 : _this$mediaSource3.readyState}`);\n this.error(`${error}`, event);\n // according to http://www.w3.org/TR/media-source/#sourcebuffer-append-error\n // SourceBuffer errors are not necessarily fatal; if so, the HTMLMediaElement will fire an error event\n this.hls.trigger(Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.BUFFER_APPENDING_ERROR,\n sourceBufferName: type,\n error,\n fatal: false\n });\n // updateend is always fired after error, so we'll allow that to shift the current operation off of the queue\n const operation = this.operationQueue.current(type);\n if (operation) {\n operation.onError(error);\n }\n }\n\n // This method must result in an updateend event; if remove is not called, _onSBUpdateEnd must be called manually\n removeExecutor(type, startOffset, endOffset) {\n const {\n media,\n mediaSource,\n operationQueue,\n sourceBuffer\n } = this;\n const sb = sourceBuffer[type];\n if (!media || !mediaSource || !sb) {\n this.warn(`Attempting to remove from the ${type} SourceBuffer, but it does not exist`);\n operationQueue.shiftAndExecuteNext(type);\n return;\n }\n const mediaDuration = isFiniteNumber(media.duration) ? media.duration : Infinity;\n const msDuration = isFiniteNumber(mediaSource.duration) ? mediaSource.duration : Infinity;\n const removeStart = Math.max(0, startOffset);\n const removeEnd = Math.min(endOffset, mediaDuration, msDuration);\n if (removeEnd > removeStart && (!sb.ending || sb.ended)) {\n sb.ended = false;\n this.log(`Removing [${removeStart},${removeEnd}] from the ${type} SourceBuffer`);\n sb.remove(removeStart, removeEnd);\n } else {\n // Cycle the queue\n operationQueue.shiftAndExecuteNext(type);\n }\n }\n\n // This method must result in an updateend event; if append is not called, _onSBUpdateEnd must be called manually\n appendExecutor(data, type) {\n const sb = this.sourceBuffer[type];\n if (!sb) {\n if (!this.pendingTracks[type]) {\n throw new Error(`Attempting to append to the ${type} SourceBuffer, but it does not exist`);\n }\n return;\n }\n sb.ended = false;\n sb.appendBuffer(data);\n }\n\n // Enqueues an operation to each SourceBuffer queue which, upon execution, resolves a promise. When all promises\n // resolve, the onUnblocked function is executed. Functions calling this method do not need to unblock the queue\n // upon completion, since we already do it here\n blockBuffers(onUnblocked, buffers = this.getSourceBufferTypes()) {\n if (!buffers.length) {\n this.log('Blocking operation requested, but no SourceBuffers exist');\n Promise.resolve().then(onUnblocked);\n return;\n }\n const {\n operationQueue\n } = this;\n\n // logger.debug(`[buffer-controller]: Blocking ${buffers} SourceBuffer`);\n const blockingOperations = buffers.map(type => operationQueue.appendBlocker(type));\n Promise.all(blockingOperations).then(() => {\n // logger.debug(`[buffer-controller]: Blocking operation resolved; unblocking ${buffers} SourceBuffer`);\n onUnblocked();\n buffers.forEach(type => {\n const sb = this.sourceBuffer[type];\n // Only cycle the queue if the SB is not updating. 
There's a bug in Chrome which sets the SB updating flag to\n // true when changing the MediaSource duration (https://bugs.chromium.org/p/chromium/issues/detail?id=959359&can=2&q=mediasource%20duration)\n // While this is a workaround, it's probably useful to have around\n if (!(sb != null && sb.updating)) {\n operationQueue.shiftAndExecuteNext(type);\n }\n });\n });\n }\n getSourceBufferTypes() {\n return Object.keys(this.sourceBuffer);\n }\n addBufferListener(type, event, fn) {\n const buffer = this.sourceBuffer[type];\n if (!buffer) {\n return;\n }\n const listener = fn.bind(this, type);\n this.listeners[type].push({\n event,\n listener\n });\n buffer.addEventListener(event, listener);\n }\n removeBufferListeners(type) {\n const buffer = this.sourceBuffer[type];\n if (!buffer) {\n return;\n }\n this.listeners[type].forEach(l => {\n buffer.removeEventListener(l.event, l.listener);\n });\n }\n}\nfunction removeSourceChildren(node) {\n const sourceChildren = node.querySelectorAll('source');\n [].slice.call(sourceChildren).forEach(source => {\n node.removeChild(source);\n });\n}\nfunction addSource(media, url) {\n const source = self.document.createElement('source');\n source.type = 'video/mp4';\n source.src = url;\n media.appendChild(source);\n}\n\n/**\n *\n * This code was ported from the dash.js project at:\n * https://github.com/Dash-Industry-Forum/dash.js/blob/development/externals/cea608-parser.js\n * https://github.com/Dash-Industry-Forum/dash.js/commit/8269b26a761e0853bb21d78780ed945144ecdd4d#diff-71bc295a2d6b6b7093a1d3290d53a4b2\n *\n * The original copyright appears below:\n *\n * The copyright in this software is being made available under the BSD License,\n * included below. This software may be subject to other third party and contributor\n * rights, including patent rights, and no such rights are granted under this license.\n *\n * Copyright (c) 2015-2016, DASH Industry Forum.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without modification,\n * are permitted provided that the following conditions are met:\n * 1. Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation and/or\n * other materials provided with the distribution.\n * 2. Neither the name of Dash Industry Forum nor the names of its\n * contributors may be used to endorse or promote products derived from this software\n * without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n */\n/**\n * Exceptions from regular ASCII. 
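 * (Bytes listed in the table below are remapped; e.g. 0x2a renders as
 * 0xe1 'á' and 0x7e as 0xf1 'ñ', while bytes with no entry pass through
 * as plain ASCII.)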
CodePoints are mapped to UTF-16 codes\n */\n\nconst specialCea608CharsCodes = {\n 0x2a: 0xe1,\n // lowercase a, acute accent\n 0x5c: 0xe9,\n // lowercase e, acute accent\n 0x5e: 0xed,\n // lowercase i, acute accent\n 0x5f: 0xf3,\n // lowercase o, acute accent\n 0x60: 0xfa,\n // lowercase u, acute accent\n 0x7b: 0xe7,\n // lowercase c with cedilla\n 0x7c: 0xf7,\n // division symbol\n 0x7d: 0xd1,\n // uppercase N tilde\n 0x7e: 0xf1,\n // lowercase n tilde\n 0x7f: 0x2588,\n // Full block\n // THIS BLOCK INCLUDES THE 16 EXTENDED (TWO-BYTE) LINE 21 CHARACTERS\n // THAT COME FROM HI BYTE=0x11 AND LOW BETWEEN 0x30 AND 0x3F\n // THIS MEANS THAT \\x50 MUST BE ADDED TO THE VALUES\n 0x80: 0xae,\n // Registered symbol (R)\n 0x81: 0xb0,\n // degree sign\n 0x82: 0xbd,\n // 1/2 symbol\n 0x83: 0xbf,\n // Inverted (open) question mark\n 0x84: 0x2122,\n // Trademark symbol (TM)\n 0x85: 0xa2,\n // Cents symbol\n 0x86: 0xa3,\n // Pounds sterling\n 0x87: 0x266a,\n // Music 8'th note\n 0x88: 0xe0,\n // lowercase a, grave accent\n 0x89: 0x20,\n // transparent space (regular)\n 0x8a: 0xe8,\n // lowercase e, grave accent\n 0x8b: 0xe2,\n // lowercase a, circumflex accent\n 0x8c: 0xea,\n // lowercase e, circumflex accent\n 0x8d: 0xee,\n // lowercase i, circumflex accent\n 0x8e: 0xf4,\n // lowercase o, circumflex accent\n 0x8f: 0xfb,\n // lowercase u, circumflex accent\n // THIS BLOCK INCLUDES THE 32 EXTENDED (TWO-BYTE) LINE 21 CHARACTERS\n // THAT COME FROM HI BYTE=0x12 AND LOW BETWEEN 0x20 AND 0x3F\n 0x90: 0xc1,\n // capital letter A with acute\n 0x91: 0xc9,\n // capital letter E with acute\n 0x92: 0xd3,\n // capital letter O with acute\n 0x93: 0xda,\n // capital letter U with acute\n 0x94: 0xdc,\n // capital letter U with diaresis\n 0x95: 0xfc,\n // lowercase letter U with diaeresis\n 0x96: 0x2018,\n // opening single quote\n 0x97: 0xa1,\n // inverted exclamation mark\n 0x98: 0x2a,\n // asterisk\n 0x99: 0x2019,\n // closing single quote\n 0x9a: 0x2501,\n // box drawings heavy horizontal\n 0x9b: 0xa9,\n // copyright sign\n 0x9c: 0x2120,\n // Service mark\n 0x9d: 0x2022,\n // (round) bullet\n 0x9e: 0x201c,\n // Left double quotation mark\n 0x9f: 0x201d,\n // Right double quotation mark\n 0xa0: 0xc0,\n // uppercase A, grave accent\n 0xa1: 0xc2,\n // uppercase A, circumflex\n 0xa2: 0xc7,\n // uppercase C with cedilla\n 0xa3: 0xc8,\n // uppercase E, grave accent\n 0xa4: 0xca,\n // uppercase E, circumflex\n 0xa5: 0xcb,\n // capital letter E with diaresis\n 0xa6: 0xeb,\n // lowercase letter e with diaresis\n 0xa7: 0xce,\n // uppercase I, circumflex\n 0xa8: 0xcf,\n // uppercase I, with diaresis\n 0xa9: 0xef,\n // lowercase i, with diaresis\n 0xaa: 0xd4,\n // uppercase O, circumflex\n 0xab: 0xd9,\n // uppercase U, grave accent\n 0xac: 0xf9,\n // lowercase u, grave accent\n 0xad: 0xdb,\n // uppercase U, circumflex\n 0xae: 0xab,\n // left-pointing double angle quotation mark\n 0xaf: 0xbb,\n // right-pointing double angle quotation mark\n // THIS BLOCK INCLUDES THE 32 EXTENDED (TWO-BYTE) LINE 21 CHARACTERS\n // THAT COME FROM HI BYTE=0x13 AND LOW BETWEEN 0x20 AND 0x3F\n 0xb0: 0xc3,\n // Uppercase A, tilde\n 0xb1: 0xe3,\n // Lowercase a, tilde\n 0xb2: 0xcd,\n // Uppercase I, acute accent\n 0xb3: 0xcc,\n // Uppercase I, grave accent\n 0xb4: 0xec,\n // Lowercase i, grave accent\n 0xb5: 0xd2,\n // Uppercase O, grave accent\n 0xb6: 0xf2,\n // Lowercase o, grave accent\n 0xb7: 0xd5,\n // Uppercase O, tilde\n 0xb8: 0xf5,\n // Lowercase o, tilde\n 0xb9: 0x7b,\n // Open curly brace\n 0xba: 0x7d,\n // Closing curly brace\n 0xbb: 
0x5c,\n // Backslash\n 0xbc: 0x5e,\n // Caret\n 0xbd: 0x5f,\n // Underscore\n 0xbe: 0x7c,\n // Pipe (vertical line)\n 0xbf: 0x223c,\n // Tilde operator\n 0xc0: 0xc4,\n // Uppercase A, umlaut\n 0xc1: 0xe4,\n // Lowercase A, umlaut\n 0xc2: 0xd6,\n // Uppercase O, umlaut\n 0xc3: 0xf6,\n // Lowercase o, umlaut\n 0xc4: 0xdf,\n // Esszett (sharp S)\n 0xc5: 0xa5,\n // Yen symbol\n 0xc6: 0xa4,\n // Generic currency sign\n 0xc7: 0x2503,\n // Box drawings heavy vertical\n 0xc8: 0xc5,\n // Uppercase A, ring\n 0xc9: 0xe5,\n // Lowercase A, ring\n 0xca: 0xd8,\n // Uppercase O, stroke\n 0xcb: 0xf8,\n // Lowercase o, strok\n 0xcc: 0x250f,\n // Box drawings heavy down and right\n 0xcd: 0x2513,\n // Box drawings heavy down and left\n 0xce: 0x2517,\n // Box drawings heavy up and right\n 0xcf: 0x251b // Box drawings heavy up and left\n};\n\n/**\n * Utils\n */\nconst getCharForByte = byte => String.fromCharCode(specialCea608CharsCodes[byte] || byte);\nconst NR_ROWS = 15;\nconst NR_COLS = 100;\n// Tables to look up row from PAC data\nconst rowsLowCh1 = {\n 0x11: 1,\n 0x12: 3,\n 0x15: 5,\n 0x16: 7,\n 0x17: 9,\n 0x10: 11,\n 0x13: 12,\n 0x14: 14\n};\nconst rowsHighCh1 = {\n 0x11: 2,\n 0x12: 4,\n 0x15: 6,\n 0x16: 8,\n 0x17: 10,\n 0x13: 13,\n 0x14: 15\n};\nconst rowsLowCh2 = {\n 0x19: 1,\n 0x1a: 3,\n 0x1d: 5,\n 0x1e: 7,\n 0x1f: 9,\n 0x18: 11,\n 0x1b: 12,\n 0x1c: 14\n};\nconst rowsHighCh2 = {\n 0x19: 2,\n 0x1a: 4,\n 0x1d: 6,\n 0x1e: 8,\n 0x1f: 10,\n 0x1b: 13,\n 0x1c: 15\n};\nconst backgroundColors = ['white', 'green', 'blue', 'cyan', 'red', 'yellow', 'magenta', 'black', 'transparent'];\nclass CaptionsLogger {\n constructor() {\n this.time = null;\n this.verboseLevel = 0;\n }\n log(severity, msg) {\n if (this.verboseLevel >= severity) {\n const m = typeof msg === 'function' ? 
msg() : msg;\n logger.log(`${this.time} [${severity}] ${m}`);\n }\n }\n}\nconst numArrayToHexArray = function numArrayToHexArray(numArray) {\n const hexArray = [];\n for (let j = 0; j < numArray.length; j++) {\n hexArray.push(numArray[j].toString(16));\n }\n return hexArray;\n};\nclass PenState {\n constructor() {\n this.foreground = 'white';\n this.underline = false;\n this.italics = false;\n this.background = 'black';\n this.flash = false;\n }\n reset() {\n this.foreground = 'white';\n this.underline = false;\n this.italics = false;\n this.background = 'black';\n this.flash = false;\n }\n setStyles(styles) {\n const attribs = ['foreground', 'underline', 'italics', 'background', 'flash'];\n for (let i = 0; i < attribs.length; i++) {\n const style = attribs[i];\n if (styles.hasOwnProperty(style)) {\n this[style] = styles[style];\n }\n }\n }\n isDefault() {\n return this.foreground === 'white' && !this.underline && !this.italics && this.background === 'black' && !this.flash;\n }\n equals(other) {\n return this.foreground === other.foreground && this.underline === other.underline && this.italics === other.italics && this.background === other.background && this.flash === other.flash;\n }\n copy(newPenState) {\n this.foreground = newPenState.foreground;\n this.underline = newPenState.underline;\n this.italics = newPenState.italics;\n this.background = newPenState.background;\n this.flash = newPenState.flash;\n }\n toString() {\n return 'color=' + this.foreground + ', underline=' + this.underline + ', italics=' + this.italics + ', background=' + this.background + ', flash=' + this.flash;\n }\n}\n\n/**\n * Unicode character with styling and background.\n * @constructor\n */\nclass StyledUnicodeChar {\n constructor() {\n this.uchar = ' ';\n this.penState = new PenState();\n }\n reset() {\n this.uchar = ' ';\n this.penState.reset();\n }\n setChar(uchar, newPenState) {\n this.uchar = uchar;\n this.penState.copy(newPenState);\n }\n setPenState(newPenState) {\n this.penState.copy(newPenState);\n }\n equals(other) {\n return this.uchar === other.uchar && this.penState.equals(other.penState);\n }\n copy(newChar) {\n this.uchar = newChar.uchar;\n this.penState.copy(newChar.penState);\n }\n isEmpty() {\n return this.uchar === ' ' && this.penState.isDefault();\n }\n}\n\n/**\n * CEA-608 row consisting of NR_COLS instances of StyledUnicodeChar.\n * @constructor\n */\nclass Row {\n constructor(logger) {\n this.chars = [];\n this.pos = 0;\n this.currPenState = new PenState();\n this.cueStartTime = null;\n this.logger = void 0;\n for (let i = 0; i < NR_COLS; i++) {\n this.chars.push(new StyledUnicodeChar());\n }\n this.logger = logger;\n }\n equals(other) {\n for (let i = 0; i < NR_COLS; i++) {\n if (!this.chars[i].equals(other.chars[i])) {\n return false;\n }\n }\n return true;\n }\n copy(other) {\n for (let i = 0; i < NR_COLS; i++) {\n this.chars[i].copy(other.chars[i]);\n }\n }\n isEmpty() {\n let empty = true;\n for (let i = 0; i < NR_COLS; i++) {\n if (!this.chars[i].isEmpty()) {\n empty = false;\n break;\n }\n }\n return empty;\n }\n\n /**\n * Set the cursor to a valid column.\n */\n setCursor(absPos) {\n if (this.pos !== absPos) {\n this.pos = absPos;\n }\n if (this.pos < 0) {\n this.logger.log(3, 'Negative cursor position ' + this.pos);\n this.pos = 0;\n } else if (this.pos > NR_COLS) {\n this.logger.log(3, 'Too large cursor position ' + this.pos);\n this.pos = NR_COLS;\n }\n }\n\n /**\n * Move the cursor relative to current position.\n */\n moveCursor(relPos) {\n const newPos = this.pos + relPos;\n if 
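/*
 * Worked example (illustrative): moveCursor(3) from pos = 2 gives
 * newPos = 5; columns 3..5 take on the current pen state and the cursor
 * lands on column 5 via setCursor(5).
 */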
(relPos > 1) {\n for (let i = this.pos + 1; i < newPos + 1; i++) {\n this.chars[i].setPenState(this.currPenState);\n }\n }\n this.setCursor(newPos);\n }\n\n /**\n * Backspace, move one step back and clear character.\n */\n backSpace() {\n this.moveCursor(-1);\n this.chars[this.pos].setChar(' ', this.currPenState);\n }\n insertChar(byte) {\n if (byte >= 0x90) {\n // Extended char\n this.backSpace();\n }\n const char = getCharForByte(byte);\n if (this.pos >= NR_COLS) {\n this.logger.log(0, () => 'Cannot insert ' + byte.toString(16) + ' (' + char + ') at position ' + this.pos + '. Skipping it!');\n return;\n }\n this.chars[this.pos].setChar(char, this.currPenState);\n this.moveCursor(1);\n }\n clearFromPos(startPos) {\n let i;\n for (i = startPos; i < NR_COLS; i++) {\n this.chars[i].reset();\n }\n }\n clear() {\n this.clearFromPos(0);\n this.pos = 0;\n this.currPenState.reset();\n }\n clearToEndOfRow() {\n this.clearFromPos(this.pos);\n }\n getTextString() {\n const chars = [];\n let empty = true;\n for (let i = 0; i < NR_COLS; i++) {\n const char = this.chars[i].uchar;\n if (char !== ' ') {\n empty = false;\n }\n chars.push(char);\n }\n if (empty) {\n return '';\n } else {\n return chars.join('');\n }\n }\n setPenStyles(styles) {\n this.currPenState.setStyles(styles);\n const currChar = this.chars[this.pos];\n currChar.setPenState(this.currPenState);\n }\n}\n\n/**\n * Keep a CEA-608 screen of 32x15 styled characters\n * @constructor\n */\nclass CaptionScreen {\n constructor(logger) {\n this.rows = [];\n this.currRow = NR_ROWS - 1;\n this.nrRollUpRows = null;\n this.lastOutputScreen = null;\n this.logger = void 0;\n for (let i = 0; i < NR_ROWS; i++) {\n this.rows.push(new Row(logger));\n }\n this.logger = logger;\n }\n reset() {\n for (let i = 0; i < NR_ROWS; i++) {\n this.rows[i].clear();\n }\n this.currRow = NR_ROWS - 1;\n }\n equals(other) {\n let equal = true;\n for (let i = 0; i < NR_ROWS; i++) {\n if (!this.rows[i].equals(other.rows[i])) {\n equal = false;\n break;\n }\n }\n return equal;\n }\n copy(other) {\n for (let i = 0; i < NR_ROWS; i++) {\n this.rows[i].copy(other.rows[i]);\n }\n }\n isEmpty() {\n let empty = true;\n for (let i = 0; i < NR_ROWS; i++) {\n if (!this.rows[i].isEmpty()) {\n empty = false;\n break;\n }\n }\n return empty;\n }\n backSpace() {\n const row = this.rows[this.currRow];\n row.backSpace();\n }\n clearToEndOfRow() {\n const row = this.rows[this.currRow];\n row.clearToEndOfRow();\n }\n\n /**\n * Insert a character (without styling) in the current row.\n */\n insertChar(char) {\n const row = this.rows[this.currRow];\n row.insertChar(char);\n }\n setPen(styles) {\n const row = this.rows[this.currRow];\n row.setPenStyles(styles);\n }\n moveCursor(relPos) {\n const row = this.rows[this.currRow];\n row.moveCursor(relPos);\n }\n setCursor(absPos) {\n this.logger.log(2, 'setCursor: ' + absPos);\n const row = this.rows[this.currRow];\n row.setCursor(absPos);\n }\n setPAC(pacData) {\n this.logger.log(2, () => 'pacData = ' + JSON.stringify(pacData));\n let newRow = pacData.row - 1;\n if (this.nrRollUpRows && newRow < this.nrRollUpRows - 1) {\n newRow = this.nrRollUpRows - 1;\n }\n\n // Make sure this only affects Roll-up Captions by checking this.nrRollUpRows\n if (this.nrRollUpRows && this.currRow !== newRow) {\n // clear all rows first\n for (let i = 0; i < NR_ROWS; i++) {\n this.rows[i].clear();\n }\n\n // Copy this.nrRollUpRows rows from lastOutputScreen and place it in the newRow location\n // topRowIndex - the start of rows to copy (inclusive index)\n const 
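/*
 * Worked example (illustrative): with currRow = 14 and nrRollUpRows = 3,
 * topRowIndex = 14 + 1 - 3 = 12, so rows 12..14 of lastOutputScreen are
 * the candidates copied into place around the new base row.
 */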
topRowIndex = this.currRow + 1 - this.nrRollUpRows;\n // We only copy if the last position was already shown.\n // We use the cueStartTime value to check this.\n const lastOutputScreen = this.lastOutputScreen;\n if (lastOutputScreen) {\n const prevLineTime = lastOutputScreen.rows[topRowIndex].cueStartTime;\n const time = this.logger.time;\n if (prevLineTime !== null && time !== null && prevLineTime < time) {\n for (let i = 0; i < this.nrRollUpRows; i++) {\n this.rows[newRow - this.nrRollUpRows + i + 1].copy(lastOutputScreen.rows[topRowIndex + i]);\n }\n }\n }\n }\n this.currRow = newRow;\n const row = this.rows[this.currRow];\n if (pacData.indent !== null) {\n const indent = pacData.indent;\n const prevPos = Math.max(indent - 1, 0);\n row.setCursor(pacData.indent);\n pacData.color = row.chars[prevPos].penState.foreground;\n }\n const styles = {\n foreground: pacData.color,\n underline: pacData.underline,\n italics: pacData.italics,\n background: 'black',\n flash: false\n };\n this.setPen(styles);\n }\n\n /**\n * Set background/extra foreground, but first do back_space, and then insert space (backwards compatibility).\n */\n setBkgData(bkgData) {\n this.logger.log(2, () => 'bkgData = ' + JSON.stringify(bkgData));\n this.backSpace();\n this.setPen(bkgData);\n this.insertChar(0x20); // Space\n }\n setRollUpRows(nrRows) {\n this.nrRollUpRows = nrRows;\n }\n rollUp() {\n if (this.nrRollUpRows === null) {\n this.logger.log(3, 'roll_up but nrRollUpRows not set yet');\n return; // Not properly setup\n }\n this.logger.log(1, () => this.getDisplayText());\n const topRowIndex = this.currRow + 1 - this.nrRollUpRows;\n const topRow = this.rows.splice(topRowIndex, 1)[0];\n topRow.clear();\n this.rows.splice(this.currRow, 0, topRow);\n this.logger.log(2, 'Rolling up');\n // this.logger.log(VerboseLevel.TEXT, this.get_display_text())\n }\n\n /**\n * Get all non-empty rows with as unicode text.\n */\n getDisplayText(asOneRow) {\n asOneRow = asOneRow || false;\n const displayText = [];\n let text = '';\n let rowNr = -1;\n for (let i = 0; i < NR_ROWS; i++) {\n const rowText = this.rows[i].getTextString();\n if (rowText) {\n rowNr = i + 1;\n if (asOneRow) {\n displayText.push('Row ' + rowNr + \": '\" + rowText + \"'\");\n } else {\n displayText.push(rowText.trim());\n }\n }\n }\n if (displayText.length > 0) {\n if (asOneRow) {\n text = '[' + displayText.join(' | ') + ']';\n } else {\n text = displayText.join('\\n');\n }\n }\n return text;\n }\n getTextAndFormat() {\n return this.rows;\n }\n}\n\n// var modes = ['MODE_ROLL-UP', 'MODE_POP-ON', 'MODE_PAINT-ON', 'MODE_TEXT'];\n\nclass Cea608Channel {\n constructor(channelNumber, outputFilter, logger) {\n this.chNr = void 0;\n this.outputFilter = void 0;\n this.mode = void 0;\n this.verbose = void 0;\n this.displayedMemory = void 0;\n this.nonDisplayedMemory = void 0;\n this.lastOutputScreen = void 0;\n this.currRollUpRow = void 0;\n this.writeScreen = void 0;\n this.cueStartTime = void 0;\n this.logger = void 0;\n this.chNr = channelNumber;\n this.outputFilter = outputFilter;\n this.mode = null;\n this.verbose = 0;\n this.displayedMemory = new CaptionScreen(logger);\n this.nonDisplayedMemory = new CaptionScreen(logger);\n this.lastOutputScreen = new CaptionScreen(logger);\n this.currRollUpRow = this.displayedMemory.rows[NR_ROWS - 1];\n this.writeScreen = this.displayedMemory;\n this.mode = null;\n this.cueStartTime = null; // Keeps track of where a cue started.\n this.logger = logger;\n }\n reset() {\n this.mode = null;\n this.displayedMemory.reset();\n 
this.nonDisplayedMemory.reset();\n this.lastOutputScreen.reset();\n this.outputFilter.reset();\n this.currRollUpRow = this.displayedMemory.rows[NR_ROWS - 1];\n this.writeScreen = this.displayedMemory;\n this.mode = null;\n this.cueStartTime = null;\n }\n getHandler() {\n return this.outputFilter;\n }\n setHandler(newHandler) {\n this.outputFilter = newHandler;\n }\n setPAC(pacData) {\n this.writeScreen.setPAC(pacData);\n }\n setBkgData(bkgData) {\n this.writeScreen.setBkgData(bkgData);\n }\n setMode(newMode) {\n if (newMode === this.mode) {\n return;\n }\n this.mode = newMode;\n this.logger.log(2, () => 'MODE=' + newMode);\n if (this.mode === 'MODE_POP-ON') {\n this.writeScreen = this.nonDisplayedMemory;\n } else {\n this.writeScreen = this.displayedMemory;\n this.writeScreen.reset();\n }\n if (this.mode !== 'MODE_ROLL-UP') {\n this.displayedMemory.nrRollUpRows = null;\n this.nonDisplayedMemory.nrRollUpRows = null;\n }\n this.mode = newMode;\n }\n insertChars(chars) {\n for (let i = 0; i < chars.length; i++) {\n this.writeScreen.insertChar(chars[i]);\n }\n const screen = this.writeScreen === this.displayedMemory ? 'DISP' : 'NON_DISP';\n this.logger.log(2, () => screen + ': ' + this.writeScreen.getDisplayText(true));\n if (this.mode === 'MODE_PAINT-ON' || this.mode === 'MODE_ROLL-UP') {\n this.logger.log(1, () => 'DISPLAYED: ' + this.displayedMemory.getDisplayText(true));\n this.outputDataUpdate();\n }\n }\n ccRCL() {\n // Resume Caption Loading (switch mode to Pop On)\n this.logger.log(2, 'RCL - Resume Caption Loading');\n this.setMode('MODE_POP-ON');\n }\n ccBS() {\n // BackSpace\n this.logger.log(2, 'BS - BackSpace');\n if (this.mode === 'MODE_TEXT') {\n return;\n }\n this.writeScreen.backSpace();\n if (this.writeScreen === this.displayedMemory) {\n this.outputDataUpdate();\n }\n }\n ccAOF() {\n // Reserved (formerly Alarm Off)\n }\n ccAON() {\n // Reserved (formerly Alarm On)\n }\n ccDER() {\n // Delete to End of Row\n this.logger.log(2, 'DER- Delete to End of Row');\n this.writeScreen.clearToEndOfRow();\n this.outputDataUpdate();\n }\n ccRU(nrRows) {\n // Roll-Up Captions-2,3,or 4 Rows\n this.logger.log(2, 'RU(' + nrRows + ') - Roll Up');\n this.writeScreen = this.displayedMemory;\n this.setMode('MODE_ROLL-UP');\n this.writeScreen.setRollUpRows(nrRows);\n }\n ccFON() {\n // Flash On\n this.logger.log(2, 'FON - Flash On');\n this.writeScreen.setPen({\n flash: true\n });\n }\n ccRDC() {\n // Resume Direct Captioning (switch mode to PaintOn)\n this.logger.log(2, 'RDC - Resume Direct Captioning');\n this.setMode('MODE_PAINT-ON');\n }\n ccTR() {\n // Text Restart in text mode (not supported, however)\n this.logger.log(2, 'TR');\n this.setMode('MODE_TEXT');\n }\n ccRTD() {\n // Resume Text Display in Text mode (not supported, however)\n this.logger.log(2, 'RTD');\n this.setMode('MODE_TEXT');\n }\n ccEDM() {\n // Erase Displayed Memory\n this.logger.log(2, 'EDM - Erase Displayed Memory');\n this.displayedMemory.reset();\n this.outputDataUpdate(true);\n }\n ccCR() {\n // Carriage Return\n this.logger.log(2, 'CR - Carriage Return');\n this.writeScreen.rollUp();\n this.outputDataUpdate(true);\n }\n ccENM() {\n // Erase Non-Displayed Memory\n this.logger.log(2, 'ENM - Erase Non-displayed Memory');\n this.nonDisplayedMemory.reset();\n }\n ccEOC() {\n // End of Caption (Flip Memories)\n this.logger.log(2, 'EOC - End Of Caption');\n if (this.mode === 'MODE_POP-ON') {\n const tmp = this.displayedMemory;\n this.displayedMemory = this.nonDisplayedMemory;\n this.nonDisplayedMemory = tmp;\n 
this.writeScreen = this.nonDisplayedMemory;\n this.logger.log(1, () => 'DISP: ' + this.displayedMemory.getDisplayText());\n }\n this.outputDataUpdate(true);\n }\n ccTO(nrCols) {\n // Tab Offset 1,2, or 3 columns\n this.logger.log(2, 'TO(' + nrCols + ') - Tab Offset');\n this.writeScreen.moveCursor(nrCols);\n }\n ccMIDROW(secondByte) {\n // Parse MIDROW command\n const styles = {\n flash: false\n };\n styles.underline = secondByte % 2 === 1;\n styles.italics = secondByte >= 0x2e;\n if (!styles.italics) {\n const colorIndex = Math.floor(secondByte / 2) - 0x10;\n const colors = ['white', 'green', 'blue', 'cyan', 'red', 'yellow', 'magenta'];\n styles.foreground = colors[colorIndex];\n } else {\n styles.foreground = 'white';\n }\n this.logger.log(2, 'MIDROW: ' + JSON.stringify(styles));\n this.writeScreen.setPen(styles);\n }\n outputDataUpdate(dispatch = false) {\n const time = this.logger.time;\n if (time === null) {\n return;\n }\n if (this.outputFilter) {\n if (this.cueStartTime === null && !this.displayedMemory.isEmpty()) {\n // Start of a new cue\n this.cueStartTime = time;\n } else {\n if (!this.displayedMemory.equals(this.lastOutputScreen)) {\n this.outputFilter.newCue(this.cueStartTime, time, this.lastOutputScreen);\n if (dispatch && this.outputFilter.dispatchCue) {\n this.outputFilter.dispatchCue();\n }\n this.cueStartTime = this.displayedMemory.isEmpty() ? null : time;\n }\n }\n this.lastOutputScreen.copy(this.displayedMemory);\n }\n }\n cueSplitAtTime(t) {\n if (this.outputFilter) {\n if (!this.displayedMemory.isEmpty()) {\n if (this.outputFilter.newCue) {\n this.outputFilter.newCue(this.cueStartTime, t, this.displayedMemory);\n }\n this.cueStartTime = t;\n }\n }\n }\n}\n\n// Will be 1 or 2 when parsing captions\n\nclass Cea608Parser {\n constructor(field, out1, out2) {\n this.channels = void 0;\n this.currentChannel = 0;\n this.cmdHistory = createCmdHistory();\n this.logger = void 0;\n const logger = this.logger = new CaptionsLogger();\n this.channels = [null, new Cea608Channel(field, out1, logger), new Cea608Channel(field + 1, out2, logger)];\n }\n getHandler(channel) {\n return this.channels[channel].getHandler();\n }\n setHandler(channel, newHandler) {\n this.channels[channel].setHandler(newHandler);\n }\n\n /**\n * Add data for time t in forms of list of bytes (unsigned ints). 
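 * For example, addData(t, [0x14, 0x2c, 0x48, 0x49]) carries the channel-1
 * EDM (Erase Displayed Memory) control code followed by the characters 'HI'.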
The bytes are treated as pairs.\n */\n addData(time, byteList) {\n this.logger.time = time;\n for (let i = 0; i < byteList.length; i += 2) {\n const a = byteList[i] & 0x7f;\n const b = byteList[i + 1] & 0x7f;\n let cmdFound = false;\n let charsFound = null;\n if (a === 0 && b === 0) {\n continue;\n } else {\n this.logger.log(3, () => '[' + numArrayToHexArray([byteList[i], byteList[i + 1]]) + '] -> (' + numArrayToHexArray([a, b]) + ')');\n }\n const cmdHistory = this.cmdHistory;\n const isControlCode = a >= 0x10 && a <= 0x1f;\n if (isControlCode) {\n // Skip redundant control codes\n if (hasCmdRepeated(a, b, cmdHistory)) {\n setLastCmd(null, null, cmdHistory);\n this.logger.log(3, () => 'Repeated command (' + numArrayToHexArray([a, b]) + ') is dropped');\n continue;\n }\n setLastCmd(a, b, this.cmdHistory);\n cmdFound = this.parseCmd(a, b);\n if (!cmdFound) {\n cmdFound = this.parseMidrow(a, b);\n }\n if (!cmdFound) {\n cmdFound = this.parsePAC(a, b);\n }\n if (!cmdFound) {\n cmdFound = this.parseBackgroundAttributes(a, b);\n }\n } else {\n setLastCmd(null, null, cmdHistory);\n }\n if (!cmdFound) {\n charsFound = this.parseChars(a, b);\n if (charsFound) {\n const currChNr = this.currentChannel;\n if (currChNr && currChNr > 0) {\n const channel = this.channels[currChNr];\n channel.insertChars(charsFound);\n } else {\n this.logger.log(2, 'No channel found yet. TEXT-MODE?');\n }\n }\n }\n if (!cmdFound && !charsFound) {\n this.logger.log(2, () => \"Couldn't parse cleaned data \" + numArrayToHexArray([a, b]) + ' orig: ' + numArrayToHexArray([byteList[i], byteList[i + 1]]));\n }\n }\n }\n\n /**\n * Parse Command.\n * @returns True if a command was found\n */\n parseCmd(a, b) {\n const cond1 = (a === 0x14 || a === 0x1c || a === 0x15 || a === 0x1d) && b >= 0x20 && b <= 0x2f;\n const cond2 = (a === 0x17 || a === 0x1f) && b >= 0x21 && b <= 0x23;\n if (!(cond1 || cond2)) {\n return false;\n }\n const chNr = a === 0x14 || a === 0x15 || a === 0x17 ? 
  /**
   * Parse Command.
   * @returns True if a command was found
   */
  parseCmd(a, b) {
    const cond1 = (a === 0x14 || a === 0x1c || a === 0x15 || a === 0x1d) && b >= 0x20 && b <= 0x2f;
    const cond2 = (a === 0x17 || a === 0x1f) && b >= 0x21 && b <= 0x23;
    if (!(cond1 || cond2)) {
      return false;
    }
    const chNr = a === 0x14 || a === 0x15 || a === 0x17 ? 1 : 2;
    const channel = this.channels[chNr];
    if (a === 0x14 || a === 0x15 || a === 0x1c || a === 0x1d) {
      if (b === 0x20) {
        channel.ccRCL();
      } else if (b === 0x21) {
        channel.ccBS();
      } else if (b === 0x22) {
        channel.ccAOF();
      } else if (b === 0x23) {
        channel.ccAON();
      } else if (b === 0x24) {
        channel.ccDER();
      } else if (b === 0x25) {
        channel.ccRU(2);
      } else if (b === 0x26) {
        channel.ccRU(3);
      } else if (b === 0x27) {
        channel.ccRU(4);
      } else if (b === 0x28) {
        channel.ccFON();
      } else if (b === 0x29) {
        channel.ccRDC();
      } else if (b === 0x2a) {
        channel.ccTR();
      } else if (b === 0x2b) {
        channel.ccRTD();
      } else if (b === 0x2c) {
        channel.ccEDM();
      } else if (b === 0x2d) {
        channel.ccCR();
      } else if (b === 0x2e) {
        channel.ccENM();
      } else if (b === 0x2f) {
        channel.ccEOC();
      }
    } else {
      // a == 0x17 || a == 0x1F
      channel.ccTO(b - 0x20);
    }
    this.currentChannel = chNr;
    return true;
  }

  /**
   * Parse midrow styling command.
   */
  parseMidrow(a, b) {
    let chNr = 0;
    if ((a === 0x11 || a === 0x19) && b >= 0x20 && b <= 0x2f) {
      if (a === 0x11) {
        chNr = 1;
      } else {
        chNr = 2;
      }
      if (chNr !== this.currentChannel) {
        this.logger.log(0, 'Mismatch channel in midrow parsing');
        return false;
      }
      const channel = this.channels[chNr];
      if (!channel) {
        return false;
      }
      channel.ccMIDROW(b);
      this.logger.log(3, () => 'MIDROW (' + numArrayToHexArray([a, b]) + ')');
      return true;
    }
    return false;
  }

  /**
   * Parse Preamble Access Codes (Table 53).
   * @returns {Boolean} True if a PAC was found
   */
  parsePAC(a, b) {
    let row;
    const case1 = (a >= 0x11 && a <= 0x17 || a >= 0x19 && a <= 0x1f) && b >= 0x40 && b <= 0x7f;
    const case2 = (a === 0x10 || a === 0x18) && b >= 0x40 && b <= 0x5f;
    if (!(case1 || case2)) {
      return false;
    }
    const chNr = a <= 0x17 ? 1 : 2;
    if (b >= 0x40 && b <= 0x5f) {
      row = chNr === 1 ? rowsLowCh1[a] : rowsLowCh2[a];
    } else {
      // 0x60 <= b <= 0x7F
      row = chNr === 1 ? rowsHighCh1[a] : rowsHighCh2[a];
    }
    const channel = this.channels[chNr];
    if (!channel) {
      return false;
    }
    channel.setPAC(this.interpretPAC(row, b));
    this.currentChannel = chNr;
    return true;
  }

  /**
   * Interpret the second byte of the PAC, and return the information.
   * @returns pacData with style parameters
   */
  interpretPAC(row, byte) {
    let pacIndex;
    const pacData = {
      color: null,
      italics: false,
      indent: null,
      underline: false,
      row: row
    };
    if (byte > 0x5f) {
      pacIndex = byte - 0x60;
    } else {
      pacIndex = byte - 0x40;
    }
    pacData.underline = (pacIndex & 1) === 1;
    if (pacIndex <= 0xd) {
      pacData.color = ['white', 'green', 'blue', 'cyan', 'red', 'yellow', 'magenta', 'white'][Math.floor(pacIndex / 2)];
    } else if (pacIndex <= 0xf) {
      pacData.italics = true;
      pacData.color = 'white';
    } else {
      pacData.indent = Math.floor((pacIndex - 0x10) / 2) * 4;
    }
    return pacData; // Note that row has zero offset. The spec uses 1.
  }
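  // Worked examples for interpretPAC (added note, not in the original source):
  // byte 0x4e -> pacIndex 0x0e -> italics, white, no underline;
  // byte 0x52 -> pacIndex 0x12 -> indent floor((0x12 - 0x10) / 2) * 4 = 4 columns;
  // byte 0x41 -> pacIndex 0x01 -> white with underline (low bit set).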
  /**
   * Parse characters.
   * @returns An array with 1 to 2 codes corresponding to chars, if found.
   * null otherwise.
   */
  parseChars(a, b) {
    let channelNr;
    let charCodes = null;
    let charCode1 = null;
    if (a >= 0x19) {
      channelNr = 2;
      charCode1 = a - 8;
    } else {
      channelNr = 1;
      charCode1 = a;
    }
    if (charCode1 >= 0x11 && charCode1 <= 0x13) {
      // Special character
      let oneCode;
      if (charCode1 === 0x11) {
        oneCode = b + 0x50;
      } else if (charCode1 === 0x12) {
        oneCode = b + 0x70;
      } else {
        oneCode = b + 0x90;
      }
      this.logger.log(2, () => "Special char '" + getCharForByte(oneCode) + "' in channel " + channelNr);
      charCodes = [oneCode];
    } else if (a >= 0x20 && a <= 0x7f) {
      charCodes = b === 0 ? [a] : [a, b];
    }
    if (charCodes) {
      this.logger.log(3, () => 'Char codes = ' + numArrayToHexArray(charCodes).join(','));
    }
    return charCodes;
  }

  /**
   * Parse extended background attributes as well as new foreground color black.
   * @returns True if background attributes are found
   */
  parseBackgroundAttributes(a, b) {
    const case1 = (a === 0x10 || a === 0x18) && b >= 0x20 && b <= 0x2f;
    const case2 = (a === 0x17 || a === 0x1f) && b >= 0x2d && b <= 0x2f;
    if (!(case1 || case2)) {
      return false;
    }
    let index;
    const bkgData = {};
    if (a === 0x10 || a === 0x18) {
      index = Math.floor((b - 0x20) / 2);
      bkgData.background = backgroundColors[index];
      if (b % 2 === 1) {
        bkgData.background = bkgData.background + '_semi';
      }
    } else if (b === 0x2d) {
      bkgData.background = 'transparent';
    } else {
      bkgData.foreground = 'black';
      if (b === 0x2f) {
        bkgData.underline = true;
      }
    }
    const chNr = a <= 0x17 ? 1 : 2;
    const channel = this.channels[chNr];
    channel.setBkgData(bkgData);
    return true;
  }

  /**
   * Reset state of parser and its channels.
   */
  reset() {
    for (let i = 0; i < Object.keys(this.channels).length; i++) {
      const channel = this.channels[i];
      if (channel) {
        channel.reset();
      }
    }
    setLastCmd(null, null, this.cmdHistory);
  }

  /**
   * Trigger the generation of a cue, and the start of a new one if displayScreens are not empty.
   */
  cueSplitAtTime(t) {
    for (let i = 0; i < this.channels.length; i++) {
      const channel = this.channels[i];
      if (channel) {
        channel.cueSplitAtTime(t);
      }
    }
  }
}
function setLastCmd(a, b, cmdHistory) {
  cmdHistory.a = a;
  cmdHistory.b = b;
}
function hasCmdRepeated(a, b, cmdHistory) {
  return cmdHistory.a === a && cmdHistory.b === b;
}
function createCmdHistory() {
  return {
    a: null,
    b: null
  };
}
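// Illustrative usage sketch (added for clarity; not part of the original
// bundle, and not invoked anywhere). `out1` and `out2` are assumed to
// implement the output-filter interface used by the channels above
// (newCue/dispatchCue/reset), e.g. the OutputFilter class defined below.
function cea608UsageSketch(out1, out2) {
  const parser = new Cea608Parser(1, out1, out2);
  // RCL (0x14 0x20): start loading a pop-on caption off-screen on channel 1.
  parser.addData(0.0, [0x14, 0x20]);
  // Plain ASCII pair 0x48 0x49 -> "HI", routed through parseChars().
  parser.addData(0.5, [0x48, 0x49]);
  // EOC (0x14 0x2f): flip the composed memory into view and emit a cue.
  parser.addData(1.0, [0x14, 0x2f]);
}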
class OutputFilter {
  constructor(timelineController, trackName) {
    this.timelineController = void 0;
    this.cueRanges = [];
    this.trackName = void 0;
    this.startTime = null;
    this.endTime = null;
    this.screen = null;
    this.timelineController = timelineController;
    this.trackName = trackName;
  }
  dispatchCue() {
    if (this.startTime === null) {
      return;
    }
    this.timelineController.addCues(this.trackName, this.startTime, this.endTime, this.screen, this.cueRanges);
    this.startTime = null;
  }
  newCue(startTime, endTime, screen) {
    if (this.startTime === null || this.startTime > startTime) {
      this.startTime = startTime;
    }
    this.endTime = endTime;
    this.screen = screen;
    this.timelineController.createCaptionsTrack(this.trackName);
  }
  reset() {
    this.cueRanges = [];
    this.startTime = null;
  }
}

/**
 * Copyright 2013 vtt.js Contributors
 *
 * Licensed under the Apache License, Version 2.0 (the 'License');
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an 'AS IS' BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

var VTTCue = (function () {
  if (optionalSelf != null && optionalSelf.VTTCue) {
    return self.VTTCue;
  }
  const AllowedDirections = ['', 'lr', 'rl'];
  const AllowedAlignments = ['start', 'middle', 'end', 'left', 'right'];
  function isAllowedValue(allowed, value) {
    if (typeof value !== 'string') {
      return false;
    }
    // necessary for assuring the generic conforms to the Array interface
    if (!Array.isArray(allowed)) {
      return false;
    }
    // reset the type so that the next narrowing works well
    const lcValue = value.toLowerCase();
    // use the allow list to narrow the type to a specific subset of strings
    if (~allowed.indexOf(lcValue)) {
      return lcValue;
    }
    return false;
  }
  function findDirectionSetting(value) {
    return isAllowedValue(AllowedDirections, value);
  }
  function findAlignSetting(value) {
    return isAllowedValue(AllowedAlignments, value);
  }
  function extend(obj, ...rest) {
    let i = 1;
    for (; i < arguments.length; i++) {
      const cobj = arguments[i];
      for (const p in cobj) {
        obj[p] = cobj[p];
      }
    }
    return obj;
  }
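  // Illustration (added note, not in the original source): isAllowedValue()
  // lowercases before matching, so findAlignSetting('MIDDLE') yields 'middle'
  // while findAlignSetting('justify') yields false; the `~` turns indexOf's
  // -1 (not found) into the falsy 0, so only listed values pass.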
  function VTTCue(startTime, endTime, text) {
    const cue = this;
    const baseObj = {
      enumerable: true
    };
    /**
     * Shim implementation specific properties. These properties are not in
     * the spec.
     */

    // Lets us know when the VTTCue's data has changed in such a way that we
    // need to recompute its display state. This lets us compute its display
    // state lazily.
    cue.hasBeenReset = false;

    /**
     * VTTCue and TextTrackCue properties
     * http://dev.w3.org/html5/webvtt/#vttcue-interface
     */

    let _id = '';
    let _pauseOnExit = false;
    let _startTime = startTime;
    let _endTime = endTime;
    let _text = text;
    let _region = null;
    let _vertical = '';
    let _snapToLines = true;
    let _line = 'auto';
    let _lineAlign = 'start';
    let _position = 50;
    let _positionAlign = 'middle';
    let _size = 50;
    let _align = 'middle';
    Object.defineProperty(cue, 'id', extend({}, baseObj, {
      get: function () {
        return _id;
      },
      set: function (value) {
        _id = '' + value;
      }
    }));
    Object.defineProperty(cue, 'pauseOnExit', extend({}, baseObj, {
      get: function () {
        return _pauseOnExit;
      },
      set: function (value) {
        _pauseOnExit = !!value;
      }
    }));
    Object.defineProperty(cue, 'startTime', extend({}, baseObj, {
      get: function () {
        return _startTime;
      },
      set: function (value) {
        if (typeof value !== 'number') {
          throw new TypeError('Start time must be set to a number.');
        }
        _startTime = value;
        this.hasBeenReset = true;
      }
    }));
    Object.defineProperty(cue, 'endTime', extend({}, baseObj, {
      get: function () {
        return _endTime;
      },
      set: function (value) {
        if (typeof value !== 'number') {
          throw new TypeError('End time must be set to a number.');
        }
        _endTime = value;
        this.hasBeenReset = true;
      }
    }));
    Object.defineProperty(cue, 'text', extend({}, baseObj, {
      get: function () {
        return _text;
      },
      set: function (value) {
        _text = '' + value;
        this.hasBeenReset = true;
      }
    }));

    // todo: implement VTTRegion polyfill?
    Object.defineProperty(cue, 'region', extend({}, baseObj, {
      get: function () {
        return _region;
      },
      set: function (value) {
        _region = value;
        this.hasBeenReset = true;
      }
    }));
    Object.defineProperty(cue, 'vertical', extend({}, baseObj, {
      get: function () {
        return _vertical;
      },
      set: function (value) {
        const setting = findDirectionSetting(value);
        // Have to check for false because the setting can be an empty string.
        if (setting === false) {
          throw new SyntaxError('An invalid or illegal string was specified.');
        }
        _vertical = setting;
        this.hasBeenReset = true;
      }
    }));
    Object.defineProperty(cue, 'snapToLines', extend({}, baseObj, {
      get: function () {
        return _snapToLines;
      },
      set: function (value) {
        _snapToLines = !!value;
        this.hasBeenReset = true;
      }
    }));
    Object.defineProperty(cue, 'line', extend({}, baseObj, {
      get: function () {
        return _line;
      },
      set: function (value) {
        if (typeof value !== 'number' && value !== 'auto') {
          throw new SyntaxError('An invalid number or illegal string was specified.');
        }
        _line = value;
        this.hasBeenReset = true;
      }
    }));
    Object.defineProperty(cue, 'lineAlign', extend({}, baseObj, {
      get: function () {
        return _lineAlign;
      },
      set: function (value) {
        const setting = findAlignSetting(value);
        if (!setting) {
          throw new SyntaxError('An invalid or illegal string was specified.');
        }
        _lineAlign = setting;
        this.hasBeenReset = true;
      }
    }));
    Object.defineProperty(cue, 'position', extend({}, baseObj, {
      get: function () {
        return _position;
      },
      set: function (value) {
        if (value < 0 || value > 100) {
          throw new Error('Position must be between 0 and 100.');
        }
        _position = value;
        this.hasBeenReset = true;
      }
    }));
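    // Example of the setter validation above (added note, not in the original
    // source): given `const cue = new VTTCue(0, 5, 'hello')`, assigning
    // `cue.position = 150` throws ('Position must be between 0 and 100.'),
    // while `cue.position = 75` succeeds and flags hasBeenReset so the
    // display state is recomputed lazily.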
    Object.defineProperty(cue, 'positionAlign', extend({}, baseObj, {
      get: function () {
        return _positionAlign;
      },
      set: function (value) {
        const setting = findAlignSetting(value);
        if (!setting) {
          throw new SyntaxError('An invalid or illegal string was specified.');
        }
        _positionAlign = setting;
        this.hasBeenReset = true;
      }
    }));
    Object.defineProperty(cue, 'size', extend({}, baseObj, {
      get: function () {
        return _size;
      },
      set: function (value) {
        if (value < 0 || value > 100) {
          throw new Error('Size must be between 0 and 100.');
        }
        _size = value;
        this.hasBeenReset = true;
      }
    }));
    Object.defineProperty(cue, 'align', extend({}, baseObj, {
      get: function () {
        return _align;
      },
      set: function (value) {
        const setting = findAlignSetting(value);
        if (!setting) {
          throw new SyntaxError('An invalid or illegal string was specified.');
        }
        _align = setting;
        this.hasBeenReset = true;
      }
    }));

    /**
     * Other