* WIP with new transcoder progress monitor
* A whole different WIP in progress monitoring via local PUTs
* Use an actual hls playlist parser to rewrite master playlist
* Cleanup
* Private vs public path for thumbnail generation
* Allow each storage provider to make decisions of how to store different types of files
* Simplify inbound file writes
* Revert
* Split out set stream as connected/disconnected state methods
* Update videojs
* Add comment about the hls handler
* Rework of the offline stream state. For #85
* Delete old unreferenced video segment files from disk
* Cleanup all segments and revert to a completely offline state after 5min
* Stop thumbnail generation on stream stop. Copy logo to thumbnail on cleanup.
* Update transcoder test
* Add comment
* Return http 200 on success to transcoder. Tweak how files are written to disk
* Force pixel color format in transcoder
* Add debugging info for S3 transfers. Add default ACL.
* Fix cleanup timer
* Reset session stats when we cleanup the session.
* Put log file back
* Update test
* File should not be a part of this commit
* Add centralized shared performance timer for use anywhere
* Post-rebase cleanup
* Support returning nil from storage provider save
* Updates to reflect package changes + other updates in master
* Fix storage providers being overwritten
* Do not return pointer in save. Support cache headers with S3 providers
* Split out videojs + vhs and point to specific working versions of them
* Bump vjs and vhs versions
* Fix test
* Remove unused
* Update upload warning message
* No longer valid comment
* Pin videojs and vhs versions
43 changed files with 32162 additions and 58567 deletions
@@ -1,14 +0,0 @@
package ffmpeg

import (
    "github.com/owncast/owncast/config"
)

//ShowStreamOfflineState generates and shows the stream's offline state
func ShowStreamOfflineState() {
    transcoder := NewTranscoder()
    transcoder.SetSegmentLength(10)
    transcoder.SetAppendToStream(true)
    transcoder.SetInput(config.Config.GetOfflineContentPath())
    transcoder.Start()
}
@@ -0,0 +1,103 @@
package ffmpeg

import (
    "bytes"
    "io"
    "os"
    "path/filepath"
    "strconv"
    "strings"

    "net/http"

    "github.com/owncast/owncast/config"
    "github.com/owncast/owncast/utils"
    log "github.com/sirupsen/logrus"
)

// FileWriterReceiverServiceCallback is the set of callbacks to be fired when transcoder responses are written to disk
type FileWriterReceiverServiceCallback interface {
    SegmentWritten(localFilePath string)
    VariantPlaylistWritten(localFilePath string)
    MasterPlaylistWritten(localFilePath string)
}

// FileWriterReceiverService accepts transcoder responses via HTTP and fires the callbacks
type FileWriterReceiverService struct {
    callbacks FileWriterReceiverServiceCallback
}

// SetupFileWriterReceiverService will start listening for transcoder responses
func (s *FileWriterReceiverService) SetupFileWriterReceiverService(callbacks FileWriterReceiverServiceCallback) {
    s.callbacks = callbacks

    httpServer := http.NewServeMux()
    httpServer.HandleFunc("/", s.uploadHandler)

    localListenerAddress := "127.0.0.1:" + strconv.Itoa(config.Config.GetPublicWebServerPort()+1)
    go http.ListenAndServe(localListenerAddress, httpServer)
    log.Traceln("Transcoder response listening on: " + localListenerAddress)
}

func (s *FileWriterReceiverService) uploadHandler(w http.ResponseWriter, r *http.Request) {
    if r.Method != "PUT" {
        http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
        return
    }

    path := r.URL.Path
    writePath := filepath.Join(config.PrivateHLSStoragePath, path)

    var buf bytes.Buffer
    io.Copy(&buf, r.Body)
    data := buf.Bytes()
    f, err := os.Create(writePath)
    if err != nil {
        returnError(err, w, r)
        return
    }

    defer f.Close()
    _, err = f.Write(data)
    if err != nil {
        returnError(err, w, r)
        return
    }

    s.fileWritten(writePath)
    w.WriteHeader(http.StatusOK)
}

var _inWarningState = false

func (s *FileWriterReceiverService) fileWritten(path string) {
    index := utils.GetIndexFromFilePath(path)

    if utils.GetRelativePathFromAbsolutePath(path) == "hls/stream.m3u8" {
        s.callbacks.MasterPlaylistWritten(path)

    } else if strings.HasSuffix(path, ".ts") {
        performanceMonitorKey := "segmentWritten-" + index
        averagePerformance := utils.GetAveragePerformance(performanceMonitorKey)

        utils.StartPerformanceMonitor(performanceMonitorKey)
        s.callbacks.SegmentWritten(path)

        if averagePerformance != 0 && averagePerformance > float64(config.Config.GetVideoSegmentSecondsLength()) {
            if !_inWarningState {
                log.Warnln("slow encoding for variant", index, "if this continues you may see buffering or errors. troubleshoot this issue by visiting https://owncast.online/docs/troubleshooting/")
                _inWarningState = true
            }
        } else {
            _inWarningState = false
        }

    } else if strings.HasSuffix(path, ".m3u8") {
        s.callbacks.VariantPlaylistWritten(path)
    }
}

func returnError(err error, w http.ResponseWriter, r *http.Request) {
    log.Errorln(err)
    http.Error(w, http.StatusText(http.StatusInternalServerError)+": "+err.Error(), http.StatusInternalServerError)
}
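Since the receiver defines its own callback interface, it can be brought up on its own before the rest of the HLS pipeline is wired in. A minimal sketch, assuming only the types in this file; the noopCallbacks stub and startReceiverForTesting helper are hypothetical, for illustration:

package ffmpeg

import log "github.com/sirupsen/logrus"

// noopCallbacks is a hypothetical stub that satisfies FileWriterReceiverServiceCallback
// and only logs which files the transcoder PUT to the local listener.
type noopCallbacks struct{}

func (noopCallbacks) SegmentWritten(localFilePath string)         { log.Traceln("segment:", localFilePath) }
func (noopCallbacks) VariantPlaylistWritten(localFilePath string) { log.Traceln("variant playlist:", localFilePath) }
func (noopCallbacks) MasterPlaylistWritten(localFilePath string)  { log.Traceln("master playlist:", localFilePath) }

func startReceiverForTesting() {
    // Starts the local HTTP listener that the transcoder PUTs its output to.
    receiver := &FileWriterReceiverService{}
    receiver.SetupFileWriterReceiverService(noopCallbacks{})
}

In the PR itself the callbacks are provided by the HLSHandler below, which forwards each write to a storage provider.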
@@ -0,0 +1,63 @@
package ffmpeg

import (
    log "github.com/sirupsen/logrus"

    "os"
    "path/filepath"
    "sort"

    "github.com/owncast/owncast/config"
)

// Cleanup will delete old files off disk that are no longer being referenced
// in the stream.
func Cleanup(directoryPath string) {
    // Determine how many files we should keep on disk
    maxNumber := config.Config.GetMaxNumberOfReferencedSegmentsInPlaylist()
    buffer := 10

    files, err := getSegmentFiles(directoryPath)
    if err != nil {
        log.Fatal(err)
    }

    if len(files) < maxNumber+buffer {
        return
    }

    // Delete old files on disk
    filesToDelete := files[maxNumber+buffer:]
    for _, file := range filesToDelete {
        os.Remove(filepath.Join(directoryPath, file.Name()))
    }
}

func getSegmentFiles(dirname string) ([]os.FileInfo, error) {
    f, err := os.Open(dirname)
    if err != nil {
        return nil, err
    }
    list, err := f.Readdir(-1) // -1 says to get a list of all files
    f.Close()
    if err != nil {
        return nil, err
    }

    filteredList := make([]os.FileInfo, 0)

    // Filter out playlists because we don't want to clean them up
    for _, file := range list {
        if filepath.Ext(file.Name()) == ".m3u8" {
            continue
        }
        filteredList = append(filteredList, file)
    }

    // Sort by date so we can delete old files
    sort.Slice(filteredList, func(i, j int) bool {
        return filteredList[i].ModTime().UnixNano() > filteredList[j].ModTime().UnixNano()
    })

    return filteredList, nil
}
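For context on the thresholds above: if GetMaxNumberOfReferencedSegmentsInPlaylist() returns, say, 20, then with the buffer of 10 nothing is removed until more than 30 segments exist on disk, and only files older than the 30 newest are deleted. In this PR, Cleanup is invoked from the local storage provider after each segment move (see LocalStorage.Save below); a timer-driven sweep such as the following is only an assumed alternative, not what the PR does:

package core

import (
    "time"

    "github.com/owncast/owncast/config"
    "github.com/owncast/owncast/core/ffmpeg"
)

// startSegmentSweeper is hypothetical: it sweeps the private HLS directory for
// stale segments on a fixed interval instead of after every segment move.
func startSegmentSweeper() {
    go func() {
        for range time.Tick(30 * time.Second) {
            ffmpeg.Cleanup(config.PrivateHLSStoragePath)
        }
    }()
}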
@@ -0,0 +1,25 @@
package ffmpeg

import (
    "github.com/owncast/owncast/models"
)

// HLSHandler gets told about available HLS playlists and segments
type HLSHandler struct {
    Storage models.StorageProvider
}

// SegmentWritten is fired when an HLS segment is written to disk
func (h *HLSHandler) SegmentWritten(localFilePath string) {
    h.Storage.SegmentWritten(localFilePath)
}

// VariantPlaylistWritten is fired when an HLS variant playlist is written to disk
func (h *HLSHandler) VariantPlaylistWritten(localFilePath string) {
    h.Storage.VariantPlaylistWritten(localFilePath)
}

// MasterPlaylistWritten is fired when an HLS master playlist is written to disk
func (h *HLSHandler) MasterPlaylistWritten(localFilePath string) {
    h.Storage.MasterPlaylistWritten(localFilePath)
}
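With the handler in place the flow has a single shape: transcoder, local HTTP PUT, FileWriterReceiverService, HLSHandler, storage provider. A sketch of how the pieces might be assembled, assuming LocalStorage (added later in this diff) implements the full models.StorageProvider interface and that the import paths follow the package names shown; setupVideoPipeline itself is hypothetical:

package core

import (
    "github.com/owncast/owncast/core/ffmpeg"
    "github.com/owncast/owncast/core/storageproviders"
    "github.com/owncast/owncast/models"
)

// setupVideoPipeline is a hypothetical wiring example, not code from the PR.
func setupVideoPipeline() error {
    // Assumes LocalStorage satisfies models.StorageProvider in full.
    var storage models.StorageProvider = &storageproviders.LocalStorage{}
    if err := storage.Setup(); err != nil {
        return err
    }

    // The HLS handler forwards every written file to the chosen provider.
    handler := &ffmpeg.HLSHandler{Storage: storage}

    // The receiver listens for the transcoder's local HTTP PUTs and fires
    // the handler's callbacks for segments and playlists.
    receiver := &ffmpeg.FileWriterReceiverService{}
    receiver.SetupFileWriterReceiverService(handler)
    return nil
}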
@@ -1,157 +0,0 @@
package playlist

import (
    "io/ioutil"
    "path"
    "path/filepath"
    "strconv"
    "strings"
    "time"

    log "github.com/sirupsen/logrus"

    "github.com/radovskyb/watcher"

    "github.com/owncast/owncast/config"
    "github.com/owncast/owncast/models"
    "github.com/owncast/owncast/utils"
)

var (
    _storage models.ChunkStorageProvider
    variants []models.Variant
)

//StartVideoContentMonitor starts the video content monitor
func StartVideoContentMonitor(storage models.ChunkStorageProvider) error {
    _storage = storage

    pathToMonitor := config.PrivateHLSStoragePath

    // Create at least one structure to store the segments for the different stream variants
    variants = make([]models.Variant, len(config.Config.VideoSettings.StreamQualities))
    if len(config.Config.VideoSettings.StreamQualities) > 0 {
        for index := range variants {
            variants[index] = models.Variant{
                VariantIndex: index,
                Segments:     make(map[string]*models.Segment),
            }
        }
    } else {
        variants[0] = models.Variant{
            VariantIndex: 0,
            Segments:     make(map[string]*models.Segment),
        }
    }

    // log.Printf("Using directory %s for storing files with %d variants...\n", pathToMonitor, len(variants))

    w := watcher.New()

    go func() {
        for {
            select {
            case event := <-w.Event:

                relativePath := utils.GetRelativePathFromAbsolutePath(event.Path)
                if path.Ext(relativePath) == ".tmp" {
                    continue
                }

                // Ignore removals
                if event.Op == watcher.Remove {
                    continue
                }

                // Handle updates to the master playlist by copying it to webroot
                if relativePath == path.Join(config.PrivateHLSStoragePath, "stream.m3u8") {
                    utils.Copy(event.Path, path.Join(config.PublicHLSStoragePath, "stream.m3u8"))

                } else if filepath.Ext(event.Path) == ".m3u8" {
                    // Handle updates to playlists, but not the master playlist
                    updateVariantPlaylist(event.Path)

                } else if filepath.Ext(event.Path) == ".ts" {
                    segment, err := getSegmentFromPath(event.Path)
                    if err != nil {
                        log.Error("failed to get the segment from path")
                        panic(err)
                    }

                    newObjectPathChannel := make(chan string, 1)
                    go func() {
                        newObjectPath, err := storage.Save(path.Join(config.PrivateHLSStoragePath, segment.RelativeUploadPath), 0)
                        if err != nil {
                            log.Errorln("failed to save the file to the chunk storage.", err)
                        }

                        newObjectPathChannel <- newObjectPath
                    }()

                    newObjectPath := <-newObjectPathChannel
                    segment.RemoteID = newObjectPath
                    // fmt.Println("Uploaded", segment.RelativeUploadPath, "as", newObjectPath)

                    variants[segment.VariantIndex].Segments[filepath.Base(segment.RelativeUploadPath)] = &segment

                    // Force a variant's playlist to be updated after a file is uploaded.
                    associatedVariantPlaylist := strings.ReplaceAll(event.Path, path.Base(event.Path), "stream.m3u8")
                    updateVariantPlaylist(associatedVariantPlaylist)
                }
            case err := <-w.Error:
                panic(err)
            case <-w.Closed:
                return
            }
        }
    }()

    // Watch the hls segment storage folder recursively for changes.
    w.FilterOps(watcher.Write, watcher.Rename, watcher.Create)

    if err := w.AddRecursive(pathToMonitor); err != nil {
        return err
    }

    return w.Start(time.Millisecond * 200)
}

func getSegmentFromPath(fullDiskPath string) (models.Segment, error) {
    segment := models.Segment{
        FullDiskPath:       fullDiskPath,
        RelativeUploadPath: utils.GetRelativePathFromAbsolutePath(fullDiskPath),
    }

    index, err := strconv.Atoi(segment.RelativeUploadPath[0:1])
    if err != nil {
        return segment, err
    }

    segment.VariantIndex = index

    return segment, nil
}

func getVariantIndexFromPath(fullDiskPath string) (int, error) {
    return strconv.Atoi(fullDiskPath[0:1])
}

func updateVariantPlaylist(fullPath string) error {
    relativePath := utils.GetRelativePathFromAbsolutePath(fullPath)
    variantIndex, err := getVariantIndexFromPath(relativePath)
    if err != nil {
        return err
    }

    variant := variants[variantIndex]

    playlistBytes, err := ioutil.ReadFile(fullPath)
    if err != nil {
        return err
    }

    playlistString := string(playlistBytes)
    playlistString = _storage.GenerateRemotePlaylist(playlistString, variant)

    return WritePlaylist(playlistString, path.Join(config.PublicHLSStoragePath, relativePath))
}
@@ -0,0 +1,63 @@
package storageproviders

import (
    "path/filepath"

    log "github.com/sirupsen/logrus"

    "github.com/owncast/owncast/config"
    "github.com/owncast/owncast/core/ffmpeg"
    "github.com/owncast/owncast/utils"
)

type LocalStorage struct {
}

// Setup configures this storage provider
func (s *LocalStorage) Setup() error {
    // no-op
    return nil
}

// SegmentWritten is called when a single segment of video is written
func (s *LocalStorage) SegmentWritten(localFilePath string) {
    s.Save(localFilePath, 0)
}

// VariantPlaylistWritten is called when a variant hls playlist is written
func (s *LocalStorage) VariantPlaylistWritten(localFilePath string) {
    _, err := s.Save(localFilePath, 0)
    if err != nil {
        log.Errorln(err)
        return
    }
}

// MasterPlaylistWritten is called when the master hls playlist is written
func (s *LocalStorage) MasterPlaylistWritten(localFilePath string) {
    s.Save(localFilePath, 0)
}

// Save will save a local filepath using the storage provider
func (s *LocalStorage) Save(filePath string, retryCount int) (string, error) {
    newPath := ""

    // This is a hack
    if filePath == "hls/stream.m3u8" {
        newPath = filepath.Join(config.PublicHLSStoragePath, filepath.Base(filePath))
    } else {
        newPath = filepath.Join(config.WebRoot, filePath)
    }

    // Move video segments to the destination directory.
    // Copy playlists to the destination directory so they can still be referenced in
    // the private hls working directory.
    if filepath.Ext(filePath) == ".m3u8" {
        utils.Copy(filePath, newPath)
    } else {
        utils.Move(filePath, newPath)
        ffmpeg.Cleanup(filepath.Dir(newPath))
    }

    return newPath, nil
}
@@ -0,0 +1,132 @@
package core

import (
    "bufio"
    "fmt"
    "os"
    "path/filepath"
    "time"

    log "github.com/sirupsen/logrus"

    "github.com/owncast/owncast/config"
    "github.com/owncast/owncast/core/ffmpeg"
    "github.com/owncast/owncast/utils"

    "github.com/grafov/m3u8"
)

var _cleanupTimer *time.Timer

//SetStreamAsConnected sets the stream as connected
func SetStreamAsConnected() {
    _stats.StreamConnected = true
    _stats.LastConnectTime = utils.NullTime{time.Now(), true}
    _stats.LastDisconnectTime = utils.NullTime{time.Now(), false}

    StopCleanupTimer()

    segmentPath := config.PublicHLSStoragePath
    if config.Config.S3.Enabled {
        segmentPath = config.PrivateHLSStoragePath
    }

    go func() {
        _transcoder = ffmpeg.NewTranscoder()
        _transcoder.TranscoderCompleted = func(error) {
            SetStreamAsDisconnected()
        }
        _transcoder.Start()
    }()

    ffmpeg.StartThumbnailGenerator(segmentPath, config.Config.VideoSettings.HighestQualityStreamIndex)
}

//SetStreamAsDisconnected sets the stream as disconnected.
func SetStreamAsDisconnected() {
    _stats.StreamConnected = false
    _stats.LastDisconnectTime = utils.NullTime{time.Now(), true}

    offlineFilename := "offline.ts"
    offlineFilePath := "static/" + offlineFilename

    ffmpeg.StopThumbnailGenerator()

    for index := range config.Config.GetVideoStreamQualities() {
        playlistFilePath := fmt.Sprintf(filepath.Join(config.PrivateHLSStoragePath, "%d/stream.m3u8"), index)
        segmentFilePath := fmt.Sprintf(filepath.Join(config.PrivateHLSStoragePath, "%d/%s"), index, offlineFilename)

        utils.Copy(offlineFilePath, segmentFilePath)
        _storage.Save(segmentFilePath, 0)

        if utils.DoesFileExists(playlistFilePath) {
            f, err := os.OpenFile(playlistFilePath, os.O_CREATE|os.O_RDWR, os.ModePerm)
            if err != nil {
                log.Errorln(err)
            }
            defer f.Close()

            playlist, _, err := m3u8.DecodeFrom(bufio.NewReader(f), true)
            variantPlaylist := playlist.(*m3u8.MediaPlaylist)
            if len(variantPlaylist.Segments) > config.Config.GetMaxNumberOfReferencedSegmentsInPlaylist() {
                variantPlaylist.Segments = variantPlaylist.Segments[:len(variantPlaylist.Segments)]
            }

            err = variantPlaylist.Append(offlineFilename, 8.0, "")
            variantPlaylist.SetDiscontinuity()
            _, err = f.WriteAt(variantPlaylist.Encode().Bytes(), 0)
            if err != nil {
                log.Errorln(err)
            }
        } else {
            p, err := m3u8.NewMediaPlaylist(1, 1)
            if err != nil {
                log.Errorln(err)
            }

            // If "offline" content gets changed then change the duration below
            err = p.Append(offlineFilename, 8.0, "")
            if err != nil {
                log.Errorln(err)
            }

            p.Close()
            f, err := os.Create(playlistFilePath)
            if err != nil {
                log.Errorln(err)
            }
            defer f.Close()
            _, err = f.Write(p.Encode().Bytes())
            if err != nil {
                log.Errorln(err)
            }
        }
        _storage.Save(playlistFilePath, 0)
    }

    StartCleanupTimer()
}

// StartCleanupTimer will fire a cleanup after n minutes being disconnected
func StartCleanupTimer() {
    _cleanupTimer = time.NewTimer(5 * time.Minute)
    go func() {
        for {
            select {
            case <-_cleanupTimer.C:
                // Reset the session count since the session is over
                _stats.SessionMaxViewerCount = 0
                resetDirectories()
                transitionToOfflineVideoStreamContent()
            }
        }
    }()
}

// StopCleanupTimer will stop the previous cleanup timer
func StopCleanupTimer() {
    if _cleanupTimer != nil {
        _cleanupTimer.Stop()
    }
}
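The offline handling above uses github.com/grafov/m3u8 to build and append to variant playlists. A condensed sketch of just the "playlist does not exist yet" branch, using only the m3u8 calls that already appear in the code; writeOfflinePlaylist is a hypothetical helper, and the 8.0-second duration mirrors the bundled offline clip per the comment above:

package core

import (
    "io/ioutil"

    "github.com/grafov/m3u8"
)

// writeOfflinePlaylist is a hypothetical condensed version of the branch above:
// build a one-segment playlist that points at the offline clip and write it out.
func writeOfflinePlaylist(playlistFilePath string, offlineFilename string) error {
    p, err := m3u8.NewMediaPlaylist(1, 1)
    if err != nil {
        return err
    }

    // 8.0 matches the duration of the bundled offline content; the comment in
    // the code above says to change it if that content changes.
    if err := p.Append(offlineFilename, 8.0, ""); err != nil {
        return err
    }
    p.Close() // finalizes the playlist so Encode() emits an end tag

    return ioutil.WriteFile(playlistFilePath, p.Encode().Bytes(), 0644)
}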
@@ -1,8 +1,11 @@
 package models
 
-//ChunkStorageProvider is how a chunk storage provider should be implemented
-type ChunkStorageProvider interface {
+//StorageProvider is how a chunk storage provider should be implemented
+type StorageProvider interface {
     Setup() error
     Save(filePath string, retryCount int) (string, error)
     GenerateRemotePlaylist(playlist string, variant Variant) string
+
+    SegmentWritten(localFilePath string)
+    VariantPlaylistWritten(localFilePath string)
+    MasterPlaylistWritten(localFilePath string)
 }
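The widened interface is what lets "each storage provider make decisions of how to store different types of files", per the commit list. A skeleton implementation, hedged: nullStorage is hypothetical and not part of the PR; it only shows the shape a real remote provider (such as the S3 provider, not shown in this excerpt) has to fill in:

package storageproviders

import "github.com/owncast/owncast/models"

// nullStorage is a hypothetical do-nothing provider that satisfies
// models.StorageProvider; useful as a template for a real remote provider.
type nullStorage struct{}

func (s *nullStorage) Setup() error { return nil }

// Save would normally upload or move the file and return its new path.
func (s *nullStorage) Save(filePath string, retryCount int) (string, error) {
    return filePath, nil
}

// GenerateRemotePlaylist would rewrite segment URIs to point at remote storage.
func (s *nullStorage) GenerateRemotePlaylist(playlist string, variant models.Variant) string {
    return playlist
}

// The per-file callbacks let the provider treat segments and playlists differently.
func (s *nullStorage) SegmentWritten(localFilePath string)         { s.Save(localFilePath, 0) }
func (s *nullStorage) VariantPlaylistWritten(localFilePath string) { s.Save(localFilePath, 0) }
func (s *nullStorage) MasterPlaylistWritten(localFilePath string)  { s.Save(localFilePath, 0) }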
Binary file not shown.
Binary file not shown.
@@ -1,19 +0,0 @@
package test

import (
    "time"

    log "github.com/sirupsen/logrus"
)

var timestamp time.Time

func Mark() {
    now := time.Now()
    if !timestamp.IsZero() {
        delta := now.Sub(timestamp)
        log.Println(delta.Milliseconds(), "ms")
    }

    timestamp = now
}
@@ -0,0 +1,50 @@
package utils

import (
    "sort"
    "time"
)

// The "start" timestamp of a timing event
var _pointsInTime = make(map[string]time.Time)

// A collection of timestamp durations for returning the average of
var _durationStorage = make(map[string][]float64)

// StartPerformanceMonitor will keep track of the start time of this event
func StartPerformanceMonitor(key string) {
    if len(_durationStorage[key]) > 30 {
        _durationStorage[key] = removeHighAndLow(_durationStorage[key])
    }
    _pointsInTime[key] = time.Now()
}

// GetAveragePerformance will return the average durations for the event
func GetAveragePerformance(key string) float64 {
    timestamp := _pointsInTime[key]
    if timestamp.IsZero() {
        return 0
    }

    delta := time.Since(timestamp).Seconds()
    _durationStorage[key] = append(_durationStorage[key], delta)
    if len(_durationStorage[key]) < 10 {
        return 0
    }
    _durationStorage[key] = removeHighAndLow(_durationStorage[key])
    return avg(_durationStorage[key])
}

func removeHighAndLow(values []float64) []float64 {
    sort.Float64s(values)
    return values[1 : len(values)-1]
}

func avg(values []float64) float64 {
    total := 0.0
    for _, number := range values {
        total = total + number
    }
    average := total / float64(len(values))
    return average
}
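The timer is keyed by an arbitrary string, so independent hot paths can be tracked separately; the receiver service above keys it per variant as "segmentWritten-<index>". A minimal usage sketch; doWork and the surrounding loop are hypothetical:

package main

import (
    "fmt"
    "time"

    "github.com/owncast/owncast/utils"
)

func doWork() { time.Sleep(50 * time.Millisecond) } // stand-in for real work

func main() {
    for i := 0; i < 15; i++ {
        utils.StartPerformanceMonitor("doWork")
        doWork()
        // Returns 0 until at least 10 samples are collected, then the average
        // duration in seconds with the single highest and lowest sample dropped.
        fmt.Println("avg seconds:", utils.GetAveragePerformance("doWork"))
    }
}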
File diff suppressed because one or more lines are too long
@@ -1,25 +0,0 @@
var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {};

function getDefaultExportFromCjs (x) {
    return x && x.__esModule && Object.prototype.hasOwnProperty.call(x, 'default') ? x['default'] : x;
}

function createCommonjsModule(fn, basedir, module) {
    return module = {
        path: basedir,
        exports: {},
        require: function (path, base) {
            return commonjsRequire(path, (base === undefined || base === null) ? module.path : base);
        }
    }, fn(module, module.exports), module.exports;
}

function getDefaultExportFromNamespaceIfNotNamed (n) {
    return n && Object.prototype.hasOwnProperty.call(n, 'default') && Object.keys(n).length === 1 ? n['default'] : n;
}

function commonjsRequire () {
    throw new Error('Dynamic requires are not currently supported by @rollup/plugin-commonjs');
}

export { commonjsGlobal as a, getDefaultExportFromNamespaceIfNotNamed as b, createCommonjsModule as c, getDefaultExportFromCjs as g };
File diff suppressed because it is too large
@@ -1,66 +0,0 @@
var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {};

function getDefaultExportFromCjs (x) {
    return x && x.__esModule && Object.prototype.hasOwnProperty.call(x, 'default') ? x['default'] : x;
}

function createCommonjsModule(fn, basedir, module) {
    return module = {
        path: basedir,
        exports: {},
        require: function (path, base) {
            return commonjsRequire(path, (base === undefined || base === null) ? module.path : base);
        }
    }, fn(module, module.exports), module.exports;
}

function getDefaultExportFromNamespaceIfNotNamed (n) {
    return n && Object.prototype.hasOwnProperty.call(n, 'default') && Object.keys(n).length === 1 ? n['default'] : n;
}

function commonjsRequire () {
    throw new Error('Dynamic requires are not currently supported by @rollup/plugin-commonjs');
}

var _nodeResolve_empty = {};

var _nodeResolve_empty$1 = /*#__PURE__*/Object.freeze({
    __proto__: null,
    'default': _nodeResolve_empty
});

var minDoc = /*@__PURE__*/getDefaultExportFromNamespaceIfNotNamed(_nodeResolve_empty$1);

var topLevel = typeof commonjsGlobal !== 'undefined' ? commonjsGlobal :
    typeof window !== 'undefined' ? window : {};


var doccy;

if (typeof document !== 'undefined') {
    doccy = document;
} else {
    doccy = topLevel['__GLOBAL_DOCUMENT_CACHE@4'];

    if (!doccy) {
        doccy = topLevel['__GLOBAL_DOCUMENT_CACHE@4'] = minDoc;
    }
}

var document_1 = doccy;

var win;

if (typeof window !== "undefined") {
    win = window;
} else if (typeof commonjsGlobal !== "undefined") {
    win = commonjsGlobal;
} else if (typeof self !== "undefined"){
    win = self;
} else {
    win = {};
}

var window_1 = win;

export { commonjsGlobal as a, createCommonjsModule as c, document_1 as d, getDefaultExportFromCjs as g, window_1 as w };
@@ -1,44 +0,0 @@
import { b as getDefaultExportFromNamespaceIfNotNamed, a as commonjsGlobal } from './_commonjsHelpers-37fa8da4.js';

var _nodeResolve_empty = {};

var _nodeResolve_empty$1 = /*#__PURE__*/Object.freeze({
    __proto__: null,
    'default': _nodeResolve_empty
});

var minDoc = /*@__PURE__*/getDefaultExportFromNamespaceIfNotNamed(_nodeResolve_empty$1);

var topLevel = typeof commonjsGlobal !== 'undefined' ? commonjsGlobal :
    typeof window !== 'undefined' ? window : {};


var doccy;

if (typeof document !== 'undefined') {
    doccy = document;
} else {
    doccy = topLevel['__GLOBAL_DOCUMENT_CACHE@4'];

    if (!doccy) {
        doccy = topLevel['__GLOBAL_DOCUMENT_CACHE@4'] = minDoc;
    }
}

var document_1 = doccy;

var win;

if (typeof window !== "undefined") {
    win = window;
} else if (typeof commonjsGlobal !== "undefined") {
    win = commonjsGlobal;
} else if (typeof self !== "undefined"){
    win = self;
} else {
    win = {};
}

var window_1 = win;

export { document_1 as d, window_1 as w };
@@ -0,0 +1 @@
export { a as default } from '../common/core-02e93804.js';
File diff suppressed because one or more lines are too long