-
Notifications
You must be signed in to change notification settings - Fork 1
Reduce the diff with the pytorch:main branch #3
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -1030,15 +1030,15 @@ AudioFramesOutput SingleStreamDecoder::getFramesPlayedInRangeAudio( | |
| frames.push_back(*lastSamples); | ||
| } | ||
|
|
||
| // TORCH_CHECK( | ||
| // frames.size() > 0 && firstFramePtsSeconds.has_value(), | ||
| // "No audio frames were decoded. ", | ||
| // "This is probably because start_seconds is too high(", | ||
| // startSeconds, | ||
| // "),", | ||
| // "or because stop_seconds(", | ||
| // stopSecondsOptional, | ||
| // ") is too low."); | ||
| TORCH_CHECK( | ||
| frames.size() > 0 && firstFramePtsSeconds.has_value(), | ||
| "No audio frames were decoded. ", | ||
| "This is probably because start_seconds is too high(", | ||
| startSeconds, | ||
| "),", | ||
| "or because stop_seconds(", | ||
| stopSecondsOptional, | ||
| ") is too low."); | ||
|
|
||
| return AudioFramesOutput{torch::cat(frames, 1), *firstFramePtsSeconds}; | ||
| } | ||
|
|
@@ -1419,11 +1419,8 @@ std::optional<torch::Tensor> SingleStreamDecoder::maybeFlushSwrBuffers() { | |
| auto actualNumRemainingSamples = swr_convert( | ||
| swrContext_.get(), outputBuffers.data(), numRemainingSamples, nullptr, 0); | ||
|
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 先别管这些了,你看看 diff 减少的同时, |
||
|
|
||
| throw std::runtime_error( | ||
| "SingleStreamDecoder::maybeFlushSwrBuffers is not implemented yet."); | ||
|
|
||
| // return lastSamples.narrow( | ||
| // /*dim=*/1, /*start=*/0, /*length=*/actualNumRemainingSamples); | ||
| return lastSamples.narrow( | ||
| /*dim=*/1, /*start=*/0, /*length=*/actualNumRemainingSamples); | ||
| } | ||
|
|
||
| // -------------------------------------------------------------------------- | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -10,12 +10,12 @@ | |
| #include <string> | ||
| #include "c10/core/SymIntArrayRef.h" | ||
| #include "c10/util/Exception.h" | ||
| #include "torch/library.h" | ||
| #include "src/torchcodec/_core/AVIOFileLikeContext.h" | ||
| #include "src/torchcodec/_core/AVIOTensorContext.h" | ||
| #include "src/torchcodec/_core/Encoder.h" | ||
| #include "src/torchcodec/_core/SingleStreamDecoder.h" | ||
| #include "src/torchcodec/_core/ValidationUtils.h" | ||
| #include "torch/library.h" | ||
|
|
||
| namespace facebook::torchcodec { | ||
|
|
||
|
|
@@ -118,7 +118,7 @@ OpsFrameOutput makeOpsFrameOutput(FrameOutput& frame) { | |
| // frame.data, | ||
| // torch::tensor(frame.ptsSeconds, torch::dtype(torch::kFloat64)), | ||
| // torch::tensor(frame.durationSeconds, torch::dtype(torch::kFloat64))); | ||
| return std::make_tuple( | ||
| return std::make_tuple( | ||
| frame.data, | ||
| torch::full({}, frame.ptsSeconds, torch::kFloat64), | ||
| torch::full({}, frame.durationSeconds, torch::kFloat64)); | ||
|
|
@@ -920,15 +920,15 @@ void scan_all_streams_to_update_metadata(at::Tensor& decoder) { | |
| videoDecoder->scanFileAndUpdateMetadataAndIndex(); | ||
| } | ||
|
|
||
| TORCH_LIBRARY_IMPL(torchcodec_ns, CPU, m) { | ||
| TORCH_LIBRARY_IMPL(torchcodec_ns, BackendSelect, m) { | ||
| m.impl("create_from_file", &create_from_file); | ||
| m.impl("create_from_tensor", &create_from_tensor); | ||
| m.impl("_create_from_file_like", &_create_from_file_like); | ||
|
Comment on lines
+923
to
926
|
||
| m.impl( | ||
| "_get_json_ffmpeg_library_versions", &_get_json_ffmpeg_library_versions); | ||
| // } | ||
| } | ||
|
|
||
| // TORCH_LIBRARY_IMPL(torchcodec_ns, CPU, m) { | ||
| TORCH_LIBRARY_IMPL(torchcodec_ns, CPU, m) { | ||
| m.impl("encode_audio_to_file", &encode_audio_to_file); | ||
| m.impl("encode_audio_to_tensor", &encode_audio_to_tensor); | ||
| m.impl("_encode_audio_to_file_like", &_encode_audio_to_file_like); | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -146,6 +146,7 @@ def __init__( | |
| # if isinstance(device, torch_device): | ||
| # device = str(device) | ||
| import paddle | ||
|
|
||
| if isinstance(device, paddle.base.core.Place): | ||
| if device.is_cpu_place(): | ||
| return "cpu" | ||
|
|
@@ -158,12 +159,11 @@ def __init__( | |
|
|
||
| core.add_video_stream( | ||
| self._decoder, | ||
| num_threads=num_ffmpeg_threads, | ||
| dimension_order=dimension_order, | ||
| stream_index=stream_index, | ||
| dimension_order=dimension_order, | ||
| num_threads=num_ffmpeg_threads, | ||
| device=device, | ||
| device_variant=device_variant, | ||
| transform_specs="", | ||
| custom_frame_mappings=custom_frame_mappings_data, | ||
| ) | ||
|
|
||
|
|
@@ -265,9 +265,6 @@ def get_frames_at(self, indices: Union[torch.Tensor, list[int]]) -> FrameBatch: | |
| FrameBatch: The frames at the given indices. | ||
| """ | ||
|
|
||
| if isinstance(indices, list): | ||
| indices = torch.tensor(indices, dtype=torch.int64).cpu() | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 这能删吗?你在 CPU 机器上可能是对的,在 GPU 机器上可能就跑到 GPU 上了,这得确认下,CI 测不出来 There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 奇怪,我在 3.3.1 都复现不了了,难道是哪个 PR 修复了?当初也没定位到是哪一层的转换与 PyTorch 不一致导致 |
||
|
|
||
| data, pts_seconds, duration_seconds = core.get_frames_at_indices( | ||
| self._decoder, frame_indices=indices | ||
| ) | ||
|
|
@@ -347,9 +344,6 @@ def get_frames_played_at( | |
| FrameBatch: The frames that are played at ``seconds``. | ||
| """ | ||
|
|
||
| if isinstance(seconds, list): | ||
| seconds = torch.tensor(seconds, dtype=torch.float32).cpu() | ||
|
|
||
| data, pts_seconds, duration_seconds = core.get_frames_by_pts( | ||
| self._decoder, timestamps=seconds | ||
| ) | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
`swr_convert()` can return a negative value on error. Passing that directly as the `length` to `lastSamples.narrow(...)` will throw an unrelated/cryptic error (or potentially create an invalid slice). Add an explicit check for `actualNumRemainingSamples < 0` and surface a clear failure (e.g., `TORCH_CHECK(actualNumRemainingSamples >= 0, ...)`) before narrowing; optionally also handle the `== 0` case explicitly.