2015-11-26 8 views
7

मैं ExtractDecodeEditEncodeMuxTest.java में मिले कोड को अनुकूलित करने की कोशिश कर रहा हूँ, ताकि Cordova के device.capture.captureVideo से रिकॉर्ड की गई mp4 फ़ाइल से ऑडियो और वीडियो निकाला जा सके, ऑडियो को डिकोड किया जा सके, डिकोड किए गए ऑडियो नमूनों को संपादित किया जा सके, ऑडियो को फिर से एन्कोड किया जा सके, और फिर ऑडियो को वीडियो के साथ वापस mux करके एक mp4 के रूप में सहेजा जा सके। (एंड्रॉयड: Extract → Decode → Encode → Mux ऑडियो)

मेरा पहला प्रयास किसी भी ऑडियो नमूने को संपादित करने के बिना निकालने, डीकोड, एन्कोड और मक्स ऑडियो निकालने के लिए है - अगर मैं ऐसा कर सकता हूं तो मैं काफी निश्चित हूं कि मैं वांछित नमूने को वांछित रूप से संपादित कर सकता हूं। मुझे वीडियो को संपादित करने की आवश्यकता नहीं है, इसलिए मुझे लगता है कि मैं वीडियो ट्रैक निकालने और mux करने के लिए बस MediaExtractor का उपयोग कर सकता हूं।

हालांकि, मेरी समस्या यह है कि मुझे ऑडियो डिकोडिंग/एन्कोडिंग प्रक्रिया सही नहीं लग रही है। क्या हो रहा है यह है कि मक्सर निकाले गए वीडियो ट्रैक से एमपी 4 बनाता है और निकाला गया -> डीकोडेड -> एन्कोडेड ऑडियो ट्रैक, लेकिन जब वीडियो ठीक खेलता है, तो ऑडियो शोर के एक छोटे से फटने से शुरू होता है, फिर आखिरी की तरह क्या लगता है ऑडियो डेटा के दो सेकंड सामान्य रूप से खेल रहे हैं (लेकिन वीडियो की शुरुआत में), फिर बाकी वीडियो के लिए चुप्पी।

प्रासंगिक क्षेत्रों में से कुछ:

// Track formats as read from the source via MediaExtractor.
private MediaFormat audioFormat; 
private MediaFormat videoFormat; 
// Muxer track indices; -1 until muxer.addTrack() assigns them.
private int videoTrackIndex = -1; 
private int audioTrackIndex = -1; 
// Upper bound on a single extracted sample's size (bytes) for the video pass-through buffer.
private static final int MAX_BUFFER_SIZE = 256 * 1024; 

// parameters for the audio encoder 
private static final String OUTPUT_AUDIO_MIME_TYPE = "audio/mp4a-latm"; // Advanced Audio Coding (AAC)
private static final int OUTPUT_AUDIO_CHANNEL_COUNT = 2; // Must match the input stream. Not used; channel count is taken from the input format.
private static final int OUTPUT_AUDIO_BIT_RATE = 128 * 1024; 
private static final int OUTPUT_AUDIO_AAC_PROFILE = MediaCodecInfo.CodecProfileLevel.AACObjectHE; // Not used; profile is taken from the input format.
private static final int OUTPUT_AUDIO_SAMPLE_RATE_HZ = 44100; // Must match the input stream 
private static final String TAG = "vvsLog"; 
// Primitive boolean instead of boxed Boolean for constant flags: no boxing, can never be null.
private static final boolean DEBUG = false; 
private static final boolean INFO = true; 
/** How long to wait (microseconds) for the next codec buffer to become available. */ 
private static final int TIMEOUT_USEC = 10000; 
private String videoPath; 

कोड विकोडक, एनकोडर और म्युक्सर को विन्यस्त:

// Pick an AAC encoder up front; bail out early if the device has none.
MediaCodecInfo audioCodecInfo = selectCodec(OUTPUT_AUDIO_MIME_TYPE); 
    if (audioCodecInfo == null) { 
     // Don't fail CTS if they don't have an AAC codec (not here, anyway). 
     Log.e(TAG, "Unable to find an appropriate codec for " + OUTPUT_AUDIO_MIME_TYPE); 
     return; 
    } 

    MediaExtractor videoExtractor = null; 
    MediaExtractor audioExtractor = null; 
    MediaCodec audioDecoder = null; 
    MediaCodec audioEncoder = null; 
    MediaMuxer muxer = null; 

    try { 

     /** 
     * Video 
     * just need to configure the extractor, no codec processing required 
     * (the compressed video samples are copied straight into the muxer) 
     */ 
     videoExtractor = createExtractor(originalAssetPath); 
     String vidMimeStartsWith = "video/"; 
     int videoInputTrack = getAndSelectTrackIndex(videoExtractor, vidMimeStartsWith); 
     videoFormat = videoExtractor.getTrackFormat(videoInputTrack); 

     /** 
     * Audio 
     * needs an extractor plus an audio decoder and encoder 
     */ 
     audioExtractor = createExtractor(originalAssetPath); 
     String audMimeStartsWith = "audio/"; 
     int audioInputTrack = getAndSelectTrackIndex(audioExtractor, audMimeStartsWith); 
     audioFormat = audioExtractor.getTrackFormat(audioInputTrack); 
     // NOTE(review): this overwrites the sample rate reported by the extractor with a fixed
     // 44100 Hz. If the recorded track's real rate differs, the decoder is configured wrong
     // and playback speed/pitch will be off — TODO confirm the capture rate really is 44100.
     audioFormat.setInteger(MediaFormat.KEY_SAMPLE_RATE,OUTPUT_AUDIO_SAMPLE_RATE_HZ); 

     // Output AAC format mirrors the input's sample rate and channel count.
     MediaFormat outputAudioFormat = MediaFormat.createAudioFormat(OUTPUT_AUDIO_MIME_TYPE, 
       audioFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE), 
       audioFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT)); 
     // NOTE(review): getInteger(KEY_AAC_PROFILE) throws if the input format lacks that key;
     // some extractor formats do not carry it — consider containsKey() guard.
     outputAudioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, audioFormat.getInteger(MediaFormat.KEY_AAC_PROFILE)); 
     outputAudioFormat.setInteger(MediaFormat.KEY_BIT_RATE, OUTPUT_AUDIO_BIT_RATE); 

     // Create a MediaCodec for the decoder, based on the extractor's format, configure and start it. 
     audioDecoder = createAudioDecoder(audioFormat); 
     // Create a MediaCodec for the desired codec, then configure it as an encoder and start it. 
     audioEncoder = createAudioEncoder(audioCodecInfo, outputAudioFormat); 

     //create muxer to overwrite original asset path 
     // NOTE(review): this is the root cause of the corrupted audio described below — the
     // muxer writes to the SAME file both extractors are still reading (see the accepted
     // answer). The muxer must target a different output path.
     muxer = createMuxer(originalAssetPath); 

     //add the video and audio tracks 
     /** 
     * need to wait to add the audio track until after the first encoder output buffer is created 
     * since the encoder changes the MediaFormat at that time 
     * and the muxer needs the correct format, including the correct Coded Specific Data (CSD) ByteBuffer 
     */ 

     doExtractDecodeEditEncodeMux(
       videoExtractor, 
       audioExtractor, 
       audioDecoder, 
       audioEncoder, 
       muxer); 

    } 

राक्षस doExtractDecodeEditEncodeMux विधि:

/**
 * Copies the compressed video track straight from its extractor into the muxer while pumping
 * the audio track through extract -> decode -> (edit) -> encode, then muxes the re-encoded
 * audio alongside the video.
 *
 * Single-threaded, synchronous MediaCodec state machine: each pass of the outer loop advances
 * every stage by at most one buffer (each inner while ends with an unconditional break).
 * The muxer is not started until the audio encoder reports its real output format via
 * INFO_OUTPUT_FORMAT_CHANGED, because that format carries the Codec Specific Data (CSD)
 * the muxer needs; until then no samples are written (gated by the {@code muxing} flag).
 *
 * @param videoExtractor extractor positioned on the source's video track (muxed as-is)
 * @param audioExtractor extractor positioned on the source's audio track
 * @param audioDecoder   started decoder configured from the source audio format
 * @param audioEncoder   started AAC encoder configured with the output audio format
 * @param muxer          muxer for the output MP4; tracks added and started inside this method
 */
private void doExtractDecodeEditEncodeMux(
     MediaExtractor videoExtractor, 
     MediaExtractor audioExtractor, 
     MediaCodec audioDecoder, 
     MediaCodec audioEncoder, 
     MediaMuxer muxer) { 

    // Scratch buffer for raw video samples copied extractor -> muxer without transcoding.
    ByteBuffer videoInputBuffer = ByteBuffer.allocate(MAX_BUFFER_SIZE); 
    MediaCodec.BufferInfo videoBufferInfo = new MediaCodec.BufferInfo(); 

    ByteBuffer[] audioDecoderInputBuffers = null; 
    ByteBuffer[] audioDecoderOutputBuffers = null; 
    ByteBuffer[] audioEncoderInputBuffers = null; 
    ByteBuffer[] audioEncoderOutputBuffers = null; 
    MediaCodec.BufferInfo audioDecoderOutputBufferInfo = null; 
    MediaCodec.BufferInfo audioEncoderOutputBufferInfo = null; 

    // Legacy (pre-API-21) buffer-array access; the decoder/encoder arrays are refreshed
    // below whenever a codec reports INFO_OUTPUT_BUFFERS_CHANGED.
    audioDecoderInputBuffers = audioDecoder.getInputBuffers(); 
    audioDecoderOutputBuffers = audioDecoder.getOutputBuffers(); 
    audioEncoderInputBuffers = audioEncoder.getInputBuffers(); 
    audioEncoderOutputBuffers = audioEncoder.getOutputBuffers(); 
    audioDecoderOutputBufferInfo = new MediaCodec.BufferInfo(); 
    audioEncoderOutputBufferInfo = new MediaCodec.BufferInfo(); 

    /** 
    * sanity checks 
    */ 
    //frames 
    int videoExtractedFrameCount = 0; 
    int audioExtractedFrameCount = 0; 
    int audioDecodedFrameCount = 0; 
    int audioEncodedFrameCount = 0; 
    //times 
    long lastPresentationTimeVideoExtractor = 0; 
    long lastPresentationTimeAudioExtractor = 0; 
    long lastPresentationTimeAudioDecoder = 0; 
    long lastPresentationTimeAudioEncoder = 0; 

    // We will get these from the decoders when notified of a format change. 
    MediaFormat decoderOutputAudioFormat = null; 
    // We will get these from the encoders when notified of a format change. 
    MediaFormat encoderOutputAudioFormat = null; 
    // We will determine these once we have the output format. 
    int outputAudioTrack = -1; 
    // Whether things are done on the video side. 
    boolean videoExtractorDone = false; 
    // Whether things are done on the audio side. 
    boolean audioExtractorDone = false; 
    boolean audioDecoderDone = false; 
    boolean audioEncoderDone = false; 
    // The audio decoder output buffer to process, -1 if none. 
    int pendingAudioDecoderOutputBufferIndex = -1; 

    // True only after both tracks are added and muxer.start() has been called.
    boolean muxing = false; 

    /** 
    * need to wait to add the audio track until after the first encoder output buffer is created 
    * since the encoder changes the MediaFormat at that time 
    * and the muxer needs the correct format, including the correct Coded Specific Data (CSD) ByteBuffer 
    * muxer.start(); 
    * muxing = true; 
    */ 

    // Propagate the source's rotation metadata so the muxed video plays upright.
    // NOTE(review): retrieverTest is never release()d — this leaks a MediaMetadataRetriever.
    MediaMetadataRetriever retrieverTest = new MediaMetadataRetriever(); 
    retrieverTest.setDataSource(videoPath); 
    String degreesStr = retrieverTest.extractMetadata(MediaMetadataRetriever.METADATA_KEY_VIDEO_ROTATION); 
    if (degreesStr != null) { 
     Integer degrees = Integer.parseInt(degreesStr); 
     if (degrees >= 0) { 
      muxer.setOrientationHint(degrees); 
     } 
    } 

    // Loop until the video track is fully copied AND the audio encoder has emitted EOS.
    while (!videoExtractorDone || !audioEncoderDone) { 
     if (INFO) { 
      Log.d(TAG, String.format("ex:%d at %d | de:%d at %d | en:%d at %d ", 
        audioExtractedFrameCount, lastPresentationTimeAudioExtractor, 
        audioDecodedFrameCount, lastPresentationTimeAudioDecoder, 
        audioEncodedFrameCount, lastPresentationTimeAudioEncoder 
        )); 
     } 
     /** 
     * Extract and mux video 
     * (runs only once muxing == true; unlike the audio stages this loop does NOT break 
     * after one sample, so the whole video track is copied as fast as possible) 
     */ 
     while (!videoExtractorDone && muxing) { 

      try { 
       videoBufferInfo.size = videoExtractor.readSampleData(videoInputBuffer, 0); 
      } catch (Exception e) { 
       e.printStackTrace(); 
      } 

      if (videoBufferInfo.size < 0) { 
       // Negative size means no more samples: mark the video side finished.
       videoBufferInfo.size = 0; 
       videoExtractorDone = true; 
      } else { 
       videoBufferInfo.presentationTimeUs = videoExtractor.getSampleTime(); 
       lastPresentationTimeVideoExtractor = videoBufferInfo.presentationTimeUs; 
         videoBufferInfo.flags = videoExtractor.getSampleFlags(); 
       muxer.writeSampleData(videoTrackIndex, videoInputBuffer, videoBufferInfo); 
       videoExtractor.advance(); 
       videoExtractedFrameCount++; 
      } 
     } 

     /** 
     * Extract, decode, watermark, encode and mux audio 
     */ 

     /** Extract audio from file and feed to decoder. **/ 
     while (!audioExtractorDone && (encoderOutputAudioFormat == null || muxing)) { 
      int decoderInputBufferIndex = audioDecoder.dequeueInputBuffer(TIMEOUT_USEC); 
      if (decoderInputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) { 
       break; 
      } 
      if (DEBUG) { 
       Log.d(TAG, "audio decoder: returned input buffer: " + decoderInputBufferIndex); 
      } 
      ByteBuffer decoderInputBuffer = audioDecoderInputBuffers[decoderInputBufferIndex]; 
      int size = audioExtractor.readSampleData(decoderInputBuffer, 0); 
      long presentationTime = audioExtractor.getSampleTime(); 
      lastPresentationTimeAudioExtractor = presentationTime; 
      if (DEBUG) { 
       Log.d(TAG, "audio extractor: returned buffer of size " + size); 
       Log.d(TAG, "audio extractor: returned buffer for time " + presentationTime); 
      } 
      if (size >= 0) { 
       audioDecoder.queueInputBuffer(
         decoderInputBufferIndex, 
         0, 
         size, 
         presentationTime, 
         audioExtractor.getSampleFlags()); 
      } 
      audioExtractorDone = !audioExtractor.advance(); 
      if (audioExtractorDone) { 
       if (DEBUG) Log.d(TAG, "audio extractor: EOS"); 
       // NOTE(review): when the final sample was just queued (size >= 0) and advance()
       // then returns false, this queues the SAME input buffer index a second time as
       // EOS — queueing an already-owned index is an error. TODO confirm against the
       // CTS original / restructure so EOS uses a freshly dequeued buffer.
       audioDecoder.queueInputBuffer(
         decoderInputBufferIndex, 
         0, 
         0, 
         0, 
         MediaCodec.BUFFER_FLAG_END_OF_STREAM); 
      } 
      audioExtractedFrameCount++; 
      // We extracted a frame, let's try something else next. 
      break; 
     } 

     /** 
     * Poll output frames from the audio decoder. 
     * Do not poll if we already have a pending buffer to feed to the encoder. 
     */ 
     while (!audioDecoderDone && pendingAudioDecoderOutputBufferIndex == -1 && (encoderOutputAudioFormat == null || muxing)) { 
      int decoderOutputBufferIndex = 
        audioDecoder.dequeueOutputBuffer(
          audioDecoderOutputBufferInfo, TIMEOUT_USEC); 
      if (decoderOutputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) { 
       if (DEBUG) Log.d(TAG, "no audio decoder output buffer"); 
       break; 
      } 
      if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) { 
       // Output buffer array was reallocated by the codec; re-fetch it.
       if (DEBUG) Log.d(TAG, "audio decoder: output buffers changed"); 
       audioDecoderOutputBuffers = audioDecoder.getOutputBuffers(); 
       break; 
      } 
      if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) { 
       decoderOutputAudioFormat = audioDecoder.getOutputFormat(); 
       if (DEBUG) { 
        Log.d(TAG, "audio decoder: output format changed: " 
          + decoderOutputAudioFormat); 
       } 
       break; 
      } 
      if (DEBUG) { 
       Log.d(TAG, "audio decoder: returned output buffer: " 
         + decoderOutputBufferIndex); 
      } 
      if (DEBUG) { 
       Log.d(TAG, "audio decoder: returned buffer of size " 
         + audioDecoderOutputBufferInfo.size); 
      } 
      ByteBuffer decoderOutputBuffer = 
        audioDecoderOutputBuffers[decoderOutputBufferIndex]; 
      if ((audioDecoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) 
        != 0) { 
       // Codec-config buffers carry no PCM; release and skip them.
       if (DEBUG) Log.d(TAG, "audio decoder: codec config buffer"); 
       audioDecoder.releaseOutputBuffer(decoderOutputBufferIndex, false); 
       break; 
      } 
      if (DEBUG) { 
       Log.d(TAG, "audio decoder: returned buffer for time " 
         + audioDecoderOutputBufferInfo.presentationTimeUs); 
      } 
      if (DEBUG) { 
       Log.d(TAG, "audio decoder: output buffer is now pending: " 
         + pendingAudioDecoderOutputBufferIndex); 
      } 
      // Hold this decoded buffer until the encoder has a free input buffer for it.
      pendingAudioDecoderOutputBufferIndex = decoderOutputBufferIndex; 
      audioDecodedFrameCount++; 
      // We extracted a pending frame, let's try something else next. 
      break; 
     } 

     // Feed the pending decoded audio buffer to the audio encoder. 
     // (This is where per-sample PCM editing would happen before the put() below.)
     while (pendingAudioDecoderOutputBufferIndex != -1) { 
      if (DEBUG) { 
       Log.d(TAG, "audio decoder: attempting to process pending buffer: " 
         + pendingAudioDecoderOutputBufferIndex); 
      } 
      int encoderInputBufferIndex = audioEncoder.dequeueInputBuffer(TIMEOUT_USEC); 
      if (encoderInputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) { 
       if (DEBUG) Log.d(TAG, "no audio encoder input buffer"); 
       break; 
      } 
      if (DEBUG) { 
       Log.d(TAG, "audio encoder: returned input buffer: " + encoderInputBufferIndex); 
      } 
      ByteBuffer encoderInputBuffer = audioEncoderInputBuffers[encoderInputBufferIndex]; 
      int size = audioDecoderOutputBufferInfo.size; 
      long presentationTime = audioDecoderOutputBufferInfo.presentationTimeUs; 
      lastPresentationTimeAudioDecoder = presentationTime; 
      if (DEBUG) { 
       Log.d(TAG, "audio decoder: processing pending buffer: " 
         + pendingAudioDecoderOutputBufferIndex); 
      } 
      if (DEBUG) { 
       Log.d(TAG, "audio decoder: pending buffer of size " + size); 
       Log.d(TAG, "audio decoder: pending buffer for time " + presentationTime); 
      } 
      if (size >= 0) { 
       // duplicate() so position/limit changes don't disturb the codec-owned buffer.
       ByteBuffer decoderOutputBuffer = 
         audioDecoderOutputBuffers[pendingAudioDecoderOutputBufferIndex] 
           .duplicate(); 
       decoderOutputBuffer.position(audioDecoderOutputBufferInfo.offset); 
       decoderOutputBuffer.limit(audioDecoderOutputBufferInfo.offset + size); 
       encoderInputBuffer.position(0); 
       encoderInputBuffer.put(decoderOutputBuffer); 
       // Timestamp and flags (including EOS) are carried through from the decoder.
       audioEncoder.queueInputBuffer(
         encoderInputBufferIndex, 
         0, 
         size, 
         presentationTime, 
         audioDecoderOutputBufferInfo.flags); 
      } 
      audioDecoder.releaseOutputBuffer(pendingAudioDecoderOutputBufferIndex, false); 
      pendingAudioDecoderOutputBufferIndex = -1; 
      if ((audioDecoderOutputBufferInfo.flags 
        & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) { 
       if (DEBUG) Log.d(TAG, "audio decoder: EOS"); 
       audioDecoderDone = true; 
      } 
      // We enqueued a pending frame, let's try something else next. 
      break; 
     } 

     // Poll frames from the audio encoder and send them to the muxer. 
     while (!audioEncoderDone && (encoderOutputAudioFormat == null || muxing)) { 
      int encoderOutputBufferIndex = audioEncoder.dequeueOutputBuffer(
        audioEncoderOutputBufferInfo, TIMEOUT_USEC); 
      if (encoderOutputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) { 
       if (DEBUG) Log.d(TAG, "no audio encoder output buffer"); 
       break; 
      } 
      if (encoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) { 
       if (DEBUG) Log.d(TAG, "audio encoder: output buffers changed"); 
       audioEncoderOutputBuffers = audioEncoder.getOutputBuffers(); 
       break; 
      } 
      if (encoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) { 
       // First real output format — this is what unblocks track-adding / muxer start below.
       encoderOutputAudioFormat = audioEncoder.getOutputFormat(); 
       if (DEBUG) { 
        Log.d(TAG, "audio encoder: output format changed"); 
       } 
       if (outputAudioTrack >= 0) { 
        Log.e(TAG,"audio encoder changed its output format again?"); 
       } 
       break; 
      } 
      if (DEBUG) { 
       Log.d(TAG, "audio encoder: returned output buffer: " 
         + encoderOutputBufferIndex); 
       Log.d(TAG, "audio encoder: returned buffer of size " 
         + audioEncoderOutputBufferInfo.size); 
      } 
      ByteBuffer encoderOutputBuffer = 
        audioEncoderOutputBuffers[encoderOutputBufferIndex]; 
      if ((audioEncoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) 
        != 0) { 
       if (DEBUG) Log.d(TAG, "audio encoder: codec config buffer"); 
       // Simply ignore codec config buffers: the CSD already travels inside
       // encoderOutputAudioFormat, which the muxer gets via addTrack(). 
       audioEncoder.releaseOutputBuffer(encoderOutputBufferIndex, false); 
       break; 
      } 
      if (DEBUG) { 
       Log.d(TAG, "audio encoder: returned buffer for time " 
         + audioEncoderOutputBufferInfo.presentationTimeUs); 
      } 
      if (audioEncoderOutputBufferInfo.size != 0) { 
       lastPresentationTimeAudioEncoder = audioEncoderOutputBufferInfo.presentationTimeUs; 
       muxer.writeSampleData(
         audioTrackIndex, encoderOutputBuffer, audioEncoderOutputBufferInfo); 
      } 
      if ((audioEncoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) 
        != 0) { 
       if (DEBUG) Log.d(TAG, "audio encoder: EOS"); 
       audioEncoderDone = true; 
      } 
      audioEncoder.releaseOutputBuffer(encoderOutputBufferIndex, false); 
      audioEncodedFrameCount++; 
      // We enqueued an encoded frame, let's try something else next. 
      break; 
     } 

     // Once the encoder's real output format is known, add both tracks and start muxing.
     if (!muxing && (encoderOutputAudioFormat != null)) { 

      Log.d(TAG, "muxer: adding video track."); 
      videoTrackIndex = muxer.addTrack(videoFormat); 

      Log.d(TAG, "muxer: adding audio track."); 
      audioTrackIndex = muxer.addTrack(encoderOutputAudioFormat); 

      Log.d(TAG, "muxer: starting"); 
      muxer.start(); 
      muxing = true; 
     } 
    } 
    /** 
    * Done processing audio and video 
    */ 
    Log.d(TAG,"encoded and decoded audio frame counts should match. decoded:"+audioDecodedFrameCount+" encoded:"+audioEncodedFrameCount); 

    Log.d(TAG,"decoded frame count should be less than extracted frame coun. decoded:"+audioDecodedFrameCount+" extracted:"+audioExtractedFrameCount); 
    Log.d(TAG,"no audio frame should be pending "+pendingAudioDecoderOutputBufferIndex); 

    // Report the result back to the Cordova JS layer.
    PluginResult result = new PluginResult(PluginResult.Status.OK, videoPath); 
    result.setKeepCallback(false); 
    callbackContext.sendPluginResult(result); 

} 

निकाले गए पहले कई सौ ऑडियो फ्रेमों के लिए मुझे यह ACodec त्रुटि दिख रही है:

11-25 20:49:58.497 9807-13101/com.vvs.VVS430011 E/ACodec﹕ OMXCodec::onEvent, OMX_ErrorStreamCorrupt 
11-25 20:49:58.497 9807-13101/com.vvs.VVS430011 W/AHierarchicalStateMachine﹕ Warning message AMessage(what = 'omx ', target = 8) = { 
    int32_t type = 0 
    int32_t node = 7115 
    int32_t event = 1 
    int32_t data1 = -2147479541 
    int32_t data2 = 0 
    } unhandled in root state. 

यहाँ पूरे logcat का pastebin है, जिसमें कुछ सैनिटी-चेक (sanity check) लॉग इस प्रारूप में हैं:

D/vvsLog﹕ ex:{extracted frame #} at {presentationTime} | de:{decoded frame #} at {presentationTime} | en:{encoded frame #} at {presentationTime} 

जब तक वे OMX_ErrorStreamCorrupt संदेश दिखाई देते रहते हैं, डिकोड और एन्कोड किए गए फ्रेमों का presentationTime बहुत तेज़ी से बढ़ता रहता है। जब वे संदेश रुकते हैं, तो डिकोड और एन्कोड किए गए फ्रेमों का presentationTime "सामान्य" पर लौटता प्रतीत होता है, और वीडियो की शुरुआत में सुना गया "वास्तविक" ऑडियो — यानी "अच्छा" ऑडियो — मूल ऑडियो ट्रैक के अंतिम हिस्से से मेल खाता लगता है।

मैं उम्मीद कर रहा हूं कि इन निम्न-स्तरीय एंड्रॉइड मल्टीमीडिया एपीआई के साथ बहुत अधिक अनुभव वाला कोई व्यक्ति मुझे यह समझने में मदद कर सकता है कि यह क्यों हो रहा है। ध्यान रखें कि मुझे अच्छी तरह से पता है कि यह कोड अनुकूलित नहीं है, अलग थ्रेड में चल रहा है, आदि .. - मूलभूत निकालने का एक कार्य उदाहरण होने के बाद मैं चीजों को साफ करने के लिए दोबारा प्रतिक्रिया दूंगा-> डीकोड-> संपादित करें-> एन्कोड- > मक्स प्रक्रिया।

धन्यवाद!

उत्तर

2

उपर्युक्त कोड ठीक काम करता है - जब तक आप उसी फ़ाइल को mux करने की कोशिश नहीं कर रहे हैं, जिसे आप एक ही समय में निकाल रहे हैं।

:-)

मेरे पास इस कोड का एक पिछला संस्करण था जो निकाले गए ट्रैकों को एक अलग फ़ाइल में mux करता था, और इस संस्करण में मैं उस आउटपुट पथ को बदलना भूल गया था।

इस छोटी विधि ने दिन को बचाया।

/**
 * Builds the output path for the muxed MP4 (distinct from the source file being extracted,
 * which is what made the pipeline above work) and makes sure the file can be created there.
 *
 * @return absolute path to {DCIM}/{CAMERA_DIRECTORY}/muxedAudioVideo.mp4, or {@code null}
 *         if the file (or its parent directory) could not be created
 */
private String getMuxedAssetPath() { 
    String muxedAssetPath = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DCIM) + "/" + CAMERA_DIRECTORY + "/muxedAudioVideo.mp4"; 

    File file = new File(muxedAssetPath); 
    if (!file.exists()) { 
     // createNewFile() throws IOException when the parent directory is missing,
     // so create the camera directory first if needed.
     File parentDir = file.getParentFile(); 
     if (parentDir != null && !parentDir.exists() && !parentDir.mkdirs()) { 
      Log.e(TAG, "Unable to create directory " + parentDir); 
      return null; 
     } 
     try { 
      file.createNewFile(); 
     } catch (IOException e) { 
      e.printStackTrace(); 
      muxedAssetPath = null; 
     } 
    } 

    return muxedAssetPath; 
} 
संबंधित मुद्दे