Quick Start

Sambert Speech Synthesis

Note

Supported domain / task: audio / tts (speech synthesis).

The Sambert speech synthesis API is built on an autoregressive prosody model refined by DAMO Academy and supports real-time streaming synthesis from text to speech. It can be used for:

  • Voice announcements on smart devices and robots, such as intelligent customer-service bots, smart speakers, and digital humans.

  • Audio and video production that turns text into spoken narration, such as novel reading, news broadcasting, film and TV commentary, and dubbing.

Prerequisites

  • You have activated the DashScope service and obtained an api-key.

  • You have installed the latest version of the DashScope SDK (for Python: pip install dashscope).
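The examples below hardcode the api-key for brevity. As a minimal sketch, you can instead read it from the environment; the variable name DASHSCOPE_API_KEY used here is an assumption, so use whatever name your deployment provides:

# coding=utf-8
# A minimal sketch: read the api-key from an environment variable instead
# of hardcoding it. The variable name DASHSCOPE_API_KEY is an assumption;
# any name works as long as your code reads the same one.
import os

import dashscope

dashscope.api_key = os.environ.get('DASHSCOPE_API_KEY')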

Sample Code

Saving the synthesized audio to a file

The following code shows how to save the binary audio returned by the streaming service as a local file.

Note

Replace your-dashscope-api-key in the example with your own api-key for the code to run.

Python

# coding=utf-8

import dashscope
from dashscope.audio.tts import SpeechSynthesizer

dashscope.api_key = 'your-dashscope-api-key'

result = SpeechSynthesizer.call(model='sambert-zhichu-v1',
                                text='今天天气怎么样',
                                sample_rate=48000)
if result.get_audio_data() is not None:
    with open('output.wav', 'wb') as f:
        f.write(result.get_audio_data())
    print('SUCCESS: wrote %d bytes of audio data to output.wav' %
          len(result.get_audio_data()))
else:
    print('ERROR: response is %s' % (result.get_response()))
Java

package com.alibaba.dashscope.sample;

import com.alibaba.dashscope.audio.tts.SpeechSynthesizer;
import com.alibaba.dashscope.audio.tts.SpeechSynthesisParam;

import java.io.*;
import java.nio.ByteBuffer;

public class Main {

    public static void SyncAudioDataToFile() {
        SpeechSynthesizer synthesizer = new SpeechSynthesizer();
        SpeechSynthesisParam param =
                SpeechSynthesisParam.builder()
                        .apiKey("your-dashscope-api-key")
                        .model("sambert-zhichu-v1")
                        .text("今天天气怎么样")
                        .sampleRate(48000)
                        .build();

        File file = new File("output.wav");
        // Call the call method with the param object to get the synthesized audio
        ByteBuffer audio = synthesizer.call(param);
        try (FileOutputStream fos = new FileOutputStream(file)) {
            fos.write(audio.array());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) {
        SyncAudioDataToFile();
        System.exit(0);
    }
}
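The synchronous call above saves the service's default encoding to output.wav. The call also accepts a format parameter: the streaming example below passes format='pcm', and its comments note that streaming uses PCM or MP3. Below is a minimal Python variant that requests MP3 output; whether 'mp3' is accepted for your chosen model is an assumption to verify against the API details page.

# coding=utf-8
# A minimal variant of the save-to-file example that requests MP3 output.
# format='mp3' is assumed to be accepted for this model ('pcm' is used by
# the streaming example below); verify supported formats on the API page.
import dashscope
from dashscope.audio.tts import SpeechSynthesizer

dashscope.api_key = 'your-dashscope-api-key'

result = SpeechSynthesizer.call(model='sambert-zhichu-v1',
                                text='今天天气怎么样',
                                sample_rate=48000,
                                format='mp3')
if result.get_audio_data() is not None:
    with open('output.mp3', 'wb') as f:
        f.write(result.get_audio_data())
else:
    print('ERROR: response is %s' % (result.get_response()))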

Playing the synthesized audio through a device

After a successful call, the audio returned in real time is played through the local audio device.

Note
  • Replace your-dashscope-api-key in the example with your own api-key for the code to run.

  • Before running the Python example, install the third-party audio playback package (pyaudio) via pip.

Python

# coding=utf-8
#
# Installation instructions for pyaudio:
# APPLE Mac OS X
#   brew install portaudio 
#   pip install pyaudio
# Debian/Ubuntu
#   sudo apt-get install python-pyaudio python3-pyaudio
#   or
#   pip install pyaudio
# CentOS
#   sudo yum install -y portaudio portaudio-devel && pip install pyaudio
# Microsoft Windows
#   python -m pip install pyaudio

import dashscope
import pyaudio
from dashscope.api_entities.dashscope_response import SpeechSynthesisResponse
from dashscope.audio.tts import ResultCallback, SpeechSynthesizer, SpeechSynthesisResult

dashscope.api_key = 'your-dashscope-api-key'

class Callback(ResultCallback):
    _player = None
    _stream = None

    def on_open(self):
        print('Speech synthesizer is opened.')
        self._player = pyaudio.PyAudio()
        self._stream = self._player.open(
            format=pyaudio.paInt16,
            channels=1, 
            rate=48000,
            output=True)

    def on_complete(self):
        print('Speech synthesizer is completed.')

    def on_error(self, response: SpeechSynthesisResponse):
        print('Speech synthesizer failed, response is %s' % (str(response)))

    def on_close(self):
        print('Speech synthesizer is closed.')
        self._stream.stop_stream()
        self._stream.close()
        self._player.terminate()

    def on_event(self, result: SpeechSynthesisResult):
        if result.get_audio_frame() is not None:
            print('audio frame length: %d bytes' % len(result.get_audio_frame()))
            self._stream.write(result.get_audio_frame())

        if result.get_timestamp() is not None:
            print('timestamp result:', str(result.get_timestamp()))

callback = Callback()
SpeechSynthesizer.call(model='sambert-zhichu-v1',
                       text='今天天气怎么样',
                       sample_rate=48000,
                       format='pcm',
                       callback=callback)
Java

package com.alibaba.dashscope.sample;

import com.alibaba.dashscope.audio.tts.SpeechSynthesizer;
import com.alibaba.dashscope.audio.tts.SpeechSynthesisAudioFormat;
import com.alibaba.dashscope.audio.tts.SpeechSynthesisParam;
import com.alibaba.dashscope.audio.tts.SpeechSynthesisResult;
import com.alibaba.dashscope.common.ResultCallback;

import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

import javax.sound.sampled.*;

public class Main {

    public static void StreamAudioDataToSpeaker() {
        CountDownLatch latch = new CountDownLatch(1);
        SpeechSynthesizer synthesizer = new SpeechSynthesizer();
        SpeechSynthesisParam param =
                SpeechSynthesisParam.builder()
                        .apiKey("your-dashscope-api-key") // 需要替换成您实际的apikey
                        .text("今天天气怎么样")
                        .model("sambert-zhichu-v1")
                        .sampleRate(48000)
                        .format(SpeechSynthesisAudioFormat.PCM) // use PCM or MP3 for streaming synthesis
                        .build();

        // Playback thread
        class PlaybackRunnable implements Runnable {
            // Set the audio format according to your device, the synthesis
            // parameters, and your platform. 48 kHz / 16-bit / mono is used
            // here; choose another sample rate and format based on the
            // model's sample rate and your device's compatibility.
            private AudioFormat af = new AudioFormat(48000, 16, 1, true, false);
            private DataLine.Info info = new DataLine.Info(SourceDataLine.class, af);
            private SourceDataLine targetSource = null;
            private AtomicBoolean runFlag = new AtomicBoolean(true);
            private ConcurrentLinkedQueue<ByteBuffer> queue = new ConcurrentLinkedQueue<>();

            // Prepare the player
            public void prepare() throws LineUnavailableException {
                targetSource = (SourceDataLine) AudioSystem.getLine(info);
                targetSource.open(af, 4096);
                targetSource.start();
            }

            public void put(ByteBuffer buffer) {
                queue.add(buffer);
            }

            // Stop playback
            public void stop() {
                runFlag.set(false);
            }

            @Override
            public void run() {
                if (targetSource == null) {
                    return;
                }

                while (runFlag.get()) {
                    if (queue.isEmpty()) {
                        try {
                            Thread.sleep(100);
                        } catch (InterruptedException e) {
                            // ignore and keep waiting for audio
                        }
                        continue;
                    }

                    ByteBuffer buffer = queue.poll();
                    if (buffer == null) {
                        continue;
                    }

                    byte[] data = buffer.array();
                    targetSource.write(data, 0, data.length);
                }

                // Drain anything left in the queue before exiting
                if (!queue.isEmpty()) {
                    ByteBuffer buffer = null;
                    while ((buffer = queue.poll()) != null) {
                        byte[] data = buffer.array();
                        targetSource.write(data, 0, data.length);
                    }
                }

                // Release the player
                targetSource.drain();
                targetSource.stop();
                targetSource.close();
            }
        }

        // Implement the callback interface with a subclass of ResultCallback<SpeechSynthesisResult>
        class ReactCallback extends ResultCallback<SpeechSynthesisResult> {
            private PlaybackRunnable playbackRunnable = null;

            public ReactCallback(PlaybackRunnable playbackRunnable) {
                this.playbackRunnable = playbackRunnable;
            }

            // Called when the service returns streaming synthesis results
            @Override
            public void onEvent(SpeechSynthesisResult result) {
                // Get the binary data of the streaming result via getAudioFrame
                if (result.getAudioFrame() != null) {
                    // Push the data to the player as it streams in
                    playbackRunnable.put(result.getAudioFrame());
                }
            }

            // Called when the service finishes synthesis
            @Override
            public void onComplete() {
                // Tell the playback thread to finish
                playbackRunnable.stop();
                latch.countDown();
            }

            // Called when an error occurs
            @Override
            public void onError(Exception e) {
                // Tell the playback thread to finish
                System.out.println(e);
                playbackRunnable.stop();
                latch.countDown();
            }
        }

        PlaybackRunnable playbackRunnable = new PlaybackRunnable();
        try {
            playbackRunnable.prepare();
        } catch (LineUnavailableException e) {
            throw new RuntimeException(e);
        }
        Thread playbackThread = new Thread(playbackRunnable);
        // Start the playback thread
        playbackThread.start();
        // A call with a callback does not block the current thread
        synthesizer.call(param, new ReactCallback(playbackRunnable));
        // Wait for synthesis to complete
        try {
            latch.await();
            // Wait for the playback thread to finish playing everything
            playbackThread.join();
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) {
        StreamAudioDataToSpeaker();
        System.exit(0);
    }
}
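The on_event callback in the Python streaming example also prints get_timestamp() when the service returns timing information. If you need timestamps alongside the audio, the sketch below requests them on a synchronous call. The word_timestamp_enabled and phoneme_timestamp_enabled parameter names are assumptions based on the DashScope Python SDK and should be verified against the API details page.

# coding=utf-8
# A sketch of requesting timestamps along with the synthesized audio.
# word_timestamp_enabled / phoneme_timestamp_enabled are assumed parameter
# names; verify them against the API details page before relying on this.
import dashscope
from dashscope.audio.tts import SpeechSynthesizer

dashscope.api_key = 'your-dashscope-api-key'

result = SpeechSynthesizer.call(model='sambert-zhichu-v1',
                                text='今天天气怎么样',
                                sample_rate=48000,
                                word_timestamp_enabled=True,
                                phoneme_timestamp_enabled=True)
# The full response payload, including any timestamps, is available via
# get_response(), the same accessor the save-to-file example uses on error.
print(result.get_response())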

Learn More

  • For detailed documentation on calling the Sambert speech synthesis model service, see the API details page.

  • For the available voices, applicable scenarios, supported languages, and more, see the model list.