C# DirectX sound card operations

雨师88 2010-11-26 10:51:36
What I want to do is take a sampled digital signal (the signal may just be some numbers saved in a txt file), feed it to the sound card, and output it in real time. I'm not sure how to go about this.

I'd appreciate any help from the experts here. Thanks!

Below is the code I wrote. (It does produce a wave file, but no sound can be heard.)

public partial class Form3 : Form
{
#region Member variables
private string strRecSaveFile = string.Empty;//Path of the file to save
private Notify myNotify = null;//Buffer notification object
private FileStream fsWav = null;//File stream being written
private int iNotifyNum = 16;//Number of notification positions
private int iBufferOffset = 0;//Start of this read, i.e. where the previous read ended
private int iSampleSize = 0;//Total number of bytes captured so far
private int iNotifySize = 0;//Size of each notification segment
private int iBufferSize = 0;//Size of the capture buffer
private BinaryWriter mWriter;
private Capture capture = null;//Capture device object
private CaptureBuffer capturebuffer = null;//Capture buffer
private AutoResetEvent notifyevent = null;
private Thread notifythread = null;
private WaveFormat mWavFormat;//PCM format
#endregion

/// <summary>
/// Set up the PCM format.
/// </summary>
/// <returns></returns>
private WaveFormat SetWaveFormat()
{
WaveFormat format = new WaveFormat();
format.FormatTag = WaveFormatTag.Pcm;//Audio type
format.SamplesPerSecond = 22050;//Sample rate in Hz; typical values: 11025, 22050, 44100 Hz
format.BitsPerSample = 16;//Bits per sample
format.Channels = 1;//Number of channels
format.BlockAlign = (short)(format.Channels * (format.BitsPerSample / 8));//Bytes per sample frame
format.AverageBytesPerSecond = format.BlockAlign * format.SamplesPerSecond;
//With this format, one second of audio is 22050 * 2 = 44100 bytes, roughly 43 KB.
return format;
}
/// <summary>
/// Create the wave file and write its header.
/// </summary>
/// <param name="strFileName"></param>
private void CreateWaveFile(string strFileName)
{
fsWav = new FileStream(strFileName, FileMode.CreateNew);
mWriter = new BinaryWriter(fsWav);
char[] ChunkRiff = { 'R', 'I', 'F', 'F' };
char[] ChunkType = { 'W', 'A', 'V', 'E' };
char[] ChunkFmt = { 'f', 'm', 't', ' ' };
char[] ChunkData = { 'd', 'a', 't', 'a' };
short shPad = 1; // File padding
int nFormatChunkLength = 0x10; // Format chunk length.
int nLength = 0; // File length, minus first 8 bytes of RIFF description. This will be filled in later.
short shBytesPerSample = 0; // Bytes per sample.
// Number of bytes per sample frame
if (8 == mWavFormat.BitsPerSample && 1 == mWavFormat.Channels)
shBytesPerSample = 1;
else if ((8 == mWavFormat.BitsPerSample && 2 == mWavFormat.Channels) || (16 == mWavFormat.BitsPerSample && 1 == mWavFormat.Channels))
shBytesPerSample = 2;
else if (16 == mWavFormat.BitsPerSample && 2 == mWavFormat.Channels)
shBytesPerSample = 4;
// RIFF chunk
mWriter.Write(ChunkRiff);
mWriter.Write(nLength);
mWriter.Write(ChunkType);
// Format chunk
mWriter.Write(ChunkFmt);
mWriter.Write(nFormatChunkLength);
mWriter.Write(shPad);
mWriter.Write(mWavFormat.Channels);
mWriter.Write(mWavFormat.SamplesPerSecond);
mWriter.Write(mWavFormat.AverageBytesPerSecond);
mWriter.Write(shBytesPerSample);
mWriter.Write(mWavFormat.BitsPerSample);
// Data chunk
mWriter.Write(ChunkData);
mWriter.Write((int)0); // The sample length will be written in later.
}
/// <summary>
/// Create the capture device object.
/// </summary>
/// <returns></returns>
private bool CreateCaputerDevice()
{
//First, enumerate the available capture devices
CaptureDevicesCollection capturedev = new CaptureDevicesCollection();
Guid devguid;
if (capturedev.Count > 0)
{
devguid = capturedev[0].DriverGuid;
}
else
{
MessageBox.Show("当前没有可用于音频捕捉的设备", "系统提示");
return false;
}
//Use the device GUID to create the capture device object
capture = new Capture(devguid);
return true;
}

private void CreateCaptureBuffer()
{//Creating a capture buffer requires two things: a buffer description (format, size, etc.) and the capture device.

CaptureBufferDescription bufferdescription = new CaptureBufferDescription();
bufferdescription.Format = mWavFormat;//Format of the data to capture
iNotifySize = 1024;//Notification segment size
iBufferSize = iNotifyNum * iNotifySize;
bufferdescription.BufferBytes = iBufferSize;
capturebuffer = new CaptureBuffer(bufferdescription, capture);//Create the capture buffer object
}

/// <summary>
/// Set up the notification positions and the corresponding event.
/// </summary>
private void CreateNotification()
{
BufferPositionNotify[] bpn = new BufferPositionNotify[iNotifyNum];//Number of notification positions in the buffer
//Create the notification event
notifyevent = new AutoResetEvent(false);
notifythread = new Thread(RecoData);
notifythread.Start();
for (int i = 0; i < iNotifyNum; i++)
{
bpn[i].Offset = iNotifySize + i * iNotifySize - 1;//Position of each notification point
bpn[i].EventNotifyHandle = notifyevent.Handle;
}
myNotify = new Notify(capturebuffer);
myNotify.SetNotificationPositions(bpn);

}
//Worker run by the notification thread
private void RecoData()
{
while (true)
{
// Wait for a notification from the buffer
notifyevent.WaitOne(Timeout.Infinite, true);
// Copy out the captured data
RecordCapturedData();
}
}

//This is where the data is actually moved: it is copied from the buffer into the WAV file.
private void RecordCapturedData()
{
byte[] capturedata = null;
int readpos = 0, capturepos = 0, locksize = 0;
capturebuffer.GetCurrentPosition(out capturepos, out readpos);
locksize = readpos - iBufferOffset;//This is the amount we can safely read
if (locksize == 0)
{
return;
}
if (locksize < 0)
{//Because the buffer is used circularly, this can be negative: the read pointer has wrapped back to the first notification point while iBufferOffset is still at the last one
locksize += iBufferSize;
}

capturedata = (byte[])capturebuffer.Read(iBufferOffset, typeof(byte), LockFlag.FromWriteCursor, locksize);
mWriter.Write(capturedata, 0, capturedata.Length);//Write to the file
iSampleSize += capturedata.Length;
iBufferOffset += capturedata.Length;
iBufferOffset %= iBufferSize;//Modulo because the buffer is circular.
}

/// <summary>
/// Stop capturing and finish writing the wave file
/// </summary>
private void stoprec()
{
capturebuffer.Stop();//Stop the capture buffer, i.e. stop recording
if (notifyevent != null)
notifyevent.Set();//Release the notification event
notifythread.Abort();//End the notification thread
RecordCapturedData();//Write the last chunk of buffered data to the file
//Patch up the WAV header
mWriter.Seek(4, SeekOrigin.Begin);
mWriter.Write((int)(iSampleSize + 36)); // RIFF chunk length (file length minus 8)
mWriter.Seek(40, SeekOrigin.Begin);
mWriter.Write(iSampleSize); // Data chunk length
mWriter.Close();
fsWav.Close();
mWriter = null;
fsWav = null;
}

public Form3()
{
InitializeComponent();
}

private void button1_Click(object sender, EventArgs e)
{
//Set the wave format
mWavFormat = SetWaveFormat();
//Create the capture device
CreateCaputerDevice();
//Create the capture buffer
CreateCaptureBuffer();
CreateWaveFile(@"E:\aa.wav");
// Set up notifications: the handler that runs whenever a buffer segment fills
CreateNotification();
capturebuffer.Start(true);
}

private void button2_Click(object sender, EventArgs e)
{
stoprec();
}
}
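The code above only captures audio into a WAV file; the real-time output half of the question would still need a playback path. Below is a minimal sketch of that half, assuming the Managed DirectX assemblies (Microsoft.DirectX.DirectSound) are referenced; PlayPcm and the owner control are illustrative names, not part of the posted code.

// Sketch: push raw PCM bytes into a DirectSound secondary buffer and play them.
// Assumes Managed DirectX is referenced; "PlayPcm" and "owner" are hypothetical names.
using Microsoft.DirectX.DirectSound;
using System.Windows.Forms;

static class PcmPlayback
{
    public static void PlayPcm(byte[] pcmData, WaveFormat format, Control owner)
    {
        Device dev = new Device();                                 // Default playback device
        dev.SetCooperativeLevel(owner, CooperativeLevel.Priority); // Required before creating buffers

        BufferDescription desc = new BufferDescription(format);
        desc.BufferBytes = pcmData.Length;                         // Buffer sized to the data
        desc.GlobalFocus = true;                                   // Keep playing when the window loses focus

        SecondaryBuffer buf = new SecondaryBuffer(desc, dev);
        buf.Write(0, pcmData, LockFlag.EntireBuffer);              // Copy the PCM samples into the buffer
        buf.Play(0, BufferPlayFlags.Default);                      // Start playback once
    }
}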
16 replies
三江831 2013-06-28
There is no reference for AnalysisPlay in the program!!!
yangguangdexiaosa 2012-01-10
Dude, nice work. This program builds with no errors or warnings, but it just won't run. So frustrating.
mooniscrazy 2010-11-29
My apologies to the OP; the thread I saw last time was actually posted by the user above. If the OP follows the approach above, it should work.
mooniscrazy 2010-11-29
Congrats to the poster above on getting the problem solved. Heh.
Mr_Ling_Lee 2010-11-29
Use mine; this one works. Delete the commented-out code, though — that was just something I used to test playing an audio stream.
This code will do exactly what you want; it's basically the same approach as yours.
Mr_Ling_Lee 2010-11-29
/// <summary>
/// Thread that handles the buffer-segment-full notifications
/// </summary>
private void WaitThread()
{
while (true)
{
// Wait for a notification from the buffer
mNotificationEvent.WaitOne(Timeout.Infinite, true);

// Copy out the captured data
RecordCapturedData();
}
}

/// <summary>
/// Create the wave file to save into and write the required header
/// </summary>
private void CreateSoundFile()
{
/**************************************************************************
Here is where the file will be created. A wave file is a RIFF file, which
has chunks of data that describe what the file contains. A wave RIFF file
is put together like this:

The 12 byte RIFF chunk is constructed like this:
  Bytes 0 - 3 : 'R' 'I' 'F' 'F'
  Bytes 4 - 7 : Length of file, minus the first 8 bytes of the RIFF description.
                (4 bytes for "WAVE" + 24 bytes for format chunk length +
                 8 bytes for data chunk description + actual sample data size.)
  Bytes 8 - 11: 'W' 'A' 'V' 'E'

The 24 byte FORMAT chunk is constructed like this:
  Bytes 0 - 3 : 'f' 'm' 't' ' '
  Bytes 4 - 7 : The format chunk length. This is always 16.
  Bytes 8 - 9 : File padding. Always 1.
  Bytes 10-11 : Number of channels. Either 1 for mono, or 2 for stereo.
  Bytes 12-15 : Sample rate.
  Bytes 16-19 : Number of bytes per second.
  Bytes 20-21 : Bytes per sample. 1 for 8 bit mono, 2 for 8 bit stereo or
                16 bit mono, 4 for 16 bit stereo.
  Bytes 22-23 : Number of bits per sample.

The DATA chunk is constructed like this:
  Bytes 0 - 3 : 'd' 'a' 't' 'a'
  Bytes 4 - 7 : Length of data, in bytes.
  Bytes 8 -...: Actual sample data.
***************************************************************************/

mWaveFile = new FileStream(mFileName, FileMode.Create);
mWriter = new BinaryWriter(mWaveFile);

// Set up file with RIFF chunk info.
char[] ChunkRiff = { 'R','I','F','F' };
char[] ChunkType = { 'W', 'A', 'V', 'E' };
char[] ChunkFmt = { 'f', 'm', 't', ' ' };
char[] ChunkData = { 'd', 'a', 't', 'a' };

short shPad = 1; // File padding
int nFormatChunkLength = 0x10; // Format chunk length
int nLength = 0; // File length, minus first 8 bytes of RIFF description. This will be filled in later.
short shBytesPerSample = 0; // Bytes per sample.

// Number of bytes per sample frame
if (mWavFormat.BitsPerSample == 8 && mWavFormat.Channels == 1)
shBytesPerSample = 1;
else if ((mWavFormat.BitsPerSample == 8 && mWavFormat.Channels == 2) || (mWavFormat.BitsPerSample == 16 && mWavFormat.Channels == 1))
shBytesPerSample = 2;
else if (mWavFormat.BitsPerSample == 16 && mWavFormat.Channels == 2)
shBytesPerSample = 4;

//#region In-memory WAV header stream
//_memStream = new MemoryStream(44);

//// "RIFF"
//_memStream.Write(new byte[] { 0x52, 0x49, 0x46, 0x46 }, 0, 4);

//// File length = audio data length + 44-byte header length - 8 bytes (the 4 bytes above plus these 4)
//_memStream.Write(BitConverter.GetBytes((UInt32)(nLength)), 0, 4);

//// "WAVE"
//_memStream.Write(new byte[] { 0x57, 0x41, 0x56, 0x45 }, 0, 4);

//// "fmt "
//_memStream.Write(new byte[] { 0x66, 0x6d, 0x74, 0x20 }, 0, 4);

//// For PCM this is always 16
//_memStream.Write(BitConverter.GetBytes((UInt32)nFormatChunkLength), 0, 4);

//// 1 means uncompressed
//_memStream.Write(BitConverter.GetBytes((UInt16)shPad), 0, 2);

//// Number of channels: 1 = mono, 2 = stereo
//_memStream.Write(BitConverter.GetBytes((UInt16)mWavFormat.Channels), 0, 2);

//// Sample rate, i.e. samples per second
//_memStream.Write(BitConverter.GetBytes((UInt32)mWavFormat.SamplesPerSecond), 0, 4);

//// Byte rate, i.e. bytes per second = SampleRate * ChannelCount * BitsPerSample / 8
//_memStream.Write(BitConverter.GetBytes((UInt32)(mWavFormat.AverageBytesPerSecond)), 0, 4);

//// Block align = ChannelCount * BitsPerSample / 8
//_memStream.Write(BitConverter.GetBytes((UInt16)(shBytesPerSample)), 0, 2);

//// Bits per sample (sample resolution)
//_memStream.Write(BitConverter.GetBytes((UInt16)mWavFormat.BitsPerSample), 0, 2);

//// "data"
//_memStream.Write(new byte[] { 0x64, 0x61, 0x74, 0x61 }, 0, 4);

//// Length of the audio data
//_memStream.Write(BitConverter.GetBytes((UInt32)0), 0, 4);

//#endregion

// RIFF chunk
mWriter.Write(ChunkRiff);
mWriter.Write(nLength);
mWriter.Write(ChunkType);

// Format chunk
mWriter.Write(ChunkFmt);
mWriter.Write(nFormatChunkLength);
mWriter.Write(shPad);
mWriter.Write(mWavFormat.Channels);
mWriter.Write(mWavFormat.SamplesPerSecond);
mWriter.Write(mWavFormat.AverageBytesPerSecond);
mWriter.Write(shBytesPerSample);
mWriter.Write(mWavFormat.BitsPerSample);

// Data chunk
mWriter.Write(ChunkData);
mWriter.Write((int)0); // The sample length will be written in later.
}
}
}
Mr_Ling_Lee 2010-11-29
using System;
using System.IO;
using System.Threading;
using System.Windows.Forms;
using System.Media;

namespace MTIM
{
using Microsoft.DirectX;
using Microsoft.DirectX.DirectSound;

public class SoundRecord
{
public const int cNotifyNum = 16; // Number of buffer segments in the queue

private int mNextCaptureOffset = 0; // Start of the next read from the capture buffer
private int mSampleCount = 0; // Number of bytes of sample data recorded

private int mNotifySize = 0; // Size of each notification segment
private int mBufferSize = 0; // Size of the whole buffer queue

private string mFileName = string.Empty; // Output file name
private FileStream mWaveFile = null; // Output file stream
private BinaryWriter mWriter = null; // Writer for the file
private MemoryStream _memStream = null;

private Capture mCapDev = null; // Audio capture device
private CaptureBuffer mRecBuffer = null; // Capture buffer object
private Notify mNotify = null; // Notification object

private WaveFormat mWavFormat; // Recording format
private Thread mNotifyThread = null; // Thread that handles buffer notifications
private AutoResetEvent mNotificationEvent = null; // Notification event

AnalysisPlay ap = null;

public SoundRecord()
{
ap = new AnalysisPlay();

//Initialize the audio capture device
InitCaptureDevice();

//Set up the recording format
mWavFormat = CreateWaveFormat();
}

/// <summary>
/// Set the file (including the path) that the recording will be saved to
/// </summary>
/// <param name="filename"></param>
public void SetFileName(string filename)
{
mFileName = filename;
}

/// <summary>
/// Start recording
/// </summary>
public void RecStart()
{
// Create the recording file
CreateSoundFile();

// Create the capture buffer
CreateCaptureBuffe();

// Set up notifications: the handler that runs whenever a buffer segment fills
InitNotifications();

mRecBuffer.Start(true);
}

/// <summary>
/// Stop recording
/// </summary>
public void RecStop()
{
if (mNotificationEvent != null)
mNotificationEvent.Set();

//Stop capturing
mRecBuffer.Stop();

// Write the last chunk of buffered data
RecordCapturedData();

// Go back and fill in the length fields
mWriter.Seek(4, SeekOrigin.Begin);
mWriter.Write((int)(mSampleCount + 36)); // RIFF chunk length (file length minus 8)
mWriter.Seek(40, SeekOrigin.Begin);
mWriter.Write(mSampleCount); // Data chunk length

mWriter.Close();
mWaveFile.Close();
mWriter = null;
mWaveFile = null;
}

/// <summary>
/// Initialize the capture device; the primary (default) recording device is used here
/// </summary>
/// <returns></returns>
private bool InitCaptureDevice()
{
// Get the default audio capture device
CaptureDevicesCollection devices = new CaptureDevicesCollection(); // Enumerate the audio capture devices
Guid deviceGuid = Guid.Empty;

if (devices.Count > 0)
deviceGuid = devices[0].DriverGuid;
else
{
MessageBox.Show("系统中没有音频捕捉设备!");
return false;
}

// Create the Capture object from the chosen device

try
{
mCapDev = new Capture(deviceGuid);
}
catch(DirectXException e)
{
MessageBox.Show(e.ToString());
return false;
}

return true;
}

/// <summary>
/// Create the recording format; 16-bit, 44.1 kHz, stereo PCM is used here
/// </summary>
/// <returns></returns>
private WaveFormat CreateWaveFormat()
{
WaveFormat format = new WaveFormat();

format.FormatTag = WaveFormatTag.Pcm; // PCM
format.SamplesPerSecond = 44100; // 44.1 kHz
format.BitsPerSample = 16; // 16 bit
format.Channels = 2; // Stereo
format.BlockAlign = (short)(format.Channels * (format.BitsPerSample / 8));
format.AverageBytesPerSecond = ((format.BitsPerSample) / 8) * format.Channels * format.SamplesPerSecond;

return format;
}

/// <summary>
/// Create the buffer used for recording
/// </summary>
private void CreateCaptureBuffe()
{
// Buffer description object
CaptureBufferDescription bufferdescription = new CaptureBufferDescription();

if (mNotify != null)
{
mNotify.Dispose();

mNotify = null;
}

if (mRecBuffer != null)
{
mRecBuffer.Dispose();

mRecBuffer = null;
}

// Set the notification size: 1/8 second of audio, but at least 1024 bytes, rounded down to a whole number of sample frames
mNotifySize = (1024 > mWavFormat.AverageBytesPerSecond / 8) ? 1024 : (mWavFormat.AverageBytesPerSecond / 8);
mNotifySize -= mNotifySize % mWavFormat.BlockAlign;

// Total buffer size
mBufferSize = mNotifySize * cNotifyNum;

// Fill in the buffer description
bufferdescription.BufferBytes = mBufferSize;
bufferdescription.Format = mWavFormat; // Recording format

// Create the capture buffer
mRecBuffer = new CaptureBuffer(bufferdescription,mCapDev);

mNextCaptureOffset = 0;
}

/// <summary>
/// Initialize the notification events: the buffer is split into 16 segments and a notification point is set at the end of each one.
/// </summary>
/// <returns>Whether it succeeded</returns>
private bool InitNotifications()
{
if (mRecBuffer == null)
{
MessageBox.Show("未创建录音缓冲区!");
return false;
}

// Create a notification event; it is signaled whenever a buffer segment is full
mNotificationEvent = new AutoResetEvent(false);

// Create a thread to handle the buffer notifications
if (mNotifyThread == null)
{
mNotifyThread = new Thread(new ThreadStart(WaitThread));
mNotifyThread.Start();
}

// Set the notification positions
BufferPositionNotify[] PositionNotify = new BufferPositionNotify[cNotifyNum + 1];
for (int i = 0; i < cNotifyNum; i++)
{
PositionNotify[i].Offset = (mNotifySize * i) + mNotifySize - 1;
PositionNotify[i].EventNotifyHandle = mNotificationEvent.Handle;
}

mNotify = new Notify(mRecBuffer);
mNotify.SetNotificationPositions(PositionNotify, cNotifyNum);

return true;
}

/// <summary>
/// Write the captured data to the WAV file
/// </summary>
private void RecordCapturedData()
{
byte[] CaptureData = null;
int ReadPos, CapturePos, LockSize;

mRecBuffer.GetCurrentPosition(out CapturePos, out ReadPos);
LockSize = ReadPos - mNextCaptureOffset;
if (LockSize < 0)
LockSize += mBufferSize;

LockSize -= (LockSize % mNotifySize);

if (LockSize == 0)
return;

// Read the data out of the buffer
CaptureData = (byte[])mRecBuffer.Read(mNextCaptureOffset, typeof(byte), LockFlag.None, LockSize);


// Write to the WAV file
mWriter.Write(CaptureData, 0, CaptureData.Length);

_memStream.Write(CaptureData, 0, CaptureData.Length);


//_memStream.Seek(4, SeekOrigin.Begin);
//_memStream.Write(new byte[mSampleCount + 36], 0, mSampleCount + 36);
//_memStream.Seek(40, SeekOrigin.Begin);
//_memStream.Write(new byte[mSampleCount], 0, mSampleCount);

//_memStream.Seek(4, SeekOrigin.Begin);
//_memStream.Write((int)(mSampleCount + 36)); // 写文件长度
//_memStream.Seek(40, SeekOrigin.Begin);
//_memStream.Write(mSampleCount); // 写数据长度

//// Play the audio stream
//SoundPlayer sp = new SoundPlayer(_memStream as Stream);
//sp.Play();




// Update the total length of data recorded so far.
mSampleCount += CaptureData.Length;

// Advance the capture start offset; the notifications only mark positions in the buffer, they do not track where the previous read ended.
mNextCaptureOffset += CaptureData.Length;
mNextCaptureOffset %= mBufferSize; // Circular buffer
}

Teng_s2000 2010-11-26
[Quote=Reply #7 by shibinysy:]
The code is too long and the formatting hasn't been cleaned up, so it's quite hard to read. That's probably why nobody has helped you.
[/Quote]
Same here, it made my head spin.

button1 and button2 just appear out of nowhere; how is anyone supposed to know what they're for without even a comment?
shibinysy 2010-11-26
The code is too long and the formatting hasn't been cleaned up, so it's quite hard to read. That's probably why nobody has helped you.
雨师88 2010-11-26
[Quote=Reply #3 by mooniscrazy:]

Heh, I think I've seen this thread before and already answered it. It was never marked as resolved. I'm just here to earn points.
[/Quote]
Answer it once more then, heh.
雨师88 2010-11-26
[Quote=Reply #2 by happyrain2010:]

Couldn't you just play it with MediaPlayer?
[/Quote]
No. The goal is to play the digital data through the sound card, but the digital data first has to be converted into wave format.
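As a rough illustration of that conversion step, the sketch below wraps raw 16-bit samples (for example, numbers read from a txt file) in a minimal WAV header in memory and plays the result with System.Media.SoundPlayer. It is only a sketch of the idea; the file samples.txt, the 22050 Hz mono format, and the helper names are illustrative assumptions, not part of the posted code.

// Sketch: turn a list of 16-bit samples into an in-memory WAV stream and play it.
// Assumptions: the txt file holds one integer sample per line; 22050 Hz, mono, 16 bit.
using System;
using System.IO;
using System.Linq;
using System.Media;

static class TxtToWavPlayer
{
    public static void PlayFromTxt(string path)
    {
        short[] samples = File.ReadAllLines(path)
                              .Where(l => l.Trim().Length > 0)
                              .Select(l => short.Parse(l.Trim()))
                              .ToArray();

        int sampleRate = 22050, channels = 1, bitsPerSample = 16;
        int byteRate = sampleRate * channels * bitsPerSample / 8;
        int dataLength = samples.Length * 2;

        MemoryStream ms = new MemoryStream();
        BinaryWriter w = new BinaryWriter(ms);

        // RIFF chunk
        w.Write(new[] { 'R', 'I', 'F', 'F' });
        w.Write(36 + dataLength);                       // File length minus the first 8 bytes
        w.Write(new[] { 'W', 'A', 'V', 'E' });
        // Format chunk
        w.Write(new[] { 'f', 'm', 't', ' ' });
        w.Write(16);                                    // Format chunk length, always 16 for PCM
        w.Write((short)1);                              // 1 = uncompressed PCM
        w.Write((short)channels);
        w.Write(sampleRate);
        w.Write(byteRate);
        w.Write((short)(channels * bitsPerSample / 8)); // Block align
        w.Write((short)bitsPerSample);
        // Data chunk
        w.Write(new[] { 'd', 'a', 't', 'a' });
        w.Write(dataLength);
        foreach (short s in samples) w.Write(s);

        ms.Position = 0;                                // SoundPlayer reads from the current position
        new SoundPlayer(ms).PlaySync();                 // Blocks until playback finishes
    }
}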
mooniscrazy 2010-11-26
If no sound comes out, then either the way you're playing it is wrong, the sound file format is wrong, or the device isn't ready. I can only offer a train of thought; actually digging in would take a lot of time. I'm busy earning points. Ugh, work is killing me.
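One way to rule out the "file format is wrong" possibility is to read the header of the generated file back and compare the fields with the format that was set. A minimal sketch, assuming the standard 44-byte PCM header written by the code above; CheckWavHeader is an illustrative helper name:

// Sketch: dump the fields of a 44-byte PCM WAV header for a quick sanity check.
using System;
using System.IO;
using System.Text;

static class WavInspector
{
    public static void CheckWavHeader(string path)
    {
        using (BinaryReader r = new BinaryReader(File.OpenRead(path)))
        {
            string riff = Encoding.ASCII.GetString(r.ReadBytes(4)); // Should be "RIFF"
            int riffLen = r.ReadInt32();                            // File length - 8
            string wave = Encoding.ASCII.GetString(r.ReadBytes(4)); // Should be "WAVE"
            string fmt  = Encoding.ASCII.GetString(r.ReadBytes(4)); // Should be "fmt "
            int fmtLen  = r.ReadInt32();                            // Should be 16 for PCM
            short tag   = r.ReadInt16();                            // Should be 1 (PCM)
            short chans = r.ReadInt16();
            int rate    = r.ReadInt32();
            int byteRate = r.ReadInt32();
            short align = r.ReadInt16();
            short bits  = r.ReadInt16();
            string data = Encoding.ASCII.GetString(r.ReadBytes(4)); // Should be "data"
            int dataLen = r.ReadInt32();                            // Should be > 0 and match the recorded data

            Console.WriteLine("{0} len={1} {2} {3}({4}) tag={5} ch={6} {7}Hz {8}B/s align={9} {10}bit {11} dataLen={12}",
                riff, riffLen, wave, fmt, fmtLen, tag, chans, rate, byteRate, align, bits, data, dataLen);
        }
    }
}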
mooniscrazy 2010-11-26
Heh, I think I've seen this thread before and already answered it. It was never marked as resolved. I'm just here to earn points.
happyrain2010 2010-11-26
Couldn't you just play it with MediaPlayer?
雨师88 2010-11-26
How come nobody's helping? (>_<)
雨师88 2010-11-26
Forgot to mention: button1 starts recording, button2 stops recording.
